diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index 9602d5405..0954585f2 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:b8c131c558606d3cea6e18f8e87befbd448c1482319b0db3c5d5388fa6ea72e3 + digest: sha256:df50e8d462f86d6bcb42f27ecad55bb12c404f1c65de9c6fe4c4d25120080bd6 diff --git a/.kokoro/samples/python3.9/common.cfg b/.kokoro/samples/python3.9/common.cfg new file mode 100644 index 000000000..b4dd47038 --- /dev/null +++ b/.kokoro/samples/python3.9/common.cfg @@ -0,0 +1,40 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.9" +} + +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py39" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-storage/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-storage/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.9/continuous.cfg b/.kokoro/samples/python3.9/continuous.cfg new file mode 100644 index 000000000..a1c8d9759 --- /dev/null +++ b/.kokoro/samples/python3.9/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.9/periodic-head.cfg b/.kokoro/samples/python3.9/periodic-head.cfg new file mode 100644 index 000000000..f9cfcd33e --- /dev/null +++ b/.kokoro/samples/python3.9/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-pubsub/.kokoro/test-samples-against-head.sh" +} diff --git a/.kokoro/samples/python3.9/periodic.cfg b/.kokoro/samples/python3.9/periodic.cfg new file mode 100644 index 000000000..50fec9649 --- /dev/null +++ b/.kokoro/samples/python3.9/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.9/presubmit.cfg b/.kokoro/samples/python3.9/presubmit.cfg new file mode 100644 index 000000000..a1c8d9759 --- /dev/null +++ b/.kokoro/samples/python3.9/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 066b75505..381c32f2b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,19 @@ [1]: https://pypi.org/project/google-cloud-storage/#history +## 
[1.40.0](https://www.github.com/googleapis/python-storage/compare/v1.39.0...v1.40.0) (2021-06-30) + + +### Features + +* add preconditions and retry configuration to blob.create_resumable_upload_session ([#484](https://www.github.com/googleapis/python-storage/issues/484)) ([0ae35ee](https://www.github.com/googleapis/python-storage/commit/0ae35eef0fe82fe60bc095c4b183102bb1dabeeb)) +* add public access prevention to bucket IAM configuration ([#304](https://www.github.com/googleapis/python-storage/issues/304)) ([e3e57a9](https://www.github.com/googleapis/python-storage/commit/e3e57a9c779d6b87852063787f19e27c76b1bb14)) + + +### Bug Fixes + +* replace default retry for upload operations ([#480](https://www.github.com/googleapis/python-storage/issues/480)) ([c027ccf](https://www.github.com/googleapis/python-storage/commit/c027ccf4279fb05e041754294f10744b7d81beea)) + ## [1.39.0](https://www.github.com/googleapis/python-storage/compare/v1.38.0...v1.39.0) (2021-06-21) diff --git a/google/cloud/storage/blob.py b/google/cloud/storage/blob.py index 60178aa2e..e0745daa8 100644 --- a/google/cloud/storage/blob.py +++ b/google/cloud/storage/blob.py @@ -82,7 +82,6 @@ from google.cloud.storage.retry import DEFAULT_RETRY from google.cloud.storage.retry import DEFAULT_RETRY_IF_ETAG_IN_JSON from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED -from google.cloud.storage.retry import DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED from google.cloud.storage.fileio import BlobReader from google.cloud.storage.fileio import BlobWriter @@ -1703,10 +1702,10 @@ def _do_multipart_upload( :type num_retries: int :param num_retries: Number of upload retries. By default, only uploads with - if_metageneration_match set will be retried, as uploads without the + if_generation_match set will be retried, as uploads without the argument are not guaranteed to be idempotent. Setting num_retries will override this default behavior and guarantee retries even when - if_metageneration_match is not set. (Deprecated: This argument + if_generation_match is not set. (Deprecated: This argument will be removed in a future release.) :type predefined_acl: str @@ -1750,7 +1749,7 @@ def _do_multipart_upload( This private method does not accept ConditionalRetryPolicy values because the information necessary to evaluate the policy is instead - evaluated in client.download_blob_to_file(). + evaluated in blob._do_upload(). See the retry.py source code and docstrings in this package (google.cloud.storage.retry) for information on retry types and how @@ -1877,10 +1876,10 @@ def _initiate_resumable_upload( :type num_retries: int :param num_retries: Number of upload retries. By default, only uploads with - if_metageneration_match set will be retried, as uploads without the + if_generation_match set will be retried, as uploads without the argument are not guaranteed to be idempotent. Setting num_retries will override this default behavior and guarantee retries even when - if_metageneration_match is not set. (Deprecated: This argument + if_generation_match is not set. (Deprecated: This argument will be removed in a future release.) 
:type extra_headers: dict @@ -1936,7 +1935,7 @@ def _initiate_resumable_upload( This private method does not accept ConditionalRetryPolicy values because the information necessary to evaluate the policy is instead - evaluated in client.download_blob_to_file(). + evaluated in blob._do_upload(). See the retry.py source code and docstrings in this package (google.cloud.storage.retry) for information on retry types and how @@ -2070,10 +2069,10 @@ def _do_resumable_upload( :type num_retries: int :param num_retries: Number of upload retries. By default, only uploads with - if_metageneration_match set will be retried, as uploads without the + if_generation_match set will be retried, as uploads without the argument are not guaranteed to be idempotent. Setting num_retries will override this default behavior and guarantee retries even when - if_metageneration_match is not set. (Deprecated: This argument + if_generation_match is not set. (Deprecated: This argument will be removed in a future release.) :type predefined_acl: str @@ -2119,7 +2118,7 @@ def _do_resumable_upload( This private method does not accept ConditionalRetryPolicy values because the information necessary to evaluate the policy is instead - evaluated in client.download_blob_to_file(). + evaluated in blob._do_upload(). See the retry.py source code and docstrings in this package (google.cloud.storage.retry) for information on retry types and how @@ -2204,10 +2203,10 @@ def _do_upload( :type num_retries: int :param num_retries: Number of upload retries. By default, only uploads with - if_metageneration_match set will be retried, as uploads without the + if_generation_match set will be retried, as uploads without the argument are not guaranteed to be idempotent. Setting num_retries will override this default behavior and guarantee retries even when - if_metageneration_match is not set. (Deprecated: This argument + if_generation_match is not set. (Deprecated: This argument will be removed in a future release.) :type predefined_acl: str @@ -2258,7 +2257,7 @@ def _do_upload( This class exists to provide safe defaults for RPC calls that are not technically safe to retry normally (due to potential data duplication or other side-effects) but become safe to retry if a - condition such as if_metageneration_match is set. + condition such as if_generation_match is set. See the retry.py source code and docstrings in this package (google.cloud.storage.retry) for information on retry types and how @@ -2337,7 +2336,7 @@ def upload_from_file( if_metageneration_not_match=None, timeout=_DEFAULT_TIMEOUT, checksum=None, - retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED, + retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, ): """Upload the contents of this blob from a file-like object. @@ -2397,10 +2396,10 @@ def upload_from_file( :type num_retries: int :param num_retries: Number of upload retries. By default, only uploads with - if_metageneration_match set will be retried, as uploads without the + if_generation_match set will be retried, as uploads without the argument are not guaranteed to be idempotent. Setting num_retries will override this default behavior and guarantee retries even when - if_metageneration_match is not set. (Deprecated: This argument + if_generation_match is not set. (Deprecated: This argument will be removed in a future release.) 
:type client: :class:`~google.cloud.storage.client.Client` @@ -2456,7 +2455,7 @@ def upload_from_file( This class exists to provide safe defaults for RPC calls that are not technically safe to retry normally (due to potential data duplication or other side-effects) but become safe to retry if a - condition such as if_metageneration_match is set. + condition such as if_generation_match is set. See the retry.py source code and docstrings in this package (google.cloud.storage.retry) for information on retry types and how @@ -2479,7 +2478,7 @@ def upload_from_file( # num_retries and retry are mutually exclusive. If num_retries is # set and retry is exactly the default, then nullify retry for # backwards compatibility. - if retry is DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED: + if retry is DEFAULT_RETRY_IF_GENERATION_SPECIFIED: retry = None _maybe_rewind(file_obj, rewind=rewind) @@ -2518,7 +2517,7 @@ def upload_from_filename( if_metageneration_not_match=None, timeout=_DEFAULT_TIMEOUT, checksum=None, - retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED, + retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, ): """Upload this blob's contents from the content of a named file. @@ -2558,10 +2557,10 @@ def upload_from_filename( :type num_retries: int :param num_retries: Number of upload retries. By default, only uploads with - if_metageneration_match set will be retried, as uploads without the + if_generation_match set will be retried, as uploads without the argument are not guaranteed to be idempotent. Setting num_retries will override this default behavior and guarantee retries even when - if_metageneration_match is not set. (Deprecated: This argument + if_generation_match is not set. (Deprecated: This argument will be removed in a future release.) :type predefined_acl: str @@ -2612,7 +2611,7 @@ def upload_from_filename( This class exists to provide safe defaults for RPC calls that are not technically safe to retry normally (due to potential data duplication or other side-effects) but become safe to retry if a - condition such as if_metageneration_match is set. + condition such as if_generation_match is set. See the retry.py source code and docstrings in this package (google.cloud.storage.retry) for information on retry types and how @@ -2656,7 +2655,7 @@ def upload_from_string( if_metageneration_not_match=None, timeout=_DEFAULT_TIMEOUT, checksum=None, - retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED, + retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, ): """Upload contents of this blob from the provided string. @@ -2687,10 +2686,10 @@ def upload_from_string( :type num_retries: int :param num_retries: Number of upload retries. By default, only uploads with - if_metageneration_match set will be retried, as uploads without the + if_generation_match set will be retried, as uploads without the argument are not guaranteed to be idempotent. Setting num_retries will override this default behavior and guarantee retries even when - if_metageneration_match is not set. (Deprecated: This argument + if_generation_match is not set. (Deprecated: This argument will be removed in a future release.) :type client: :class:`~google.cloud.storage.client.Client` @@ -2746,7 +2745,7 @@ def upload_from_string( This class exists to provide safe defaults for RPC calls that are not technically safe to retry normally (due to potential data duplication or other side-effects) but become safe to retry if a - condition such as if_metageneration_match is set. + condition such as if_generation_match is set. 
See the retry.py source code and docstrings in this package (google.cloud.storage.retry) for information on retry types and how @@ -2783,6 +2782,11 @@ def create_resumable_upload_session( client=None, timeout=_DEFAULT_TIMEOUT, checksum=None, + if_generation_match=None, + if_generation_not_match=None, + if_metageneration_match=None, + if_metageneration_not_match=None, + retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, ): """Create a resumable upload session. @@ -2858,6 +2862,41 @@ def create_resumable_upload_session( delete the uploaded object automatically. Supported values are "md5", "crc32c" and None. The default is None. + :type if_generation_match: long + :param if_generation_match: + (Optional) See :ref:`using-if-generation-match` + + :type if_generation_not_match: long + :param if_generation_not_match: + (Optional) See :ref:`using-if-generation-not-match` + + :type if_metageneration_match: long + :param if_metageneration_match: + (Optional) See :ref:`using-if-metageneration-match` + + :type if_metageneration_not_match: long + :param if_metageneration_not_match: + (Optional) See :ref:`using-if-metageneration-not-match` + + :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy + :param retry: (Optional) How to retry the RPC. A None value will disable + retries. A google.api_core.retry.Retry value will enable retries, + and the object will define retriable response codes and errors and + configure backoff and timeout options. + A google.cloud.storage.retry.ConditionalRetryPolicy value wraps a + Retry object and activates it only if certain conditions are met. + This class exists to provide safe defaults for RPC calls that are + not technically safe to retry normally (due to potential data + duplication or other side-effects) but become safe to retry if a + condition such as if_generation_match is set. + See the retry.py source code and docstrings in this package + (google.cloud.storage.retry) for information on retry types and how + to configure them. + Media operations (downloads and uploads) do not support non-default + predicates in a Retry object. The default will always be used. Other + configuration changes for Retry objects such as delays and deadlines + are respected. + :rtype: str :returns: The resumable upload session URL. The upload can be completed by making an HTTP PUT request with the @@ -2866,6 +2905,19 @@ def create_resumable_upload_session( :raises: :class:`google.cloud.exceptions.GoogleCloudError` if the session creation response returns an error status. """ + + # Handle ConditionalRetryPolicy. + if isinstance(retry, ConditionalRetryPolicy): + # Conditional retries are designed for non-media calls, which change + # arguments into query_params dictionaries. Media operations work + # differently, so here we make a "fake" query_params to feed to the + # ConditionalRetryPolicy. 
+ query_params = { + "ifGenerationMatch": if_generation_match, + "ifMetagenerationMatch": if_metageneration_match, + } + retry = retry.get_retry_policy_if_conditions_met(query_params=query_params) + extra_headers = {} if origin is not None: # This header is specifically for client-side uploads, it @@ -2884,10 +2936,15 @@ def create_resumable_upload_session( size, None, predefined_acl=None, + if_generation_match=if_generation_match, + if_generation_not_match=if_generation_not_match, + if_metageneration_match=if_metageneration_match, + if_metageneration_not_match=if_metageneration_not_match, extra_headers=extra_headers, chunk_size=self._CHUNK_SIZE_MULTIPLE, timeout=timeout, checksum=checksum, + retry=retry, ) return upload.resumable_url diff --git a/google/cloud/storage/bucket.py b/google/cloud/storage/bucket.py index 0dc4ef76d..48531fdf3 100644 --- a/google/cloud/storage/bucket.py +++ b/google/cloud/storage/bucket.py @@ -50,6 +50,7 @@ from google.cloud.storage.constants import MULTI_REGIONAL_LEGACY_STORAGE_CLASS from google.cloud.storage.constants import MULTI_REGION_LOCATION_TYPE from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS +from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_UNSPECIFIED from google.cloud.storage.constants import REGIONAL_LEGACY_STORAGE_CLASS from google.cloud.storage.constants import REGION_LOCATION_TYPE from google.cloud.storage.constants import STANDARD_STORAGE_CLASS @@ -383,6 +384,12 @@ class IAMConfiguration(dict): :type bucket: :class:`Bucket` :params bucket: Bucket for which this instance is the policy. + :type public_access_prevention: str + :params public_access_prevention: + (Optional) Whether the public access prevention policy is 'unspecified' (default) or 'enforced' + See: https://cloud.google.com/storage/docs/public-access-prevention + See: https://cloud.google.com/storage/docs/public-access-prevention + :type uniform_bucket_level_access_enabled: bool :params bucket_policy_only_enabled: (Optional) Whether the IAM-only policy is enabled for the bucket. @@ -404,6 +411,7 @@ class IAMConfiguration(dict): def __init__( self, bucket, + public_access_prevention=_default, uniform_bucket_level_access_enabled=_default, uniform_bucket_level_access_locked_time=_default, bucket_policy_only_enabled=_default, @@ -428,8 +436,14 @@ def __init__( if uniform_bucket_level_access_enabled is _default: uniform_bucket_level_access_enabled = False + if public_access_prevention is _default: + public_access_prevention = PUBLIC_ACCESS_PREVENTION_UNSPECIFIED + data = { - "uniformBucketLevelAccess": {"enabled": uniform_bucket_level_access_enabled} + "uniformBucketLevelAccess": { + "enabled": uniform_bucket_level_access_enabled + }, + "publicAccessPrevention": public_access_prevention, } if uniform_bucket_level_access_locked_time is not _default: data["uniformBucketLevelAccess"]["lockedTime"] = _datetime_to_rfc3339( @@ -464,6 +478,21 @@ def bucket(self): """ return self._bucket + @property + def public_access_prevention(self): + """Setting for public access prevention policy. Options are 'unspecified' (default) or 'enforced'. + More information can be found at https://cloud.google.com/storage/docs/public-access-prevention + + :rtype: string + :returns: the public access prevention status, either 'enforced' or 'unspecified'. 
+ """ + return self["publicAccessPrevention"] + + @public_access_prevention.setter + def public_access_prevention(self, value): + self["publicAccessPrevention"] = value + self.bucket._patch_property("iamConfiguration", self) + @property def uniform_bucket_level_access_enabled(self): """If set, access checks only use bucket-level IAM policies or above. diff --git a/google/cloud/storage/constants.py b/google/cloud/storage/constants.py index 621508669..d0c13f633 100644 --- a/google/cloud/storage/constants.py +++ b/google/cloud/storage/constants.py @@ -96,3 +96,16 @@ _DEFAULT_TIMEOUT = 60 # in seconds """The default request timeout in seconds if a timeout is not explicitly given. """ + +# Public Access Prevention +PUBLIC_ACCESS_PREVENTION_ENFORCED = "enforced" +"""Enforced public access prevention value. + +See: https://cloud.google.com/storage/docs/public-access-prevention +""" + +PUBLIC_ACCESS_PREVENTION_UNSPECIFIED = "unspecified" +"""Unspecified public access prevention value. + +See: https://cloud.google.com/storage/docs/public-access-prevention +""" diff --git a/google/cloud/storage/fileio.py b/google/cloud/storage/fileio.py index 6ac8e057f..e9b4c23cf 100644 --- a/google/cloud/storage/fileio.py +++ b/google/cloud/storage/fileio.py @@ -18,7 +18,7 @@ from google.api_core.exceptions import RequestRangeNotSatisfiable from google.cloud.storage._helpers import _NUM_RETRIES_MESSAGE from google.cloud.storage.retry import DEFAULT_RETRY -from google.cloud.storage.retry import DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED +from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED from google.cloud.storage.retry import ConditionalRetryPolicy @@ -278,7 +278,7 @@ def __init__( blob, chunk_size=None, text_mode=False, - retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED, + retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, **upload_kwargs ): for kwarg in upload_kwargs: @@ -346,7 +346,7 @@ def _initiate_upload(self): # num_retries and retry are mutually exclusive. If num_retries is # set and retry is exactly the default, then nullify retry for # backwards compatibility. - if retry is DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED: + if retry is DEFAULT_RETRY_IF_GENERATION_SPECIFIED: retry = None # Handle ConditionalRetryPolicy. diff --git a/google/cloud/storage/version.py b/google/cloud/storage/version.py index 05c5a222e..88c21afb9 100644 --- a/google/cloud/storage/version.py +++ b/google/cloud/storage/version.py @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = "1.39.0" +__version__ = "1.40.0" diff --git a/tests/system/__init__.py b/tests/system/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/system/_helpers.py b/tests/system/_helpers.py new file mode 100644 index 000000000..033450da3 --- /dev/null +++ b/tests/system/_helpers.py @@ -0,0 +1,86 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import six + +from google.api_core import exceptions + +from test_utils.retry import RetryErrors +from test_utils.retry import RetryInstanceState +from test_utils.system import unique_resource_id + +retry_429 = RetryErrors(exceptions.TooManyRequests) +retry_429_harder = RetryErrors(exceptions.TooManyRequests, max_tries=10) +retry_429_503 = RetryErrors( + [exceptions.TooManyRequests, exceptions.ServiceUnavailable], max_tries=10 +) + +# Work around https://github.com/googleapis/python-test-utils/issues/36 +if six.PY3: + retry_failures = RetryErrors(AssertionError) +else: + + def retry_failures(decorated): # no-op + wrapped = RetryErrors(AssertionError)(decorated) + wrapped.__wrapped__ = decorated + return wrapped + + +user_project = os.environ.get("GOOGLE_CLOUD_TESTS_USER_PROJECT") +testing_mtls = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") == "true" +signing_blob_content = b"This time for sure, Rocky!" + + +def _bad_copy(bad_request): + """Predicate: pass only exceptions for a failed copyTo.""" + err_msg = bad_request.message + return err_msg.startswith("No file found in request. (POST") and "copyTo" in err_msg + + +def _no_event_based_hold(blob): + return not blob.event_based_hold + + +retry_bad_copy = RetryErrors(exceptions.BadRequest, error_predicate=_bad_copy) +retry_no_event_based_hold = RetryInstanceState(_no_event_based_hold) + + +def unique_name(prefix): + return prefix + unique_resource_id("-") + + +def empty_bucket(bucket): + for blob in list(bucket.list_blobs(versions=True)): + try: + blob.delete() + except exceptions.NotFound: + pass + + +def delete_blob(blob): + errors = (exceptions.Conflict, exceptions.TooManyRequests) + retry = RetryErrors(errors) + try: + retry(blob.delete)() + except exceptions.NotFound: # race + pass + + +def delete_bucket(bucket): + errors = (exceptions.Conflict, exceptions.TooManyRequests) + retry = RetryErrors(errors, max_tries=15) + retry(empty_bucket)(bucket) + retry(bucket.delete)(force=True) diff --git a/tests/system/conftest.py b/tests/system/conftest.py new file mode 100644 index 000000000..02a13d140 --- /dev/null +++ b/tests/system/conftest.py @@ -0,0 +1,192 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib +import os + +import pytest + +from google.cloud.storage._helpers import _base64_md5hash +from . 
import _helpers + + +dirname = os.path.realpath(os.path.dirname(__file__)) +data_dirname = os.path.abspath(os.path.join(dirname, "..", "data")) +_filenames = [ + ("logo", "CloudPlatform_128px_Retina.png"), + ("big", "five-point-one-mb-file.zip"), + ("simple", "simple.txt"), +] +_file_data = { + key: {"path": os.path.join(data_dirname, file_name)} + for key, file_name in _filenames +} + +_listable_filenames = ["CloudLogo1", "CloudLogo2", "CloudLogo3", "CloudLogo4"] +_hierarchy_filenames = [ + "file01.txt", + "parent/", + "parent/file11.txt", + "parent/child/file21.txt", + "parent/child/file22.txt", + "parent/child/grand/file31.txt", + "parent/child/other/file32.txt", +] + + +@pytest.fixture(scope="session") +def storage_client(): + from google.cloud.storage import Client + + client = Client() + with contextlib.closing(client): + yield client + + +@pytest.fixture(scope="session") +def user_project(): + if _helpers.user_project is None: + pytest.skip("USER_PROJECT not set in environment.") + return _helpers.user_project + + +@pytest.fixture(scope="session") +def no_mtls(): + if _helpers.testing_mtls: + pytest.skip("Test incompatible with mTLS.") + + +@pytest.fixture(scope="session") +def service_account(storage_client): + from google.oauth2.service_account import Credentials + + if not isinstance(storage_client._credentials, Credentials): + pytest.skip("These tests require a service account credential") + return storage_client._credentials + + +@pytest.fixture(scope="session") +def shared_bucket_name(): + return _helpers.unique_name("gcp-systest") + + +@pytest.fixture(scope="session") +def shared_bucket(storage_client, shared_bucket_name): + bucket = storage_client.bucket(shared_bucket_name) + bucket.versioning_enabled = True + _helpers.retry_429_503(bucket.create)() + + yield bucket + + _helpers.delete_bucket(bucket) + + +@pytest.fixture(scope="session") +def listable_bucket_name(): + return _helpers.unique_name("gcp-systest-listable") + + +@pytest.fixture(scope="session") +def listable_bucket(storage_client, listable_bucket_name, file_data): + bucket = storage_client.bucket(listable_bucket_name) + _helpers.retry_429_503(bucket.create)() + + info = file_data["logo"] + source_blob = bucket.blob(_listable_filenames[0]) + source_blob.upload_from_filename(info["path"]) + + for filename in _listable_filenames[1:]: + _helpers.retry_bad_copy(bucket.copy_blob)( + source_blob, bucket, filename, + ) + + yield bucket + + _helpers.delete_bucket(bucket) + + +@pytest.fixture(scope="session") +def listable_filenames(): + return _listable_filenames + + +@pytest.fixture(scope="session") +def hierarchy_bucket_name(): + return _helpers.unique_name("gcp-systest-hierarchy") + + +@pytest.fixture(scope="session") +def hierarchy_bucket(storage_client, hierarchy_bucket_name, file_data): + bucket = storage_client.bucket(hierarchy_bucket_name) + _helpers.retry_429_503(bucket.create)() + + simple_path = _file_data["simple"]["path"] + for filename in _hierarchy_filenames: + blob = bucket.blob(filename) + blob.upload_from_filename(simple_path) + + yield bucket + + _helpers.delete_bucket(bucket) + + +@pytest.fixture(scope="session") +def hierarchy_filenames(): + return _hierarchy_filenames + + +@pytest.fixture(scope="session") +def signing_bucket_name(): + return _helpers.unique_name("gcp-systest-signing") + + +@pytest.fixture(scope="session") +def signing_bucket(storage_client, signing_bucket_name): + bucket = storage_client.bucket(signing_bucket_name) + _helpers.retry_429_503(bucket.create)() + blob = 
bucket.blob("README.txt") + blob.upload_from_string(_helpers.signing_blob_content) + + yield bucket + + _helpers.delete_bucket(bucket) + + +@pytest.fixture(scope="function") +def buckets_to_delete(): + buckets_to_delete = [] + + yield buckets_to_delete + + for bucket in buckets_to_delete: + _helpers.delete_bucket(bucket) + + +@pytest.fixture(scope="function") +def blobs_to_delete(): + blobs_to_delete = [] + + yield blobs_to_delete + + for blob in blobs_to_delete: + _helpers.delete_blob(blob) + + +@pytest.fixture(scope="session") +def file_data(): + for file_data in _file_data.values(): + with open(file_data["path"], "rb") as file_obj: + file_data["hash"] = _base64_md5hash(file_obj) + + return _file_data diff --git a/tests/system/test__signing.py b/tests/system/test__signing.py new file mode 100644 index 000000000..72e392bde --- /dev/null +++ b/tests/system/test__signing.py @@ -0,0 +1,395 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import datetime +import hashlib +import os +import time + +import requests + +from google.api_core import path_template +from google.cloud import iam_credentials_v1 +from . import _helpers + + +def _morph_expiration(version, expiration): + if expiration is not None: + return expiration + + if version == "v2": + return int(time.time()) + 10 + + return 10 + + +def _create_signed_list_blobs_url_helper( + client, bucket, version, expiration=None, method="GET" +): + expiration = _morph_expiration(version, expiration) + + signed_url = bucket.generate_signed_url( + expiration=expiration, method=method, client=client, version=version + ) + + response = requests.get(signed_url) + assert response.status_code == 200 + + +def test_create_signed_list_blobs_url_v2(storage_client, signing_bucket): + _create_signed_list_blobs_url_helper( + storage_client, signing_bucket, version="v2", + ) + + +def test_create_signed_list_blobs_url_v2_w_expiration(storage_client, signing_bucket): + now = datetime.datetime.utcnow() + delta = datetime.timedelta(seconds=10) + + _create_signed_list_blobs_url_helper( + storage_client, signing_bucket, expiration=now + delta, version="v2", + ) + + +def test_create_signed_list_blobs_url_v4(storage_client, signing_bucket): + _create_signed_list_blobs_url_helper( + storage_client, signing_bucket, version="v4", + ) + + +def test_create_signed_list_blobs_url_v4_w_expiration(storage_client, signing_bucket): + now = datetime.datetime.utcnow() + delta = datetime.timedelta(seconds=10) + _create_signed_list_blobs_url_helper( + storage_client, signing_bucket, expiration=now + delta, version="v4", + ) + + +def _create_signed_read_url_helper( + client, + bucket, + blob_name="LogoToSign.jpg", + method="GET", + version="v2", + payload=None, + expiration=None, + encryption_key=None, + service_account_email=None, + access_token=None, +): + expiration = _morph_expiration(version, expiration) + + if payload is not None: + blob = bucket.blob(blob_name, encryption_key=encryption_key) + 
blob.upload_from_string(payload) + else: + blob = bucket.get_blob("README.txt") + + signed_url = blob.generate_signed_url( + expiration=expiration, + method=method, + client=client, + version=version, + service_account_email=service_account_email, + access_token=access_token, + ) + + headers = {} + + if encryption_key is not None: + headers["x-goog-encryption-algorithm"] = "AES256" + encoded_key = base64.b64encode(encryption_key).decode("utf-8") + headers["x-goog-encryption-key"] = encoded_key + key_hash = hashlib.sha256(encryption_key).digest() + key_hash = base64.b64encode(key_hash).decode("utf-8") + headers["x-goog-encryption-key-sha256"] = key_hash + + response = requests.get(signed_url, headers=headers) + assert response.status_code == 200 + + if payload is not None: + assert response.content == payload + else: + assert response.content == _helpers.signing_blob_content + + +def test_create_signed_read_url_v2(storage_client, signing_bucket): + _create_signed_read_url_helper(storage_client, signing_bucket) + + +def test_create_signed_read_url_v4(storage_client, signing_bucket): + _create_signed_read_url_helper( + storage_client, signing_bucket, version="v4", + ) + + +def test_create_signed_read_url_v2_w_expiration(storage_client, signing_bucket): + now = datetime.datetime.utcnow() + delta = datetime.timedelta(seconds=10) + + _create_signed_read_url_helper( + storage_client, signing_bucket, expiration=now + delta + ) + + +def test_create_signed_read_url_v4_w_expiration(storage_client, signing_bucket): + now = datetime.datetime.utcnow() + delta = datetime.timedelta(seconds=10) + _create_signed_read_url_helper( + storage_client, signing_bucket, expiration=now + delta, version="v4" + ) + + +def test_create_signed_read_url_v2_lowercase_method(storage_client, signing_bucket): + _create_signed_read_url_helper(storage_client, signing_bucket, method="get") + + +def test_create_signed_read_url_v4_lowercase_method(storage_client, signing_bucket): + _create_signed_read_url_helper( + storage_client, signing_bucket, method="get", version="v4" + ) + + +def test_create_signed_read_url_v2_w_non_ascii_name(storage_client, signing_bucket): + _create_signed_read_url_helper( + storage_client, + signing_bucket, + blob_name=u"Caf\xe9.txt", + payload=b"Test signed URL for blob w/ non-ASCII name", + ) + + +def test_create_signed_read_url_v4_w_non_ascii_name(storage_client, signing_bucket): + _create_signed_read_url_helper( + storage_client, + signing_bucket, + blob_name=u"Caf\xe9.txt", + payload=b"Test signed URL for blob w/ non-ASCII name", + version="v4", + ) + + +def test_create_signed_read_url_v2_w_csek(storage_client, signing_bucket): + encryption_key = os.urandom(32) + _create_signed_read_url_helper( + storage_client, + signing_bucket, + blob_name="v2-w-csek.txt", + payload=b"Test signed URL for blob w/ CSEK", + encryption_key=encryption_key, + ) + + +def test_create_signed_read_url_v4_w_csek(storage_client, signing_bucket): + encryption_key = os.urandom(32) + _create_signed_read_url_helper( + storage_client, + signing_bucket, + blob_name="v2-w-csek.txt", + payload=b"Test signed URL for blob w/ CSEK", + encryption_key=encryption_key, + version="v4", + ) + + +def test_create_signed_read_url_v2_w_access_token( + storage_client, signing_bucket, service_account, +): + client = iam_credentials_v1.IAMCredentialsClient() + service_account_email = service_account.service_account_email + name = path_template.expand( + "projects/{project}/serviceAccounts/{service_account}", + project="-", + 
service_account=service_account_email, + ) + scope = [ + "https://www.googleapis.com/auth/devstorage.read_write", + "https://www.googleapis.com/auth/iam", + ] + response = client.generate_access_token(name=name, scope=scope) + + _create_signed_read_url_helper( + storage_client, + signing_bucket, + service_account_email=service_account_email, + access_token=response.access_token, + ) + + +def test_create_signed_read_url_v4_w_access_token( + storage_client, signing_bucket, service_account, +): + client = iam_credentials_v1.IAMCredentialsClient() + service_account_email = service_account.service_account_email + name = path_template.expand( + "projects/{project}/serviceAccounts/{service_account}", + project="-", + service_account=service_account_email, + ) + scope = [ + "https://www.googleapis.com/auth/devstorage.read_write", + "https://www.googleapis.com/auth/iam", + ] + response = client.generate_access_token(name=name, scope=scope) + + _create_signed_read_url_helper( + storage_client, + signing_bucket, + version="v4", + service_account_email=service_account_email, + access_token=response.access_token, + ) + + +def _create_signed_delete_url_helper(client, bucket, version="v2", expiration=None): + expiration = _morph_expiration(version, expiration) + + blob = bucket.blob("DELETE_ME.txt") + blob.upload_from_string(b"DELETE ME!") + + signed_delete_url = blob.generate_signed_url( + expiration=expiration, method="DELETE", client=client, version=version, + ) + + response = requests.request("DELETE", signed_delete_url) + + assert response.status_code == 204 + assert response.content == b"" + assert not blob.exists() + + +def test_create_signed_delete_url_v2(storage_client, signing_bucket): + _create_signed_delete_url_helper(storage_client, signing_bucket) + + +def test_create_signed_delete_url_v4(storage_client, signing_bucket): + _create_signed_delete_url_helper(storage_client, signing_bucket, version="v4") + + +def _create_signed_resumable_upload_url_helper( + client, bucket, version="v2", expiration=None +): + expiration = _morph_expiration(version, expiration) + blob = bucket.blob("cruddy.txt") + payload = b"DEADBEEF" + + # Initiate the upload using a signed URL. + signed_resumable_upload_url = blob.generate_signed_url( + expiration=expiration, method="RESUMABLE", client=client, version=version, + ) + + post_headers = {"x-goog-resumable": "start"} + post_response = requests.post(signed_resumable_upload_url, headers=post_headers) + assert post_response.status_code == 201 + + # Finish uploading the body. + location = post_response.headers["Location"] + put_headers = {"content-length": str(len(payload))} + put_response = requests.put(location, headers=put_headers, data=payload) + assert put_response.status_code == 200 + + # Download using a signed URL and verify. + signed_download_url = blob.generate_signed_url( + expiration=expiration, method="GET", client=client, version=version + ) + + get_response = requests.get(signed_download_url) + assert get_response.status_code == 200 + assert get_response.content == payload + + # Finally, delete the blob using a signed URL. 
+ signed_delete_url = blob.generate_signed_url( + expiration=expiration, method="DELETE", client=client, version=version, + ) + + delete_response = requests.delete(signed_delete_url) + assert delete_response.status_code == 204 + + +def test_create_signed_resumable_upload_url_v2(storage_client, signing_bucket): + _create_signed_resumable_upload_url_helper( + storage_client, signing_bucket, version="v2", + ) + + +def test_create_signed_resumable_upload_url_v4(storage_client, signing_bucket): + _create_signed_resumable_upload_url_helper( + storage_client, signing_bucket, version="v4", + ) + + +def test_generate_signed_post_policy_v4( + storage_client, buckets_to_delete, blobs_to_delete, service_account, +): + bucket_name = _helpers.unique_name("post_policy") + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + + blob_name = "post_policy_obj.txt" + payload = b"DEADBEEF" + with open(blob_name, "wb") as f: + f.write(payload) + + policy = storage_client.generate_signed_post_policy_v4( + bucket_name, + blob_name, + conditions=[ + {"bucket": bucket_name}, + ["starts-with", "$Content-Type", "text/pla"], + ], + expiration=datetime.datetime.utcnow() + datetime.timedelta(hours=1), + fields={"content-type": "text/plain"}, + ) + with open(blob_name, "r") as f: + files = {"file": (blob_name, f)} + response = requests.post(policy["url"], data=policy["fields"], files=files) + + os.remove(blob_name) + assert response.status_code == 204 + + blob = bucket.get_blob(blob_name) + assert blob.download_as_bytes() == payload + + +def test_generate_signed_post_policy_v4_invalid_field( + storage_client, buckets_to_delete, blobs_to_delete, service_account, +): + bucket_name = _helpers.unique_name("post_policy-invalid") + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + + blob_name = "post_policy_obj.txt" + payload = b"DEADBEEF" + with open(blob_name, "wb") as f: + f.write(payload) + + policy = storage_client.generate_signed_post_policy_v4( + bucket_name, + blob_name, + conditions=[ + {"bucket": bucket_name}, + ["starts-with", "$Content-Type", "text/pla"], + ], + expiration=datetime.datetime.utcnow() + datetime.timedelta(hours=1), + fields={"x-goog-random": "invalid_field", "content-type": "text/plain"}, + ) + with open(blob_name, "r") as f: + files = {"file": (blob_name, f)} + response = requests.post(policy["url"], data=policy["fields"], files=files) + + os.remove(blob_name) + assert response.status_code == 400 + + assert list(bucket.list_blobs()) == [] diff --git a/tests/system/test_blob.py b/tests/system/test_blob.py new file mode 100644 index 000000000..67cabb521 --- /dev/null +++ b/tests/system/test_blob.py @@ -0,0 +1,946 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime +import gzip +import io +import os +import tempfile +import warnings + +import pytest +import six +import mock + +from google import resumable_media +from google.api_core import exceptions +from google.cloud.storage._helpers import _base64_md5hash +from . import _helpers + +encryption_key = "b23ff11bba187db8c37077e6af3b25b8" + + +def _check_blob_hash(blob, info): + md5_hash = blob.md5_hash + if not isinstance(md5_hash, six.binary_type): + md5_hash = md5_hash.encode("utf-8") + + assert md5_hash == info["hash"] + + +def test_large_file_write_from_stream( + shared_bucket, blobs_to_delete, file_data, service_account, +): + blob = shared_bucket.blob("LargeFile") + + info = file_data["big"] + with open(info["path"], "rb") as file_obj: + blob.upload_from_file(file_obj) + blobs_to_delete.append(blob) + + _check_blob_hash(blob, info) + + +def test_large_file_write_from_stream_w_checksum( + shared_bucket, blobs_to_delete, file_data, service_account, +): + blob = shared_bucket.blob("LargeFile") + + info = file_data["big"] + with open(info["path"], "rb") as file_obj: + blob.upload_from_file(file_obj, checksum="crc32c") + blobs_to_delete.append(blob) + + _check_blob_hash(blob, info) + + +def test_large_file_write_from_stream_w_failed_checksum( + shared_bucket, blobs_to_delete, file_data, service_account, +): + blob = shared_bucket.blob("LargeFile") + + # Intercept the digest processing at the last stage and replace it + # with garbage. This is done with a patch to monkey-patch the + # resumable media library's checksum # processing; + # it does not mock a remote interface like a unit test would. + # The # remote API is still exercised. + info = file_data["big"] + with open(info["path"], "rb") as file_obj: + + with mock.patch( + "google.resumable_media._helpers.prepare_checksum_digest", + return_value="FFFFFF==", + ): + with pytest.raises(resumable_media.DataCorruption): + blob.upload_from_file(file_obj, checksum="crc32c") + + assert not blob.exists() + + +def test_large_file_write_from_stream_w_encryption_key( + storage_client, shared_bucket, blobs_to_delete, file_data, service_account, +): + blob = shared_bucket.blob("LargeFile", encryption_key=encryption_key) + + info = file_data["big"] + with open(info["path"], "rb") as file_obj: + blob.upload_from_file(file_obj) + blobs_to_delete.append(blob) + + _check_blob_hash(blob, info) + + with tempfile.NamedTemporaryFile() as temp_f: + with open(temp_f.name, "wb") as file_obj: + storage_client.download_blob_to_file(blob, file_obj) + + with open(temp_f.name, "rb") as file_obj: + md5_temp_hash = _base64_md5hash(file_obj) + + assert md5_temp_hash == info["hash"] + + +def test_small_file_write_from_filename( + shared_bucket, blobs_to_delete, file_data, service_account, +): + blob = shared_bucket.blob("SmallFile") + + info = file_data["simple"] + blob.upload_from_filename(info["path"]) + blobs_to_delete.append(blob) + + _check_blob_hash(blob, info) + + +def test_small_file_write_from_filename_with_checksum( + shared_bucket, blobs_to_delete, file_data, service_account, +): + blob = shared_bucket.blob("SmallFile") + + info = file_data["simple"] + blob.upload_from_filename(info["path"], checksum="crc32c") + blobs_to_delete.append(blob) + + _check_blob_hash(blob, info) + + +def test_small_file_write_from_filename_with_failed_checksum( + shared_bucket, blobs_to_delete, file_data, service_account, +): + blob = shared_bucket.blob("SmallFile") + + info = file_data["simple"] + # Intercept the digest processing at the last stage and replace + # it with 
garbage + with mock.patch( + "google.resumable_media._helpers.prepare_checksum_digest", + return_value="FFFFFF==", + ): + with pytest.raises(exceptions.BadRequest): + blob.upload_from_filename(info["path"], checksum="crc32c") + + assert not blob.exists() + + +def test_blob_crud_w_user_project( + storage_client, + shared_bucket, + blobs_to_delete, + file_data, + service_account, + user_project, +): + gen1_payload = b"gen1" + with_user_project = storage_client.bucket( + shared_bucket.name, user_project=user_project + ) + blob = with_user_project.blob("SmallFile") + + info = file_data["simple"] + with open(info["path"], mode="rb") as to_read: + gen0_payload = to_read.read() + + # Exercise 'objects.insert' w/ userProject. + blob.upload_from_filename(info["path"]) + gen0 = blob.generation + + # Upload a second generation of the blob + blob.upload_from_string(gen1_payload) + gen1 = blob.generation + + blob0 = with_user_project.blob("SmallFile", generation=gen0) + blob1 = with_user_project.blob("SmallFile", generation=gen1) + + # Exercise 'objects.get' w/ generation + assert with_user_project.get_blob(blob.name).generation == gen1 + assert with_user_project.get_blob(blob.name, generation=gen0).generation == gen0 + + try: + # Exercise 'objects.get' (metadata) w/ userProject. + assert blob.exists() + blob.reload() + + # Exercise 'objects.get' (media) w/ userProject. + assert blob0.download_as_bytes() == gen0_payload + assert blob1.download_as_bytes() == gen1_payload + + # Exercise 'objects.patch' w/ userProject. + blob0.content_language = "en" + blob0.patch() + assert blob0.content_language == "en" + assert blob1.content_language is None + + # Exercise 'objects.update' w/ userProject. + metadata = {"foo": "Foo", "bar": "Bar"} + blob0.metadata = metadata + blob0.update() + assert blob0.metadata == metadata + assert blob1.metadata is None + + finally: + # Exercise 'objects.delete' (metadata) w/ userProject. + blobs = storage_client.list_blobs( + with_user_project, prefix=blob.name, versions=True + ) + assert [each.generation for each in blobs] == [gen0, gen1] + + blob0.delete() + blobs = storage_client.list_blobs( + with_user_project, prefix=blob.name, versions=True + ) + assert [each.generation for each in blobs] == [gen1] + + blob1.delete() + + +def test_blob_crud_w_generation_match( + shared_bucket, blobs_to_delete, file_data, service_account, +): + wrong_generation_number = 6 + wrong_metageneration_number = 9 + gen1_payload = b"gen1" + + blob = shared_bucket.blob("SmallFile") + + info = file_data["simple"] + with open(info["path"], mode="rb") as to_read: + gen0_payload = to_read.read() + + blob.upload_from_filename(info["path"]) + gen0 = blob.generation + + # Upload a second generation of the blob + blob.upload_from_string(gen1_payload) + gen1 = blob.generation + + blob0 = shared_bucket.blob("SmallFile", generation=gen0) + blob1 = shared_bucket.blob("SmallFile", generation=gen1) + + try: + # Exercise 'objects.get' (metadata) w/ generation match. + with pytest.raises(exceptions.PreconditionFailed): + blob.exists(if_generation_match=wrong_generation_number) + + assert blob.exists(if_generation_match=gen1) + + with pytest.raises(exceptions.PreconditionFailed): + blob.reload(if_metageneration_match=wrong_metageneration_number) + + blob.reload(if_generation_match=gen1) + + # Exercise 'objects.get' (media) w/ generation match. 
+ assert blob0.download_as_bytes(if_generation_match=gen0) == gen0_payload + assert blob1.download_as_bytes(if_generation_not_match=gen0) == gen1_payload + + # Exercise 'objects.patch' w/ generation match. + blob0.content_language = "en" + blob0.patch(if_generation_match=gen0) + + assert blob0.content_language == "en" + assert blob1.content_language is None + + # Exercise 'objects.update' w/ generation match. + metadata = {"foo": "Foo", "bar": "Bar"} + blob0.metadata = metadata + blob0.update(if_generation_match=gen0) + + assert blob0.metadata == metadata + assert blob1.metadata is None + finally: + # Exercise 'objects.delete' (metadata) w/ generation match. + with pytest.raises(exceptions.PreconditionFailed): + blob0.delete(if_metageneration_match=wrong_metageneration_number) + + blob0.delete(if_generation_match=gen0) + blob1.delete(if_metageneration_not_match=wrong_metageneration_number) + + +def test_blob_acl_w_user_project( + storage_client, + shared_bucket, + blobs_to_delete, + file_data, + service_account, + user_project, +): + with_user_project = storage_client.bucket( + shared_bucket.name, user_project=user_project + ) + blob = with_user_project.blob("SmallFile") + + info = file_data["simple"] + + blob.upload_from_filename(info["path"]) + blobs_to_delete.append(blob) + + # Exercise blob ACL w/ userProject + acl = blob.acl + acl.reload() + acl.all().grant_read() + acl.save() + assert "READER" in acl.all().get_roles() + + del acl.entities["allUsers"] + acl.save() + assert not acl.has_entity("allUsers") + + +def test_blob_acl_upload_predefined( + shared_bucket, blobs_to_delete, file_data, service_account, +): + control = shared_bucket.blob("logo") + control_info = file_data["logo"] + + blob = shared_bucket.blob("SmallFile") + info = file_data["simple"] + + try: + control.upload_from_filename(control_info["path"]) + finally: + blobs_to_delete.append(control) + + try: + blob.upload_from_filename(info["path"], predefined_acl="publicRead") + finally: + blobs_to_delete.append(blob) + + control_acl = control.acl + assert "READER" not in control_acl.all().get_roles() + + acl = blob.acl + assert "READER" in acl.all().get_roles() + + acl.all().revoke_read() + assert acl.all().get_roles() == set() + assert control_acl.all().get_roles() == acl.all().get_roles() + + +def test_blob_patch_metadata( + shared_bucket, blobs_to_delete, file_data, service_account, +): + filename = file_data["logo"]["path"] + blob_name = os.path.basename(filename) + + blob = shared_bucket.blob(blob_name) + blob.upload_from_filename(filename) + blobs_to_delete.append(blob) + + # NOTE: This should not be necessary. We should be able to pass + # it in to upload_file and also to upload_from_string. + blob.content_type = "image/png" + assert blob.content_type == "image/png" + + metadata = {"foo": "Foo", "bar": "Bar"} + blob.metadata = metadata + blob.patch() + blob.reload() + assert blob.metadata == metadata + + # Ensure that metadata keys can be deleted by setting equal to None. + new_metadata = {"foo": "Foo", "bar": None} + blob.metadata = new_metadata + blob.patch() + blob.reload() + assert blob.metadata == {"foo": "Foo"} + + +def test_blob_direct_write_and_read_into_file( + shared_bucket, blobs_to_delete, service_account, +): + payload = b"Hello World" + blob = shared_bucket.blob("MyBuffer") + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + same_blob = shared_bucket.blob("MyBuffer") + same_blob.reload() # Initialize properties. 
+ + with tempfile.NamedTemporaryFile() as temp_f: + + with open(temp_f.name, "wb") as file_obj: + same_blob.download_to_file(file_obj) + + with open(temp_f.name, "rb") as file_obj: + stored_contents = file_obj.read() + + assert stored_contents == payload + + +def test_blob_download_w_generation_match( + shared_bucket, blobs_to_delete, service_account, +): + wrong_generation_number = 6 + + blob = shared_bucket.blob("MyBuffer") + payload = b"Hello World" + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + same_blob = shared_bucket.blob("MyBuffer") + same_blob.reload() # Initialize properties. + + with tempfile.NamedTemporaryFile() as temp_f: + + with open(temp_f.name, "wb") as file_obj: + with pytest.raises(exceptions.PreconditionFailed): + same_blob.download_to_file( + file_obj, if_generation_match=wrong_generation_number + ) + + same_blob.download_to_file( + file_obj, + if_generation_match=blob.generation, + if_metageneration_match=blob.metageneration, + ) + + with open(temp_f.name, "rb") as file_obj: + stored_contents = file_obj.read() + + assert stored_contents == payload + + +def test_blob_download_w_failed_crc32c_checksum( + shared_bucket, blobs_to_delete, service_account, +): + blob = shared_bucket.blob("FailedChecksumBlob") + payload = b"Hello World" + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + with tempfile.NamedTemporaryFile() as temp_f: + # Intercept the digest processing at the last stage and replace + # it with garbage. This is done with a patch to monkey-patch + # the resumable media library's checksum processing; it does not + # mock a remote interface like a unit test would. + # The remote API is still exercised. + with mock.patch( + "google.resumable_media._helpers.prepare_checksum_digest", + return_value="FFFFFF==", + ): + with pytest.raises(resumable_media.DataCorruption): + blob.download_to_filename(temp_f.name, checksum="crc32c") + + # Confirm the file was deleted on failure + assert not os.path.isfile(temp_f.name) + + # Now download with checksumming turned off + blob.download_to_filename(temp_f.name, checksum=None) + + with open(temp_f.name, "rb") as file_obj: + stored_contents = file_obj.read() + + assert stored_contents == payload + + +def test_blob_download_as_text( + shared_bucket, blobs_to_delete, service_account, +): + blob = shared_bucket.blob("MyBuffer") + payload = "Hello World" + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + stored_contents = blob.download_as_text() + assert stored_contents == payload + + +def test_blob_upload_w_gzip_encoded_download_raw( + shared_bucket, blobs_to_delete, service_account, +): + payload = b"DEADBEEF" * 1000 + raw_stream = io.BytesIO() + with gzip.GzipFile(fileobj=raw_stream, mode="wb") as gzip_stream: + gzip_stream.write(payload) + zipped = raw_stream.getvalue() + + blob = shared_bucket.blob("test_gzipped.gz") + blob.content_encoding = "gzip" + blob.upload_from_file(raw_stream, rewind=True) + blobs_to_delete.append(blob) + + expanded = blob.download_as_bytes() + assert expanded == payload + + raw = blob.download_as_bytes(raw_download=True) + assert raw == zipped + + +def test_blob_upload_from_file_resumable_with_generation( + shared_bucket, blobs_to_delete, file_data, service_account, +): + blob = shared_bucket.blob("LargeFile") + wrong_generation = 3 + wrong_meta_generation = 3 + + # uploading the file + info = file_data["big"] + with open(info["path"], "rb") as file_obj: + blob.upload_from_file(file_obj) + blobs_to_delete.append(blob) + + # reuploading with 
correct generations numbers + with open(info["path"], "rb") as file_obj: + blob.upload_from_file( + file_obj, + if_generation_match=blob.generation, + if_metageneration_match=blob.metageneration, + ) + + # reuploading with generations numbers that doesn't match original + with pytest.raises(exceptions.PreconditionFailed): + with open(info["path"], "rb") as file_obj: + blob.upload_from_file( + file_obj, if_generation_match=wrong_generation, + ) + + with pytest.raises(exceptions.PreconditionFailed): + with open(info["path"], "rb") as file_obj: + blob.upload_from_file( + file_obj, if_metageneration_match=wrong_meta_generation, + ) + + +def test_blob_upload_from_string_w_owner( + shared_bucket, blobs_to_delete, file_data, service_account, +): + blob = shared_bucket.blob("MyBuffer") + payload = b"Hello World" + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + same_blob = shared_bucket.blob("MyBuffer") + same_blob.reload(projection="full") # Initialize properties. + user_email = service_account.service_account_email + owner = same_blob.owner + assert user_email in owner["entity"] + + +def test_blob_upload_from_string_w_custom_time( + shared_bucket, blobs_to_delete, file_data, service_account, +): + blob = shared_bucket.blob("CustomTimeBlob") + payload = b"Hello World" + current_time = datetime.datetime.now() + blob.custom_time = current_time + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + same_blob = shared_bucket.blob("CustomTimeBlob") + same_blob.reload(projection="full") + custom_time = same_blob.custom_time.replace(tzinfo=None) + assert custom_time == current_time + + +def test_blob_upload_from_string_w_custom_time_no_micros( + shared_bucket, blobs_to_delete, file_data, service_account, +): + # Test that timestamps without microseconds are treated correctly by + # custom_time encoding/decoding. + blob = shared_bucket.blob("CustomTimeNoMicrosBlob") + payload = b"Hello World" + time_without_micros = datetime.datetime(2021, 2, 10, 12, 30) + blob.custom_time = time_without_micros + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + same_blob = shared_bucket.blob(("CustomTimeNoMicrosBlob")) + same_blob.reload(projection="full") + custom_time = same_blob.custom_time.replace(tzinfo=None) + assert custom_time == time_without_micros + + +def test_blob_upload_download_crc32_md5_hash( + shared_bucket, blobs_to_delete, file_data, service_account, +): + blob = shared_bucket.blob("MyBuffer") + payload = b"Hello World" + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + download_blob = shared_bucket.blob("MyBuffer") + + assert download_blob.download_as_string() == payload + assert download_blob.crc32c == blob.crc32c + assert download_blob.md5_hash == blob.md5_hash + + +@pytest.mark.parametrize( + "blob_name,payload", + [ + (u"Caf\u00e9", b"Normalization Form C"), + (u"Cafe\u0301", b"Normalization Form D"), + ], +) +def test_blob_w_unicode_names(blob_name, payload, shared_bucket, blobs_to_delete): + # Historical note: This test when originally written accessed public + # files with Unicode names. These files are no longer available, so it + # was rewritten to upload them first. 
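+
+    # Cloud Storage does not Unicode-normalize object names, so the NFC and
+    # NFD spellings parametrized above are stored under distinct names.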
+ + # Normalization form C: a single character for e-acute; + # URL should end with Cafe%CC%81 + # Normalization Form D: an ASCII e followed by U+0301 combining + # character; URL should end with Caf%C3%A9 + + blob = shared_bucket.blob(blob_name) + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + same_blob = shared_bucket.blob(blob_name) + assert same_blob.download_as_bytes() == payload + assert same_blob.name == blob_name + + +def test_blob_compose_new_blob(shared_bucket, blobs_to_delete): + payload_1 = b"AAA\n" + source_1 = shared_bucket.blob("source-1") + source_1.upload_from_string(payload_1) + blobs_to_delete.append(source_1) + + payload_2 = b"BBB\n" + source_2 = shared_bucket.blob("source-2") + source_2.upload_from_string(payload_2) + blobs_to_delete.append(source_2) + + destination = shared_bucket.blob("destination") + destination.content_type = "text/plain" + destination.compose([source_1, source_2]) + blobs_to_delete.append(destination) + + assert destination.download_as_bytes() == payload_1 + payload_2 + + +def test_blob_compose_new_blob_wo_content_type(shared_bucket, blobs_to_delete): + payload_1 = b"AAA\n" + source_1 = shared_bucket.blob("source-1") + source_1.upload_from_string(payload_1) + blobs_to_delete.append(source_1) + + payload_2 = b"BBB\n" + source_2 = shared_bucket.blob("source-2") + source_2.upload_from_string(payload_2) + blobs_to_delete.append(source_2) + + destination = shared_bucket.blob("destination") + + destination.compose([source_1, source_2]) + blobs_to_delete.append(destination) + + assert destination.content_type is None + assert destination.download_as_bytes() == payload_1 + payload_2 + + +def test_blob_compose_replace_existing_blob(shared_bucket, blobs_to_delete): + payload_before = b"AAA\n" + original = shared_bucket.blob("original") + original.content_type = "text/plain" + original.upload_from_string(payload_before) + blobs_to_delete.append(original) + + payload_to_append = b"BBB\n" + to_append = shared_bucket.blob("to_append") + to_append.upload_from_string(payload_to_append) + blobs_to_delete.append(to_append) + + original.compose([original, to_append]) + + assert original.download_as_bytes() == payload_before + payload_to_append + + +def test_blob_compose_w_generation_match_list(shared_bucket, blobs_to_delete): + payload_before = b"AAA\n" + original = shared_bucket.blob("original") + original.content_type = "text/plain" + original.upload_from_string(payload_before) + blobs_to_delete.append(original) + wrong_generations = [6, 7] + wrong_metagenerations = [8, 9] + + payload_to_append = b"BBB\n" + to_append = shared_bucket.blob("to_append") + to_append.upload_from_string(payload_to_append) + blobs_to_delete.append(to_append) + + with warnings.catch_warnings(record=True) as log: + with pytest.raises(exceptions.PreconditionFailed): + original.compose( + [original, to_append], + if_generation_match=wrong_generations, + if_metageneration_match=wrong_metagenerations, + ) + assert len(log) == 2 + + with warnings.catch_warnings(record=True) as log: + original.compose( + [original, to_append], + if_generation_match=[original.generation, to_append.generation], + if_metageneration_match=[original.metageneration, to_append.metageneration], + ) + assert len(log) == 2 + + assert original.download_as_bytes() == payload_before + payload_to_append + + +def test_blob_compose_w_generation_match_long(shared_bucket, blobs_to_delete): + payload_before = b"AAA\n" + original = shared_bucket.blob("original") + original.content_type = "text/plain" + 
original.upload_from_string(payload_before) + blobs_to_delete.append(original) + + payload_to_append = b"BBB\n" + to_append = shared_bucket.blob("to_append") + to_append.upload_from_string(payload_to_append) + blobs_to_delete.append(to_append) + + with pytest.raises(exceptions.PreconditionFailed): + original.compose([original, to_append], if_generation_match=0) + + original.compose([original, to_append], if_generation_match=original.generation) + + assert original.download_as_bytes() == payload_before + payload_to_append + + +def test_blob_compose_w_source_generation_match(shared_bucket, blobs_to_delete): + payload_before = b"AAA\n" + original = shared_bucket.blob("original") + original.content_type = "text/plain" + original.upload_from_string(payload_before) + blobs_to_delete.append(original) + wrong_source_generations = [6, 7] + + payload_to_append = b"BBB\n" + to_append = shared_bucket.blob("to_append") + to_append.upload_from_string(payload_to_append) + blobs_to_delete.append(to_append) + + with pytest.raises(exceptions.PreconditionFailed): + original.compose( + [original, to_append], if_source_generation_match=wrong_source_generations, + ) + + original.compose( + [original, to_append], + if_source_generation_match=[original.generation, to_append.generation], + ) + + assert original.download_as_bytes() == payload_before + payload_to_append + + +def test_blob_compose_w_user_project(storage_client, buckets_to_delete, user_project): + new_bucket_name = _helpers.unique_name("compose-user-project") + created = _helpers.retry_429_503(storage_client.create_bucket)(new_bucket_name) + buckets_to_delete.append(created) + created.requester_pays = True + + payload_1 = b"AAA\n" + source_1 = created.blob("source-1") + source_1.upload_from_string(payload_1) + + payload_2 = b"BBB\n" + source_2 = created.blob("source-2") + source_2.upload_from_string(payload_2) + + with_user_project = storage_client.bucket( + new_bucket_name, user_project=user_project + ) + + destination = with_user_project.blob("destination") + destination.content_type = "text/plain" + destination.compose([source_1, source_2]) + + assert destination.download_as_bytes() == payload_1 + payload_2 + + +def test_blob_rewrite_new_blob_add_key(shared_bucket, blobs_to_delete, file_data): + info = file_data["simple"] + source = shared_bucket.blob("source") + source.upload_from_filename(info["path"]) + blobs_to_delete.append(source) + source_data = source.download_as_bytes() + + key = os.urandom(32) + dest = shared_bucket.blob("dest", encryption_key=key) + token, rewritten, total = dest.rewrite(source) + blobs_to_delete.append(dest) + + assert token is None + assert rewritten == len(source_data) + assert total == len(source_data) + assert dest.download_as_bytes() == source_data + + +def test_blob_rewrite_rotate_key(shared_bucket, blobs_to_delete, file_data): + blob_name = "rotating-keys" + info = file_data["simple"] + + source_key = os.urandom(32) + source = shared_bucket.blob(blob_name, encryption_key=source_key) + source.upload_from_filename(info["path"]) + blobs_to_delete.append(source) + source_data = source.download_as_bytes() + + dest_key = os.urandom(32) + dest = shared_bucket.blob(blob_name, encryption_key=dest_key) + token, rewritten, total = dest.rewrite(source) + # Not adding 'dest' to 'blobs_to_delete': it is the + # same object as 'source'. 
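+
+    # The small object is rewritten in a single call, so no continuation token
+    # comes back and the rewritten/total byte counts cover the whole object.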
+ + assert token is None + assert rewritten == len(source_data) + assert total == len(source_data) + assert dest.download_as_bytes() == source_data + + +def test_blob_rewrite_add_key_w_user_project( + storage_client, buckets_to_delete, user_project, file_data +): + info = file_data["simple"] + new_bucket_name = _helpers.unique_name("rewrite-key-up") + created = _helpers.retry_429_503(storage_client.create_bucket)(new_bucket_name) + buckets_to_delete.append(created) + created.requester_pays = True + + with_user_project = storage_client.bucket( + new_bucket_name, user_project=user_project + ) + + source = with_user_project.blob("source") + source.upload_from_filename(info["path"]) + source_data = source.download_as_bytes() + + key = os.urandom(32) + dest = with_user_project.blob("dest", encryption_key=key) + token, rewritten, total = dest.rewrite(source) + + assert token is None + assert rewritten == len(source_data) + assert total == len(source_data) + assert dest.download_as_bytes() == source_data + + +def test_blob_rewrite_rotate_key_w_user_project( + storage_client, buckets_to_delete, user_project, file_data +): + blob_name = "rotating-keys" + info = file_data["simple"] + new_bucket_name = _helpers.unique_name("rewrite-key-up") + created = _helpers.retry_429_503(storage_client.create_bucket)(new_bucket_name) + buckets_to_delete.append(created) + created.requester_pays = True + + with_user_project = storage_client.bucket( + new_bucket_name, user_project=user_project + ) + + source_key = os.urandom(32) + source = with_user_project.blob(blob_name, encryption_key=source_key) + source.upload_from_filename(info["path"]) + source_data = source.download_as_bytes() + + dest_key = os.urandom(32) + dest = with_user_project.blob(blob_name, encryption_key=dest_key) + token, rewritten, total = dest.rewrite(source) + + assert token is None + assert rewritten == len(source_data) + assert total == len(source_data) + assert dest.download_as_bytes() == source_data + + +def test_blob_rewrite_w_generation_match(shared_bucket, blobs_to_delete, file_data): + wrong_generation_number = 6 + blob_name = "generation-match" + info = file_data["simple"] + + source = shared_bucket.blob(blob_name) + source.upload_from_filename(info["path"]) + source_data = source.download_as_bytes() + blobs_to_delete.append(source) + + dest = shared_bucket.blob(blob_name) + dest.reload() + + with pytest.raises(exceptions.PreconditionFailed): + dest.rewrite(source, if_generation_match=wrong_generation_number) + + token, rewritten, total = dest.rewrite( + source, + if_generation_match=dest.generation, + if_source_generation_match=source.generation, + if_source_metageneration_match=source.metageneration, + ) + + assert token is None + assert rewritten == len(source_data) + assert total == len(source_data) + assert dest.download_as_bytes() == source_data + + +def test_blob_update_storage_class_small_file( + shared_bucket, blobs_to_delete, file_data +): + from google.cloud.storage import constants + + blob = shared_bucket.blob("SmallFile") + + info = file_data["simple"] + blob.upload_from_filename(info["path"]) + blobs_to_delete.append(blob) + + blob.update_storage_class(constants.NEARLINE_STORAGE_CLASS) + blob.reload() + assert blob.storage_class == constants.NEARLINE_STORAGE_CLASS + + blob.update_storage_class(constants.COLDLINE_STORAGE_CLASS) + blob.reload() + assert blob.storage_class == constants.COLDLINE_STORAGE_CLASS + + +def test_blob_update_storage_class_large_file( + shared_bucket, blobs_to_delete, file_data +): + from 
google.cloud.storage import constants + + blob = shared_bucket.blob("BigFile") + + info = file_data["big"] + blob.upload_from_filename(info["path"]) + blobs_to_delete.append(blob) + + blob.update_storage_class(constants.NEARLINE_STORAGE_CLASS) + blob.reload() + assert blob.storage_class == constants.NEARLINE_STORAGE_CLASS + + blob.update_storage_class(constants.COLDLINE_STORAGE_CLASS) + blob.reload() + assert blob.storage_class == constants.COLDLINE_STORAGE_CLASS diff --git a/tests/system/test_bucket.py b/tests/system/test_bucket.py new file mode 100644 index 000000000..2fdd64fe4 --- /dev/null +++ b/tests/system/test_bucket.py @@ -0,0 +1,847 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime + +import pytest +import six + +from google.api_core import exceptions +from . import _helpers + + +def test_bucket_create_w_alt_storage_class(storage_client, buckets_to_delete): + from google.cloud.storage import constants + + bucket_name = _helpers.unique_name("bucket-w-archive") + + with pytest.raises(exceptions.NotFound): + storage_client.get_bucket(bucket_name) + + bucket = storage_client.bucket(bucket_name) + bucket.storage_class = constants.ARCHIVE_STORAGE_CLASS + + _helpers.retry_429_503(bucket.create)() + buckets_to_delete.append(bucket) + + created = storage_client.get_bucket(bucket_name) + assert created.storage_class == constants.ARCHIVE_STORAGE_CLASS + + +def test_bucket_lifecycle_rules(storage_client, buckets_to_delete): + from google.cloud.storage import constants + from google.cloud.storage.bucket import LifecycleRuleDelete + from google.cloud.storage.bucket import LifecycleRuleSetStorageClass + + bucket_name = _helpers.unique_name("w-lifcycle-rules") + custom_time_before = datetime.date(2018, 8, 1) + noncurrent_before = datetime.date(2018, 8, 1) + + with pytest.raises(exceptions.NotFound): + storage_client.get_bucket(bucket_name) + + bucket = storage_client.bucket(bucket_name) + bucket.add_lifecycle_delete_rule( + age=42, + number_of_newer_versions=3, + days_since_custom_time=2, + custom_time_before=custom_time_before, + days_since_noncurrent_time=2, + noncurrent_time_before=noncurrent_before, + ) + bucket.add_lifecycle_set_storage_class_rule( + constants.COLDLINE_STORAGE_CLASS, + is_live=False, + matches_storage_class=[constants.NEARLINE_STORAGE_CLASS], + ) + + expected_rules = [ + LifecycleRuleDelete( + age=42, + number_of_newer_versions=3, + days_since_custom_time=2, + custom_time_before=custom_time_before, + days_since_noncurrent_time=2, + noncurrent_time_before=noncurrent_before, + ), + LifecycleRuleSetStorageClass( + constants.COLDLINE_STORAGE_CLASS, + is_live=False, + matches_storage_class=[constants.NEARLINE_STORAGE_CLASS], + ), + ] + + _helpers.retry_429_503(bucket.create)(location="us") + buckets_to_delete.append(bucket) + + assert bucket.name == bucket_name + assert list(bucket.lifecycle_rules) == expected_rules + + bucket.clear_lifecyle_rules() + bucket.patch() + + assert list(bucket.lifecycle_rules) == 
[] + + +def test_bucket_update_labels(storage_client, buckets_to_delete): + bucket_name = _helpers.unique_name("update-labels") + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + assert bucket.exists() + + updated_labels = {"test-label": "label-value"} + bucket.labels = updated_labels + bucket.update() + assert bucket.labels == updated_labels + + new_labels = {"another-label": "another-value"} + bucket.labels = new_labels + bucket.patch() + assert bucket.labels == new_labels + + bucket.labels = {} + bucket.update() + assert bucket.labels == {} + + +def test_bucket_get_set_iam_policy( + storage_client, buckets_to_delete, service_account, +): + from google.cloud.storage.iam import STORAGE_OBJECT_VIEWER_ROLE + from google.api_core.exceptions import BadRequest + from google.api_core.exceptions import PreconditionFailed + + bucket_name = _helpers.unique_name("iam-policy") + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + assert bucket.exists() + + policy_no_version = bucket.get_iam_policy() + assert policy_no_version.version == 1 + + policy = bucket.get_iam_policy(requested_policy_version=3) + assert policy == policy_no_version + + member = "serviceAccount:{}".format(storage_client.get_service_account_email()) + + binding_w_condition = { + "role": STORAGE_OBJECT_VIEWER_ROLE, + "members": {member}, + "condition": { + "title": "always-true", + "description": "test condition always-true", + "expression": "true", + }, + } + policy.bindings.append(binding_w_condition) + + with pytest.raises(PreconditionFailed, match="enable uniform bucket-level access"): + bucket.set_iam_policy(policy) + + bucket.iam_configuration.uniform_bucket_level_access_enabled = True + bucket.patch() + + policy = bucket.get_iam_policy(requested_policy_version=3) + policy.bindings.append(binding_w_condition) + + with pytest.raises(BadRequest, match="at least 3"): + bucket.set_iam_policy(policy) + + policy.version = 3 + returned_policy = bucket.set_iam_policy(policy) + assert returned_policy.version == 3 + assert returned_policy.bindings == policy.bindings + + fetched_policy = bucket.get_iam_policy(requested_policy_version=3) + assert fetched_policy.bindings == returned_policy.bindings + + +def test_bucket_crud_w_requester_pays(storage_client, buckets_to_delete, user_project): + bucket_name = _helpers.unique_name("w-requester-pays") + created = _helpers.retry_429_503(storage_client.create_bucket)( + bucket_name, requester_pays=True + ) + buckets_to_delete.append(created) + assert created.name == bucket_name + assert created.requester_pays + + with_user_project = storage_client.bucket(bucket_name, user_project=user_project,) + + try: + # Exercise 'buckets.get' w/ userProject. + assert with_user_project.exists() + with_user_project.reload() + assert with_user_project.requester_pays + + # Exercise 'buckets.patch' w/ userProject. + with_user_project.configure_website( + main_page_suffix="index.html", not_found_page="404.html" + ) + with_user_project.patch() + expected_website = {"mainPageSuffix": "index.html", "notFoundPage": "404.html"} + assert with_user_project._properties["website"] == expected_website + + # Exercise 'buckets.update' w/ userProject. + new_labels = {"another-label": "another-value"} + with_user_project.labels = new_labels + with_user_project.update() + assert with_user_project.labels == new_labels + + finally: + # Exercise 'buckets.delete' w/ userProject. 
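+        # The bucket is requester-pays, so the delete issued through the
+        # user_project-bound handle is billed to that project.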
+ with_user_project.delete() + buckets_to_delete.remove(created) + + +def test_bucket_acls_iam_w_user_project( + storage_client, buckets_to_delete, user_project +): + bucket_name = _helpers.unique_name("acl-w-user-project") + created = _helpers.retry_429_503(storage_client.create_bucket)( + bucket_name, requester_pays=True, + ) + buckets_to_delete.append(created) + + with_user_project = storage_client.bucket(bucket_name, user_project=user_project) + + # Exercise bucket ACL w/ userProject + acl = with_user_project.acl + acl.reload() + acl.all().grant_read() + acl.save() + assert "READER" in acl.all().get_roles() + + del acl.entities["allUsers"] + acl.save() + assert not acl.has_entity("allUsers") + + # Exercise default object ACL w/ userProject + doa = with_user_project.default_object_acl + doa.reload() + doa.all().grant_read() + doa.save() + assert "READER" in doa.all().get_roles() + + # Exercise IAM w/ userProject + test_permissions = ["storage.buckets.get"] + found = with_user_project.test_iam_permissions(test_permissions) + assert found == test_permissions + + policy = with_user_project.get_iam_policy() + viewers = policy.setdefault("roles/storage.objectViewer", set()) + viewers.add(policy.all_users()) + with_user_project.set_iam_policy(policy) + + +def test_bucket_copy_blob( + storage_client, buckets_to_delete, blobs_to_delete, user_project, +): + payload = b"DEADBEEF" + bucket_name = _helpers.unique_name("copy-blob") + created = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(created) + assert created.name == bucket_name + + blob = created.blob("CloudLogo") + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + new_blob = _helpers.retry_bad_copy(created.copy_blob)( + blob, created, "CloudLogoCopy" + ) + blobs_to_delete.append(new_blob) + + copied_contents = new_blob.download_as_bytes() + assert copied_contents == payload + + +def test_bucket_copy_blob_w_user_project( + storage_client, buckets_to_delete, blobs_to_delete, user_project, +): + payload = b"DEADBEEF" + bucket_name = _helpers.unique_name("copy-w-requester-pays") + created = _helpers.retry_429_503(storage_client.create_bucket)( + bucket_name, requester_pays=True + ) + buckets_to_delete.append(created) + assert created.name == bucket_name + assert created.requester_pays + + blob = created.blob("simple") + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + with_user_project = storage_client.bucket(bucket_name, user_project=user_project) + + new_blob = _helpers.retry_bad_copy(with_user_project.copy_blob)( + blob, with_user_project, "simple-copy" + ) + blobs_to_delete.append(new_blob) + + assert new_blob.download_as_bytes() == payload + + +def test_bucket_copy_blob_w_generation_match( + storage_client, buckets_to_delete, blobs_to_delete, +): + payload = b"DEADBEEF" + bucket_name = _helpers.unique_name("generation-match") + created = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(created) + assert created.name == bucket_name + + blob = created.blob("simple") + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + dest_bucket = storage_client.bucket(bucket_name) + + new_blob = dest_bucket.copy_blob( + blob, dest_bucket, "simple-copy", if_source_generation_match=blob.generation, + ) + blobs_to_delete.append(new_blob) + + assert new_blob.download_as_bytes() == payload + + +def test_bucket_copy_blob_w_metageneration_match( + storage_client, buckets_to_delete, blobs_to_delete, +): + payload = 
b"DEADBEEF" + bucket_name = _helpers.unique_name("generation-match") + created = _helpers.retry_429_503(storage_client.create_bucket)( + bucket_name, requester_pays=True + ) + buckets_to_delete.append(created) + assert created.name == bucket_name + + blob = created.blob("simple") + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + dest_bucket = storage_client.bucket(bucket_name) + + new_blob = dest_bucket.copy_blob( + blob, + dest_bucket, + "simple-copy", + if_source_metageneration_match=blob.metageneration, + ) + blobs_to_delete.append(new_blob) + + assert new_blob.download_as_bytes() == payload + + +def test_bucket_get_blob_with_user_project( + storage_client, buckets_to_delete, blobs_to_delete, user_project, +): + blob_name = "blob-name" + payload = b"DEADBEEF" + bucket_name = _helpers.unique_name("w-requester-pays") + created = _helpers.retry_429_503(storage_client.create_bucket)( + bucket_name, requester_pays=True + ) + buckets_to_delete.append(created) + assert created.name == bucket_name + assert created.requester_pays + + with_user_project = storage_client.bucket(bucket_name, user_project=user_project) + + assert with_user_project.get_blob("nonesuch") is None + + to_add = created.blob(blob_name) + to_add.upload_from_string(payload) + blobs_to_delete.append(to_add) + + found = with_user_project.get_blob(blob_name) + assert found.download_as_bytes() == payload + + +@_helpers.retry_failures +def test_bucket_list_blobs(listable_bucket, listable_filenames): + all_blobs = list(listable_bucket.list_blobs()) + assert sorted(blob.name for blob in all_blobs) == sorted(listable_filenames) + + +@_helpers.retry_failures +def test_bucket_list_blobs_w_user_project( + storage_client, listable_bucket, listable_filenames, user_project, +): + with_user_project = storage_client.bucket( + listable_bucket.name, user_project=user_project + ) + all_blobs = list(with_user_project.list_blobs()) + assert sorted(blob.name for blob in all_blobs) == sorted(listable_filenames) + + +@_helpers.retry_failures +def test_bucket_list_blobs_paginated(listable_bucket, listable_filenames): + truncation_size = 1 + count = len(listable_filenames) - truncation_size + iterator = listable_bucket.list_blobs(max_results=count) + page_iter = iterator.pages + + page1 = six.next(page_iter) + blobs = list(page1) + assert len(blobs) == count + assert iterator.next_page_token is not None + # Technically the iterator is exhausted. + assert iterator.num_results == iterator.max_results + # But we modify the iterator to continue paging after + # artificially stopping after ``count`` items. + iterator.max_results = None + + page2 = six.next(page_iter) + last_blobs = list(page2) + assert len(last_blobs) == truncation_size + + +@_helpers.retry_failures +def test_bucket_list_blobs_paginated_w_offset(listable_bucket, listable_filenames): + truncation_size = 1 + inclusive_start_offset = listable_filenames[1] + exclusive_end_offset = listable_filenames[-1] + desired_files = listable_filenames[1:-1] + count = len(desired_files) - truncation_size + iterator = listable_bucket.list_blobs( + max_results=count, + start_offset=inclusive_start_offset, + end_offset=exclusive_end_offset, + ) + page_iter = iterator.pages + + page1 = six.next(page_iter) + blobs = list(page1) + assert len(blobs) == count + assert blobs[0].name == desired_files[0] + assert iterator.next_page_token is not None + # Technically the iterator is exhausted. 
+ assert iterator.num_results == iterator.max_results + # But we modify the iterator to continue paging after + # artificially stopping after ``count`` items. + iterator.max_results = None + + page2 = six.next(page_iter) + last_blobs = list(page2) + assert len(last_blobs) == truncation_size + assert last_blobs[-1].name == desired_files[-1] + + +@_helpers.retry_failures +def test_blob_exists_hierarchy(hierarchy_bucket, hierarchy_filenames): + for filename in hierarchy_filenames: + blob = hierarchy_bucket.blob(filename) + assert blob.exists() + + +@_helpers.retry_failures +def test_bucket_list_blobs_hierarchy_root_level(hierarchy_bucket, hierarchy_filenames): + expected_names = ["file01.txt"] + expected_prefixes = set(["parent/"]) + + iterator = hierarchy_bucket.list_blobs(delimiter="/") + page = six.next(iterator.pages) + blobs = list(page) + + assert [blob.name for blob in blobs] == expected_names + assert iterator.next_page_token is None + assert iterator.prefixes == expected_prefixes + + +@_helpers.retry_failures +def test_bucket_list_blobs_hierarchy_first_level(hierarchy_bucket, hierarchy_filenames): + expected_names = ["parent/", "parent/file11.txt"] + expected_prefixes = set(["parent/child/"]) + + iterator = hierarchy_bucket.list_blobs(delimiter="/", prefix="parent/") + page = six.next(iterator.pages) + blobs = list(page) + + assert [blob.name for blob in blobs] == expected_names + assert iterator.next_page_token is None + assert iterator.prefixes == expected_prefixes + + +@_helpers.retry_failures +def test_bucket_list_blobs_hierarchy_second_level( + hierarchy_bucket, hierarchy_filenames +): + expected_names = ["parent/child/file21.txt", "parent/child/file22.txt"] + expected_prefixes = set(["parent/child/grand/", "parent/child/other/"]) + + iterator = hierarchy_bucket.list_blobs(delimiter="/", prefix="parent/child/") + page = six.next(iterator.pages) + blobs = list(page) + assert [blob.name for blob in blobs] == expected_names + assert iterator.next_page_token is None + assert iterator.prefixes == expected_prefixes + + +@_helpers.retry_failures +def test_bucket_list_blobs_hierarchy_third_level(hierarchy_bucket, hierarchy_filenames): + # Pseudo-hierarchy can be arbitrarily deep, subject to the limit + # of 1024 characters in the UTF-8 encoded name: + # https://cloud.google.com/storage/docs/bucketnaming#objectnames + # Exercise a layer deeper to illustrate this. 
+ expected_names = ["parent/child/grand/file31.txt"] + expected_prefixes = set() + + iterator = hierarchy_bucket.list_blobs(delimiter="/", prefix="parent/child/grand/") + page = six.next(iterator.pages) + blobs = list(page) + + assert [blob.name for blob in blobs] == expected_names + assert iterator.next_page_token is None + assert iterator.prefixes == expected_prefixes + + +@_helpers.retry_failures +def test_bucket_list_blobs_hierarchy_w_include_trailing_delimiter( + hierarchy_bucket, hierarchy_filenames, +): + expected_names = ["file01.txt", "parent/"] + expected_prefixes = set(["parent/"]) + + iterator = hierarchy_bucket.list_blobs( + delimiter="/", include_trailing_delimiter=True + ) + page = six.next(iterator.pages) + blobs = list(page) + + assert [blob.name for blob in blobs] == expected_names + assert iterator.next_page_token is None + assert iterator.prefixes == expected_prefixes + + +def test_bucket_w_retention_period( + storage_client, buckets_to_delete, blobs_to_delete, +): + period_secs = 10 + bucket_name = _helpers.unique_name("w-retention-period") + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + + bucket.retention_period = period_secs + bucket.default_event_based_hold = False + bucket.patch() + + assert bucket.retention_period == period_secs + assert isinstance(bucket.retention_policy_effective_time, datetime.datetime) + assert not bucket.default_event_based_hold + assert not bucket.retention_policy_locked + + blob_name = "test-blob" + payload = b"DEADBEEF" + blob = bucket.blob(blob_name) + blob.upload_from_string(payload) + + blobs_to_delete.append(blob) + + other = bucket.get_blob(blob_name) + + assert not other.event_based_hold + assert not other.temporary_hold + assert isinstance(other.retention_expiration_time, datetime.datetime) + + with pytest.raises(exceptions.Forbidden): + other.delete() + + bucket.retention_period = None + bucket.patch() + + assert bucket.retention_period is None + assert bucket.retention_policy_effective_time is None + assert not bucket.default_event_based_hold + assert not bucket.retention_policy_locked + + other.reload() + + assert not other.event_based_hold + assert not other.temporary_hold + assert other.retention_expiration_time is None + + other.delete() + blobs_to_delete.pop() + + +def test_bucket_w_default_event_based_hold( + storage_client, buckets_to_delete, blobs_to_delete, +): + bucket_name = _helpers.unique_name("w-def-ebh") + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + + bucket.default_event_based_hold = True + bucket.patch() + + assert bucket.default_event_based_hold + assert bucket.retention_period is None + assert bucket.retention_policy_effective_time is None + assert not bucket.retention_policy_locked + + blob_name = "test-blob" + payload = b"DEADBEEF" + blob = bucket.blob(blob_name) + blob.upload_from_string(payload) + + blobs_to_delete.append(blob) + + other = bucket.get_blob(blob_name) + + assert other.event_based_hold + assert not other.temporary_hold + assert other.retention_expiration_time is None + + with pytest.raises(exceptions.Forbidden): + other.delete() + + other.event_based_hold = False + other.patch() + other.delete() + + bucket.default_event_based_hold = False + bucket.patch() + + assert not bucket.default_event_based_hold + assert bucket.retention_period is None + assert bucket.retention_policy_effective_time is None + assert not bucket.retention_policy_locked + + 
blob.upload_from_string(payload) + + # https://github.com/googleapis/python-storage/issues/435 + if blob.event_based_hold: + _helpers.retry_no_event_based_hold(blob.reload)() + + assert not blob.event_based_hold + assert not blob.temporary_hold + assert blob.retention_expiration_time is None + + blob.delete() + blobs_to_delete.pop() + + +def test_blob_w_temporary_hold( + storage_client, buckets_to_delete, blobs_to_delete, +): + bucket_name = _helpers.unique_name("w-tmp-hold") + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + + blob_name = "test-blob" + payload = b"DEADBEEF" + blob = bucket.blob(blob_name) + blob.upload_from_string(payload) + + blobs_to_delete.append(blob) + + other = bucket.get_blob(blob_name) + other.temporary_hold = True + other.patch() + + assert other.temporary_hold + assert not other.event_based_hold + assert other.retention_expiration_time is None + + with pytest.raises(exceptions.Forbidden): + other.delete() + + other.temporary_hold = False + other.patch() + + other.delete() + blobs_to_delete.pop() + + +def test_bucket_lock_retention_policy( + storage_client, buckets_to_delete, +): + period_secs = 10 + bucket_name = _helpers.unique_name("loc-ret-policy") + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + + bucket.retention_period = period_secs + bucket.patch() + + assert bucket.retention_period == period_secs + assert isinstance(bucket.retention_policy_effective_time, datetime.datetime) + assert not bucket.default_event_based_hold + assert not bucket.retention_policy_locked + + bucket.lock_retention_policy() + + bucket.reload() + assert bucket.retention_policy_locked + + bucket.retention_period = None + with pytest.raises(exceptions.Forbidden): + bucket.patch() + + +def test_new_bucket_w_ubla( + storage_client, buckets_to_delete, blobs_to_delete, +): + bucket_name = _helpers.unique_name("new-w-ubla") + bucket = storage_client.bucket(bucket_name) + bucket.iam_configuration.uniform_bucket_level_access_enabled = True + _helpers.retry_429_503(bucket.create)() + buckets_to_delete.append(bucket) + + bucket_acl = bucket.acl + with pytest.raises(exceptions.BadRequest): + bucket_acl.reload() + + bucket_acl.loaded = True # Fake that we somehow loaded the ACL + bucket_acl.all().grant_read() + with pytest.raises(exceptions.BadRequest): + bucket_acl.save() + + blob_name = "my-blob.txt" + blob = bucket.blob(blob_name) + payload = b"DEADBEEF" + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + found = bucket.get_blob(blob_name) + assert found.download_as_bytes() == payload + + blob_acl = blob.acl + with pytest.raises(exceptions.BadRequest): + blob_acl.reload() + + blob_acl.loaded = True # Fake that we somehow loaded the ACL + blob_acl.all().grant_read() + with pytest.raises(exceptions.BadRequest): + blob_acl.save() + + +def test_ubla_set_unset_preserves_acls( + storage_client, buckets_to_delete, blobs_to_delete, +): + bucket_name = _helpers.unique_name("ubla-acls") + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + + blob_name = "my-blob.txt" + blob = bucket.blob(blob_name) + payload = b"DEADBEEF" + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + # Preserve ACLs before setting UBLA + bucket_acl_before = list(bucket.acl) + blob_acl_before = list(bucket.acl) + + # Set UBLA + 
bucket.iam_configuration.uniform_bucket_level_access_enabled = True + bucket.patch() + + assert bucket.iam_configuration.uniform_bucket_level_access_enabled + + # While UBLA is set, cannot get / set ACLs + with pytest.raises(exceptions.BadRequest): + bucket.acl.reload() + + # Clear UBLA + bucket.iam_configuration.uniform_bucket_level_access_enabled = False + bucket.patch() + + # Query ACLs after clearing UBLA + bucket.acl.reload() + bucket_acl_after = list(bucket.acl) + blob.acl.reload() + blob_acl_after = list(bucket.acl) + + assert bucket_acl_before == bucket_acl_after + assert blob_acl_before == blob_acl_after + + +def test_new_bucket_created_w_unspecified_pap( + storage_client, buckets_to_delete, blobs_to_delete, +): + from google.cloud.storage import constants + + bucket_name = _helpers.unique_name("new-w-pap-unspecified") + bucket = storage_client.bucket(bucket_name) + bucket.iam_configuration.uniform_bucket_level_access_enabled = True + bucket.create() + buckets_to_delete.append(bucket) + + assert ( + bucket.iam_configuration.public_access_prevention + == constants.PUBLIC_ACCESS_PREVENTION_UNSPECIFIED + ) + + bucket.iam_configuration.public_access_prevention = ( + constants.PUBLIC_ACCESS_PREVENTION_ENFORCED + ) + bucket.patch() + assert ( + bucket.iam_configuration.public_access_prevention + == constants.PUBLIC_ACCESS_PREVENTION_ENFORCED + ) + assert bucket.iam_configuration.uniform_bucket_level_access_enabled + + bucket.iam_configuration.uniform_bucket_level_access_enabled = False + bucket.patch() + assert ( + bucket.iam_configuration.public_access_prevention + == constants.PUBLIC_ACCESS_PREVENTION_ENFORCED + ) + + with pytest.raises(exceptions.BadRequest): + bucket.iam_configuration.public_access_prevention = "unexpected value" + bucket.patch() + + with pytest.raises(exceptions.PreconditionFailed): + bucket.make_public() + + blob_name = "my-blob.txt" + blob = bucket.blob(blob_name) + payload = b"DEADBEEF" + blob.upload_from_string(payload) + + with pytest.raises(exceptions.PreconditionFailed): + blob.make_public() + + +def test_new_bucket_created_w_enforced_pap( + storage_client, buckets_to_delete, blobs_to_delete, +): + from google.cloud.storage import constants + + bucket_name = _helpers.unique_name("new-w-pap-enforced") + bucket = storage_client.bucket(bucket_name) + bucket.iam_configuration.public_access_prevention = ( + constants.PUBLIC_ACCESS_PREVENTION_ENFORCED + ) + bucket.create() + buckets_to_delete.append(bucket) + + assert ( + bucket.iam_configuration.public_access_prevention + == constants.PUBLIC_ACCESS_PREVENTION_ENFORCED + ) + + bucket.iam_configuration.public_access_prevention = ( + constants.PUBLIC_ACCESS_PREVENTION_UNSPECIFIED + ) + bucket.patch() + + assert ( + bucket.iam_configuration.public_access_prevention + == constants.PUBLIC_ACCESS_PREVENTION_UNSPECIFIED + ) + assert not bucket.iam_configuration.uniform_bucket_level_access_enabled diff --git a/tests/system/test_client.py b/tests/system/test_client.py new file mode 100644 index 000000000..d33450eb7 --- /dev/null +++ b/tests/system/test_client.py @@ -0,0 +1,104 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +import tempfile + +import pytest + +from google.cloud import exceptions +from test_utils.vpcsc_config import vpcsc_config +from . import _helpers + + +public_bucket = "gcp-public-data-landsat" + + +@vpcsc_config.skip_if_inside_vpcsc +def test_anonymous_client_access_to_public_bucket(): + from google.cloud.storage.client import Client + + anonymous_client = Client.create_anonymous_client() + bucket = anonymous_client.bucket(public_bucket) + (blob,) = _helpers.retry_429_503(anonymous_client.list_blobs)( + bucket, max_results=1, + ) + with tempfile.TemporaryFile() as stream: + _helpers.retry_429_503(blob.download_to_file)(stream) + + +def test_get_service_account_email(storage_client, service_account): + domain = "gs-project-accounts.iam.gserviceaccount.com" + email = storage_client.get_service_account_email() + + new_style = re.compile(r"service-(?P[^@]+)@{}".format(domain)) + old_style = re.compile(r"{}@{}".format(storage_client.project, domain)) + patterns = [new_style, old_style] + matches = [pattern.match(email) for pattern in patterns] + + assert any(match for match in matches if match is not None) + + +def test_create_bucket_simple(storage_client, buckets_to_delete): + new_bucket_name = _helpers.unique_name("a-new-bucket") + + with pytest.raises(exceptions.NotFound): + storage_client.get_bucket(new_bucket_name) + + created = _helpers.retry_429_503(storage_client.create_bucket)(new_bucket_name) + buckets_to_delete.append(created) + + assert created.name == new_bucket_name + + +def test_list_buckets(storage_client, buckets_to_delete): + buckets_to_create = [ + _helpers.unique_name("new"), + _helpers.unique_name("newer"), + _helpers.unique_name("newest"), + ] + created_buckets = [] + + for bucket_name in buckets_to_create: + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + + all_buckets = storage_client.list_buckets() + + created_buckets = [ + bucket.name for bucket in all_buckets if bucket.name in buckets_to_create + ] + + assert sorted(created_buckets) == sorted(buckets_to_create) + + +def test_download_blob_to_file_w_uri( + storage_client, shared_bucket, blobs_to_delete, service_account, +): + blob = shared_bucket.blob("MyBuffer") + payload = b"Hello World" + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + with tempfile.NamedTemporaryFile() as temp_f: + + with open(temp_f.name, "wb") as file_obj: + storage_client.download_blob_to_file( + "gs://" + shared_bucket.name + "/MyBuffer", file_obj + ) + + with open(temp_f.name, "rb") as file_obj: + stored_contents = file_obj.read() + + assert stored_contents == payload diff --git a/tests/system/test_fileio.py b/tests/system/test_fileio.py new file mode 100644 index 000000000..79bf0c1eb --- /dev/null +++ b/tests/system/test_fileio.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from .test_blob import _check_blob_hash + + +def test_blobwriter_and_blobreader( + shared_bucket, blobs_to_delete, file_data, service_account, +): + blob = shared_bucket.blob("LargeFile") + + # Test BlobWriter works. + info = file_data["big"] + with open(info["path"], "rb") as file_obj: + with blob.open("wb", chunk_size=256 * 1024) as writer: + writer.write(file_obj.read(100)) + writer.write(file_obj.read(256 * 1024)) + writer.write(file_obj.read()) + blobs_to_delete.append(blob) + + blob.reload() + _check_blob_hash(blob, info) + + # Test BlobReader read and seek behave identically to filesystem file. + with open(info["path"], "rb") as file_obj: + with blob.open("rb", chunk_size=256 * 1024) as reader: + assert file_obj.read(100) == reader.read(100) + assert file_obj.read(256 * 1024) == reader.read(256 * 1024) + reader.seek(20) + file_obj.seek(20) + assert file_obj.read(256 * 1024 * 2) == reader.read(256 * 1024 * 2) + assert file_obj.read() == reader.read() + # End of file reached; further reads should be blank but not + # raise an error. + assert reader.read() == b"" + + +def test_blobwriter_and_blobreader_text_mode( + shared_bucket, blobs_to_delete, service_account, +): + blob = shared_bucket.blob("MultibyteTextFile") + + # Construct a multibyte text_data sample file. + base_multibyte_text_string = u"abcde あいうえお line: " + text_data = "\n".join([base_multibyte_text_string + str(x) for x in range(100)]) + + # Test text BlobWriter works. + with blob.open("wt") as writer: + writer.write(text_data[:100]) + writer.write(text_data[100:]) + blobs_to_delete.append(blob) + + # Test text BlobReader read and seek to 0. Seeking to an non-0 byte on a + # multibyte text stream is not safe in Python but the API expects + # seek() to work regadless. + with blob.open("rt") as reader: + # This should produce 100 characters, not 100 bytes. + assert text_data[:100] == reader.read(100) + assert 0 == reader.seek(0) + assert reader.read() == text_data diff --git a/tests/system/test_hmac_key_metadata.py b/tests/system/test_hmac_key_metadata.py new file mode 100644 index 000000000..5c062dbc3 --- /dev/null +++ b/tests/system/test_hmac_key_metadata.py @@ -0,0 +1,88 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime + +import pytest +import six + +from google.cloud import _helpers as _cloud_helpers + +from . 
import _helpers + + +def ensure_hmac_key_deleted(hmac_key): + from google.cloud.storage.hmac_key import HMACKeyMetadata + + if hmac_key.state != HMACKeyMetadata.INACTIVE_STATE: + hmac_key.state = HMACKeyMetadata.INACTIVE_STATE + hmac_key.update() + _helpers.retry_429_harder(hmac_key.delete)() + + +@pytest.fixture +def scrubbed_hmac_keys(storage_client): + before_hmac_keys = set(storage_client.list_hmac_keys()) + + now = datetime.datetime.utcnow().replace(tzinfo=_cloud_helpers.UTC) + yesterday = now - datetime.timedelta(days=1) + + # Delete any HMAC keys older than a day. + for hmac_key in list(before_hmac_keys): + if hmac_key.time_created < yesterday: + ensure_hmac_key_deleted(hmac_key) + before_hmac_keys.remove(hmac_key) + + hmac_keys_to_delete = [] + yield before_hmac_keys, hmac_keys_to_delete + + # Delete any HMAC keys we created + for hmac_key in hmac_keys_to_delete: + ensure_hmac_key_deleted(hmac_key) + + +def test_hmac_key_crud(storage_client, scrubbed_hmac_keys, service_account): + from google.cloud.storage.hmac_key import HMACKeyMetadata + + before_hmac_keys, hmac_keys_to_delete = scrubbed_hmac_keys + + email = service_account.service_account_email + + metadata, secret = storage_client.create_hmac_key(email) + hmac_keys_to_delete.append(metadata) + + assert isinstance(secret, six.text_type) + assert len(secret) == 40 + + after_hmac_keys = set(storage_client.list_hmac_keys()) + assert metadata not in before_hmac_keys + assert metadata in after_hmac_keys + + another = HMACKeyMetadata(storage_client) + another._properties["accessId"] = "nonesuch" + + assert not another.exists() + + another._properties["accessId"] = metadata.access_id + assert another.exists() + + another.reload() + + assert another._properties == metadata._properties + + metadata.state = HMACKeyMetadata.INACTIVE_STATE + metadata.update() + + metadata.delete() + hmac_keys_to_delete.remove(metadata) diff --git a/tests/system/test_kms_integration.py b/tests/system/test_kms_integration.py new file mode 100644 index 000000000..123658a4e --- /dev/null +++ b/tests/system/test_kms_integration.py @@ -0,0 +1,243 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import pytest + +from google.api_core import exceptions +from google.cloud import kms +from . 
import _helpers + +keyring_name = "gcs-test" +default_key_name = "gcs-test" +alt_key_name = "gcs-test-alternate" +_key_name_format = "projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}" + + +def _kms_key_name(client, bucket, key_name): + return _key_name_format.format( + client.project, bucket.location.lower(), keyring_name, key_name, + ) + + +@pytest.fixture(scope="session") +def kms_bucket_name(): + return _helpers.unique_name("gcp-systest-kms") + + +@pytest.fixture(scope="session") +def kms_bucket(storage_client, kms_bucket_name, no_mtls): + bucket = _helpers.retry_429_503(storage_client.create_bucket)(kms_bucket_name) + + yield bucket + + _helpers.delete_bucket(bucket) + + +@pytest.fixture(scope="session") +def kms_client(): + return kms.KeyManagementServiceClient() + + +@pytest.fixture(scope="function") +def keyring(storage_client, kms_bucket, kms_client): + project = storage_client.project + location = kms_bucket.location.lower() + purpose = kms.enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT + + # If the keyring doesn't exist create it. + keyring_path = kms_client.key_ring_path(project, location, keyring_name) + + try: + kms_client.get_key_ring(keyring_path) + except exceptions.NotFound: + parent = kms_client.location_path(project, location) + kms_client.create_key_ring(parent, keyring_name, {}) + + # Mark this service account as an owner of the new keyring + service_account_email = storage_client.get_service_account_email() + policy = { + "bindings": [ + { + "role": "roles/cloudkms.cryptoKeyEncrypterDecrypter", + "members": ["serviceAccount:" + service_account_email], + } + ] + } + kms_client.set_iam_policy(keyring_path, policy) + + # Populate the keyring with the keys we use in the tests + key_names = [ + "gcs-test", + "gcs-test-alternate", + "explicit-kms-key-name", + "default-kms-key-name", + "override-default-kms-key-name", + "alt-default-kms-key-name", + ] + for key_name in key_names: + key_path = kms_client.crypto_key_path(project, location, keyring_name, key_name) + try: + kms_client.get_crypto_key(key_path) + except exceptions.NotFound: + key = {"purpose": purpose} + kms_client.create_crypto_key(keyring_path, key_name, key) + + +@pytest.fixture(scope="session") +def kms_key_name(storage_client, kms_bucket): + return _kms_key_name(storage_client, kms_bucket, default_key_name) + + +@pytest.fixture(scope="session") +def alt_kms_key_name(storage_client, kms_bucket): + return _kms_key_name(storage_client, kms_bucket, alt_key_name) + + +def test_blob_w_explicit_kms_key_name( + kms_bucket, blobs_to_delete, kms_key_name, file_data +): + blob_name = "explicit-kms-key-name" + info = file_data["simple"] + blob = kms_bucket.blob(blob_name, kms_key_name=kms_key_name) + blob.upload_from_filename(info["path"]) + blobs_to_delete.append(blob) + + with open(info["path"], "rb") as file_obj: + assert blob.download_as_bytes() == file_obj.read() + + # We don't know the current version of the key. 
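+    # The service reports the resolved key, typically with a
+    # '/cryptoKeyVersions/<N>' suffix appended, hence the prefix check.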
+ assert blob.kms_key_name.startswith(kms_key_name) + + (listed,) = list(kms_bucket.list_blobs()) + assert listed.kms_key_name.startswith(kms_key_name) + + +@_helpers.retry_failures +def test_bucket_w_default_kms_key_name( + kms_bucket, blobs_to_delete, kms_key_name, alt_kms_key_name, file_data, +): + blob_name = "default-kms-key-name" + override_blob_name = "override-default-kms-key-name" + alt_blob_name = "alt-default-kms-key-name" + cleartext_blob_name = "cleartext" + + info = file_data["simple"] + + with open(info["path"], "rb") as file_obj: + payload = file_obj.read() + + kms_bucket.default_kms_key_name = kms_key_name + kms_bucket.patch() + assert kms_bucket.default_kms_key_name == kms_key_name + + defaulted_blob = kms_bucket.blob(blob_name) + defaulted_blob.upload_from_filename(info["path"]) + blobs_to_delete.append(defaulted_blob) + + assert defaulted_blob.download_as_bytes() == payload + # We don't know the current version of the key. + assert defaulted_blob.kms_key_name.startswith(kms_key_name) + + override_blob = kms_bucket.blob(override_blob_name, kms_key_name=alt_kms_key_name) + override_blob.upload_from_filename(info["path"]) + blobs_to_delete.append(override_blob) + + assert override_blob.download_as_bytes() == payload + # We don't know the current version of the key. + assert override_blob.kms_key_name.startswith(alt_kms_key_name) + + kms_bucket.default_kms_key_name = alt_kms_key_name + kms_bucket.patch() + + alt_blob = kms_bucket.blob(alt_blob_name) + alt_blob.upload_from_filename(info["path"]) + blobs_to_delete.append(alt_blob) + + assert alt_blob.download_as_bytes() == payload + # We don't know the current version of the key. + assert alt_blob.kms_key_name.startswith(alt_kms_key_name) + + kms_bucket.default_kms_key_name = None + kms_bucket.patch() + + cleartext_blob = kms_bucket.blob(cleartext_blob_name) + cleartext_blob.upload_from_filename(info["path"]) + blobs_to_delete.append(cleartext_blob) + + assert cleartext_blob.download_as_bytes() == payload + assert cleartext_blob.kms_key_name is None + + +def test_blob_rewrite_rotate_csek_to_cmek( + kms_bucket, blobs_to_delete, kms_key_name, file_data, +): + blob_name = "rotating-keys" + source_key = os.urandom(32) + info = file_data["simple"] + + source = kms_bucket.blob(blob_name, encryption_key=source_key) + source.upload_from_filename(info["path"]) + blobs_to_delete.append(source) + source_data = source.download_as_bytes() + + # We can't verify it, but ideally we would check that the following + # URL was resolvable with our credentials + # KEY_URL = 'https://cloudkms.googleapis.com/v1/{}'.format( + # kms_key_name) + + dest = kms_bucket.blob(blob_name, kms_key_name=kms_key_name) + token, rewritten, total = dest.rewrite(source) + + while token is not None: + token, rewritten, total = dest.rewrite(source, token=token) + + # Not adding 'dest' to 'self.case_blobs_to_delete': it is the + # same object as 'source'. 
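+
+    # The loop above has drained any continuation tokens, so the rewrite is
+    # complete and the reported byte counts cover the full object.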
+ + assert token is None + assert rewritten == len(source_data) + assert total == len(source_data) + + assert dest.download_as_bytes() == source_data + + +def test_blob_upload_w_bucket_cmek_enabled( + kms_bucket, blobs_to_delete, kms_key_name, file_data, +): + blob_name = "test-blob" + payload = b"DEADBEEF" + alt_payload = b"NEWDEADBEEF" + + kms_bucket.default_kms_key_name = kms_key_name + kms_bucket.patch() + assert kms_bucket.default_kms_key_name == kms_key_name + + blob = kms_bucket.blob(blob_name) + blob.upload_from_string(payload) + blobs_to_delete.append(blob) + + _helpers.retry_429_harder(blob.reload)() + # We don't know the current version of the key. + assert blob.kms_key_name.startswith(kms_key_name) + + blob.upload_from_string(alt_payload, if_generation_match=blob.generation) + + assert blob.download_as_bytes() == alt_payload + + kms_bucket.default_kms_key_name = None + _helpers.retry_429_harder(kms_bucket.patch)() + + assert kms_bucket.default_kms_key_name is None diff --git a/tests/system/test_notification.py b/tests/system/test_notification.py new file mode 100644 index 000000000..3f03ac39a --- /dev/null +++ b/tests/system/test_notification.py @@ -0,0 +1,180 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from . 
import _helpers + +custom_attributes = {"attr1": "value1", "attr2": "value2"} +blob_name_prefix = "blob-name-prefix/" + + +@pytest.fixture(scope="session") +def event_types(): + from google.cloud.storage.notification import ( + OBJECT_FINALIZE_EVENT_TYPE, + OBJECT_DELETE_EVENT_TYPE, + ) + + return [OBJECT_FINALIZE_EVENT_TYPE, OBJECT_DELETE_EVENT_TYPE] + + +@pytest.fixture(scope="session") +def payload_format(): + from google.cloud.storage.notification import JSON_API_V1_PAYLOAD_FORMAT + + return JSON_API_V1_PAYLOAD_FORMAT + + +@pytest.fixture(scope="session") +def publisher_client(): + try: + from google.cloud.pubsub_v1 import PublisherClient + except ImportError: + pytest.skip("Cannot import pubsub") + + return PublisherClient() + + +@pytest.fixture(scope="session") +def topic_name(): + return _helpers.unique_name("notification") + + +@pytest.fixture(scope="session") +def topic_path(storage_client, topic_name): + return "projects/{}/topics/{}".format(storage_client.project, topic_name) + + +@pytest.fixture(scope="session") +def notification_topic(storage_client, publisher_client, topic_path, no_mtls): + _helpers.retry_429(publisher_client.create_topic)(topic_path) + policy = publisher_client.get_iam_policy(topic_path) + binding = policy.bindings.add() + binding.role = "roles/pubsub.publisher" + binding.members.append( + "serviceAccount:{}".format(storage_client.get_service_account_email()) + ) + publisher_client.set_iam_policy(topic_path, policy) + + +def test_notification_create_minimal( + storage_client, buckets_to_delete, topic_name, notification_topic, +): + bucket_name = _helpers.unique_name("notification-minimal") + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + + assert list(bucket.list_notifications()) == [] + + notification = bucket.notification(topic_name) + _helpers.retry_429_503(notification.create)() + + try: + assert notification.exists() + assert notification.notification_id is not None + notifications = list(bucket.list_notifications()) + assert len(notifications) == 1 + assert notifications[0].topic_name == topic_name + finally: + notification.delete() + + +def test_notification_create_explicit( + storage_client, + buckets_to_delete, + topic_name, + notification_topic, + event_types, + payload_format, +): + bucket_name = _helpers.unique_name("notification-explicit") + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + + assert list(bucket.list_notifications()) == [] + + notification = bucket.notification( + topic_name=topic_name, + custom_attributes=custom_attributes, + event_types=event_types, + blob_name_prefix=blob_name_prefix, + payload_format=payload_format, + ) + _helpers.retry_429_503(notification.create)() + + try: + assert notification.exists() + assert notification.notification_id is not None + assert notification.custom_attributes == custom_attributes + assert notification.event_types == event_types + assert notification.blob_name_prefix == blob_name_prefix + assert notification.payload_format == payload_format + finally: + notification.delete() + + +def test_notification_create_w_user_project( + storage_client, buckets_to_delete, topic_name, notification_topic, user_project, +): + bucket_name = _helpers.unique_name("notification-w-up") + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + + with_user_project = storage_client.bucket(bucket_name, user_project=user_project) + + 
assert list(with_user_project.list_notifications()) == [] + + notification = with_user_project.notification(topic_name) + _helpers.retry_429_503(notification.create)() + + try: + assert notification.exists() + assert notification.notification_id is not None + notifications = list(bucket.list_notifications()) + assert len(notifications) == 1 + assert notifications[0].topic_name == topic_name + finally: + notification.delete() + + +def test_bucket_get_notification( + storage_client, + buckets_to_delete, + topic_name, + notification_topic, + event_types, + payload_format, +): + bucket_name = _helpers.unique_name("notification-get") + bucket = _helpers.retry_429_503(storage_client.create_bucket)(bucket_name) + buckets_to_delete.append(bucket) + + notification = bucket.notification( + topic_name=topic_name, + custom_attributes=custom_attributes, + payload_format=payload_format, + ) + _helpers.retry_429_503(notification.create)() + try: + assert notification.exists() + assert notification.notification_id is not None + + fetched = bucket.get_notification(notification.notification_id) + + assert fetched.notification_id == notification.notification_id + assert fetched.custom_attributes == custom_attributes + assert fetched.payload_format == payload_format + finally: + notification.delete() diff --git a/tests/system/test_system.py b/tests/system/test_system.py deleted file mode 100644 index 6fbaa02c2..000000000 --- a/tests/system/test_system.py +++ /dev/null @@ -1,2747 +0,0 @@ -# coding=utf-8 - -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import base64 -import datetime -import gzip -import hashlib -import io -import os -import re -import tempfile -import time -import unittest -import mock - -import requests -import pytest -import six - -from google.cloud import exceptions -from google.cloud import iam_credentials_v1 -from google.cloud import storage -from google.cloud.storage._helpers import _base64_md5hash -from google.cloud.storage.bucket import LifecycleRuleDelete -from google.cloud.storage.bucket import LifecycleRuleSetStorageClass -from google.cloud import _helpers -from google.cloud import kms -from google import resumable_media -import google.auth -import google.api_core -from google.api_core import path_template -import google.oauth2 -from test_utils.retry import RetryErrors -from test_utils.retry import RetryInstanceState -from test_utils.system import unique_resource_id -from test_utils.vpcsc_config import vpcsc_config - - -USER_PROJECT = os.environ.get("GOOGLE_CLOUD_TESTS_USER_PROJECT") -DIRNAME = os.path.realpath(os.path.dirname(__file__)) -DATA_DIRNAME = os.path.abspath(os.path.join(DIRNAME, "..", "data")) - - -def _bad_copy(bad_request): - """Predicate: pass only exceptions for a failed copyTo.""" - err_msg = bad_request.message - return err_msg.startswith("No file found in request. 
(POST") and "copyTo" in err_msg - - -def _no_event_based_hold(blob): - return not blob.event_based_hold - - -retry_429 = RetryErrors(exceptions.TooManyRequests, max_tries=6) -retry_429_harder = RetryErrors(exceptions.TooManyRequests, max_tries=10) -retry_429_503 = RetryErrors( - [exceptions.TooManyRequests, exceptions.ServiceUnavailable], max_tries=10 -) -retry_bad_copy = RetryErrors(exceptions.BadRequest, error_predicate=_bad_copy) -retry_no_event_based_hold = RetryInstanceState(_no_event_based_hold) - - -def _empty_bucket(client, bucket): - """Empty a bucket of all existing blobs (including multiple versions).""" - for blob in list(client.list_blobs(bucket, versions=True)): - try: - blob.delete() - except exceptions.NotFound: - pass - - -class Config(object): - """Run-time configuration to be modified at set-up. - - This is a mutable stand-in to allow test set-up to modify - global state. - """ - - CLIENT = None - TEST_BUCKET = None - TESTING_MTLS = False - - -def setUpModule(): - Config.CLIENT = storage.Client() - bucket_name = "new" + unique_resource_id() - # In the **very** rare case the bucket name is reserved, this - # fails with a ConnectionError. - Config.TEST_BUCKET = Config.CLIENT.bucket(bucket_name) - Config.TEST_BUCKET.versioning_enabled = True - retry_429_503(Config.TEST_BUCKET.create)() - # mTLS testing uses the system test as well. For mTLS testing, - # GOOGLE_API_USE_CLIENT_CERTIFICATE env var will be set to "true" - # explicitly. - Config.TESTING_MTLS = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") == "true" - - -def tearDownModule(): - errors = (exceptions.Conflict, exceptions.TooManyRequests) - retry = RetryErrors(errors, max_tries=15) - retry(_empty_bucket)(Config.CLIENT, Config.TEST_BUCKET) - retry(Config.TEST_BUCKET.delete)(force=True) - - -class TestClient(unittest.TestCase): - @classmethod - def setUpClass(cls): - super(TestClient, cls).setUpClass() - if ( - type(Config.CLIENT._credentials) - is not google.oauth2.service_account.Credentials - ): - raise unittest.SkipTest("These tests require a service account credential") - - def setUp(self): - self.case_hmac_keys_to_delete = [] - - def tearDown(self): - from google.cloud.storage.hmac_key import HMACKeyMetadata - - for hmac_key in self.case_hmac_keys_to_delete: - if hmac_key.state == HMACKeyMetadata.ACTIVE_STATE: - hmac_key.state = HMACKeyMetadata.INACTIVE_STATE - hmac_key.update() - if hmac_key.state == HMACKeyMetadata.INACTIVE_STATE: - retry_429_harder(hmac_key.delete)() - - def test_get_service_account_email(self): - domain = "gs-project-accounts.iam.gserviceaccount.com" - email = Config.CLIENT.get_service_account_email() - - new_style = re.compile(r"service-(?P[^@]+)@" + domain) - old_style = re.compile(r"{}@{}".format(Config.CLIENT.project, domain)) - patterns = [new_style, old_style] - matches = [pattern.match(email) for pattern in patterns] - - self.assertTrue(any(match for match in matches if match is not None)) - - @staticmethod - def _get_before_hmac_keys(client): - from google.cloud.storage.hmac_key import HMACKeyMetadata - - before_hmac_keys = set(client.list_hmac_keys()) - - now = datetime.datetime.utcnow().replace(tzinfo=_helpers.UTC) - yesterday = now - datetime.timedelta(days=1) - - # Delete any HMAC keys older than a day. 
- for hmac_key in list(before_hmac_keys): - if hmac_key.time_created < yesterday: - if hmac_key.state != HMACKeyMetadata.INACTIVE_STATE: - hmac_key.state = HMACKeyMetadata.INACTIVE_STATE - hmac_key.update() - hmac_key.delete() - before_hmac_keys.remove(hmac_key) - - return before_hmac_keys - - def test_hmac_key_crud(self): - from google.cloud.storage.hmac_key import HMACKeyMetadata - - credentials = Config.CLIENT._credentials - email = credentials.service_account_email - - before_hmac_keys = self._get_before_hmac_keys(Config.CLIENT) - - metadata, secret = Config.CLIENT.create_hmac_key(email) - self.case_hmac_keys_to_delete.append(metadata) - - self.assertIsInstance(secret, six.text_type) - self.assertEqual(len(secret), 40) - - after_hmac_keys = set(Config.CLIENT.list_hmac_keys()) - self.assertFalse(metadata in before_hmac_keys) - self.assertTrue(metadata in after_hmac_keys) - - another = HMACKeyMetadata(Config.CLIENT) - - another._properties["accessId"] = "nonesuch" - self.assertFalse(another.exists()) - - another._properties["accessId"] = metadata.access_id - self.assertTrue(another.exists()) - - another.reload() - - self.assertEqual(another._properties, metadata._properties) - - metadata.state = HMACKeyMetadata.INACTIVE_STATE - metadata.update() - - metadata.delete() - self.case_hmac_keys_to_delete.remove(metadata) - - -class TestStorageBuckets(unittest.TestCase): - def setUp(self): - self.case_buckets_to_delete = [] - - def tearDown(self): - for bucket_name in self.case_buckets_to_delete: - bucket = Config.CLIENT.bucket(bucket_name) - retry_429_harder(bucket.delete)() - - def test_create_bucket(self): - new_bucket_name = "a-new-bucket" + unique_resource_id("-") - self.assertRaises( - exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - created = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - self.assertEqual(created.name, new_bucket_name) - - def test_bucket_create_w_alt_storage_class(self): - from google.cloud.storage import constants - - new_bucket_name = "bucket-w-archive" + unique_resource_id("-") - self.assertRaises( - exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - bucket = Config.CLIENT.bucket(new_bucket_name) - bucket.storage_class = constants.ARCHIVE_STORAGE_CLASS - retry_429_503(bucket.create)() - self.case_buckets_to_delete.append(new_bucket_name) - created = Config.CLIENT.get_bucket(new_bucket_name) - self.assertEqual(created.storage_class, constants.ARCHIVE_STORAGE_CLASS) - - def test_lifecycle_rules(self): - import datetime - from google.cloud.storage import constants - - new_bucket_name = "w-lifcycle-rules" + unique_resource_id("-") - custom_time_before = datetime.date(2018, 8, 1) - noncurrent_before = datetime.date(2018, 8, 1) - - self.assertRaises( - exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - bucket = Config.CLIENT.bucket(new_bucket_name) - bucket.add_lifecycle_delete_rule( - age=42, - number_of_newer_versions=3, - days_since_custom_time=2, - custom_time_before=custom_time_before, - days_since_noncurrent_time=2, - noncurrent_time_before=noncurrent_before, - ) - bucket.add_lifecycle_set_storage_class_rule( - constants.COLDLINE_STORAGE_CLASS, - is_live=False, - matches_storage_class=[constants.NEARLINE_STORAGE_CLASS], - ) - - expected_rules = [ - LifecycleRuleDelete( - age=42, - number_of_newer_versions=3, - days_since_custom_time=2, - custom_time_before=custom_time_before, - days_since_noncurrent_time=2, - 
noncurrent_time_before=noncurrent_before, - ), - LifecycleRuleSetStorageClass( - constants.COLDLINE_STORAGE_CLASS, - is_live=False, - matches_storage_class=[constants.NEARLINE_STORAGE_CLASS], - ), - ] - - retry_429_503(bucket.create)(location="us") - - self.case_buckets_to_delete.append(new_bucket_name) - self.assertEqual(bucket.name, new_bucket_name) - self.assertEqual(list(bucket.lifecycle_rules), expected_rules) - - bucket.clear_lifecyle_rules() - bucket.patch() - - self.assertEqual(list(bucket.lifecycle_rules), []) - - def test_list_buckets(self): - buckets_to_create = [ - "new" + unique_resource_id(), - "newer" + unique_resource_id(), - "newest" + unique_resource_id(), - ] - created_buckets = [] - for bucket_name in buckets_to_create: - bucket = Config.CLIENT.bucket(bucket_name) - retry_429_503(bucket.create)() - self.case_buckets_to_delete.append(bucket_name) - - # Retrieve the buckets. - all_buckets = Config.CLIENT.list_buckets() - created_buckets = [ - bucket for bucket in all_buckets if bucket.name in buckets_to_create - ] - self.assertEqual(len(created_buckets), len(buckets_to_create)) - - def test_bucket_update_labels(self): - bucket_name = "update-labels" + unique_resource_id("-") - bucket = retry_429_503(Config.CLIENT.create_bucket)(bucket_name) - self.case_buckets_to_delete.append(bucket_name) - self.assertTrue(bucket.exists()) - - updated_labels = {"test-label": "label-value"} - bucket.labels = updated_labels - bucket.update() - self.assertEqual(bucket.labels, updated_labels) - - new_labels = {"another-label": "another-value"} - bucket.labels = new_labels - bucket.patch() - self.assertEqual(bucket.labels, new_labels) - - bucket.labels = {} - bucket.update() - self.assertEqual(bucket.labels, {}) - - def test_get_set_iam_policy(self): - from google.cloud.storage.iam import STORAGE_OBJECT_VIEWER_ROLE - from google.api_core.exceptions import BadRequest, PreconditionFailed - - bucket_name = "iam-policy" + unique_resource_id("-") - bucket = retry_429_503(Config.CLIENT.create_bucket)(bucket_name) - self.case_buckets_to_delete.append(bucket_name) - self.assertTrue(bucket.exists()) - - policy_no_version = bucket.get_iam_policy() - self.assertEqual(policy_no_version.version, 1) - - policy = bucket.get_iam_policy(requested_policy_version=3) - self.assertEqual(policy, policy_no_version) - - member = "serviceAccount:{}".format(Config.CLIENT.get_service_account_email()) - - BINDING_W_CONDITION = { - "role": STORAGE_OBJECT_VIEWER_ROLE, - "members": {member}, - "condition": { - "title": "always-true", - "description": "test condition always-true", - "expression": "true", - }, - } - policy.bindings.append(BINDING_W_CONDITION) - - with pytest.raises( - PreconditionFailed, match="enable uniform bucket-level access" - ): - bucket.set_iam_policy(policy) - - bucket.iam_configuration.uniform_bucket_level_access_enabled = True - bucket.patch() - - policy = bucket.get_iam_policy(requested_policy_version=3) - policy.bindings.append(BINDING_W_CONDITION) - - with pytest.raises(BadRequest, match="at least 3"): - bucket.set_iam_policy(policy) - - policy.version = 3 - returned_policy = bucket.set_iam_policy(policy) - self.assertEqual(returned_policy.version, 3) - self.assertEqual(returned_policy.bindings, policy.bindings) - - fetched_policy = bucket.get_iam_policy(requested_policy_version=3) - self.assertEqual(fetched_policy.bindings, returned_policy.bindings) - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_crud_bucket_with_requester_pays(self): - new_bucket_name 
= "w-requester-pays" + unique_resource_id("-") - created = retry_429_503(Config.CLIENT.create_bucket)( - new_bucket_name, requester_pays=True - ) - self.case_buckets_to_delete.append(new_bucket_name) - self.assertEqual(created.name, new_bucket_name) - self.assertTrue(created.requester_pays) - - with_user_project = Config.CLIENT.bucket( - new_bucket_name, user_project=USER_PROJECT - ) - - # Bucket will be deleted in-line below. - self.case_buckets_to_delete.remove(new_bucket_name) - - try: - # Exercise 'buckets.get' w/ userProject. - self.assertTrue(with_user_project.exists()) - with_user_project.reload() - self.assertTrue(with_user_project.requester_pays) - - # Exercise 'buckets.patch' w/ userProject. - with_user_project.configure_website( - main_page_suffix="index.html", not_found_page="404.html" - ) - with_user_project.patch() - self.assertEqual( - with_user_project._properties["website"], - {"mainPageSuffix": "index.html", "notFoundPage": "404.html"}, - ) - - # Exercise 'buckets.update' w/ userProject. - new_labels = {"another-label": "another-value"} - with_user_project.labels = new_labels - with_user_project.update() - self.assertEqual(with_user_project.labels, new_labels) - - finally: - # Exercise 'buckets.delete' w/ userProject. - with_user_project.delete() - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_bucket_acls_iam_with_user_project(self): - new_bucket_name = "acl-w-user-project" + unique_resource_id("-") - retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name, requester_pays=True) - self.case_buckets_to_delete.append(new_bucket_name) - - with_user_project = Config.CLIENT.bucket( - new_bucket_name, user_project=USER_PROJECT - ) - - # Exercise bucket ACL w/ userProject - acl = with_user_project.acl - acl.reload() - acl.all().grant_read() - acl.save() - self.assertIn("READER", acl.all().get_roles()) - del acl.entities["allUsers"] - acl.save() - self.assertFalse(acl.has_entity("allUsers")) - - # Exercise default object ACL w/ userProject - doa = with_user_project.default_object_acl - doa.reload() - doa.all().grant_read() - doa.save() - self.assertIn("READER", doa.all().get_roles()) - - # Exercise IAM w/ userProject - test_permissions = ["storage.buckets.get"] - self.assertEqual( - with_user_project.test_iam_permissions(test_permissions), test_permissions - ) - - policy = with_user_project.get_iam_policy() - viewers = policy.setdefault("roles/storage.objectViewer", set()) - viewers.add(policy.all_users()) - with_user_project.set_iam_policy(policy) - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_copy_existing_file_with_user_project(self): - new_bucket_name = "copy-w-requester-pays" + unique_resource_id("-") - created = retry_429_503(Config.CLIENT.create_bucket)( - new_bucket_name, requester_pays=True - ) - self.case_buckets_to_delete.append(new_bucket_name) - self.assertEqual(created.name, new_bucket_name) - self.assertTrue(created.requester_pays) - - to_delete = [] - blob = storage.Blob("simple", bucket=created) - blob.upload_from_string(b"DEADBEEF") - to_delete.append(blob) - try: - with_user_project = Config.CLIENT.bucket( - new_bucket_name, user_project=USER_PROJECT - ) - - new_blob = retry_bad_copy(with_user_project.copy_blob)( - blob, with_user_project, "simple-copy" - ) - to_delete.append(new_blob) - - base_contents = blob.download_as_bytes() - copied_contents = new_blob.download_as_bytes() - self.assertEqual(base_contents, copied_contents) - finally: - for blob in to_delete: - 
retry_429_harder(blob.delete)() - - def test_copy_file_with_generation_match(self): - new_bucket_name = "generation-match" + unique_resource_id("-") - created = retry_429_503(Config.CLIENT.create_bucket)( - new_bucket_name, requester_pays=True - ) - self.case_buckets_to_delete.append(new_bucket_name) - self.assertEqual(created.name, new_bucket_name) - - to_delete = [] - blob = storage.Blob("simple", bucket=created) - blob.upload_from_string(b"DEADBEEF") - to_delete.append(blob) - try: - dest_bucket = Config.CLIENT.bucket(new_bucket_name) - - new_blob = dest_bucket.copy_blob( - blob, - dest_bucket, - "simple-copy", - if_source_generation_match=blob.generation, - ) - to_delete.append(new_blob) - - base_contents = blob.download_as_bytes() - copied_contents = new_blob.download_as_bytes() - self.assertEqual(base_contents, copied_contents) - finally: - for blob in to_delete: - retry_429_harder(blob.delete)() - - def test_copy_file_with_metageneration_match(self): - new_bucket_name = "generation-match" + unique_resource_id("-") - created = retry_429_503(Config.CLIENT.create_bucket)( - new_bucket_name, requester_pays=True - ) - self.case_buckets_to_delete.append(new_bucket_name) - self.assertEqual(created.name, new_bucket_name) - - to_delete = [] - blob = storage.Blob("simple", bucket=created) - blob.upload_from_string(b"DEADBEEF") - to_delete.append(blob) - try: - dest_bucket = Config.CLIENT.bucket(new_bucket_name) - - new_blob = dest_bucket.copy_blob( - blob, - dest_bucket, - "simple-copy", - if_source_metageneration_match=blob.metageneration, - ) - to_delete.append(new_blob) - - base_contents = blob.download_as_bytes() - copied_contents = new_blob.download_as_bytes() - self.assertEqual(base_contents, copied_contents) - finally: - for blob in to_delete: - retry_429_harder(blob.delete)() - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_bucket_get_blob_with_user_project(self): - new_bucket_name = "w-requester-pays" + unique_resource_id("-") - data = b"DEADBEEF" - created = retry_429_503(Config.CLIENT.create_bucket)( - new_bucket_name, requester_pays=True - ) - self.case_buckets_to_delete.append(new_bucket_name) - self.assertEqual(created.name, new_bucket_name) - self.assertTrue(created.requester_pays) - - with_user_project = Config.CLIENT.bucket( - new_bucket_name, user_project=USER_PROJECT - ) - - self.assertIsNone(with_user_project.get_blob("nonesuch")) - to_add = created.blob("blob-name") - to_add.upload_from_string(data) - try: - found = with_user_project.get_blob("blob-name") - self.assertEqual(found.download_as_bytes(), data) - finally: - to_add.delete() - - -class TestStorageFiles(unittest.TestCase): - - FILES = { - "logo": {"path": os.path.join(DATA_DIRNAME, "CloudPlatform_128px_Retina.png")}, - "big": {"path": os.path.join(DATA_DIRNAME, "five-point-one-mb-file.zip")}, - "simple": {"path": os.path.join(DATA_DIRNAME, "simple.txt")}, - } - - @classmethod - def setUpClass(cls): - super(TestStorageFiles, cls).setUpClass() - for file_data in cls.FILES.values(): - with open(file_data["path"], "rb") as file_obj: - file_data["hash"] = _base64_md5hash(file_obj) - cls.bucket = Config.TEST_BUCKET - - def setUp(self): - self.case_blobs_to_delete = [] - - def tearDown(self): - errors = (exceptions.TooManyRequests, exceptions.ServiceUnavailable) - retry = RetryErrors(errors, max_tries=6) - for blob in self.case_blobs_to_delete: - retry(blob.delete)() - - -class TestStorageWriteFiles(TestStorageFiles): - ENCRYPTION_KEY = "b23ff11bba187db8c37077e6af3b25b8" - - 
@classmethod - def setUpClass(cls): - super(TestStorageWriteFiles, cls).setUpClass() - if ( - type(Config.CLIENT._credentials) - is not google.oauth2.service_account.Credentials - ): - raise unittest.SkipTest("These tests require a service account credential") - - def test_large_file_write_from_stream(self): - blob = self.bucket.blob("LargeFile") - - file_data = self.FILES["big"] - with open(file_data["path"], "rb") as file_obj: - blob.upload_from_file(file_obj) - self.case_blobs_to_delete.append(blob) - - md5_hash = blob.md5_hash - if not isinstance(md5_hash, six.binary_type): - md5_hash = md5_hash.encode("utf-8") - self.assertEqual(md5_hash, file_data["hash"]) - - def test_large_file_write_from_stream_with_checksum(self): - blob = self.bucket.blob("LargeFile") - - file_data = self.FILES["big"] - with open(file_data["path"], "rb") as file_obj: - blob.upload_from_file(file_obj, checksum="crc32c") - self.case_blobs_to_delete.append(blob) - - md5_hash = blob.md5_hash - if not isinstance(md5_hash, six.binary_type): - md5_hash = md5_hash.encode("utf-8") - self.assertEqual(md5_hash, file_data["hash"]) - - def test_large_file_write_from_stream_with_failed_checksum(self): - blob = self.bucket.blob("LargeFile") - - file_data = self.FILES["big"] - - # Intercept the digest processing at the last stage and replace it with garbage. - # This is done with a patch to monkey-patch the resumable media library's checksum - # processing; it does not mock a remote interface like a unit test would. The - # remote API is still exercised. - with open(file_data["path"], "rb") as file_obj: - with mock.patch( - "google.resumable_media._helpers.prepare_checksum_digest", - return_value="FFFFFF==", - ): - with self.assertRaises(resumable_media.DataCorruption): - blob.upload_from_file(file_obj, checksum="crc32c") - self.assertFalse(blob.exists()) - - def test_large_encrypted_file_write_from_stream(self): - blob = self.bucket.blob("LargeFile", encryption_key=self.ENCRYPTION_KEY) - - file_data = self.FILES["big"] - with open(file_data["path"], "rb") as file_obj: - blob.upload_from_file(file_obj) - self.case_blobs_to_delete.append(blob) - - md5_hash = blob.md5_hash - if not isinstance(md5_hash, six.binary_type): - md5_hash = md5_hash.encode("utf-8") - self.assertEqual(md5_hash, file_data["hash"]) - - with tempfile.NamedTemporaryFile() as temp_f: - with open(temp_f.name, "wb") as file_obj: - Config.CLIENT.download_blob_to_file(blob, file_obj) - - with open(temp_f.name, "rb") as file_obj: - md5_temp_hash = _base64_md5hash(file_obj) - - self.assertEqual(md5_temp_hash, file_data["hash"]) - - def test_small_file_write_from_filename(self): - blob = self.bucket.blob("SmallFile") - - file_data = self.FILES["simple"] - blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(blob) - - md5_hash = blob.md5_hash - if not isinstance(md5_hash, six.binary_type): - md5_hash = md5_hash.encode("utf-8") - self.assertEqual(md5_hash, file_data["hash"]) - - def test_small_file_write_from_filename_with_checksum(self): - blob = self.bucket.blob("SmallFile") - - file_data = self.FILES["simple"] - blob.upload_from_filename(file_data["path"], checksum="crc32c") - self.case_blobs_to_delete.append(blob) - - md5_hash = blob.md5_hash - if not isinstance(md5_hash, six.binary_type): - md5_hash = md5_hash.encode("utf-8") - self.assertEqual(md5_hash, file_data["hash"]) - - def test_small_file_write_from_filename_with_failed_checksum(self): - blob = self.bucket.blob("SmallFile") - - file_data = self.FILES["simple"] - # Intercept the 
digest processing at the last stage and replace it with garbage - with mock.patch( - "google.resumable_media._helpers.prepare_checksum_digest", - return_value="FFFFFF==", - ): - with self.assertRaises(google.api_core.exceptions.BadRequest): - blob.upload_from_filename(file_data["path"], checksum="crc32c") - - self.assertFalse(blob.exists()) - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_crud_blob_w_user_project(self): - with_user_project = Config.CLIENT.bucket( - self.bucket.name, user_project=USER_PROJECT - ) - blob = with_user_project.blob("SmallFile") - - file_data = self.FILES["simple"] - with open(file_data["path"], mode="rb") as to_read: - file_contents = to_read.read() - - # Exercise 'objects.insert' w/ userProject. - blob.upload_from_filename(file_data["path"]) - gen0 = blob.generation - - # Upload a second generation of the blob - blob.upload_from_string(b"gen1") - gen1 = blob.generation - - blob0 = with_user_project.blob("SmallFile", generation=gen0) - blob1 = with_user_project.blob("SmallFile", generation=gen1) - - # Exercise 'objects.get' w/ generation - self.assertEqual(with_user_project.get_blob(blob.name).generation, gen1) - self.assertEqual( - with_user_project.get_blob(blob.name, generation=gen0).generation, gen0 - ) - - try: - # Exercise 'objects.get' (metadata) w/ userProject. - self.assertTrue(blob.exists()) - blob.reload() - - # Exercise 'objects.get' (media) w/ userProject. - self.assertEqual(blob0.download_as_bytes(), file_contents) - self.assertEqual(blob1.download_as_bytes(), b"gen1") - - # Exercise 'objects.patch' w/ userProject. - blob0.content_language = "en" - blob0.patch() - self.assertEqual(blob0.content_language, "en") - self.assertIsNone(blob1.content_language) - - # Exercise 'objects.update' w/ userProject. - metadata = {"foo": "Foo", "bar": "Bar"} - blob0.metadata = metadata - blob0.update() - self.assertEqual(blob0.metadata, metadata) - self.assertIsNone(blob1.metadata) - finally: - # Exercise 'objects.delete' (metadata) w/ userProject. - blobs = Config.CLIENT.list_blobs( - with_user_project, prefix=blob.name, versions=True - ) - self.assertEqual([each.generation for each in blobs], [gen0, gen1]) - - blob0.delete() - blobs = Config.CLIENT.list_blobs( - with_user_project, prefix=blob.name, versions=True - ) - self.assertEqual([each.generation for each in blobs], [gen1]) - - blob1.delete() - - def test_crud_blob_w_generation_match(self): - WRONG_GENERATION_NUMBER = 6 - WRONG_METAGENERATION_NUMBER = 9 - - bucket = Config.CLIENT.bucket(self.bucket.name) - blob = bucket.blob("SmallFile") - - file_data = self.FILES["simple"] - with open(file_data["path"], mode="rb") as to_read: - file_contents = to_read.read() - - blob.upload_from_filename(file_data["path"]) - gen0 = blob.generation - - # Upload a second generation of the blob - blob.upload_from_string(b"gen1") - gen1 = blob.generation - - blob0 = bucket.blob("SmallFile", generation=gen0) - blob1 = bucket.blob("SmallFile", generation=gen1) - - try: - # Exercise 'objects.get' (metadata) w/ generation match. - with self.assertRaises(google.api_core.exceptions.PreconditionFailed): - blob.exists(if_generation_match=WRONG_GENERATION_NUMBER) - - self.assertTrue(blob.exists(if_generation_match=gen1)) - - with self.assertRaises(google.api_core.exceptions.PreconditionFailed): - blob.reload(if_metageneration_match=WRONG_METAGENERATION_NUMBER) - - blob.reload(if_generation_match=gen1) - - # Exercise 'objects.get' (media) w/ generation match. 
- self.assertEqual( - blob0.download_as_bytes(if_generation_match=gen0), file_contents - ) - self.assertEqual( - blob1.download_as_bytes(if_generation_not_match=gen0), b"gen1" - ) - - # Exercise 'objects.patch' w/ generation match. - blob0.content_language = "en" - blob0.patch(if_generation_match=gen0) - - self.assertEqual(blob0.content_language, "en") - self.assertIsNone(blob1.content_language) - - # Exercise 'objects.update' w/ generation match. - metadata = {"foo": "Foo", "bar": "Bar"} - blob0.metadata = metadata - blob0.update(if_generation_match=gen0) - - self.assertEqual(blob0.metadata, metadata) - self.assertIsNone(blob1.metadata) - finally: - # Exercise 'objects.delete' (metadata) w/ generation match. - with self.assertRaises(google.api_core.exceptions.PreconditionFailed): - blob0.delete(if_metageneration_match=WRONG_METAGENERATION_NUMBER) - - blob0.delete(if_generation_match=gen0) - blob1.delete(if_metageneration_not_match=WRONG_METAGENERATION_NUMBER) - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_blob_acl_w_user_project(self): - with_user_project = Config.CLIENT.bucket( - self.bucket.name, user_project=USER_PROJECT - ) - blob = with_user_project.blob("SmallFile") - - file_data = self.FILES["simple"] - - blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(blob) - - # Exercise bucket ACL w/ userProject - acl = blob.acl - acl.reload() - acl.all().grant_read() - acl.save() - self.assertIn("READER", acl.all().get_roles()) - del acl.entities["allUsers"] - acl.save() - self.assertFalse(acl.has_entity("allUsers")) - - def test_upload_blob_acl(self): - control = self.bucket.blob("logo") - control_data = self.FILES["logo"] - - blob = self.bucket.blob("SmallFile") - file_data = self.FILES["simple"] - - try: - control.upload_from_filename(control_data["path"]) - blob.upload_from_filename(file_data["path"], predefined_acl="publicRead") - finally: - self.case_blobs_to_delete.append(blob) - self.case_blobs_to_delete.append(control) - - control_acl = control.acl - self.assertNotIn("READER", control_acl.all().get_roles()) - acl = blob.acl - self.assertIn("READER", acl.all().get_roles()) - acl.all().revoke_read() - self.assertSequenceEqual(acl.all().get_roles(), set([])) - self.assertEqual(control_acl.all().get_roles(), acl.all().get_roles()) - - def test_write_metadata(self): - filename = self.FILES["logo"]["path"] - blob_name = os.path.basename(filename) - - blob = storage.Blob(blob_name, bucket=self.bucket) - blob.upload_from_filename(filename) - self.case_blobs_to_delete.append(blob) - - # NOTE: This should not be necessary. We should be able to pass - # it in to upload_file and also to upload_from_string. - blob.content_type = "image/png" - self.assertEqual(blob.content_type, "image/png") - - metadata = {"foo": "Foo", "bar": "Bar"} - blob.metadata = metadata - blob.patch() - blob.reload() - self.assertEqual(blob.metadata, metadata) - - # Ensure that metadata keys can be deleted by setting equal to None. - new_metadata = {"foo": "Foo", "bar": None} - blob.metadata = new_metadata - blob.patch() - blob.reload() - self.assertEqual(blob.metadata, {"foo": "Foo"}) - - def test_direct_write_and_read_into_file(self): - blob = self.bucket.blob("MyBuffer") - file_contents = b"Hello World" - blob.upload_from_string(file_contents) - self.case_blobs_to_delete.append(blob) - - same_blob = self.bucket.blob("MyBuffer") - same_blob.reload() # Initialize properties. 
- - with tempfile.NamedTemporaryFile() as temp_f: - - with open(temp_f.name, "wb") as file_obj: - Config.CLIENT.download_blob_to_file(same_blob, file_obj) - - with open(temp_f.name, "rb") as file_obj: - stored_contents = file_obj.read() - - self.assertEqual(file_contents, stored_contents) - - def test_download_w_generation_match(self): - WRONG_GENERATION_NUMBER = 6 - - blob = self.bucket.blob("MyBuffer") - file_contents = b"Hello World" - blob.upload_from_string(file_contents) - self.case_blobs_to_delete.append(blob) - - same_blob = self.bucket.blob("MyBuffer") - same_blob.reload() # Initialize properties. - - with tempfile.NamedTemporaryFile() as temp_f: - - with open(temp_f.name, "wb") as file_obj: - with self.assertRaises(google.api_core.exceptions.PreconditionFailed): - Config.CLIENT.download_blob_to_file( - same_blob, file_obj, if_generation_match=WRONG_GENERATION_NUMBER - ) - - Config.CLIENT.download_blob_to_file( - same_blob, - file_obj, - if_generation_match=blob.generation, - if_metageneration_match=blob.metageneration, - ) - - with open(temp_f.name, "rb") as file_obj: - stored_contents = file_obj.read() - - self.assertEqual(file_contents, stored_contents) - - def test_download_w_failed_crc32c_checksum(self): - blob = self.bucket.blob("FailedChecksumBlob") - file_contents = b"Hello World" - blob.upload_from_string(file_contents) - self.case_blobs_to_delete.append(blob) - - with tempfile.NamedTemporaryFile() as temp_f: - # Intercept the digest processing at the last stage and replace it with garbage. - # This is done with a patch to monkey-patch the resumable media library's checksum - # processing; it does not mock a remote interface like a unit test would. The - # remote API is still exercised. - with mock.patch( - "google.resumable_media._helpers.prepare_checksum_digest", - return_value="FFFFFF==", - ): - with self.assertRaises(resumable_media.DataCorruption): - blob.download_to_filename(temp_f.name, checksum="crc32c") - - # Confirm the file was deleted on failure - self.assertFalse(os.path.isfile(temp_f.name)) - - # Now download with checksumming turned off - blob.download_to_filename(temp_f.name, checksum=None) - - with open(temp_f.name, "rb") as file_obj: - stored_contents = file_obj.read() - - self.assertEqual(file_contents, stored_contents) - - def test_copy_existing_file(self): - filename = self.FILES["logo"]["path"] - blob = storage.Blob("CloudLogo", bucket=self.bucket) - blob.upload_from_filename(filename) - self.case_blobs_to_delete.append(blob) - - new_blob = retry_bad_copy(self.bucket.copy_blob)( - blob, self.bucket, "CloudLogoCopy" - ) - self.case_blobs_to_delete.append(new_blob) - - base_contents = blob.download_as_bytes() - copied_contents = new_blob.download_as_bytes() - self.assertEqual(base_contents, copied_contents) - - def test_download_blob_w_uri(self): - blob = self.bucket.blob("MyBuffer") - file_contents = b"Hello World" - blob.upload_from_string(file_contents) - self.case_blobs_to_delete.append(blob) - - with tempfile.NamedTemporaryFile() as temp_f: - - with open(temp_f.name, "wb") as file_obj: - Config.CLIENT.download_blob_to_file( - "gs://" + self.bucket.name + "/MyBuffer", file_obj - ) - - with open(temp_f.name, "rb") as file_obj: - stored_contents = file_obj.read() - - self.assertEqual(file_contents, stored_contents) - - def test_download_blob_as_text(self): - blob = self.bucket.blob("MyBuffer") - file_contents = "Hello World" - blob.upload_from_string(file_contents) - self.case_blobs_to_delete.append(blob) - - stored_contents = blob.download_as_text() 
- self.assertEqual(file_contents, stored_contents) - - def test_upload_gzip_encoded_download_raw(self): - payload = b"DEADBEEF" * 1000 - raw_stream = io.BytesIO() - with gzip.GzipFile(fileobj=raw_stream, mode="wb") as gzip_stream: - gzip_stream.write(payload) - zipped = raw_stream.getvalue() - - blob = self.bucket.blob("test_gzipped.gz") - blob.content_encoding = "gzip" - blob.upload_from_file(raw_stream, rewind=True) - - expanded = blob.download_as_bytes() - self.assertEqual(expanded, payload) - - raw = blob.download_as_bytes(raw_download=True) - self.assertEqual(raw, zipped) - - def test_resumable_upload_with_generation_match(self): - blob = self.bucket.blob("LargeFile") - - # uploading the file - file_data = self.FILES["big"] - with open(file_data["path"], "rb") as file_obj: - blob.upload_from_file(file_obj) - self.case_blobs_to_delete.append(blob) - - # reuploading with correct generations numbers - with open(file_data["path"], "rb") as file_obj: - blob.upload_from_file( - file_obj, - if_generation_match=blob.generation, - if_metageneration_match=blob.metageneration, - ) - - # reuploading with generations numbers that doesn't match original - with self.assertRaises(google.api_core.exceptions.PreconditionFailed): - with open(file_data["path"], "rb") as file_obj: - blob.upload_from_file(file_obj, if_generation_match=3) - - with self.assertRaises(google.api_core.exceptions.PreconditionFailed): - with open(file_data["path"], "rb") as file_obj: - blob.upload_from_file(file_obj, if_metageneration_match=3) - - def test_upload_blob_owner(self): - blob = self.bucket.blob("MyBuffer") - file_contents = b"Hello World" - blob.upload_from_string(file_contents) - self.case_blobs_to_delete.append(blob) - - same_blob = self.bucket.blob("MyBuffer") - same_blob.reload(projection="full") # Initialize properties. - user_email = Config.CLIENT._credentials.service_account_email - owner = same_blob.owner - self.assertIn(user_email, owner["entity"]) - - def test_upload_blob_custom_time(self): - blob = self.bucket.blob("CustomTimeBlob") - file_contents = b"Hello World" - current_time = datetime.datetime.now() - blob.custom_time = current_time - blob.upload_from_string(file_contents) - self.case_blobs_to_delete.append(blob) - - same_blob = self.bucket.blob("CustomTimeBlob") - same_blob.reload(projection="full") - custom_time = same_blob.custom_time.replace(tzinfo=None) - self.assertEqual(custom_time, current_time) - - def test_blob_custom_time_no_micros(self): - # Test that timestamps without microseconds are treated correctly by - # custom_time encoding/decoding. 
- blob = self.bucket.blob("CustomTimeNoMicrosBlob") - file_contents = b"Hello World" - time_without_micros = datetime.datetime(2021, 2, 10, 12, 30) - blob.custom_time = time_without_micros - blob.upload_from_string(file_contents) - self.case_blobs_to_delete.append(blob) - - same_blob = self.bucket.blob(("CustomTimeNoMicrosBlob")) - same_blob.reload(projection="full") - custom_time = same_blob.custom_time.replace(tzinfo=None) - self.assertEqual(custom_time, time_without_micros) - - def test_blob_crc32_md5_hash(self): - blob = self.bucket.blob("MyBuffer") - file_contents = b"Hello World" - blob.upload_from_string(file_contents) - self.case_blobs_to_delete.append(blob) - - download_blob = self.bucket.blob("MyBuffer") - - self.assertEqual(download_blob.download_as_string(), file_contents) - self.assertEqual(download_blob.crc32c, blob.crc32c) - self.assertEqual(download_blob.md5_hash, blob.md5_hash) - - def test_blobwriter_and_blobreader(self): - blob = self.bucket.blob("LargeFile") - - # Test BlobWriter works. - file_data = self.FILES["big"] - with open(file_data["path"], "rb") as file_obj: - with blob.open("wb", chunk_size=256 * 1024) as writer: - writer.write(file_obj.read(100)) - writer.write(file_obj.read(256 * 1024)) - writer.write(file_obj.read()) - self.case_blobs_to_delete.append(blob) - - blob.reload() - md5_hash = blob.md5_hash - if not isinstance(md5_hash, six.binary_type): - md5_hash = md5_hash.encode("utf-8") - self.assertEqual(md5_hash, file_data["hash"]) - - # Test BlobReader read and seek behave identically to filesystem file. - with open(file_data["path"], "rb") as file_obj: - with blob.open("rb", chunk_size=256 * 1024) as reader: - self.assertEqual(file_obj.read(100), reader.read(100)) - self.assertEqual(file_obj.read(256 * 1024), reader.read(256 * 1024)) - reader.seek(20) - file_obj.seek(20) - self.assertEqual( - file_obj.read(256 * 1024 * 2), reader.read(256 * 1024 * 2) - ) - self.assertEqual(file_obj.read(), reader.read()) - # End of file reached; further reads should be blank but not - # raise an error. - self.assertEqual(b"", reader.read()) - - def test_blobwriter_and_blobreader_text_mode(self): - blob = self.bucket.blob("MultibyteTextFile") - - # Construct a multibyte text_data sample file. - base_multibyte_text_string = u"abcde あいうえお line: " - text_data = "\n".join([base_multibyte_text_string + str(x) for x in range(100)]) - - # Test text BlobWriter works. - with blob.open("wt") as writer: - writer.write(text_data[:100]) - writer.write(text_data[100:]) - self.case_blobs_to_delete.append(blob) - - # Test text BlobReader read and seek to 0. Seeking to an non-0 byte on a - # multibyte text stream is not safe in Python but the API expects - # seek() to work regadless. - with blob.open("rt") as reader: - # This should produce 100 characters, not 100 bytes. - self.assertEqual(text_data[:100], reader.read(100)) - self.assertEqual(0, reader.seek(0)) - self.assertEqual(text_data, reader.read()) - - -class TestUnicode(TestStorageFiles): - def test_fetch_object_and_check_content(self): - # Historical note: This test when originally written accessed public - # files with Unicode names. These files are no longer available, so it - # was rewritten to upload them first. 
- - # Normalization form C: a single character for e-acute; - # URL should end with Cafe%CC%81 - # Normalization Form D: an ASCII e followed by U+0301 combining - # character; URL should end with Caf%C3%A9 - test_data = { - u"Caf\u00e9": b"Normalization Form C", - u"Cafe\u0301": b"Normalization Form D", - } - - for blob_name, file_contents in test_data.items(): - blob = self.bucket.blob(blob_name) - blob.upload_from_string(file_contents) - - for blob_name, file_contents in test_data.items(): - blob = self.bucket.blob(blob_name) - self.assertEqual(blob.download_as_bytes(), file_contents) - self.assertEqual(blob.name, blob_name) - - -class TestStorageListFiles(TestStorageFiles): - - FILENAMES = ("CloudLogo1", "CloudLogo2", "CloudLogo3", "CloudLogo4") - - @classmethod - def setUpClass(cls): - super(TestStorageListFiles, cls).setUpClass() - # Make sure bucket empty before beginning. - _empty_bucket(Config.CLIENT, cls.bucket) - - logo_path = cls.FILES["logo"]["path"] - blob = storage.Blob(cls.FILENAMES[0], bucket=cls.bucket) - blob.upload_from_filename(logo_path) - cls.suite_blobs_to_delete = [blob] - - # Copy main blob onto remaining in FILENAMES. - for filename in cls.FILENAMES[1:]: - new_blob = retry_bad_copy(cls.bucket.copy_blob)(blob, cls.bucket, filename) - cls.suite_blobs_to_delete.append(new_blob) - - @classmethod - def tearDownClass(cls): - errors = (exceptions.TooManyRequests, exceptions.ServiceUnavailable) - retry = RetryErrors(errors, max_tries=6) - for blob in cls.suite_blobs_to_delete: - retry(blob.delete)() - - @RetryErrors(unittest.TestCase.failureException) - def test_list_files(self): - all_blobs = list(Config.CLIENT.list_blobs(self.bucket)) - self.assertEqual( - sorted(blob.name for blob in all_blobs), sorted(self.FILENAMES) - ) - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - @RetryErrors(unittest.TestCase.failureException) - def test_list_files_with_user_project(self): - with_user_project = Config.CLIENT.bucket( - self.bucket.name, user_project=USER_PROJECT - ) - all_blobs = list(Config.CLIENT.list_blobs(with_user_project)) - self.assertEqual( - sorted(blob.name for blob in all_blobs), sorted(self.FILENAMES) - ) - - @RetryErrors(unittest.TestCase.failureException) - def test_paginate_files(self): - truncation_size = 1 - count = len(self.FILENAMES) - truncation_size - iterator = Config.CLIENT.list_blobs(self.bucket, max_results=count) - page_iter = iterator.pages - - page1 = six.next(page_iter) - blobs = list(page1) - self.assertEqual(len(blobs), count) - self.assertIsNotNone(iterator.next_page_token) - # Technically the iterator is exhausted. - self.assertEqual(iterator.num_results, iterator.max_results) - # But we modify the iterator to continue paging after - # artificially stopping after ``count`` items. 
- iterator.max_results = None - - page2 = six.next(page_iter) - last_blobs = list(page2) - self.assertEqual(len(last_blobs), truncation_size) - - @RetryErrors(unittest.TestCase.failureException) - def test_paginate_files_with_offset(self): - truncation_size = 1 - inclusive_start_offset = self.FILENAMES[1] - exclusive_end_offset = self.FILENAMES[-1] - desired_files = self.FILENAMES[1:-1] - count = len(desired_files) - truncation_size - iterator = Config.CLIENT.list_blobs( - self.bucket, - max_results=count, - start_offset=inclusive_start_offset, - end_offset=exclusive_end_offset, - ) - page_iter = iterator.pages - - page1 = six.next(page_iter) - blobs = list(page1) - self.assertEqual(len(blobs), count) - self.assertEqual(blobs[0].name, desired_files[0]) - self.assertIsNotNone(iterator.next_page_token) - # Technically the iterator is exhausted. - self.assertEqual(iterator.num_results, iterator.max_results) - # But we modify the iterator to continue paging after - # artificially stopping after ``count`` items. - iterator.max_results = None - - page2 = six.next(page_iter) - last_blobs = list(page2) - self.assertEqual(len(last_blobs), truncation_size) - self.assertEqual(last_blobs[-1].name, desired_files[-1]) - - -class TestStoragePseudoHierarchy(TestStorageFiles): - - FILENAMES = ( - "file01.txt", - "parent/", - "parent/file11.txt", - "parent/child/file21.txt", - "parent/child/file22.txt", - "parent/child/grand/file31.txt", - "parent/child/other/file32.txt", - ) - - @classmethod - def setUpClass(cls): - super(TestStoragePseudoHierarchy, cls).setUpClass() - # Make sure bucket empty before beginning. - _empty_bucket(Config.CLIENT, cls.bucket) - - cls.suite_blobs_to_delete = [] - simple_path = cls.FILES["simple"]["path"] - for filename in cls.FILENAMES: - blob = storage.Blob(filename, bucket=cls.bucket) - blob.upload_from_filename(simple_path) - cls.suite_blobs_to_delete.append(blob) - - @classmethod - def tearDownClass(cls): - errors = (exceptions.TooManyRequests, exceptions.ServiceUnavailable) - retry = RetryErrors(errors, max_tries=6) - for blob in cls.suite_blobs_to_delete: - retry(blob.delete)() - - @RetryErrors(unittest.TestCase.failureException) - def test_blob_get_w_delimiter(self): - for filename in self.FILENAMES: - blob = self.bucket.blob(filename) - self.assertTrue(blob.exists(), filename) - - @RetryErrors(unittest.TestCase.failureException) - def test_root_level_w_delimiter(self): - iterator = Config.CLIENT.list_blobs(self.bucket, delimiter="/") - page = six.next(iterator.pages) - blobs = list(page) - self.assertEqual([blob.name for blob in blobs], ["file01.txt"]) - self.assertIsNone(iterator.next_page_token) - self.assertEqual(iterator.prefixes, set(["parent/"])) - - @RetryErrors(unittest.TestCase.failureException) - def test_first_level(self): - iterator = Config.CLIENT.list_blobs( - self.bucket, delimiter="/", prefix="parent/" - ) - page = six.next(iterator.pages) - blobs = list(page) - self.assertEqual( - [blob.name for blob in blobs], ["parent/", "parent/file11.txt"] - ) - self.assertIsNone(iterator.next_page_token) - self.assertEqual(iterator.prefixes, set(["parent/child/"])) - - @RetryErrors(unittest.TestCase.failureException) - def test_second_level(self): - expected_names = ["parent/child/file21.txt", "parent/child/file22.txt"] - - iterator = Config.CLIENT.list_blobs( - self.bucket, delimiter="/", prefix="parent/child/" - ) - page = six.next(iterator.pages) - blobs = list(page) - self.assertEqual([blob.name for blob in blobs], expected_names) - 
self.assertIsNone(iterator.next_page_token) - self.assertEqual( - iterator.prefixes, set(["parent/child/grand/", "parent/child/other/"]) - ) - - @RetryErrors(unittest.TestCase.failureException) - def test_third_level(self): - # Pseudo-hierarchy can be arbitrarily deep, subject to the limit - # of 1024 characters in the UTF-8 encoded name: - # https://cloud.google.com/storage/docs/bucketnaming#objectnames - # Exercise a layer deeper to illustrate this. - iterator = Config.CLIENT.list_blobs( - self.bucket, delimiter="/", prefix="parent/child/grand/" - ) - page = six.next(iterator.pages) - blobs = list(page) - self.assertEqual( - [blob.name for blob in blobs], ["parent/child/grand/file31.txt"] - ) - self.assertIsNone(iterator.next_page_token) - self.assertEqual(iterator.prefixes, set()) - - @RetryErrors(unittest.TestCase.failureException) - def test_include_trailing_delimiter(self): - iterator = Config.CLIENT.list_blobs( - self.bucket, delimiter="/", include_trailing_delimiter=True - ) - page = six.next(iterator.pages) - blobs = list(page) - self.assertEqual([blob.name for blob in blobs], ["file01.txt", "parent/"]) - self.assertIsNone(iterator.next_page_token) - self.assertEqual(iterator.prefixes, set(["parent/"])) - - -class TestStorageSignURLs(unittest.TestCase): - BLOB_CONTENT = b"This time for sure, Rocky!" - - @classmethod - def setUpClass(cls): - super(TestStorageSignURLs, cls).setUpClass() - if ( - type(Config.CLIENT._credentials) - is not google.oauth2.service_account.Credentials - ): - raise unittest.SkipTest( - "Signing tests requires a service account credential" - ) - - bucket_name = "gcp-signing" + unique_resource_id() - cls.bucket = retry_429_503(Config.CLIENT.create_bucket)(bucket_name) - cls.blob = cls.bucket.blob("README.txt") - cls.blob.upload_from_string(cls.BLOB_CONTENT) - - @classmethod - def tearDownClass(cls): - _empty_bucket(Config.CLIENT, cls.bucket) - errors = (exceptions.Conflict, exceptions.TooManyRequests) - retry = RetryErrors(errors, max_tries=6) - retry(cls.bucket.delete)(force=True) - - @staticmethod - def _morph_expiration(version, expiration): - if expiration is not None: - return expiration - - if version == "v2": - return int(time.time()) + 10 - - return 10 - - def _create_signed_list_blobs_url_helper( - self, version, expiration=None, method="GET" - ): - expiration = self._morph_expiration(version, expiration) - - signed_url = self.bucket.generate_signed_url( - expiration=expiration, method=method, client=Config.CLIENT, version=version - ) - - response = requests.get(signed_url) - self.assertEqual(response.status_code, 200) - - def test_create_signed_list_blobs_url_v2(self): - self._create_signed_list_blobs_url_helper(version="v2") - - def test_create_signed_list_blobs_url_v2_w_expiration(self): - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=10) - - self._create_signed_list_blobs_url_helper(expiration=now + delta, version="v2") - - def test_create_signed_list_blobs_url_v4(self): - self._create_signed_list_blobs_url_helper(version="v4") - - def test_create_signed_list_blobs_url_v4_w_expiration(self): - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=10) - self._create_signed_list_blobs_url_helper(expiration=now + delta, version="v4") - - def _create_signed_read_url_helper( - self, - blob_name="LogoToSign.jpg", - method="GET", - version="v2", - payload=None, - expiration=None, - encryption_key=None, - service_account_email=None, - access_token=None, - ): - expiration = 
self._morph_expiration(version, expiration) - - if payload is not None: - blob = self.bucket.blob(blob_name, encryption_key=encryption_key) - blob.upload_from_string(payload) - else: - blob = self.blob - - signed_url = blob.generate_signed_url( - expiration=expiration, - method=method, - client=Config.CLIENT, - version=version, - service_account_email=service_account_email, - access_token=access_token, - ) - - headers = {} - - if encryption_key is not None: - headers["x-goog-encryption-algorithm"] = "AES256" - encoded_key = base64.b64encode(encryption_key).decode("utf-8") - headers["x-goog-encryption-key"] = encoded_key - key_hash = hashlib.sha256(encryption_key).digest() - key_hash = base64.b64encode(key_hash).decode("utf-8") - headers["x-goog-encryption-key-sha256"] = key_hash - - response = requests.get(signed_url, headers=headers) - self.assertEqual(response.status_code, 200) - if payload is not None: - self.assertEqual(response.content, payload) - else: - self.assertEqual(response.content, self.BLOB_CONTENT) - - def test_create_signed_read_url_v2(self): - self._create_signed_read_url_helper() - - def test_create_signed_read_url_v4(self): - self._create_signed_read_url_helper(version="v4") - - def test_create_signed_read_url_v2_w_expiration(self): - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=10) - - self._create_signed_read_url_helper(expiration=now + delta) - - def test_create_signed_read_url_v4_w_expiration(self): - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=10) - self._create_signed_read_url_helper(expiration=now + delta, version="v4") - - def test_create_signed_read_url_v2_lowercase_method(self): - self._create_signed_read_url_helper(method="get") - - def test_create_signed_read_url_v4_lowercase_method(self): - self._create_signed_read_url_helper(method="get", version="v4") - - def test_create_signed_read_url_v2_w_non_ascii_name(self): - self._create_signed_read_url_helper( - blob_name=u"Caf\xe9.txt", - payload=b"Test signed URL for blob w/ non-ASCII name", - ) - - def test_create_signed_read_url_v4_w_non_ascii_name(self): - self._create_signed_read_url_helper( - blob_name=u"Caf\xe9.txt", - payload=b"Test signed URL for blob w/ non-ASCII name", - version="v4", - ) - - def test_create_signed_read_url_v2_w_csek(self): - encryption_key = os.urandom(32) - self._create_signed_read_url_helper( - blob_name="v2-w-csek.txt", - payload=b"Test signed URL for blob w/ CSEK", - encryption_key=encryption_key, - ) - - def test_create_signed_read_url_v4_w_csek(self): - encryption_key = os.urandom(32) - self._create_signed_read_url_helper( - blob_name="v2-w-csek.txt", - payload=b"Test signed URL for blob w/ CSEK", - encryption_key=encryption_key, - version="v4", - ) - - def test_create_signed_read_url_v2_w_access_token(self): - client = iam_credentials_v1.IAMCredentialsClient() - service_account_email = Config.CLIENT._credentials.service_account_email - name = path_template.expand( - "projects/{project}/serviceAccounts/{service_account}", - project="-", - service_account=service_account_email, - ) - scope = [ - "https://www.googleapis.com/auth/devstorage.read_write", - "https://www.googleapis.com/auth/iam", - ] - response = client.generate_access_token(name=name, scope=scope) - self._create_signed_read_url_helper( - service_account_email=service_account_email, - access_token=response.access_token, - ) - - def test_create_signed_read_url_v4_w_access_token(self): - client = 
iam_credentials_v1.IAMCredentialsClient() - service_account_email = Config.CLIENT._credentials.service_account_email - name = path_template.expand( - "projects/{project}/serviceAccounts/{service_account}", - project="-", - service_account=service_account_email, - ) - scope = [ - "https://www.googleapis.com/auth/devstorage.read_write", - "https://www.googleapis.com/auth/iam", - ] - response = client.generate_access_token(name=name, scope=scope) - self._create_signed_read_url_helper( - version="v4", - service_account_email=service_account_email, - access_token=response.access_token, - ) - - def _create_signed_delete_url_helper(self, version="v2", expiration=None): - expiration = self._morph_expiration(version, expiration) - - blob = self.bucket.blob("DELETE_ME.txt") - blob.upload_from_string(b"DELETE ME!") - - signed_delete_url = blob.generate_signed_url( - expiration=expiration, - method="DELETE", - client=Config.CLIENT, - version=version, - ) - - response = requests.request("DELETE", signed_delete_url) - self.assertEqual(response.status_code, 204) - self.assertEqual(response.content, b"") - - self.assertFalse(blob.exists()) - - def test_create_signed_delete_url_v2(self): - self._create_signed_delete_url_helper() - - def test_create_signed_delete_url_v4(self): - self._create_signed_delete_url_helper(version="v4") - - def _signed_resumable_upload_url_helper(self, version="v2", expiration=None): - expiration = self._morph_expiration(version, expiration) - blob = self.bucket.blob("cruddy.txt") - payload = b"DEADBEEF" - - # Initiate the upload using a signed URL. - signed_resumable_upload_url = blob.generate_signed_url( - expiration=expiration, - method="RESUMABLE", - client=Config.CLIENT, - version=version, - ) - - post_headers = {"x-goog-resumable": "start"} - post_response = requests.post(signed_resumable_upload_url, headers=post_headers) - self.assertEqual(post_response.status_code, 201) - - # Finish uploading the body. - location = post_response.headers["Location"] - put_headers = {"content-length": str(len(payload))} - put_response = requests.put(location, headers=put_headers, data=payload) - self.assertEqual(put_response.status_code, 200) - - # Download using a signed URL and verify. - signed_download_url = blob.generate_signed_url( - expiration=expiration, method="GET", client=Config.CLIENT, version=version - ) - - get_response = requests.get(signed_download_url) - self.assertEqual(get_response.status_code, 200) - self.assertEqual(get_response.content, payload) - - # Finally, delete the blob using a signed URL. 
- signed_delete_url = blob.generate_signed_url( - expiration=expiration, - method="DELETE", - client=Config.CLIENT, - version=version, - ) - - delete_response = requests.delete(signed_delete_url) - self.assertEqual(delete_response.status_code, 204) - - def test_signed_resumable_upload_url_v2(self): - self._signed_resumable_upload_url_helper(version="v2") - - def test_signed_resumable_upload_url_v4(self): - self._signed_resumable_upload_url_helper(version="v4") - - -class TestStorageCompose(TestStorageFiles): - - FILES = {} - - def test_compose_create_new_blob(self): - SOURCE_1 = b"AAA\n" - source_1 = self.bucket.blob("source-1") - source_1.upload_from_string(SOURCE_1) - self.case_blobs_to_delete.append(source_1) - - SOURCE_2 = b"BBB\n" - source_2 = self.bucket.blob("source-2") - source_2.upload_from_string(SOURCE_2) - self.case_blobs_to_delete.append(source_2) - - destination = self.bucket.blob("destination") - destination.content_type = "text/plain" - destination.compose([source_1, source_2]) - self.case_blobs_to_delete.append(destination) - - composed = destination.download_as_bytes() - self.assertEqual(composed, SOURCE_1 + SOURCE_2) - - def test_compose_create_new_blob_wo_content_type(self): - SOURCE_1 = b"AAA\n" - source_1 = self.bucket.blob("source-1") - source_1.upload_from_string(SOURCE_1) - self.case_blobs_to_delete.append(source_1) - - SOURCE_2 = b"BBB\n" - source_2 = self.bucket.blob("source-2") - source_2.upload_from_string(SOURCE_2) - self.case_blobs_to_delete.append(source_2) - - destination = self.bucket.blob("destination") - - destination.compose([source_1, source_2]) - self.case_blobs_to_delete.append(destination) - - self.assertIsNone(destination.content_type) - composed = destination.download_as_bytes() - self.assertEqual(composed, SOURCE_1 + SOURCE_2) - - def test_compose_replace_existing_blob(self): - BEFORE = b"AAA\n" - original = self.bucket.blob("original") - original.content_type = "text/plain" - original.upload_from_string(BEFORE) - self.case_blobs_to_delete.append(original) - - TO_APPEND = b"BBB\n" - to_append = self.bucket.blob("to_append") - to_append.upload_from_string(TO_APPEND) - self.case_blobs_to_delete.append(to_append) - - original.compose([original, to_append]) - - composed = original.download_as_bytes() - self.assertEqual(composed, BEFORE + TO_APPEND) - - def test_compose_with_generation_match_list(self): - BEFORE = b"AAA\n" - original = self.bucket.blob("original") - original.content_type = "text/plain" - original.upload_from_string(BEFORE) - self.case_blobs_to_delete.append(original) - - TO_APPEND = b"BBB\n" - to_append = self.bucket.blob("to_append") - to_append.upload_from_string(TO_APPEND) - self.case_blobs_to_delete.append(to_append) - - with self.assertRaises(google.api_core.exceptions.PreconditionFailed): - original.compose( - [original, to_append], - if_generation_match=[6, 7], - if_metageneration_match=[8, 9], - ) - - original.compose( - [original, to_append], - if_generation_match=[original.generation, to_append.generation], - if_metageneration_match=[original.metageneration, to_append.metageneration], - ) - - composed = original.download_as_bytes() - self.assertEqual(composed, BEFORE + TO_APPEND) - - def test_compose_with_generation_match_long(self): - BEFORE = b"AAA\n" - original = self.bucket.blob("original") - original.content_type = "text/plain" - original.upload_from_string(BEFORE) - self.case_blobs_to_delete.append(original) - - TO_APPEND = b"BBB\n" - to_append = self.bucket.blob("to_append") - to_append.upload_from_string(TO_APPEND) - 
self.case_blobs_to_delete.append(to_append) - - with self.assertRaises(google.api_core.exceptions.PreconditionFailed): - original.compose([original, to_append], if_generation_match=0) - - original.compose([original, to_append], if_generation_match=original.generation) - - composed = original.download_as_bytes() - self.assertEqual(composed, BEFORE + TO_APPEND) - - def test_compose_with_source_generation_match(self): - BEFORE = b"AAA\n" - original = self.bucket.blob("original") - original.content_type = "text/plain" - original.upload_from_string(BEFORE) - self.case_blobs_to_delete.append(original) - - TO_APPEND = b"BBB\n" - to_append = self.bucket.blob("to_append") - to_append.upload_from_string(TO_APPEND) - self.case_blobs_to_delete.append(to_append) - - with self.assertRaises(google.api_core.exceptions.PreconditionFailed): - original.compose([original, to_append], if_source_generation_match=[6, 7]) - - original.compose( - [original, to_append], - if_source_generation_match=[original.generation, to_append.generation], - ) - - composed = original.download_as_bytes() - self.assertEqual(composed, BEFORE + TO_APPEND) - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_compose_with_user_project(self): - new_bucket_name = "compose-user-project" + unique_resource_id("-") - created = retry_429_503(Config.CLIENT.create_bucket)( - new_bucket_name, requester_pays=True - ) - try: - SOURCE_1 = b"AAA\n" - source_1 = created.blob("source-1") - source_1.upload_from_string(SOURCE_1) - - SOURCE_2 = b"BBB\n" - source_2 = created.blob("source-2") - source_2.upload_from_string(SOURCE_2) - - with_user_project = Config.CLIENT.bucket( - new_bucket_name, user_project=USER_PROJECT - ) - - destination = with_user_project.blob("destination") - destination.content_type = "text/plain" - destination.compose([source_1, source_2]) - - composed = destination.download_as_bytes() - self.assertEqual(composed, SOURCE_1 + SOURCE_2) - finally: - retry_429_harder(created.delete)(force=True) - - -class TestStorageRewrite(TestStorageFiles): - - FILENAMES = ("file01.txt",) - - def test_rewrite_create_new_blob_add_encryption_key(self): - file_data = self.FILES["simple"] - - source = self.bucket.blob("source") - source.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(source) - source_data = source.download_as_bytes() - - KEY = os.urandom(32) - dest = self.bucket.blob("dest", encryption_key=KEY) - token, rewritten, total = dest.rewrite(source) - self.case_blobs_to_delete.append(dest) - - self.assertEqual(token, None) - self.assertEqual(rewritten, len(source_data)) - self.assertEqual(total, len(source_data)) - - self.assertEqual(source.download_as_bytes(), dest.download_as_bytes()) - - def test_rewrite_rotate_encryption_key(self): - BLOB_NAME = "rotating-keys" - file_data = self.FILES["simple"] - - SOURCE_KEY = os.urandom(32) - source = self.bucket.blob(BLOB_NAME, encryption_key=SOURCE_KEY) - source.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(source) - source_data = source.download_as_bytes() - - DEST_KEY = os.urandom(32) - dest = self.bucket.blob(BLOB_NAME, encryption_key=DEST_KEY) - token, rewritten, total = dest.rewrite(source) - # Not adding 'dest' to 'self.case_blobs_to_delete': it is the - # same object as 'source'. 
- - self.assertIsNone(token) - self.assertEqual(rewritten, len(source_data)) - self.assertEqual(total, len(source_data)) - - self.assertEqual(dest.download_as_bytes(), source_data) - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_rewrite_add_key_with_user_project(self): - file_data = self.FILES["simple"] - new_bucket_name = "rewrite-key-up" + unique_resource_id("-") - created = retry_429_503(Config.CLIENT.create_bucket)( - new_bucket_name, requester_pays=True - ) - try: - with_user_project = Config.CLIENT.bucket( - new_bucket_name, user_project=USER_PROJECT - ) - - source = with_user_project.blob("source") - source.upload_from_filename(file_data["path"]) - source_data = source.download_as_bytes() - - KEY = os.urandom(32) - dest = with_user_project.blob("dest", encryption_key=KEY) - token, rewritten, total = dest.rewrite(source) - - self.assertEqual(token, None) - self.assertEqual(rewritten, len(source_data)) - self.assertEqual(total, len(source_data)) - - self.assertEqual(source.download_as_bytes(), dest.download_as_bytes()) - finally: - retry_429_harder(created.delete)(force=True) - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_rewrite_rotate_with_user_project(self): - BLOB_NAME = "rotating-keys" - file_data = self.FILES["simple"] - new_bucket_name = "rewrite-rotate-up" + unique_resource_id("-") - created = retry_429_503(Config.CLIENT.create_bucket)( - new_bucket_name, requester_pays=True - ) - try: - with_user_project = Config.CLIENT.bucket( - new_bucket_name, user_project=USER_PROJECT - ) - - SOURCE_KEY = os.urandom(32) - source = with_user_project.blob(BLOB_NAME, encryption_key=SOURCE_KEY) - source.upload_from_filename(file_data["path"]) - source_data = source.download_as_bytes() - - DEST_KEY = os.urandom(32) - dest = with_user_project.blob(BLOB_NAME, encryption_key=DEST_KEY) - token, rewritten, total = dest.rewrite(source) - - self.assertEqual(token, None) - self.assertEqual(rewritten, len(source_data)) - self.assertEqual(total, len(source_data)) - - self.assertEqual(dest.download_as_bytes(), source_data) - finally: - retry_429_harder(created.delete)(force=True) - - def test_rewrite_with_generation_match(self): - WRONG_GENERATION_NUMBER = 6 - BLOB_NAME = "generation-match" - - file_data = self.FILES["simple"] - new_bucket_name = "rewrite-generation-match" + unique_resource_id("-") - created = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - try: - bucket = Config.CLIENT.bucket(new_bucket_name) - - source = bucket.blob(BLOB_NAME) - source.upload_from_filename(file_data["path"]) - source_data = source.download_as_bytes() - - dest = bucket.blob(BLOB_NAME) - - with self.assertRaises(google.api_core.exceptions.PreconditionFailed): - token, rewritten, total = dest.rewrite( - source, if_generation_match=WRONG_GENERATION_NUMBER - ) - - token, rewritten, total = dest.rewrite( - source, - if_generation_match=dest.generation, - if_source_generation_match=source.generation, - if_source_metageneration_match=source.metageneration, - ) - self.assertEqual(token, None) - self.assertEqual(rewritten, len(source_data)) - self.assertEqual(total, len(source_data)) - self.assertEqual(dest.download_as_bytes(), source_data) - finally: - retry_429_harder(created.delete)(force=True) - - -class TestStorageUpdateStorageClass(TestStorageFiles): - def test_update_storage_class_small_file(self): - from google.cloud.storage import constants - - blob = self.bucket.blob("SmallFile") - - file_data = self.FILES["simple"] - 
blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(blob) - - blob.update_storage_class(constants.NEARLINE_STORAGE_CLASS) - blob.reload() - self.assertEqual(blob.storage_class, constants.NEARLINE_STORAGE_CLASS) - - blob.update_storage_class(constants.COLDLINE_STORAGE_CLASS) - blob.reload() - self.assertEqual(blob.storage_class, constants.COLDLINE_STORAGE_CLASS) - - def test_update_storage_class_large_file(self): - from google.cloud.storage import constants - - blob = self.bucket.blob("BigFile") - - file_data = self.FILES["big"] - blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(blob) - - blob.update_storage_class(constants.NEARLINE_STORAGE_CLASS) - blob.reload() - self.assertEqual(blob.storage_class, constants.NEARLINE_STORAGE_CLASS) - - blob.update_storage_class(constants.COLDLINE_STORAGE_CLASS) - blob.reload() - self.assertEqual(blob.storage_class, constants.COLDLINE_STORAGE_CLASS) - - -class TestStorageNotificationCRUD(unittest.TestCase): - - topic = None - TOPIC_NAME = "notification" + unique_resource_id("-") - CUSTOM_ATTRIBUTES = {"attr1": "value1", "attr2": "value2"} - BLOB_NAME_PREFIX = "blob-name-prefix/" - - @classmethod - def setUpClass(cls): - super(TestStorageNotificationCRUD, cls).setUpClass() - if Config.TESTING_MTLS: - # mTLS is only available for python-pubsub >= 2.2.0. However, the - # system test uses python-pubsub < 2.0, so we skip those tests. - # Note that python-pubsub >= 2.0 no longer supports python 2.7, so - # we can only upgrade it after python 2.7 system test is removed. - # Since python-pubsub >= 2.0 has a new set of api, the test code - # also needs to be updated. - raise unittest.SkipTest("Skip pubsub tests for mTLS testing") - - @property - def topic_path(self): - return "projects/{}/topics/{}".format(Config.CLIENT.project, self.TOPIC_NAME) - - def _initialize_topic(self): - try: - from google.cloud.pubsub_v1 import PublisherClient - except ImportError: - raise unittest.SkipTest("Cannot import pubsub") - self.publisher_client = PublisherClient() - retry_429(self.publisher_client.create_topic)(self.topic_path) - policy = self.publisher_client.get_iam_policy(self.topic_path) - binding = policy.bindings.add() - binding.role = "roles/pubsub.publisher" - binding.members.append( - "serviceAccount:{}".format(Config.CLIENT.get_service_account_email()) - ) - self.publisher_client.set_iam_policy(self.topic_path, policy) - - def setUp(self): - self.case_buckets_to_delete = [] - self._initialize_topic() - - def tearDown(self): - retry_429(self.publisher_client.delete_topic)(self.topic_path) - with Config.CLIENT.batch(): - for bucket_name in self.case_buckets_to_delete: - bucket = Config.CLIENT.bucket(bucket_name) - retry_429_harder(bucket.delete)() - - @staticmethod - def event_types(): - from google.cloud.storage.notification import ( - OBJECT_FINALIZE_EVENT_TYPE, - OBJECT_DELETE_EVENT_TYPE, - ) - - return [OBJECT_FINALIZE_EVENT_TYPE, OBJECT_DELETE_EVENT_TYPE] - - @staticmethod - def payload_format(): - from google.cloud.storage.notification import JSON_API_V1_PAYLOAD_FORMAT - - return JSON_API_V1_PAYLOAD_FORMAT - - def test_notification_minimal(self): - new_bucket_name = "notification-minimal" + unique_resource_id("-") - bucket = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - self.assertEqual(list(bucket.list_notifications()), []) - - notification = bucket.notification(self.TOPIC_NAME) - retry_429_503(notification.create)() - try: - 
self.assertTrue(notification.exists()) - self.assertIsNotNone(notification.notification_id) - notifications = list(bucket.list_notifications()) - self.assertEqual(len(notifications), 1) - self.assertEqual(notifications[0].topic_name, self.TOPIC_NAME) - finally: - notification.delete() - - def test_notification_explicit(self): - new_bucket_name = "notification-explicit" + unique_resource_id("-") - bucket = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - notification = bucket.notification( - topic_name=self.TOPIC_NAME, - custom_attributes=self.CUSTOM_ATTRIBUTES, - event_types=self.event_types(), - blob_name_prefix=self.BLOB_NAME_PREFIX, - payload_format=self.payload_format(), - ) - retry_429_503(notification.create)() - try: - self.assertTrue(notification.exists()) - self.assertIsNotNone(notification.notification_id) - self.assertEqual(notification.custom_attributes, self.CUSTOM_ATTRIBUTES) - self.assertEqual(notification.event_types, self.event_types()) - self.assertEqual(notification.blob_name_prefix, self.BLOB_NAME_PREFIX) - self.assertEqual(notification.payload_format, self.payload_format()) - - finally: - notification.delete() - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_notification_w_user_project(self): - new_bucket_name = "notification-minimal" + unique_resource_id("-") - retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name, requester_pays=True) - self.case_buckets_to_delete.append(new_bucket_name) - with_user_project = Config.CLIENT.bucket( - new_bucket_name, user_project=USER_PROJECT - ) - self.assertEqual(list(with_user_project.list_notifications()), []) - notification = with_user_project.notification(self.TOPIC_NAME) - retry_429(notification.create)() - try: - self.assertTrue(notification.exists()) - self.assertIsNotNone(notification.notification_id) - notifications = list(with_user_project.list_notifications()) - self.assertEqual(len(notifications), 1) - self.assertEqual(notifications[0].topic_name, self.TOPIC_NAME) - finally: - notification.delete() - - def test_get_notification(self): - new_bucket_name = "get-notification" + unique_resource_id("-") - bucket = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - - notification = bucket.notification( - topic_name=self.TOPIC_NAME, - custom_attributes=self.CUSTOM_ATTRIBUTES, - payload_format=self.payload_format(), - ) - retry_429_503(notification.create)() - try: - self.assertTrue(notification.exists()) - self.assertIsNotNone(notification.notification_id) - notification_id = notification.notification_id - notification = bucket.get_notification(notification_id) - self.assertEqual(notification.notification_id, notification_id) - self.assertEqual(notification.custom_attributes, self.CUSTOM_ATTRIBUTES) - self.assertEqual(notification.payload_format, self.payload_format()) - finally: - notification.delete() - - -class TestAnonymousClient(unittest.TestCase): - - PUBLIC_BUCKET = "gcp-public-data-landsat" - - @vpcsc_config.skip_if_inside_vpcsc - def test_access_to_public_bucket(self): - anonymous = storage.Client.create_anonymous_client() - bucket = anonymous.bucket(self.PUBLIC_BUCKET) - (blob,) = retry_429_503(anonymous.list_blobs)(bucket, max_results=1) - with tempfile.TemporaryFile() as stream: - retry_429_503(blob.download_to_file)(stream) - - -class TestKMSIntegration(TestStorageFiles): - - FILENAMES = ("file01.txt",) - - KEYRING_NAME = "gcs-test" - 
KEY_NAME = "gcs-test" - ALT_KEY_NAME = "gcs-test-alternate" - - def _kms_key_name(self, key_name=None): - if key_name is None: - key_name = self.KEY_NAME - - return ("projects/{}/" "locations/{}/" "keyRings/{}/" "cryptoKeys/{}").format( - Config.CLIENT.project, - self.bucket.location.lower(), - self.KEYRING_NAME, - key_name, - ) - - @classmethod - def setUpClass(cls): - super(TestKMSIntegration, cls).setUpClass() - if Config.TESTING_MTLS: - # mTLS is only available for python-kms >= 2.2.0. However, the - # system test uses python-kms < 2.0, so we skip those tests. - # Note that python-kms >= 2.0 no longer supports python 2.7, so - # we can only upgrade it after python 2.7 system test is removed. - # Since python-kms >= 2.0 has a new set of api, the test code - # also needs to be updated. - raise unittest.SkipTest("Skip kms tests for mTLS testing") - - _empty_bucket(Config.CLIENT, cls.bucket) - - def setUp(self): - super(TestKMSIntegration, self).setUp() - client = kms.KeyManagementServiceClient() - project = Config.CLIENT.project - location = self.bucket.location.lower() - keyring_name = self.KEYRING_NAME - purpose = kms.enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT - - # If the keyring doesn't exist create it. - keyring_path = client.key_ring_path(project, location, keyring_name) - - try: - client.get_key_ring(keyring_path) - except exceptions.NotFound: - parent = client.location_path(project, location) - client.create_key_ring(parent, keyring_name, {}) - - # Mark this service account as an owner of the new keyring - service_account = Config.CLIENT.get_service_account_email() - policy = { - "bindings": [ - { - "role": "roles/cloudkms.cryptoKeyEncrypterDecrypter", - "members": ["serviceAccount:" + service_account], - } - ] - } - client.set_iam_policy(keyring_path, policy) - - # Populate the keyring with the keys we use in the tests - key_names = [ - "gcs-test", - "gcs-test-alternate", - "explicit-kms-key-name", - "default-kms-key-name", - "override-default-kms-key-name", - "alt-default-kms-key-name", - ] - for key_name in key_names: - key_path = client.crypto_key_path(project, location, keyring_name, key_name) - try: - client.get_crypto_key(key_path) - except exceptions.NotFound: - key = {"purpose": purpose} - client.create_crypto_key(keyring_path, key_name, key) - - def test_blob_w_explicit_kms_key_name(self): - BLOB_NAME = "explicit-kms-key-name" - file_data = self.FILES["simple"] - kms_key_name = self._kms_key_name() - blob = self.bucket.blob(BLOB_NAME, kms_key_name=kms_key_name) - blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(blob) - with open(file_data["path"], "rb") as _file_data: - self.assertEqual(blob.download_as_bytes(), _file_data.read()) - # We don't know the current version of the key. 
- self.assertTrue(blob.kms_key_name.startswith(kms_key_name)) - - (listed,) = list(Config.CLIENT.list_blobs(self.bucket)) - self.assertTrue(listed.kms_key_name.startswith(kms_key_name)) - - @RetryErrors(unittest.TestCase.failureException) - def test_bucket_w_default_kms_key_name(self): - BLOB_NAME = "default-kms-key-name" - OVERRIDE_BLOB_NAME = "override-default-kms-key-name" - ALT_BLOB_NAME = "alt-default-kms-key-name" - CLEARTEXT_BLOB_NAME = "cleartext" - - file_data = self.FILES["simple"] - - with open(file_data["path"], "rb") as _file_data: - contents = _file_data.read() - - kms_key_name = self._kms_key_name() - self.bucket.default_kms_key_name = kms_key_name - self.bucket.patch() - self.assertEqual(self.bucket.default_kms_key_name, kms_key_name) - - defaulted_blob = self.bucket.blob(BLOB_NAME) - defaulted_blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(defaulted_blob) - - self.assertEqual(defaulted_blob.download_as_bytes(), contents) - # We don't know the current version of the key. - self.assertTrue(defaulted_blob.kms_key_name.startswith(kms_key_name)) - - alt_kms_key_name = self._kms_key_name(self.ALT_KEY_NAME) - - override_blob = self.bucket.blob( - OVERRIDE_BLOB_NAME, kms_key_name=alt_kms_key_name - ) - override_blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(override_blob) - - self.assertEqual(override_blob.download_as_bytes(), contents) - # We don't know the current version of the key. - self.assertTrue(override_blob.kms_key_name.startswith(alt_kms_key_name)) - - self.bucket.default_kms_key_name = alt_kms_key_name - self.bucket.patch() - - alt_blob = self.bucket.blob(ALT_BLOB_NAME) - alt_blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(alt_blob) - - self.assertEqual(alt_blob.download_as_bytes(), contents) - # We don't know the current version of the key. - self.assertTrue(alt_blob.kms_key_name.startswith(alt_kms_key_name)) - - self.bucket.default_kms_key_name = None - self.bucket.patch() - - cleartext_blob = self.bucket.blob(CLEARTEXT_BLOB_NAME) - cleartext_blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(cleartext_blob) - - self.assertEqual(cleartext_blob.download_as_bytes(), contents) - self.assertIsNone(cleartext_blob.kms_key_name) - - def test_rewrite_rotate_csek_to_cmek(self): - BLOB_NAME = "rotating-keys" - file_data = self.FILES["simple"] - - SOURCE_KEY = os.urandom(32) - source = self.bucket.blob(BLOB_NAME, encryption_key=SOURCE_KEY) - source.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(source) - source_data = source.download_as_bytes() - - kms_key_name = self._kms_key_name() - - # We can't verify it, but ideally we would check that the following - # URL was resolvable with our credentials - # KEY_URL = 'https://cloudkms.googleapis.com/v1/{}'.format( - # kms_key_name) - - dest = self.bucket.blob(BLOB_NAME, kms_key_name=kms_key_name) - token, rewritten, total = dest.rewrite(source) - - while token is not None: - token, rewritten, total = dest.rewrite(source, token=token) - - # Not adding 'dest' to 'self.case_blobs_to_delete': it is the - # same object as 'source'. 
- - self.assertIsNone(token) - self.assertEqual(rewritten, len(source_data)) - self.assertEqual(total, len(source_data)) - - self.assertEqual(dest.download_as_bytes(), source_data) - - def test_upload_new_blob_w_bucket_cmek_enabled(self): - blob_name = "test-blob" - payload = b"DEADBEEF" - alt_payload = b"NEWDEADBEEF" - - kms_key_name = self._kms_key_name() - self.bucket.default_kms_key_name = kms_key_name - self.bucket.patch() - self.assertEqual(self.bucket.default_kms_key_name, kms_key_name) - - blob = self.bucket.blob(blob_name) - blob.upload_from_string(payload) - retry_429_harder(blob.reload)() - # We don't know the current version of the key. - self.assertTrue(blob.kms_key_name.startswith(kms_key_name)) - - blob.upload_from_string(alt_payload, if_generation_match=blob.generation) - self.case_blobs_to_delete.append(blob) - - self.assertEqual(blob.download_as_bytes(), alt_payload) - - self.bucket.default_kms_key_name = None - retry_429_harder(self.bucket.patch)() - self.assertIsNone(self.bucket.default_kms_key_name) - - -class TestRetentionPolicy(unittest.TestCase): - def setUp(self): - self.case_buckets_to_delete = [] - self.case_blobs_to_delete = [] - - def tearDown(self): - # discard test blobs retention policy settings - for blob in self.case_blobs_to_delete: - blob.event_based_hold = False - blob.temporary_hold = False - blob.patch() - - for bucket_name in self.case_buckets_to_delete: - bucket = Config.CLIENT.bucket(bucket_name) - retry_429_harder(bucket.delete)(force=True) - - def test_bucket_w_retention_period(self): - import datetime - from google.api_core import exceptions - - period_secs = 10 - - new_bucket_name = "w-retention-period" + unique_resource_id("-") - bucket = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - - bucket.retention_period = period_secs - bucket.default_event_based_hold = False - bucket.patch() - - self.assertEqual(bucket.retention_period, period_secs) - self.assertIsInstance(bucket.retention_policy_effective_time, datetime.datetime) - self.assertFalse(bucket.default_event_based_hold) - self.assertFalse(bucket.retention_policy_locked) - - blob_name = "test-blob" - payload = b"DEADBEEF" - blob = bucket.blob(blob_name) - blob.upload_from_string(payload) - - self.case_blobs_to_delete.append(blob) - - other = bucket.get_blob(blob_name) - - self.assertFalse(other.event_based_hold) - self.assertFalse(other.temporary_hold) - self.assertIsInstance(other.retention_expiration_time, datetime.datetime) - - with self.assertRaises(exceptions.Forbidden): - other.delete() - - bucket.retention_period = None - bucket.patch() - - self.assertIsNone(bucket.retention_period) - self.assertIsNone(bucket.retention_policy_effective_time) - self.assertFalse(bucket.default_event_based_hold) - self.assertFalse(bucket.retention_policy_locked) - - other.reload() - - self.assertFalse(other.event_based_hold) - self.assertFalse(other.temporary_hold) - self.assertIsNone(other.retention_expiration_time) - - other.delete() - self.case_blobs_to_delete.pop() - - def test_bucket_w_default_event_based_hold(self): - from google.api_core import exceptions - - new_bucket_name = "w-def-ebh" + unique_resource_id("-") - self.assertRaises( - exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - bucket = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - - bucket.default_event_based_hold = True - bucket.patch() - - 
self.assertTrue(bucket.default_event_based_hold) - self.assertIsNone(bucket.retention_period) - self.assertIsNone(bucket.retention_policy_effective_time) - self.assertFalse(bucket.retention_policy_locked) - - blob_name = "test-blob" - payload = b"DEADBEEF" - blob = bucket.blob(blob_name) - blob.upload_from_string(payload) - - self.case_blobs_to_delete.append(blob) - - other = bucket.get_blob(blob_name) - - self.assertTrue(other.event_based_hold) - self.assertFalse(other.temporary_hold) - self.assertIsNone(other.retention_expiration_time) - - with self.assertRaises(exceptions.Forbidden): - other.delete() - - other.event_based_hold = False - other.patch() - other.delete() - - bucket.default_event_based_hold = False - bucket.patch() - - self.assertFalse(bucket.default_event_based_hold) - self.assertIsNone(bucket.retention_period) - self.assertIsNone(bucket.retention_policy_effective_time) - self.assertFalse(bucket.retention_policy_locked) - - blob.upload_from_string(payload) - - # https://github.com/googleapis/python-storage/issues/435 - if blob.event_based_hold: - retry_no_event_based_hold(blob.reload)() - - self.assertFalse(blob.event_based_hold) - self.assertFalse(blob.temporary_hold) - self.assertIsNone(blob.retention_expiration_time) - - blob.delete() - self.case_blobs_to_delete.pop() - - def test_blob_w_temporary_hold(self): - from google.api_core import exceptions - - new_bucket_name = "w-tmp-hold" + unique_resource_id("-") - self.assertRaises( - exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - bucket = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - - blob_name = "test-blob" - payload = b"DEADBEEF" - blob = bucket.blob(blob_name) - blob.upload_from_string(payload) - - self.case_blobs_to_delete.append(blob) - - other = bucket.get_blob(blob_name) - other.temporary_hold = True - other.patch() - - self.assertTrue(other.temporary_hold) - self.assertFalse(other.event_based_hold) - self.assertIsNone(other.retention_expiration_time) - - with self.assertRaises(exceptions.Forbidden): - other.delete() - - other.temporary_hold = False - other.patch() - - other.delete() - self.case_blobs_to_delete.pop() - - def test_bucket_lock_retention_policy(self): - import datetime - from google.api_core import exceptions - - period_secs = 10 - - new_bucket_name = "loc-ret-policy" + unique_resource_id("-") - self.assertRaises( - exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - bucket = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - - bucket.retention_period = period_secs - bucket.patch() - - self.assertEqual(bucket.retention_period, period_secs) - self.assertIsInstance(bucket.retention_policy_effective_time, datetime.datetime) - self.assertFalse(bucket.default_event_based_hold) - self.assertFalse(bucket.retention_policy_locked) - - bucket.lock_retention_policy() - - bucket.reload() - self.assertTrue(bucket.retention_policy_locked) - - bucket.retention_period = None - with self.assertRaises(exceptions.Forbidden): - bucket.patch() - - -class TestIAMConfiguration(unittest.TestCase): - def setUp(self): - self.case_buckets_to_delete = [] - - def tearDown(self): - for bucket_name in self.case_buckets_to_delete: - bucket = Config.CLIENT.bucket(bucket_name) - retry_429_harder(bucket.delete)(force=True) - - def test_new_bucket_w_ubla(self): - new_bucket_name = "new-w-ubla" + unique_resource_id("-") - self.assertRaises( 
- exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - bucket = Config.CLIENT.bucket(new_bucket_name) - bucket.iam_configuration.uniform_bucket_level_access_enabled = True - retry_429_503(bucket.create)() - self.case_buckets_to_delete.append(new_bucket_name) - - bucket_acl = bucket.acl - with self.assertRaises(exceptions.BadRequest): - bucket_acl.reload() - - bucket_acl.loaded = True # Fake that we somehow loaded the ACL - bucket_acl.all().grant_read() - with self.assertRaises(exceptions.BadRequest): - bucket_acl.save() - - blob_name = "my-blob.txt" - blob = bucket.blob(blob_name) - payload = b"DEADBEEF" - blob.upload_from_string(payload) - - found = bucket.get_blob(blob_name) - self.assertEqual(found.download_as_bytes(), payload) - - blob_acl = blob.acl - with self.assertRaises(exceptions.BadRequest): - blob_acl.reload() - - blob_acl.loaded = True # Fake that we somehow loaded the ACL - blob_acl.all().grant_read() - with self.assertRaises(exceptions.BadRequest): - blob_acl.save() - - def test_ubla_set_unset_preserves_acls(self): - new_bucket_name = "ubla-acls" + unique_resource_id("-") - self.assertRaises( - exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - bucket = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - - blob_name = "my-blob.txt" - blob = bucket.blob(blob_name) - payload = b"DEADBEEF" - blob.upload_from_string(payload) - - # Preserve ACLs before setting UBLA - bucket_acl_before = list(bucket.acl) - blob_acl_before = list(bucket.acl) - - # Set UBLA - bucket.iam_configuration.uniform_bucket_level_access_enabled = True - bucket.patch() - - self.assertTrue(bucket.iam_configuration.uniform_bucket_level_access_enabled) - - # While UBLA is set, cannot get / set ACLs - with self.assertRaises(exceptions.BadRequest): - bucket.acl.reload() - - # Clear UBLA - bucket.iam_configuration.uniform_bucket_level_access_enabled = False - bucket.patch() - - # Query ACLs after clearing UBLA - bucket.acl.reload() - bucket_acl_after = list(bucket.acl) - blob.acl.reload() - blob_acl_after = list(bucket.acl) - - self.assertEqual(bucket_acl_before, bucket_acl_after) - self.assertEqual(blob_acl_before, blob_acl_after) - - -class TestV4POSTPolicies(unittest.TestCase): - @classmethod - def setUpClass(cls): - super(TestV4POSTPolicies, cls).setUpClass() - if ( - type(Config.CLIENT._credentials) - is not google.oauth2.service_account.Credentials - ): - # mTLS only works for user credentials, it doesn't work for - # service account credentials. 
-            raise unittest.SkipTest("These tests require a service account credential")
-
-    def setUp(self):
-        self.case_buckets_to_delete = []
-
-    def tearDown(self):
-        for bucket_name in self.case_buckets_to_delete:
-            bucket = Config.CLIENT.bucket(bucket_name)
-            retry_429_harder(bucket.delete)(force=True)
-
-    def test_get_signed_policy_v4(self):
-        bucket_name = "post_policy" + unique_resource_id("-")
-        self.assertRaises(exceptions.NotFound, Config.CLIENT.get_bucket, bucket_name)
-        retry_429_503(Config.CLIENT.create_bucket)(bucket_name)
-        self.case_buckets_to_delete.append(bucket_name)
-
-        blob_name = "post_policy_obj.txt"
-        with open(blob_name, "w") as f:
-            f.write("DEADBEEF")
-
-        policy = Config.CLIENT.generate_signed_post_policy_v4(
-            bucket_name,
-            blob_name,
-            conditions=[
-                {"bucket": bucket_name},
-                ["starts-with", "$Content-Type", "text/pla"],
-            ],
-            expiration=datetime.datetime.utcnow() + datetime.timedelta(hours=1),
-            fields={"content-type": "text/plain"},
-        )
-        with open(blob_name, "r") as f:
-            files = {"file": (blob_name, f)}
-            response = requests.post(policy["url"], data=policy["fields"], files=files)
-
-        os.remove(blob_name)
-        self.assertEqual(response.status_code, 204)
-
-    def test_get_signed_policy_v4_invalid_field(self):
-        bucket_name = "post_policy" + unique_resource_id("-")
-        self.assertRaises(exceptions.NotFound, Config.CLIENT.get_bucket, bucket_name)
-        retry_429_503(Config.CLIENT.create_bucket)(bucket_name)
-        self.case_buckets_to_delete.append(bucket_name)
-
-        blob_name = "post_policy_obj.txt"
-        with open(blob_name, "w") as f:
-            f.write("DEADBEEF")
-
-        policy = Config.CLIENT.generate_signed_post_policy_v4(
-            bucket_name,
-            blob_name,
-            conditions=[
-                {"bucket": bucket_name},
-                ["starts-with", "$Content-Type", "text/pla"],
-            ],
-            expiration=datetime.datetime.utcnow() + datetime.timedelta(hours=1),
-            fields={"x-goog-random": "invalid_field", "content-type": "text/plain"},
-        )
-        with open(blob_name, "r") as f:
-            files = {"file": (blob_name, f)}
-            response = requests.post(policy["url"], data=policy["fields"], files=files)
-
-        os.remove(blob_name)
-        self.assertEqual(response.status_code, 400)
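Editor's note: the system test removed above exercises V4 POST policies end to end. A minimal standalone sketch of the same flow is below; the bucket name, object name, and local file are placeholders, not values taken from this change, and a signing-capable (service account) credential is assumed.

```python
# Sketch of the V4 POST policy flow the deleted test covered (assumed names).
import datetime

import requests
from google.cloud import storage

client = storage.Client()

policy = client.generate_signed_post_policy_v4(
    "my-example-bucket",   # assumed bucket name
    "uploads/report.txt",  # assumed object name
    expiration=datetime.datetime.utcnow() + datetime.timedelta(hours=1),
    conditions=[["starts-with", "$Content-Type", "text/"]],
    fields={"content-type": "text/plain"},
)

# POST the returned form fields plus the file; a 204 means the upload succeeded.
with open("report.txt", "rb") as f:
    response = requests.post(
        policy["url"], data=policy["fields"], files={"file": ("report.txt", f)}
    )
response.raise_for_status()
```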
diff --git a/tests/unit/test_blob.py b/tests/unit/test_blob.py
index a21385821..d9d08cb4d 100644
--- a/tests/unit/test_blob.py
+++ b/tests/unit/test_blob.py
@@ -25,11 +25,11 @@ import pytest
 import six
 from six.moves import http_client
+from six.moves.urllib.parse import urlencode
 
 from google.cloud.storage.retry import DEFAULT_RETRY
 from google.cloud.storage.retry import DEFAULT_RETRY_IF_ETAG_IN_JSON
 from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED
-from google.cloud.storage.retry import DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED
 
 
 def _make_credentials():
@@ -2042,8 +2042,6 @@ def _do_multipart_success(
         mtls=False,
         retry=None,
     ):
-        from six.moves.urllib.parse import urlencode
-
         bucket = _Bucket(name="w00t", user_project=user_project)
         blob = self._make_one(u"blob-name", bucket=bucket, kms_key_name=kms_key_name)
         self.assertIsNone(blob.chunk_size)
@@ -2287,7 +2285,6 @@ def _initiate_resumable_helper(
         mtls=False,
         retry=None,
     ):
-        from six.moves.urllib.parse import urlencode
         from google.resumable_media.requests import ResumableUpload
         from google.cloud.storage.blob import _DEFAULT_CHUNKSIZE
 
@@ -2853,8 +2850,8 @@ def _do_upload_helper(
             **timeout_kwarg
         )
 
-        if retry is DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED:
-            retry = DEFAULT_RETRY if if_metageneration_match else None
+        if retry is DEFAULT_RETRY_IF_GENERATION_SPECIFIED:
+            retry = DEFAULT_RETRY if if_generation_match else None
 
         self.assertIs(created_json, mock.sentinel.json)
         response.json.assert_called_once_with()
@@ -2925,11 +2922,11 @@ def test__do_upload_with_num_retries(self):
 
     def test__do_upload_with_conditional_retry_success(self):
         self._do_upload_helper(
-            retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED, if_metageneration_match=1
+            retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, if_generation_match=123456
         )
 
     def test__do_upload_with_conditional_retry_failure(self):
-        self._do_upload_helper(retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED)
+        self._do_upload_helper(retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED)
 
     def _upload_from_file_helper(self, side_effect=None, **kwargs):
         from google.cloud._helpers import UTC
@@ -2955,7 +2952,7 @@ def _upload_from_file_helper(self, side_effect=None, **kwargs):
         if_metageneration_not_match = kwargs.get("if_metageneration_not_match", None)
         num_retries = kwargs.get("num_retries", None)
         default_retry = (
-            DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED if not num_retries else None
+            DEFAULT_RETRY_IF_GENERATION_SPECIFIED if not num_retries else None
         )
         retry = kwargs.get("retry", default_retry)
         ret_val = blob.upload_from_file(
@@ -3062,9 +3059,7 @@ def _do_upload_mock_call_helper(
         expected_timeout = self._get_default_timeout() if timeout is None else timeout
 
         if not retry:
-            retry = (
-                DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED if not num_retries else None
-            )
+            retry = DEFAULT_RETRY_IF_GENERATION_SPECIFIED if not num_retries else None
         self.assertEqual(
             kwargs, {"timeout": expected_timeout, "checksum": None, "retry": retry}
         )
@@ -3251,7 +3246,15 @@ def test_upload_from_string_w_text_w_num_retries(self):
         self._upload_from_string_helper(data, num_retries=2)
 
     def _create_resumable_upload_session_helper(
-        self, origin=None, side_effect=None, timeout=None
+        self,
+        origin=None,
+        side_effect=None,
+        timeout=None,
+        if_generation_match=None,
+        if_generation_not_match=None,
+        if_metageneration_match=None,
+        if_metageneration_not_match=None,
+        retry=None,
     ):
         bucket = _Bucket(name="alex-trebek")
         blob = self._make_one("blob-name", bucket=bucket)
@@ -3283,6 +3286,11 @@ def _create_resumable_upload_session_helper(
             size=size,
             origin=origin,
             client=client,
+            if_generation_match=if_generation_match,
+            if_generation_not_match=if_generation_not_match,
+            if_metageneration_match=if_metageneration_match,
+            if_metageneration_not_match=if_metageneration_not_match,
+            retry=retry,
             **timeout_kwarg
         )
 
@@ -3292,10 +3300,23 @@ def _create_resumable_upload_session_helper(
 
         # Check the mocks.
         upload_url = (
-            "https://storage.googleapis.com/upload/storage/v1"
-            + bucket.path
-            + "/o?uploadType=resumable"
+            "https://storage.googleapis.com/upload/storage/v1" + bucket.path + "/o"
         )
+
+        qs_params = [("uploadType", "resumable")]
+        if if_generation_match is not None:
+            qs_params.append(("ifGenerationMatch", if_generation_match))
+
+        if if_generation_not_match is not None:
+            qs_params.append(("ifGenerationNotMatch", if_generation_not_match))
+
+        if if_metageneration_match is not None:
+            qs_params.append(("ifMetagenerationMatch", if_metageneration_match))
+
+        if if_metageneration_not_match is not None:
+            qs_params.append(("ifMetaGenerationNotMatch", if_metageneration_not_match))
+
+        upload_url += "?" + urlencode(qs_params)
         payload = b'{"name": "blob-name"}'
         expected_headers = {
             "content-type": "application/json; charset=UTF-8",
@@ -3321,6 +3342,26 @@ def test_create_resumable_upload_session_with_custom_timeout(self):
 
     def test_create_resumable_upload_session_with_origin(self):
         self._create_resumable_upload_session_helper(origin="http://google.com")
 
+    def test_create_resumable_upload_session_with_generation_match(self):
+        self._create_resumable_upload_session_helper(
+            if_generation_match=123456, if_metageneration_match=2
+        )
+
+    def test_create_resumable_upload_session_with_generation_not_match(self):
+        self._create_resumable_upload_session_helper(
+            if_generation_not_match=0, if_metageneration_not_match=3
+        )
+
+    def test_create_resumable_upload_session_with_conditional_retry_success(self):
+        self._create_resumable_upload_session_helper(
+            retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED, if_generation_match=123456
+        )
+
+    def test_create_resumable_upload_session_with_conditional_retry_failure(self):
+        self._create_resumable_upload_session_helper(
+            retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED
+        )
+
     def test_create_resumable_upload_session_with_failure(self):
         from google.resumable_media import InvalidResponse
         from google.cloud import exceptions
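Editor's note: the unit tests above cover the new precondition and retry plumbing on `blob.create_resumable_upload_session`. A rough usage sketch follows; the bucket and object names are placeholders, and the single `PUT` at the end assumes a payload small enough to send in one request.

```python
# Sketch of the new precondition/retry arguments on create_resumable_upload_session.
import requests
from google.cloud import storage
from google.cloud.storage.retry import DEFAULT_RETRY

client = storage.Client()
blob = client.bucket("my-example-bucket").blob("notes.txt")  # assumed names

# if_generation_match=0 asks the API to start the session only if the object
# does not exist yet, which also makes the initiation safe to retry.
session_url = blob.create_resumable_upload_session(
    content_type="text/plain",
    if_generation_match=0,
    retry=DEFAULT_RETRY,
)

# The session URL can be handed to any client that speaks the resumable
# upload protocol; here a plain PUT finishes a small upload.
payload = b"hello resumable world"
response = requests.put(
    session_url,
    data=payload,
    headers={"content-length": str(len(payload))},
)
response.raise_for_status()
```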
diff --git a/tests/unit/test_bucket.py b/tests/unit/test_bucket.py
index 244c26b2a..4f2932865 100644
--- a/tests/unit/test_bucket.py
+++ b/tests/unit/test_bucket.py
@@ -22,6 +22,8 @@ from google.cloud.storage.retry import DEFAULT_RETRY_IF_ETAG_IN_JSON
 from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED
 from google.cloud.storage.retry import DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED
+from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_ENFORCED
+from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_UNSPECIFIED
 
 
 def _create_signing_credentials():
@@ -356,6 +358,9 @@ def test_ctor_defaults(self):
         self.assertIs(config.bucket, bucket)
         self.assertFalse(config.uniform_bucket_level_access_enabled)
         self.assertIsNone(config.uniform_bucket_level_access_locked_time)
+        self.assertEqual(
+            config.public_access_prevention, PUBLIC_ACCESS_PREVENTION_UNSPECIFIED
+        )
         self.assertFalse(config.bucket_policy_only_enabled)
         self.assertIsNone(config.bucket_policy_only_locked_time)
 
@@ -378,6 +383,24 @@ def test_ctor_explicit_ubla(self):
         self.assertTrue(config.bucket_policy_only_enabled)
         self.assertEqual(config.bucket_policy_only_locked_time, now)
 
+    def test_ctor_explicit_pap(self):
+        bucket = self._make_bucket()
+
+        config = self._make_one(
+            bucket, public_access_prevention=PUBLIC_ACCESS_PREVENTION_ENFORCED,
+        )
+
+        self.assertIs(config.bucket, bucket)
+        self.assertFalse(config.uniform_bucket_level_access_enabled)
+        self.assertEqual(
+            config.public_access_prevention, PUBLIC_ACCESS_PREVENTION_ENFORCED
+        )
+
+        config.public_access_prevention = PUBLIC_ACCESS_PREVENTION_UNSPECIFIED
+        self.assertEqual(
+            config.public_access_prevention, PUBLIC_ACCESS_PREVENTION_UNSPECIFIED
+        )
+
     def test_ctor_explicit_bpo(self):
         import datetime
         import pytz
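Editor's note: the bucket tests above cover the new public access prevention field on the bucket IAM configuration. A short sketch of how that surface is used follows; the bucket name is a placeholder and the call shape mirrors the tests.

```python
# Sketch of setting public access prevention on a bucket (assumed bucket name).
from google.cloud import storage
from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_ENFORCED

client = storage.Client()
bucket = client.get_bucket("my-example-bucket")  # assumed existing bucket

# Enforce public access prevention via the bucket's IAM configuration.
bucket.iam_configuration.public_access_prevention = PUBLIC_ACCESS_PREVENTION_ENFORCED
bucket.patch()

assert (
    bucket.iam_configuration.public_access_prevention
    == PUBLIC_ACCESS_PREVENTION_ENFORCED
)
```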
diff --git a/tests/unit/test_fileio.py b/tests/unit/test_fileio.py
index 6ce9b4990..9fadc967c 100644
--- a/tests/unit/test_fileio.py
+++ b/tests/unit/test_fileio.py
@@ -395,7 +395,7 @@ def test_conditional_retry_pass(self):
             blob,
             chunk_size=chunk_size,
             content_type=PLAIN_CONTENT_TYPE,
-            if_metageneration_match=1,
+            if_generation_match=123456,
         )
 
         # The transmit_next_chunk method must actually consume bytes from the
@@ -421,7 +421,7 @@ def test_conditional_retry_pass(self):
             None,  # num_retries
             chunk_size=chunk_size,
             retry=DEFAULT_RETRY,
-            if_metageneration_match=1,
+            if_generation_match=123456,
         )
         upload.transmit_next_chunk.assert_called_with(transport)
         self.assertEqual(upload.transmit_next_chunk.call_count, 4)
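Editor's note: taken together, the blob and fileio changes above switch the default conditional retry for uploads from `if_metageneration_match` to `if_generation_match`. A hedged sketch of what that means for callers is below; names are placeholders and the second call simply illustrates the opt-in path.

```python
# Sketch of the upload retry behaviour asserted by the tests above (assumed names).
from google.cloud import storage

client = storage.Client()
blob = client.bucket("my-example-bucket").blob("data.bin")

# With a generation precondition the request is idempotent, so the library's
# default retry applies on transient errors.
blob.upload_from_string(b"payload", if_generation_match=0)

# Without a generation precondition the upload is not retried by default; an
# explicit retry (or the deprecated num_retries) must be passed to opt in.
blob.upload_from_string(b"payload-v2")
```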