1# Copyright 2014 Google LLC
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15"""Create / interact with Google Cloud Storage buckets."""
16
17import base64
18import copy
19import datetime
20import json
21import warnings
22
23import six
24from six.moves.urllib.parse import urlsplit
25
26from google.api_core import datetime_helpers
27from google.cloud._helpers import _datetime_to_rfc3339
28from google.cloud._helpers import _NOW
29from google.cloud._helpers import _rfc3339_nanos_to_datetime
30from google.cloud.exceptions import NotFound
31from google.api_core.iam import Policy
32from google.cloud.storage import _signing
33from google.cloud.storage._helpers import _add_etag_match_headers
34from google.cloud.storage._helpers import _add_generation_match_parameters
35from google.cloud.storage._helpers import _PropertyMixin
36from google.cloud.storage._helpers import _scalar_property
37from google.cloud.storage._helpers import _validate_name
38from google.cloud.storage._signing import generate_signed_url_v2
39from google.cloud.storage._signing import generate_signed_url_v4
40from google.cloud.storage._helpers import _bucket_bound_hostname_url
41from google.cloud.storage.acl import BucketACL
42from google.cloud.storage.acl import DefaultObjectACL
43from google.cloud.storage.blob import Blob
44from google.cloud.storage.constants import _DEFAULT_TIMEOUT
45from google.cloud.storage.constants import ARCHIVE_STORAGE_CLASS
46from google.cloud.storage.constants import COLDLINE_STORAGE_CLASS
47from google.cloud.storage.constants import DUAL_REGION_LOCATION_TYPE
48from google.cloud.storage.constants import (
49    DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS,
50)
51from google.cloud.storage.constants import MULTI_REGIONAL_LEGACY_STORAGE_CLASS
52from google.cloud.storage.constants import MULTI_REGION_LOCATION_TYPE
53from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS
54from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_INHERITED
55from google.cloud.storage.constants import REGIONAL_LEGACY_STORAGE_CLASS
56from google.cloud.storage.constants import REGION_LOCATION_TYPE
57from google.cloud.storage.constants import STANDARD_STORAGE_CLASS
58from google.cloud.storage.notification import BucketNotification
59from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT
60from google.cloud.storage.retry import DEFAULT_RETRY
61from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED
62from google.cloud.storage.retry import DEFAULT_RETRY_IF_ETAG_IN_JSON
63from google.cloud.storage.retry import DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED
64
65
# Error raised when both the current and the deprecated "enabled" kwargs
# are passed to IAMConfiguration at the same time.
_UBLA_BPO_ENABLED_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_enabled' / "
    "'bucket_policy_only_enabled' to 'IAMConfiguration'."
)
# Deprecation warning for the legacy 'bucket_policy_only_enabled' kwarg/property.
_BPO_ENABLED_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_enabled' is deprecated.  "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_enabled'."
)
# Error raised when both the current and the deprecated "lock time" kwargs
# are passed to IAMConfiguration at the same time.
_UBLA_BPO_LOCK_TIME_MESSAGE = (
    "Pass only one of 'uniform_bucket_level_access_lock_time' / "
    "'bucket_policy_only_lock_time' to 'IAMConfiguration'."
)
# Deprecation warning for the legacy 'bucket_policy_only_lock_time' property.
_BPO_LOCK_TIME_MESSAGE = (
    "'IAMConfiguration.bucket_policy_only_lock_time' is deprecated.  "
    "Instead, use 'IAMConfiguration.uniform_bucket_level_access_lock_time'."
)
# Deprecation warning emitted when assigning to 'Bucket.location', which is
# only meaningful before the bucket has been created.
_LOCATION_SETTER_MESSAGE = (
    "Assignment to 'Bucket.location' is deprecated, as it is only "
    "valid before the bucket is created. Instead, pass the location "
    "to `Bucket.create`."
)
# Default public endpoint for the Google Cloud Storage JSON API.
_API_ACCESS_ENDPOINT = "https://storage.googleapis.com"
88
89
90def _blobs_page_start(iterator, page, response):
91    """Grab prefixes after a :class:`~google.cloud.iterator.Page` started.
92
93    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
94    :param iterator: The iterator that is currently in use.
95
96    :type page: :class:`~google.cloud.api.core.page_iterator.Page`
97    :param page: The page that was just created.
98
99    :type response: dict
100    :param response: The JSON API response for a page of blobs.
101    """
102    page.prefixes = tuple(response.get("prefixes", ()))
103    iterator.prefixes.update(page.prefixes)
104
105
def _item_to_blob(iterator, item):
    """Convert a JSON blob resource into a :class:`.Blob` instance.

    .. note::

        This assumes that the ``bucket`` attribute has been
        added to the iterator after being created.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type item: dict
    :param item: An item to be converted to a blob.

    :rtype: :class:`.Blob`
    :returns: The next blob in the page.
    """
    blob = Blob(item.get("name"), bucket=iterator.bucket)
    blob._set_properties(item)
    return blob
127
128
def _item_to_notification(iterator, item):
    """Convert a JSON notification resource into the native object.

    .. note::

        This assumes that the ``bucket`` attribute has been
        added to the iterator after being created.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type item: dict
    :param item: An item to be converted to a blob.

    :rtype: :class:`.BucketNotification`
    :returns: The next notification being iterated.
    """
    bucket = iterator.bucket
    return BucketNotification.from_api_repr(item, bucket=bucket)
147
148
class LifecycleRuleConditions(dict):
    """Map a single lifecycle rule for a bucket.

    See: https://cloud.google.com/storage/docs/lifecycle

    :type age: int
    :param age: (Optional) Apply rule action to items whose age, in days,
                exceeds this value.

    :type created_before: datetime.date
    :param created_before: (Optional) Apply rule action to items created
                           before this date.

    :type is_live: bool
    :param is_live: (Optional) If true, apply rule action to non-versioned
                    items, or to items with no newer versions. If false, apply
                    rule action to versioned items with at least one newer
                    version.

    :type matches_storage_class: list(str), one or more of
                                 :attr:`Bucket.STORAGE_CLASSES`.
    :param matches_storage_class: (Optional) Apply rule action to items
                                  whose storage class matches this value.

    :type number_of_newer_versions: int
    :param number_of_newer_versions: (Optional) Apply rule action to versioned
                                     items having N newer versions.

    :type days_since_custom_time: int
    :param days_since_custom_time: (Optional) Apply rule action to items whose number of days
                                   elapsed since the custom timestamp. This condition is relevant
                                   only for versioned objects. The value of the field must be a non
                                   negative integer. If it's zero, the object version will become
                                   eligible for lifecycle action as soon as it becomes custom.

    :type custom_time_before: :class:`datetime.date`
    :param custom_time_before: (Optional)  Date object parsed from RFC3339 valid date, apply rule action
                               to items whose custom time is before this date. This condition is relevant
                               only for versioned objects, e.g., 2019-03-16.

    :type days_since_noncurrent_time: int
    :param days_since_noncurrent_time: (Optional) Apply rule action to items whose number of days
                                        elapsed since the non current timestamp. This condition
                                        is relevant only for versioned objects. The value of the field
                                        must be a non negative integer. If it's zero, the object version
                                        will become eligible for lifecycle action as soon as it becomes
                                        non current.

    :type noncurrent_time_before: :class:`datetime.date`
    :param noncurrent_time_before: (Optional) Date object parsed from RFC3339 valid date, apply
                                   rule action to items whose non current time is before this date.
                                   This condition is relevant only for versioned objects, e.g, 2019-03-16.

    :raises ValueError: if no arguments are passed.
    """

    def __init__(
        self,
        age=None,
        created_before=None,
        is_live=None,
        matches_storage_class=None,
        number_of_newer_versions=None,
        days_since_custom_time=None,
        custom_time_before=None,
        days_since_noncurrent_time=None,
        noncurrent_time_before=None,
        _factory=False,
    ):
        conditions = {}

        if age is not None:
            conditions["age"] = age

        if created_before is not None:
            conditions["createdBefore"] = created_before.isoformat()

        if is_live is not None:
            conditions["isLive"] = is_live

        if matches_storage_class is not None:
            conditions["matchesStorageClass"] = matches_storage_class

        if number_of_newer_versions is not None:
            conditions["numNewerVersions"] = number_of_newer_versions

        if days_since_custom_time is not None:
            conditions["daysSinceCustomTime"] = days_since_custom_time

        if custom_time_before is not None:
            conditions["customTimeBefore"] = custom_time_before.isoformat()

        if days_since_noncurrent_time is not None:
            conditions["daysSinceNoncurrentTime"] = days_since_noncurrent_time

        if noncurrent_time_before is not None:
            conditions["noncurrentTimeBefore"] = noncurrent_time_before.isoformat()

        # Validate only after *all* supported conditions have been collected.
        # Previously this check ran before the noncurrent-time conditions were
        # added, so a rule using only 'days_since_noncurrent_time' or
        # 'noncurrent_time_before' was rejected incorrectly.
        if not _factory and not conditions:
            raise ValueError("Supply at least one condition")

        super(LifecycleRuleConditions, self).__init__(conditions)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory:  construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleConditions`
        :returns: Instance created from resource.
        """
        instance = cls(_factory=True)
        instance.update(resource)
        return instance

    @property
    def age(self):
        """Condition's age value."""
        return self.get("age")

    @property
    def created_before(self):
        """Condition's created_before value."""
        before = self.get("createdBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)

    @property
    def is_live(self):
        """Condition's 'is_live' value."""
        return self.get("isLive")

    @property
    def matches_storage_class(self):
        """Condition's 'matches_storage_class' value."""
        return self.get("matchesStorageClass")

    @property
    def number_of_newer_versions(self):
        """Condition's 'number_of_newer_versions' value."""
        return self.get("numNewerVersions")

    @property
    def days_since_custom_time(self):
        """Condition's 'days_since_custom_time' value."""
        return self.get("daysSinceCustomTime")

    @property
    def custom_time_before(self):
        """Condition's 'custom_time_before' value."""
        before = self.get("customTimeBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)

    @property
    def days_since_noncurrent_time(self):
        """Condition's 'days_since_noncurrent_time' value."""
        return self.get("daysSinceNoncurrentTime")

    @property
    def noncurrent_time_before(self):
        """Condition's 'noncurrent_time_before' value."""
        before = self.get("noncurrentTimeBefore")
        if before is not None:
            return datetime_helpers.from_iso8601_date(before)
316
317
class LifecycleRuleDelete(dict):
    """Map a lifecycle rule deleting matching items.

    :type kw: dict
    :params kw: arguments passed to :class:`LifecycleRuleConditions`.
    """

    def __init__(self, **kw):
        super(LifecycleRuleDelete, self).__init__(
            {
                "action": {"type": "Delete"},
                "condition": dict(LifecycleRuleConditions(**kw)),
            }
        )

    @classmethod
    def from_api_repr(cls, resource):
        """Factory:  construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleDelete`
        :returns: Instance created from resource.
        """
        instance = cls(_factory=True)
        instance.update(resource)
        return instance
343
344
class LifecycleRuleSetStorageClass(dict):
    """Map a lifecycle rule updating storage class of matching items.

    :type storage_class: str, one of :attr:`Bucket.STORAGE_CLASSES`.
    :param storage_class: new storage class to assign to matching items.

    :type kw: dict
    :params kw: arguments passed to :class:`LifecycleRuleConditions`.
    """

    def __init__(self, storage_class, **kw):
        conditions = LifecycleRuleConditions(**kw)
        rule = {
            "action": {"type": "SetStorageClass", "storageClass": storage_class},
            "condition": dict(conditions),
        }
        super(LifecycleRuleSetStorageClass, self).__init__(rule)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory:  construct instance from resource.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`LifecycleRuleSetStorageClass`
        :returns: Instance created from resource.
        """
        action = resource["action"]
        instance = cls(action["storageClass"], _factory=True)
        instance.update(resource)
        return instance
377
378
# Sentinel used by IAMConfiguration to distinguish "argument not passed"
# from an explicit ``None`` or falsy value.
_default = object()
380
381
class IAMConfiguration(dict):
    """Map a bucket's IAM configuration.

    :type bucket: :class:`Bucket`
    :param bucket: Bucket for which this instance is the policy.

    :type public_access_prevention: str
    :param public_access_prevention:
        (Optional) Whether the public access prevention policy is 'inherited' (default) or 'enforced'
        See: https://cloud.google.com/storage/docs/public-access-prevention

    :type uniform_bucket_level_access_enabled: bool
    :param uniform_bucket_level_access_enabled:
        (Optional) Whether the IAM-only policy is enabled for the bucket.

    :type uniform_bucket_level_access_locked_time: :class:`datetime.datetime`
    :param uniform_bucket_level_access_locked_time:
        (Optional) When the bucket's IAM-only policy was enabled.
        This value should normally only be set by the back-end API.

    :type bucket_policy_only_enabled: bool
    :param bucket_policy_only_enabled:
        Deprecated alias for :data:`uniform_bucket_level_access_enabled`.

    :type bucket_policy_only_locked_time: :class:`datetime.datetime`
    :param bucket_policy_only_locked_time:
        Deprecated alias for :data:`uniform_bucket_level_access_locked_time`.
    """

    def __init__(
        self,
        bucket,
        public_access_prevention=_default,
        uniform_bucket_level_access_enabled=_default,
        uniform_bucket_level_access_locked_time=_default,
        bucket_policy_only_enabled=_default,
        bucket_policy_only_locked_time=_default,
    ):
        # Fold the deprecated kwargs into their current equivalents, warning
        # on use and rejecting a mix of old and new spellings.
        if bucket_policy_only_enabled is not _default:

            if uniform_bucket_level_access_enabled is not _default:
                raise ValueError(_UBLA_BPO_ENABLED_MESSAGE)

            warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2)
            uniform_bucket_level_access_enabled = bucket_policy_only_enabled

        if bucket_policy_only_locked_time is not _default:

            if uniform_bucket_level_access_locked_time is not _default:
                raise ValueError(_UBLA_BPO_LOCK_TIME_MESSAGE)

            warnings.warn(_BPO_LOCK_TIME_MESSAGE, DeprecationWarning, stacklevel=2)
            uniform_bucket_level_access_locked_time = bucket_policy_only_locked_time

        if uniform_bucket_level_access_enabled is _default:
            uniform_bucket_level_access_enabled = False

        if public_access_prevention is _default:
            public_access_prevention = PUBLIC_ACCESS_PREVENTION_INHERITED

        data = {
            "uniformBucketLevelAccess": {
                "enabled": uniform_bucket_level_access_enabled
            },
            "publicAccessPrevention": public_access_prevention,
        }
        if uniform_bucket_level_access_locked_time is not _default:
            data["uniformBucketLevelAccess"]["lockedTime"] = _datetime_to_rfc3339(
                uniform_bucket_level_access_locked_time
            )
        super(IAMConfiguration, self).__init__(data)
        self._bucket = bucket

    @classmethod
    def from_api_repr(cls, resource, bucket):
        """Factory:  construct instance from resource.

        :type bucket: :class:`Bucket`
        :param bucket: Bucket for which this instance is the policy.

        :type resource: dict
        :param resource: mapping as returned from API call.

        :rtype: :class:`IAMConfiguration`
        :returns: Instance created from resource.
        """
        instance = cls(bucket)
        instance.update(resource)
        return instance

    @property
    def bucket(self):
        """Bucket for which this instance is the policy.

        :rtype: :class:`Bucket`
        :returns: the instance's bucket.
        """
        return self._bucket

    @property
    def public_access_prevention(self):
        """Setting for public access prevention policy. Options are 'inherited' (default) or 'enforced'.

            See: https://cloud.google.com/storage/docs/public-access-prevention

        :rtype: string
        :returns: the public access prevention status, either 'enforced' or 'inherited'.
        """
        return self["publicAccessPrevention"]

    @public_access_prevention.setter
    def public_access_prevention(self, value):
        # Queue the change for the next PATCH request on the owning bucket.
        self["publicAccessPrevention"] = value
        self.bucket._patch_property("iamConfiguration", self)

    @property
    def uniform_bucket_level_access_enabled(self):
        """If set, access checks only use bucket-level IAM policies or above.

        :rtype: bool
        :returns: whether the bucket is configured to allow only IAM.
        """
        ubla = self.get("uniformBucketLevelAccess", {})
        return ubla.get("enabled", False)

    @uniform_bucket_level_access_enabled.setter
    def uniform_bucket_level_access_enabled(self, value):
        # Queue the change for the next PATCH request on the owning bucket.
        ubla = self.setdefault("uniformBucketLevelAccess", {})
        ubla["enabled"] = bool(value)
        self.bucket._patch_property("iamConfiguration", self)

    @property
    def uniform_bucket_level_access_locked_time(self):
        """Deadline for changing :attr:`uniform_bucket_level_access_enabled` from true to false.

        If the bucket's :attr:`uniform_bucket_level_access_enabled` is true, this property
        is the time after which that setting becomes immutable.

        If the bucket's :attr:`uniform_bucket_level_access_enabled` is false, this property
        is ``None``.

        :rtype: Union[:class:`datetime.datetime`, None]
        :returns:  (readonly) Time after which :attr:`uniform_bucket_level_access_enabled` will
                   be frozen as true.
        """
        ubla = self.get("uniformBucketLevelAccess", {})
        stamp = ubla.get("lockedTime")
        if stamp is not None:
            stamp = _rfc3339_nanos_to_datetime(stamp)
        return stamp

    @property
    def bucket_policy_only_enabled(self):
        """Deprecated alias for :attr:`uniform_bucket_level_access_enabled`.

        :rtype: bool
        :returns: whether the bucket is configured to allow only IAM.
        """
        return self.uniform_bucket_level_access_enabled

    @bucket_policy_only_enabled.setter
    def bucket_policy_only_enabled(self, value):
        warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2)
        self.uniform_bucket_level_access_enabled = value

    @property
    def bucket_policy_only_locked_time(self):
        """Deprecated alias for :attr:`uniform_bucket_level_access_locked_time`.

        :rtype: Union[:class:`datetime.datetime`, None]
        :returns:
            (readonly) Time after which :attr:`bucket_policy_only_enabled` will
            be frozen as true.
        """
        return self.uniform_bucket_level_access_locked_time
557
558
559class Bucket(_PropertyMixin):
560    """A class representing a Bucket on Cloud Storage.
561
562    :type client: :class:`google.cloud.storage.client.Client`
563    :param client: A client which holds credentials and project configuration
564                   for the bucket (which requires a project).
565
566    :type name: str
567    :param name: The name of the bucket. Bucket names must start and end with a
568                 number or letter.
569
570    :type user_project: str
571    :param user_project: (Optional) the project ID to be billed for API
572                         requests made via this instance.
573    """
574
575    _MAX_OBJECTS_FOR_ITERATION = 256
576    """Maximum number of existing objects allowed in iteration.
577
578    This is used in Bucket.delete() and Bucket.make_public().
579    """
580
581    STORAGE_CLASSES = (
582        STANDARD_STORAGE_CLASS,
583        NEARLINE_STORAGE_CLASS,
584        COLDLINE_STORAGE_CLASS,
585        ARCHIVE_STORAGE_CLASS,
586        MULTI_REGIONAL_LEGACY_STORAGE_CLASS,  # legacy
587        REGIONAL_LEGACY_STORAGE_CLASS,  # legacy
588        DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS,  # legacy
589    )
590    """Allowed values for :attr:`storage_class`.
591
592    Default value is :attr:`STANDARD_STORAGE_CLASS`.
593
594    See
595    https://cloud.google.com/storage/docs/json_api/v1/buckets#storageClass
596    https://cloud.google.com/storage/docs/storage-classes
597    """
598
599    _LOCATION_TYPES = (
600        MULTI_REGION_LOCATION_TYPE,
601        REGION_LOCATION_TYPE,
602        DUAL_REGION_LOCATION_TYPE,
603    )
604    """Allowed values for :attr:`location_type`."""
605
    def __init__(self, client, name=None, user_project=None):
        """Instantiate the bucket and its ACL helpers.

        :type client: :class:`google.cloud.storage.client.Client`
        :param client: A client which holds credentials and project
                       configuration for the bucket.

        :type name: str
        :param name: The name of the bucket; validated via
                     :func:`~google.cloud.storage._helpers._validate_name`.

        :type user_project: str
        :param user_project: (Optional) The project ID to be billed for API
                             requests made via this instance.
        """
        name = _validate_name(name)
        super(Bucket, self).__init__(name=name)
        self._client = client
        self._acl = BucketACL(self)
        self._default_object_acl = DefaultObjectACL(self)
        # Labels queued for removal on the next update; cleared whenever
        # fresh properties are loaded (see _set_properties).
        self._label_removals = set()
        self._user_project = user_project
618
619    def __repr__(self):
620        return "<Bucket: %s>" % (self.name,)
621
    @property
    def client(self):
        """The client bound to this bucket.

        :rtype: :class:`google.cloud.storage.client.Client`
        :returns: The client stored at construction time.
        """
        return self._client
626
    def _set_properties(self, value):
        """Set the properties for the current object.

        :type value: dict or :class:`google.cloud.storage.batch._FutureDict`
        :param value: The properties to be set.
        """
        # Freshly-loaded properties supersede any label deletions queued
        # locally, so discard them before storing the new state.
        self._label_removals.clear()
        return super(Bucket, self)._set_properties(value)
635
    @property
    def user_project(self):
        """Project ID to be billed for API requests made via this bucket.

        If unset, API requests are billed to the bucket owner.

        A user project is required for all operations on Requester Pays buckets.

        See https://cloud.google.com/storage/docs/requester-pays#requirements for details.

        :rtype: str
        :returns: The project ID supplied at construction, or ``None``.
        """
        return self._user_project
649
650    @classmethod
651    def from_string(cls, uri, client=None):
652        """Get a constructor for bucket object by URI.
653
654        :type uri: str
655        :param uri: The bucket uri pass to get bucket object.
656
657        :type client: :class:`~google.cloud.storage.client.Client` or
658                      ``NoneType``
659        :param client: (Optional) The client to use.  Application code should
660            *always* pass ``client``.
661
662        :rtype: :class:`google.cloud.storage.bucket.Bucket`
663        :returns: The bucket object created.
664
665        Example:
666            Get a constructor for bucket object by URI..
667
668            >>> from google.cloud import storage
669            >>> from google.cloud.storage.bucket import Bucket
670            >>> client = storage.Client()
671            >>> bucket = Bucket.from_string("gs://bucket", client=client)
672        """
673        scheme, netloc, path, query, frag = urlsplit(uri)
674
675        if scheme != "gs":
676            raise ValueError("URI scheme must be gs")
677
678        return cls(client, name=netloc)
679
680    def blob(
681        self,
682        blob_name,
683        chunk_size=None,
684        encryption_key=None,
685        kms_key_name=None,
686        generation=None,
687    ):
688        """Factory constructor for blob object.
689
690        .. note::
691          This will not make an HTTP request; it simply instantiates
692          a blob object owned by this bucket.
693
694        :type blob_name: str
695        :param blob_name: The name of the blob to be instantiated.
696
697        :type chunk_size: int
698        :param chunk_size: The size of a chunk of data whenever iterating
699                           (in bytes). This must be a multiple of 256 KB per
700                           the API specification.
701
702        :type encryption_key: bytes
703        :param encryption_key:
704            (Optional) 32 byte encryption key for customer-supplied encryption.
705
706        :type kms_key_name: str
707        :param kms_key_name:
708            (Optional) Resource name of KMS key used to encrypt blob's content.
709
710        :type generation: long
711        :param generation: (Optional) If present, selects a specific revision of
712                           this object.
713
714        :rtype: :class:`google.cloud.storage.blob.Blob`
715        :returns: The blob object created.
716        """
717        return Blob(
718            name=blob_name,
719            bucket=self,
720            chunk_size=chunk_size,
721            encryption_key=encryption_key,
722            kms_key_name=kms_key_name,
723            generation=generation,
724        )
725
726    def notification(
727        self,
728        topic_name=None,
729        topic_project=None,
730        custom_attributes=None,
731        event_types=None,
732        blob_name_prefix=None,
733        payload_format=NONE_PAYLOAD_FORMAT,
734        notification_id=None,
735    ):
736        """Factory:  create a notification resource for the bucket.
737
738        See: :class:`.BucketNotification` for parameters.
739
740        :rtype: :class:`.BucketNotification`
741        """
742        return BucketNotification(
743            self,
744            topic_name=topic_name,
745            topic_project=topic_project,
746            custom_attributes=custom_attributes,
747            event_types=event_types,
748            blob_name_prefix=blob_name_prefix,
749            payload_format=payload_format,
750            notification_id=notification_id,
751        )
752
753    def exists(
754        self,
755        client=None,
756        timeout=_DEFAULT_TIMEOUT,
757        if_etag_match=None,
758        if_etag_not_match=None,
759        if_metageneration_match=None,
760        if_metageneration_not_match=None,
761        retry=DEFAULT_RETRY,
762    ):
763        """Determines whether or not this bucket exists.
764
765        If :attr:`user_project` is set, bills the API request to that project.
766
767        :type client: :class:`~google.cloud.storage.client.Client` or
768                      ``NoneType``
769        :param client: (Optional) The client to use. If not passed, falls back
770                       to the ``client`` stored on the current bucket.
771
772        :type timeout: float or tuple
773        :param timeout:
774            (Optional) The amount of time, in seconds, to wait
775            for the server response.  See: :ref:`configuring_timeouts`
776
777        :type if_etag_match: Union[str, Set[str]]
778        :param if_etag_match: (Optional) Make the operation conditional on whether the
779                              bucket's current ETag matches the given value.
780
781        :type if_etag_not_match: Union[str, Set[str]])
782        :param if_etag_not_match: (Optional) Make the operation conditional on whether the
783                                  bucket's current ETag does not match the given value.
784
785        :type if_metageneration_match: long
786        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
787                                        bucket's current metageneration matches the given value.
788
789        :type if_metageneration_not_match: long
790        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
791                                            bucket's current metageneration does not match the given value.
792
793        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
794        :param retry:
795            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
796
797        :rtype: bool
798        :returns: True if the bucket exists in Cloud Storage.
799        """
800        client = self._require_client(client)
801        # We only need the status code (200 or not) so we seek to
802        # minimize the returned payload.
803        query_params = {"fields": "name"}
804
805        if self.user_project is not None:
806            query_params["userProject"] = self.user_project
807
808        _add_generation_match_parameters(
809            query_params,
810            if_metageneration_match=if_metageneration_match,
811            if_metageneration_not_match=if_metageneration_not_match,
812        )
813
814        headers = {}
815        _add_etag_match_headers(
816            headers, if_etag_match=if_etag_match, if_etag_not_match=if_etag_not_match
817        )
818
819        try:
820            # We intentionally pass `_target_object=None` since fields=name
821            # would limit the local properties.
822            client._get_resource(
823                self.path,
824                query_params=query_params,
825                headers=headers,
826                timeout=timeout,
827                retry=retry,
828                _target_object=None,
829            )
830        except NotFound:
831            # NOTE: This will not fail immediately in a batch. However, when
832            #       Batch.finish() is called, the resulting `NotFound` will be
833            #       raised.
834            return False
835        return True
836
837    def create(
838        self,
839        client=None,
840        project=None,
841        location=None,
842        predefined_acl=None,
843        predefined_default_object_acl=None,
844        timeout=_DEFAULT_TIMEOUT,
845        retry=DEFAULT_RETRY,
846    ):
847        """DEPRECATED. Creates current bucket.
848
849        .. note::
850          Direct use of this method is deprecated. Use ``Client.create_bucket()`` instead.
851
852        If the bucket already exists, will raise
853        :class:`google.cloud.exceptions.Conflict`.
854
855        This implements "storage.buckets.insert".
856
857        If :attr:`user_project` is set, bills the API request to that project.
858
859        :type client: :class:`~google.cloud.storage.client.Client` or
860                      ``NoneType``
861        :param client: (Optional) The client to use. If not passed, falls back
862                       to the ``client`` stored on the current bucket.
863
864        :type project: str
865        :param project: (Optional) The project under which the bucket is to
866                        be created. If not passed, uses the project set on
867                        the client.
868        :raises ValueError: if ``project`` is None and client's
869                            :attr:`project` is also None.
870
871        :type location: str
872        :param location: (Optional) The location of the bucket. If not passed,
873                         the default location, US, will be used. See
874                         https://cloud.google.com/storage/docs/bucket-locations
875
876        :type predefined_acl: str
877        :param predefined_acl:
878            (Optional) Name of predefined ACL to apply to bucket. See:
879            https://cloud.google.com/storage/docs/access-control/lists#predefined-acl
880
881        :type predefined_default_object_acl: str
882        :param predefined_default_object_acl:
883            (Optional) Name of predefined ACL to apply to bucket's objects. See:
884            https://cloud.google.com/storage/docs/access-control/lists#predefined-acl
885
886        :type timeout: float or tuple
887        :param timeout:
888            (Optional) The amount of time, in seconds, to wait
889            for the server response.  See: :ref:`configuring_timeouts`
890
891        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
892        :param retry:
893            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
894        """
895        warnings.warn(
896            "Bucket.create() is deprecated and will be removed in future."
897            "Use Client.create_bucket() instead.",
898            PendingDeprecationWarning,
899            stacklevel=1,
900        )
901
902        client = self._require_client(client)
903        client.create_bucket(
904            bucket_or_name=self,
905            project=project,
906            user_project=self.user_project,
907            location=location,
908            predefined_acl=predefined_acl,
909            predefined_default_object_acl=predefined_default_object_acl,
910            timeout=timeout,
911            retry=retry,
912        )
913
914    def update(
915        self,
916        client=None,
917        timeout=_DEFAULT_TIMEOUT,
918        if_metageneration_match=None,
919        if_metageneration_not_match=None,
920        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
921    ):
922        """Sends all properties in a PUT request.
923
924        Updates the ``_properties`` with the response from the backend.
925
926        If :attr:`user_project` is set, bills the API request to that project.
927
928        :type client: :class:`~google.cloud.storage.client.Client` or
929                      ``NoneType``
930        :param client: the client to use. If not passed, falls back to the
931                       ``client`` stored on the current object.
932
933        :type timeout: float or tuple
934        :param timeout:
935            (Optional) The amount of time, in seconds, to wait
936            for the server response.  See: :ref:`configuring_timeouts`
937
938        :type if_metageneration_match: long
939        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
940                                        blob's current metageneration matches the given value.
941
942        :type if_metageneration_not_match: long
943        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
944                                            blob's current metageneration does not match the given value.
945
946        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
947        :param retry:
948            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
949        """
950        super(Bucket, self).update(
951            client=client,
952            timeout=timeout,
953            if_metageneration_match=if_metageneration_match,
954            if_metageneration_not_match=if_metageneration_not_match,
955            retry=retry,
956        )
957
958    def reload(
959        self,
960        client=None,
961        projection="noAcl",
962        timeout=_DEFAULT_TIMEOUT,
963        if_etag_match=None,
964        if_etag_not_match=None,
965        if_metageneration_match=None,
966        if_metageneration_not_match=None,
967        retry=DEFAULT_RETRY,
968    ):
969        """Reload properties from Cloud Storage.
970
971        If :attr:`user_project` is set, bills the API request to that project.
972
973        :type client: :class:`~google.cloud.storage.client.Client` or
974                      ``NoneType``
975        :param client: the client to use. If not passed, falls back to the
976                       ``client`` stored on the current object.
977
978        :type projection: str
979        :param projection: (Optional) If used, must be 'full' or 'noAcl'.
980                           Defaults to ``'noAcl'``. Specifies the set of
981                           properties to return.
982
983        :type timeout: float or tuple
984        :param timeout:
985            (Optional) The amount of time, in seconds, to wait
986            for the server response.  See: :ref:`configuring_timeouts`
987
988        :type if_etag_match: Union[str, Set[str]]
989        :param if_etag_match: (Optional) Make the operation conditional on whether the
990                              bucket's current ETag matches the given value.
991
992        :type if_etag_not_match: Union[str, Set[str]])
993        :param if_etag_not_match: (Optional) Make the operation conditional on whether the
994                                  bucket's current ETag does not match the given value.
995
996        :type if_metageneration_match: long
997        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
998                                        bucket's current metageneration matches the given value.
999
1000        :type if_metageneration_not_match: long
1001        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
1002                                            bucket's current metageneration does not match the given value.
1003
1004        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1005        :param retry:
1006            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1007        """
1008        super(Bucket, self).reload(
1009            client=client,
1010            projection=projection,
1011            timeout=timeout,
1012            if_etag_match=if_etag_match,
1013            if_etag_not_match=if_etag_not_match,
1014            if_metageneration_match=if_metageneration_match,
1015            if_metageneration_not_match=if_metageneration_not_match,
1016            retry=retry,
1017        )
1018
1019    def patch(
1020        self,
1021        client=None,
1022        timeout=_DEFAULT_TIMEOUT,
1023        if_metageneration_match=None,
1024        if_metageneration_not_match=None,
1025        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
1026    ):
1027        """Sends all changed properties in a PATCH request.
1028
1029        Updates the ``_properties`` with the response from the backend.
1030
1031        If :attr:`user_project` is set, bills the API request to that project.
1032
1033        :type client: :class:`~google.cloud.storage.client.Client` or
1034                      ``NoneType``
1035        :param client: the client to use. If not passed, falls back to the
1036                       ``client`` stored on the current object.
1037
1038        :type timeout: float or tuple
1039        :param timeout:
1040            (Optional) The amount of time, in seconds, to wait
1041            for the server response.  See: :ref:`configuring_timeouts`
1042
1043        :type if_metageneration_match: long
1044        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
1045                                        blob's current metageneration matches the given value.
1046
1047        :type if_metageneration_not_match: long
1048        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
1049                                            blob's current metageneration does not match the given value.
1050
1051        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1052        :param retry:
1053            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1054        """
1055        # Special case: For buckets, it is possible that labels are being
1056        # removed; this requires special handling.
1057        if self._label_removals:
1058            self._changes.add("labels")
1059            self._properties.setdefault("labels", {})
1060            for removed_label in self._label_removals:
1061                self._properties["labels"][removed_label] = None
1062
1063        # Call the superclass method.
1064        super(Bucket, self).patch(
1065            client=client,
1066            if_metageneration_match=if_metageneration_match,
1067            if_metageneration_not_match=if_metageneration_not_match,
1068            timeout=timeout,
1069            retry=retry,
1070        )
1071
1072    @property
1073    def acl(self):
1074        """Create our ACL on demand."""
1075        return self._acl
1076
1077    @property
1078    def default_object_acl(self):
1079        """Create our defaultObjectACL on demand."""
1080        return self._default_object_acl
1081
1082    @staticmethod
1083    def path_helper(bucket_name):
1084        """Relative URL path for a bucket.
1085
1086        :type bucket_name: str
1087        :param bucket_name: The bucket name in the path.
1088
1089        :rtype: str
1090        :returns: The relative URL path for ``bucket_name``.
1091        """
1092        return "/b/" + bucket_name
1093
1094    @property
1095    def path(self):
1096        """The URL path to this bucket."""
1097        if not self.name:
1098            raise ValueError("Cannot determine path without bucket name.")
1099
1100        return self.path_helper(self.name)
1101
1102    def get_blob(
1103        self,
1104        blob_name,
1105        client=None,
1106        encryption_key=None,
1107        generation=None,
1108        if_etag_match=None,
1109        if_etag_not_match=None,
1110        if_generation_match=None,
1111        if_generation_not_match=None,
1112        if_metageneration_match=None,
1113        if_metageneration_not_match=None,
1114        timeout=_DEFAULT_TIMEOUT,
1115        retry=DEFAULT_RETRY,
1116        **kwargs
1117    ):
1118        """Get a blob object by name.
1119
1120        This will return None if the blob doesn't exist:
1121
1122        .. literalinclude:: snippets.py
1123          :start-after: [START get_blob]
1124          :end-before: [END get_blob]
1125          :dedent: 4
1126
1127        If :attr:`user_project` is set, bills the API request to that project.
1128
1129        :type blob_name: str
1130        :param blob_name: The name of the blob to retrieve.
1131
1132        :type client: :class:`~google.cloud.storage.client.Client` or
1133                      ``NoneType``
1134        :param client: (Optional) The client to use.  If not passed, falls back
1135                       to the ``client`` stored on the current bucket.
1136
1137        :type encryption_key: bytes
1138        :param encryption_key:
1139            (Optional) 32 byte encryption key for customer-supplied encryption.
1140            See
1141            https://cloud.google.com/storage/docs/encryption#customer-supplied.
1142
1143        :type generation: long
1144        :param generation:
1145            (Optional) If present, selects a specific revision of this object.
1146
1147        :type if_etag_match: Union[str, Set[str]]
1148        :param if_etag_match:
1149            (Optional) See :ref:`using-if-etag-match`
1150
1151        :type if_etag_not_match: Union[str, Set[str]]
1152        :param if_etag_not_match:
1153            (Optional) See :ref:`using-if-etag-not-match`
1154
1155        :type if_generation_match: long
1156        :param if_generation_match:
1157            (Optional) See :ref:`using-if-generation-match`
1158
1159        :type if_generation_not_match: long
1160        :param if_generation_not_match:
1161            (Optional) See :ref:`using-if-generation-not-match`
1162
1163        :type if_metageneration_match: long
1164        :param if_metageneration_match:
1165            (Optional) See :ref:`using-if-metageneration-match`
1166
1167        :type if_metageneration_not_match: long
1168        :param if_metageneration_not_match:
1169            (Optional) See :ref:`using-if-metageneration-not-match`
1170
1171        :type timeout: float or tuple
1172        :param timeout:
1173            (Optional) The amount of time, in seconds, to wait
1174            for the server response.  See: :ref:`configuring_timeouts`
1175
1176        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1177        :param retry:
1178            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1179
1180        :param kwargs: Keyword arguments to pass to the
1181                       :class:`~google.cloud.storage.blob.Blob` constructor.
1182
1183        :rtype: :class:`google.cloud.storage.blob.Blob` or None
1184        :returns: The blob object if it exists, otherwise None.
1185        """
1186        blob = Blob(
1187            bucket=self,
1188            name=blob_name,
1189            encryption_key=encryption_key,
1190            generation=generation,
1191            **kwargs
1192        )
1193        try:
1194            # NOTE: This will not fail immediately in a batch. However, when
1195            #       Batch.finish() is called, the resulting `NotFound` will be
1196            #       raised.
1197            blob.reload(
1198                client=client,
1199                timeout=timeout,
1200                if_etag_match=if_etag_match,
1201                if_etag_not_match=if_etag_not_match,
1202                if_generation_match=if_generation_match,
1203                if_generation_not_match=if_generation_not_match,
1204                if_metageneration_match=if_metageneration_match,
1205                if_metageneration_not_match=if_metageneration_not_match,
1206                retry=retry,
1207            )
1208        except NotFound:
1209            return None
1210        else:
1211            return blob
1212
1213    def list_blobs(
1214        self,
1215        max_results=None,
1216        page_token=None,
1217        prefix=None,
1218        delimiter=None,
1219        start_offset=None,
1220        end_offset=None,
1221        include_trailing_delimiter=None,
1222        versions=None,
1223        projection="noAcl",
1224        fields=None,
1225        client=None,
1226        timeout=_DEFAULT_TIMEOUT,
1227        retry=DEFAULT_RETRY,
1228    ):
1229        """DEPRECATED. Return an iterator used to find blobs in the bucket.
1230
1231        .. note::
1232          Direct use of this method is deprecated. Use ``Client.list_blobs`` instead.
1233
1234        If :attr:`user_project` is set, bills the API request to that project.
1235
1236        :type max_results: int
1237        :param max_results:
1238            (Optional) The maximum number of blobs to return.
1239
1240        :type page_token: str
1241        :param page_token:
1242            (Optional) If present, return the next batch of blobs, using the
1243            value, which must correspond to the ``nextPageToken`` value
1244            returned in the previous response.  Deprecated: use the ``pages``
1245            property of the returned iterator instead of manually passing the
1246            token.
1247
1248        :type prefix: str
1249        :param prefix: (Optional) Prefix used to filter blobs.
1250
1251        :type delimiter: str
1252        :param delimiter: (Optional) Delimiter, used with ``prefix`` to
1253                          emulate hierarchy.
1254
1255        :type start_offset: str
1256        :param start_offset:
1257            (Optional) Filter results to objects whose names are
1258            lexicographically equal to or after ``startOffset``. If
1259            ``endOffset`` is also set, the objects listed will have names
1260            between ``startOffset`` (inclusive) and ``endOffset`` (exclusive).
1261
1262        :type end_offset: str
1263        :param end_offset:
1264            (Optional) Filter results to objects whose names are
1265            lexicographically before ``endOffset``. If ``startOffset`` is also
1266            set, the objects listed will have names between ``startOffset``
1267            (inclusive) and ``endOffset`` (exclusive).
1268
1269        :type include_trailing_delimiter: boolean
1270        :param include_trailing_delimiter:
1271            (Optional) If true, objects that end in exactly one instance of
1272            ``delimiter`` will have their metadata included in ``items`` in
1273            addition to ``prefixes``.
1274
1275        :type versions: bool
1276        :param versions: (Optional) Whether object versions should be returned
1277                         as separate blobs.
1278
1279        :type projection: str
1280        :param projection: (Optional) If used, must be 'full' or 'noAcl'.
1281                           Defaults to ``'noAcl'``. Specifies the set of
1282                           properties to return.
1283
1284        :type fields: str
1285        :param fields:
1286            (Optional) Selector specifying which fields to include
1287            in a partial response. Must be a list of fields. For
1288            example to get a partial response with just the next
1289            page token and the name and language of each blob returned:
1290            ``'items(name,contentLanguage),nextPageToken'``.
1291            See: https://cloud.google.com/storage/docs/json_api/v1/parameters#fields
1292
1293        :type client: :class:`~google.cloud.storage.client.Client`
1294        :param client: (Optional) The client to use.  If not passed, falls back
1295                       to the ``client`` stored on the current bucket.
1296
1297        :type timeout: float or tuple
1298        :param timeout:
1299            (Optional) The amount of time, in seconds, to wait
1300            for the server response.  See: :ref:`configuring_timeouts`
1301
1302        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1303        :param retry:
1304            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1305
1306        :rtype: :class:`~google.api_core.page_iterator.Iterator`
1307        :returns: Iterator of all :class:`~google.cloud.storage.blob.Blob`
1308                  in this bucket matching the arguments.
1309
1310        Example:
1311            List blobs in the bucket with user_project.
1312
1313            >>> from google.cloud import storage
1314            >>> client = storage.Client()
1315
1316            >>> bucket = storage.Bucket(client, "my-bucket-name", user_project="my-project")
1317            >>> all_blobs = list(client.list_blobs(bucket))
1318        """
1319        client = self._require_client(client)
1320        return client.list_blobs(
1321            self,
1322            max_results=max_results,
1323            page_token=page_token,
1324            prefix=prefix,
1325            delimiter=delimiter,
1326            start_offset=start_offset,
1327            end_offset=end_offset,
1328            include_trailing_delimiter=include_trailing_delimiter,
1329            versions=versions,
1330            projection=projection,
1331            fields=fields,
1332            timeout=timeout,
1333            retry=retry,
1334        )
1335
1336    def list_notifications(
1337        self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
1338    ):
1339        """List Pub / Sub notifications for this bucket.
1340
1341        See:
1342        https://cloud.google.com/storage/docs/json_api/v1/notifications/list
1343
1344        If :attr:`user_project` is set, bills the API request to that project.
1345
1346        :type client: :class:`~google.cloud.storage.client.Client` or
1347                      ``NoneType``
1348        :param client: (Optional) The client to use.  If not passed, falls back
1349                       to the ``client`` stored on the current bucket.
1350        :type timeout: float or tuple
1351        :param timeout:
1352            (Optional) The amount of time, in seconds, to wait
1353            for the server response.  See: :ref:`configuring_timeouts`
1354
1355        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1356        :param retry:
1357            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1358
1359        :rtype: list of :class:`.BucketNotification`
1360        :returns: notification instances
1361        """
1362        client = self._require_client(client)
1363        path = self.path + "/notificationConfigs"
1364        iterator = client._list_resource(
1365            path, _item_to_notification, timeout=timeout, retry=retry,
1366        )
1367        iterator.bucket = self
1368        return iterator
1369
1370    def get_notification(
1371        self,
1372        notification_id,
1373        client=None,
1374        timeout=_DEFAULT_TIMEOUT,
1375        retry=DEFAULT_RETRY,
1376    ):
1377        """Get Pub / Sub notification for this bucket.
1378
1379        See:
1380        https://cloud.google.com/storage/docs/json_api/v1/notifications/get
1381
1382        If :attr:`user_project` is set, bills the API request to that project.
1383
1384        :type notification_id: str
1385        :param notification_id: The notification id to retrieve the notification configuration.
1386
1387        :type client: :class:`~google.cloud.storage.client.Client` or
1388                      ``NoneType``
1389        :param client: (Optional) The client to use.  If not passed, falls back
1390                       to the ``client`` stored on the current bucket.
1391        :type timeout: float or tuple
1392        :param timeout:
1393            (Optional) The amount of time, in seconds, to wait
1394            for the server response.  See: :ref:`configuring_timeouts`
1395
1396        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1397        :param retry:
1398            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1399
1400        :rtype: :class:`.BucketNotification`
1401        :returns: notification instance.
1402
1403        Example:
1404            Get notification using notification id.
1405
1406            >>> from google.cloud import storage
1407            >>> client = storage.Client()
1408            >>> bucket = client.get_bucket('my-bucket-name')  # API request.
1409            >>> notification = bucket.get_notification(notification_id='id')  # API request.
1410
1411        """
1412        notification = self.notification(notification_id=notification_id)
1413        notification.reload(client=client, timeout=timeout, retry=retry)
1414        return notification
1415
1416    def delete(
1417        self,
1418        force=False,
1419        client=None,
1420        if_metageneration_match=None,
1421        if_metageneration_not_match=None,
1422        timeout=_DEFAULT_TIMEOUT,
1423        retry=DEFAULT_RETRY,
1424    ):
1425        """Delete this bucket.
1426
1427        The bucket **must** be empty in order to submit a delete request. If
1428        ``force=True`` is passed, this will first attempt to delete all the
1429        objects / blobs in the bucket (i.e. try to empty the bucket).
1430
1431        If the bucket doesn't exist, this will raise
1432        :class:`google.cloud.exceptions.NotFound`. If the bucket is not empty
1433        (and ``force=False``), will raise :class:`google.cloud.exceptions.Conflict`.
1434
1435        If ``force=True`` and the bucket contains more than 256 objects / blobs
1436        this will cowardly refuse to delete the objects (or the bucket). This
1437        is to prevent accidental bucket deletion and to prevent extremely long
1438        runtime of this method.
1439
1440        If :attr:`user_project` is set, bills the API request to that project.
1441
1442        :type force: bool
1443        :param force: If True, empties the bucket's objects then deletes it.
1444
1445        :type client: :class:`~google.cloud.storage.client.Client` or
1446                      ``NoneType``
1447        :param client: (Optional) The client to use. If not passed, falls back
1448                       to the ``client`` stored on the current bucket.
1449
1450        :type if_metageneration_match: long
1451        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
1452                                        blob's current metageneration matches the given value.
1453
1454        :type if_metageneration_not_match: long
1455        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
1456                                            blob's current metageneration does not match the given value.
1457
1458        :type timeout: float or tuple
1459        :param timeout:
1460            (Optional) The amount of time, in seconds, to wait
1461            for the server response.  See: :ref:`configuring_timeouts`
1462
1463        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1464        :param retry:
1465            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1466
1467        :raises: :class:`ValueError` if ``force`` is ``True`` and the bucket
1468                 contains more than 256 objects / blobs.
1469        """
1470        client = self._require_client(client)
1471        query_params = {}
1472
1473        if self.user_project is not None:
1474            query_params["userProject"] = self.user_project
1475
1476        _add_generation_match_parameters(
1477            query_params,
1478            if_metageneration_match=if_metageneration_match,
1479            if_metageneration_not_match=if_metageneration_not_match,
1480        )
1481        if force:
1482            blobs = list(
1483                self.list_blobs(
1484                    max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
1485                    client=client,
1486                    timeout=timeout,
1487                    retry=retry,
1488                )
1489            )
1490            if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
1491                message = (
1492                    "Refusing to delete bucket with more than "
1493                    "%d objects. If you actually want to delete "
1494                    "this bucket, please delete the objects "
1495                    "yourself before calling Bucket.delete()."
1496                ) % (self._MAX_OBJECTS_FOR_ITERATION,)
1497                raise ValueError(message)
1498
1499            # Ignore 404 errors on delete.
1500            self.delete_blobs(
1501                blobs,
1502                on_error=lambda blob: None,
1503                client=client,
1504                timeout=timeout,
1505                retry=retry,
1506            )
1507
1508        # We intentionally pass `_target_object=None` since a DELETE
1509        # request has no response value (whether in a standard request or
1510        # in a batch request).
1511        client._delete_resource(
1512            self.path,
1513            query_params=query_params,
1514            timeout=timeout,
1515            retry=retry,
1516            _target_object=None,
1517        )
1518
1519    def delete_blob(
1520        self,
1521        blob_name,
1522        client=None,
1523        generation=None,
1524        if_generation_match=None,
1525        if_generation_not_match=None,
1526        if_metageneration_match=None,
1527        if_metageneration_not_match=None,
1528        timeout=_DEFAULT_TIMEOUT,
1529        retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
1530    ):
1531        """Deletes a blob from the current bucket.
1532
1533        If the blob isn't found (backend 404), raises a
1534        :class:`google.cloud.exceptions.NotFound`.
1535
1536        For example:
1537
1538        .. literalinclude:: snippets.py
1539          :start-after: [START delete_blob]
1540          :end-before: [END delete_blob]
1541          :dedent: 4
1542
1543        If :attr:`user_project` is set, bills the API request to that project.
1544
1545        :type blob_name: str
1546        :param blob_name: A blob name to delete.
1547
1548        :type client: :class:`~google.cloud.storage.client.Client` or
1549                      ``NoneType``
1550        :param client: (Optional) The client to use. If not passed, falls back
1551                       to the ``client`` stored on the current bucket.
1552
1553        :type generation: long
1554        :param generation: (Optional) If present, permanently deletes a specific
1555                           revision of this object.
1556
1557        :type if_generation_match: long
1558        :param if_generation_match:
1559            (Optional) See :ref:`using-if-generation-match`
1560
1561        :type if_generation_not_match: long
1562        :param if_generation_not_match:
1563            (Optional) See :ref:`using-if-generation-not-match`
1564
1565        :type if_metageneration_match: long
1566        :param if_metageneration_match:
1567            (Optional) See :ref:`using-if-metageneration-match`
1568
1569        :type if_metageneration_not_match: long
1570        :param if_metageneration_not_match:
1571            (Optional) See :ref:`using-if-metageneration-not-match`
1572
1573        :type timeout: float or tuple
1574        :param timeout:
1575            (Optional) The amount of time, in seconds, to wait
1576            for the server response.  See: :ref:`configuring_timeouts`
1577
1578        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1579        :param retry:
1580            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1581
1582        :raises: :class:`google.cloud.exceptions.NotFound` (to suppress
1583                 the exception, call ``delete_blobs``, passing a no-op
1584                 ``on_error`` callback, e.g.:
1585
1586        .. literalinclude:: snippets.py
1587            :start-after: [START delete_blobs]
1588            :end-before: [END delete_blobs]
1589            :dedent: 4
1590
1591        """
1592        client = self._require_client(client)
1593        blob = Blob(blob_name, bucket=self, generation=generation)
1594
1595        query_params = copy.deepcopy(blob._query_params)
1596        _add_generation_match_parameters(
1597            query_params,
1598            if_generation_match=if_generation_match,
1599            if_generation_not_match=if_generation_not_match,
1600            if_metageneration_match=if_metageneration_match,
1601            if_metageneration_not_match=if_metageneration_not_match,
1602        )
1603        # We intentionally pass `_target_object=None` since a DELETE
1604        # request has no response value (whether in a standard request or
1605        # in a batch request).
1606        client._delete_resource(
1607            blob.path,
1608            query_params=query_params,
1609            timeout=timeout,
1610            retry=retry,
1611            _target_object=None,
1612        )
1613
    def delete_blobs(
        self,
        blobs,
        on_error=None,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_generation_match=None,
        if_generation_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
    ):
        """Deletes a list of blobs from the current bucket.

        Uses :meth:`delete_blob` to delete each individual blob.

        If :attr:`user_project` is set, bills the API request to that project.

        :type blobs: list
        :param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or
                      blob names to delete.

        :type on_error: callable
        :param on_error: (Optional) Takes single argument: ``blob``. Called
                         once for each blob raising
                         :class:`~google.cloud.exceptions.NotFound`;
                         otherwise, the exception is propagated.

        :type client: :class:`~google.cloud.storage.client.Client`
        :param client: (Optional) The client to use.  If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type if_generation_match: list of long
        :param if_generation_match:
            (Optional) See :ref:`using-if-generation-match`
            The list must match ``blobs`` item-to-item.

        :type if_generation_not_match: list of long
        :param if_generation_not_match:
            (Optional) See :ref:`using-if-generation-not-match`
            The list must match ``blobs`` item-to-item.

        :type if_metageneration_match: list of long
        :param if_metageneration_match:
            (Optional) See :ref:`using-if-metageneration-match`
            The list must match ``blobs`` item-to-item.

        :type if_metageneration_not_match: list of long
        :param if_metageneration_not_match:
            (Optional) See :ref:`using-if-metageneration-not-match`
            The list must match ``blobs`` item-to-item.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response.  See: :ref:`configuring_timeouts`

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :raises: :class:`~google.cloud.exceptions.NotFound` (if
                 `on_error` is not passed).

        Example:
            Delete blobs using generation match preconditions.

            >>> from google.cloud import storage

            >>> client = storage.Client()
            >>> bucket = client.bucket("bucket-name")

            >>> blobs = [bucket.blob("blob-name-1"), bucket.blob("blob-name-2")]
            >>> if_generation_match = [None] * len(blobs)
            >>> if_generation_match[0] = "123"  # precondition for "blob-name-1"

            >>> bucket.delete_blobs(blobs, if_generation_match=if_generation_match)
        """
        # Fail fast if any precondition list was passed but does not pair up
        # one-to-one with ``blobs``.
        _raise_if_len_differs(
            len(blobs),
            if_generation_match=if_generation_match,
            if_generation_not_match=if_generation_not_match,
            if_metageneration_match=if_metageneration_match,
            if_metageneration_not_match=if_metageneration_not_match,
        )
        # Consume the precondition lists as iterators; an omitted (None) list
        # becomes an empty iterator, so ``next(..., None)`` yields None below.
        if_generation_match = iter(if_generation_match or [])
        if_generation_not_match = iter(if_generation_not_match or [])
        if_metageneration_match = iter(if_metageneration_match or [])
        if_metageneration_not_match = iter(if_metageneration_not_match or [])

        for blob in blobs:
            try:
                # Each item may be a Blob instance or a plain blob name.
                blob_name = blob
                if not isinstance(blob_name, six.string_types):
                    blob_name = blob.name
                self.delete_blob(
                    blob_name,
                    client=client,
                    if_generation_match=next(if_generation_match, None),
                    if_generation_not_match=next(if_generation_not_match, None),
                    if_metageneration_match=next(if_metageneration_match, None),
                    if_metageneration_not_match=next(if_metageneration_not_match, None),
                    timeout=timeout,
                    retry=retry,
                )
            except NotFound:
                if on_error is not None:
                    on_error(blob)
                else:
                    raise
1725
1726    def copy_blob(
1727        self,
1728        blob,
1729        destination_bucket,
1730        new_name=None,
1731        client=None,
1732        preserve_acl=True,
1733        source_generation=None,
1734        if_generation_match=None,
1735        if_generation_not_match=None,
1736        if_metageneration_match=None,
1737        if_metageneration_not_match=None,
1738        if_source_generation_match=None,
1739        if_source_generation_not_match=None,
1740        if_source_metageneration_match=None,
1741        if_source_metageneration_not_match=None,
1742        timeout=_DEFAULT_TIMEOUT,
1743        retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
1744    ):
1745        """Copy the given blob to the given bucket, optionally with a new name.
1746
1747        If :attr:`user_project` is set, bills the API request to that project.
1748
1749        :type blob: :class:`google.cloud.storage.blob.Blob`
1750        :param blob: The blob to be copied.
1751
1752        :type destination_bucket: :class:`google.cloud.storage.bucket.Bucket`
1753        :param destination_bucket: The bucket into which the blob should be
1754                                   copied.
1755
1756        :type new_name: str
1757        :param new_name: (Optional) The new name for the copied file.
1758
1759        :type client: :class:`~google.cloud.storage.client.Client` or
1760                      ``NoneType``
1761        :param client: (Optional) The client to use. If not passed, falls back
1762                       to the ``client`` stored on the current bucket.
1763
1764        :type preserve_acl: bool
1765        :param preserve_acl: DEPRECATED. This argument is not functional!
1766                             (Optional) Copies ACL from old blob to new blob.
1767                             Default: True.
1768
1769        :type source_generation: long
1770        :param source_generation: (Optional) The generation of the blob to be
1771                                  copied.
1772
1773        :type if_generation_match: long
1774        :param if_generation_match:
1775            (Optional) See :ref:`using-if-generation-match`
1776            Note that the generation to be matched is that of the
1777            ``destination`` blob.
1778
1779        :type if_generation_not_match: long
1780        :param if_generation_not_match:
1781            (Optional) See :ref:`using-if-generation-not-match`
1782            Note that the generation to be matched is that of the
1783            ``destination`` blob.
1784
1785        :type if_metageneration_match: long
1786        :param if_metageneration_match:
1787            (Optional) See :ref:`using-if-metageneration-match`
1788            Note that the metageneration to be matched is that of the
1789            ``destination`` blob.
1790
1791        :type if_metageneration_not_match: long
1792        :param if_metageneration_not_match:
1793            (Optional) See :ref:`using-if-metageneration-not-match`
1794            Note that the metageneration to be matched is that of the
1795            ``destination`` blob.
1796
1797        :type if_source_generation_match: long
1798        :param if_source_generation_match:
1799            (Optional) Makes the operation conditional on whether the source
1800            object's generation matches the given value.
1801
1802        :type if_source_generation_not_match: long
1803        :param if_source_generation_not_match:
1804            (Optional) Makes the operation conditional on whether the source
1805            object's generation does not match the given value.
1806
1807        :type if_source_metageneration_match: long
1808        :param if_source_metageneration_match:
1809            (Optional) Makes the operation conditional on whether the source
1810            object's current metageneration matches the given value.
1811
1812        :type if_source_metageneration_not_match: long
1813        :param if_source_metageneration_not_match:
1814            (Optional) Makes the operation conditional on whether the source
1815            object's current metageneration does not match the given value.
1816
1817        :type timeout: float or tuple
1818        :param timeout:
1819            (Optional) The amount of time, in seconds, to wait
1820            for the server response.  See: :ref:`configuring_timeouts`
1821
1822        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1823        :param retry:
1824            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1825
1826        :rtype: :class:`google.cloud.storage.blob.Blob`
1827        :returns: The new Blob.
1828
1829        Example:
1830            Copy a blob including ACL.
1831
1832            >>> from google.cloud import storage
1833
1834            >>> client = storage.Client(project="project")
1835
1836            >>> bucket = client.bucket("bucket")
1837            >>> dst_bucket = client.bucket("destination-bucket")
1838
1839            >>> blob = bucket.blob("file.ext")
1840            >>> new_blob = bucket.copy_blob(blob, dst_bucket)
1841            >>> new_blob.acl.save(blob.acl)
1842        """
1843        client = self._require_client(client)
1844        query_params = {}
1845
1846        if self.user_project is not None:
1847            query_params["userProject"] = self.user_project
1848
1849        if source_generation is not None:
1850            query_params["sourceGeneration"] = source_generation
1851
1852        _add_generation_match_parameters(
1853            query_params,
1854            if_generation_match=if_generation_match,
1855            if_generation_not_match=if_generation_not_match,
1856            if_metageneration_match=if_metageneration_match,
1857            if_metageneration_not_match=if_metageneration_not_match,
1858            if_source_generation_match=if_source_generation_match,
1859            if_source_generation_not_match=if_source_generation_not_match,
1860            if_source_metageneration_match=if_source_metageneration_match,
1861            if_source_metageneration_not_match=if_source_metageneration_not_match,
1862        )
1863
1864        if new_name is None:
1865            new_name = blob.name
1866
1867        new_blob = Blob(bucket=destination_bucket, name=new_name)
1868        api_path = blob.path + "/copyTo" + new_blob.path
1869        copy_result = client._post_resource(
1870            api_path,
1871            None,
1872            query_params=query_params,
1873            timeout=timeout,
1874            retry=retry,
1875            _target_object=new_blob,
1876        )
1877
1878        if not preserve_acl:
1879            new_blob.acl.save(acl={}, client=client, timeout=timeout)
1880
1881        new_blob._set_properties(copy_result)
1882        return new_blob
1883
1884    def rename_blob(
1885        self,
1886        blob,
1887        new_name,
1888        client=None,
1889        if_generation_match=None,
1890        if_generation_not_match=None,
1891        if_metageneration_match=None,
1892        if_metageneration_not_match=None,
1893        if_source_generation_match=None,
1894        if_source_generation_not_match=None,
1895        if_source_metageneration_match=None,
1896        if_source_metageneration_not_match=None,
1897        timeout=_DEFAULT_TIMEOUT,
1898        retry=DEFAULT_RETRY_IF_GENERATION_SPECIFIED,
1899    ):
1900        """Rename the given blob using copy and delete operations.
1901
1902        If :attr:`user_project` is set, bills the API request to that project.
1903
1904        Effectively, copies blob to the same bucket with a new name, then
1905        deletes the blob.
1906
1907        .. warning::
1908
1909          This method will first duplicate the data and then delete the
1910          old blob.  This means that with very large objects renaming
1911          could be a very (temporarily) costly or a very slow operation.
1912          If you need more control over the copy and deletion, instead
1913          use `google.cloud.storage.blob.Blob.copy_to` and
1914          `google.cloud.storage.blob.Blob.delete` directly.
1915
1916        :type blob: :class:`google.cloud.storage.blob.Blob`
1917        :param blob: The blob to be renamed.
1918
1919        :type new_name: str
1920        :param new_name: The new name for this blob.
1921
1922        :type client: :class:`~google.cloud.storage.client.Client` or
1923                      ``NoneType``
1924        :param client: (Optional) The client to use.  If not passed, falls back
1925                       to the ``client`` stored on the current bucket.
1926
1927        :type if_generation_match: long
1928        :param if_generation_match:
1929            (Optional) See :ref:`using-if-generation-match`
1930            Note that the generation to be matched is that of the
1931            ``destination`` blob.
1932
1933        :type if_generation_not_match: long
1934        :param if_generation_not_match:
1935            (Optional) See :ref:`using-if-generation-not-match`
1936            Note that the generation to be matched is that of the
1937            ``destination`` blob.
1938
1939        :type if_metageneration_match: long
1940        :param if_metageneration_match:
1941            (Optional) See :ref:`using-if-metageneration-match`
1942            Note that the metageneration to be matched is that of the
1943            ``destination`` blob.
1944
1945        :type if_metageneration_not_match: long
1946        :param if_metageneration_not_match:
1947            (Optional) See :ref:`using-if-metageneration-not-match`
1948            Note that the metageneration to be matched is that of the
1949            ``destination`` blob.
1950
1951        :type if_source_generation_match: long
1952        :param if_source_generation_match:
1953            (Optional) Makes the operation conditional on whether the source
1954            object's generation matches the given value. Also used in the
1955            (implied) delete request.
1956
1957        :type if_source_generation_not_match: long
1958        :param if_source_generation_not_match:
1959            (Optional) Makes the operation conditional on whether the source
1960            object's generation does not match the given value. Also used in
1961            the (implied) delete request.
1962
1963        :type if_source_metageneration_match: long
1964        :param if_source_metageneration_match:
1965            (Optional) Makes the operation conditional on whether the source
1966            object's current metageneration matches the given value. Also used
1967            in the (implied) delete request.
1968
1969        :type if_source_metageneration_not_match: long
1970        :param if_source_metageneration_not_match:
1971            (Optional) Makes the operation conditional on whether the source
1972            object's current metageneration does not match the given value.
1973            Also used in the (implied) delete request.
1974
1975        :type timeout: float or tuple
1976        :param timeout:
1977            (Optional) The amount of time, in seconds, to wait
1978            for the server response.  See: :ref:`configuring_timeouts`
1979
1980        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
1981        :param retry:
1982            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
1983
1984        :rtype: :class:`Blob`
1985        :returns: The newly-renamed blob.
1986        """
1987        same_name = blob.name == new_name
1988
1989        new_blob = self.copy_blob(
1990            blob,
1991            self,
1992            new_name,
1993            client=client,
1994            timeout=timeout,
1995            if_generation_match=if_generation_match,
1996            if_generation_not_match=if_generation_not_match,
1997            if_metageneration_match=if_metageneration_match,
1998            if_metageneration_not_match=if_metageneration_not_match,
1999            if_source_generation_match=if_source_generation_match,
2000            if_source_generation_not_match=if_source_generation_not_match,
2001            if_source_metageneration_match=if_source_metageneration_match,
2002            if_source_metageneration_not_match=if_source_metageneration_not_match,
2003            retry=retry,
2004        )
2005
2006        if not same_name:
2007            blob.delete(
2008                client=client,
2009                timeout=timeout,
2010                if_generation_match=if_source_generation_match,
2011                if_generation_not_match=if_source_generation_not_match,
2012                if_metageneration_match=if_source_metageneration_match,
2013                if_metageneration_not_match=if_source_metageneration_not_match,
2014                retry=retry,
2015            )
2016        return new_blob
2017
2018    @property
2019    def cors(self):
2020        """Retrieve or set CORS policies configured for this bucket.
2021
2022        See http://www.w3.org/TR/cors/ and
2023             https://cloud.google.com/storage/docs/json_api/v1/buckets
2024
2025        .. note::
2026
2027           The getter for this property returns a list which contains
2028           *copies* of the bucket's CORS policy mappings.  Mutating the list
2029           or one of its dicts has no effect unless you then re-assign the
2030           dict via the setter.  E.g.:
2031
2032           >>> policies = bucket.cors
2033           >>> policies.append({'origin': '/foo', ...})
2034           >>> policies[1]['maxAgeSeconds'] = 3600
2035           >>> del policies[0]
2036           >>> bucket.cors = policies
2037           >>> bucket.update()
2038
2039        :setter: Set CORS policies for this bucket.
2040        :getter: Gets the CORS policies for this bucket.
2041
2042        :rtype: list of dictionaries
2043        :returns: A sequence of mappings describing each CORS policy.
2044        """
2045        return [copy.deepcopy(policy) for policy in self._properties.get("cors", ())]
2046
    @cors.setter
    def cors(self, entries):
        """Set CORS policies configured for this bucket.

        Assignment replaces the complete set of CORS policies locally; the
        change is persisted on the next ``patch()`` / ``update()`` call
        (see the getter's example).

        See http://www.w3.org/TR/cors/ and
             https://cloud.google.com/storage/docs/json_api/v1/buckets

        :type entries: list of dictionaries
        :param entries: A sequence of mappings describing each CORS policy.
        """
        self._patch_property("cors", entries)
2058
    default_event_based_hold = _scalar_property("defaultEventBasedHold")
    """Are uploaded objects automatically placed under an event-based hold?

    If True, uploaded objects will be placed under an event-based hold to
    be released at a future time. When released an object will then begin
    the retention period determined by the policy retention period for the
    object bucket.

    See https://cloud.google.com/storage/docs/json_api/v1/buckets

    If the property is not set locally, returns ``None``.

    :rtype: bool or ``NoneType``
    """
2073
2074    @property
2075    def default_kms_key_name(self):
2076        """Retrieve / set default KMS encryption key for objects in the bucket.
2077
2078        See https://cloud.google.com/storage/docs/json_api/v1/buckets
2079
2080        :setter: Set default KMS encryption key for items in this bucket.
2081        :getter: Get default KMS encryption key for items in this bucket.
2082
2083        :rtype: str
2084        :returns: Default KMS encryption key, or ``None`` if not set.
2085        """
2086        encryption_config = self._properties.get("encryption", {})
2087        return encryption_config.get("defaultKmsKeyName")
2088
2089    @default_kms_key_name.setter
2090    def default_kms_key_name(self, value):
2091        """Set default KMS encryption key for objects in the bucket.
2092
2093        :type value: str or None
2094        :param value: new KMS key name (None to clear any existing key).
2095        """
2096        encryption_config = self._properties.get("encryption", {})
2097        encryption_config["defaultKmsKeyName"] = value
2098        self._patch_property("encryption", encryption_config)
2099
2100    @property
2101    def labels(self):
2102        """Retrieve or set labels assigned to this bucket.
2103
2104        See
2105        https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
2106
2107        .. note::
2108
2109           The getter for this property returns a dict which is a *copy*
2110           of the bucket's labels.  Mutating that dict has no effect unless
2111           you then re-assign the dict via the setter.  E.g.:
2112
2113           >>> labels = bucket.labels
2114           >>> labels['new_key'] = 'some-label'
2115           >>> del labels['old_key']
2116           >>> bucket.labels = labels
2117           >>> bucket.update()
2118
2119        :setter: Set labels for this bucket.
2120        :getter: Gets the labels for this bucket.
2121
2122        :rtype: :class:`dict`
2123        :returns: Name-value pairs (string->string) labelling the bucket.
2124        """
2125        labels = self._properties.get("labels")
2126        if labels is None:
2127            return {}
2128        return copy.deepcopy(labels)
2129
2130    @labels.setter
2131    def labels(self, mapping):
2132        """Set labels assigned to this bucket.
2133
2134        See
2135        https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
2136
2137        :type mapping: :class:`dict`
2138        :param mapping: Name-value pairs (string->string) labelling the bucket.
2139        """
2140        # If any labels have been expressly removed, we need to track this
2141        # so that a future .patch() call can do the correct thing.
2142        existing = set([k for k in self.labels.keys()])
2143        incoming = set([k for k in mapping.keys()])
2144        self._label_removals = self._label_removals.union(existing.difference(incoming))
2145        mapping = {k: str(v) for k, v in mapping.items()}
2146
2147        # Actually update the labels on the object.
2148        self._patch_property("labels", copy.deepcopy(mapping))
2149
2150    @property
2151    def etag(self):
2152        """Retrieve the ETag for the bucket.
2153
2154        See https://tools.ietf.org/html/rfc2616#section-3.11 and
2155             https://cloud.google.com/storage/docs/json_api/v1/buckets
2156
2157        :rtype: str or ``NoneType``
2158        :returns: The bucket etag or ``None`` if the bucket's
2159                  resource has not been loaded from the server.
2160        """
2161        return self._properties.get("etag")
2162
2163    @property
2164    def id(self):
2165        """Retrieve the ID for the bucket.
2166
2167        See https://cloud.google.com/storage/docs/json_api/v1/buckets
2168
2169        :rtype: str or ``NoneType``
2170        :returns: The ID of the bucket or ``None`` if the bucket's
2171                  resource has not been loaded from the server.
2172        """
2173        return self._properties.get("id")
2174
2175    @property
2176    def iam_configuration(self):
2177        """Retrieve IAM configuration for this bucket.
2178
2179        :rtype: :class:`IAMConfiguration`
2180        :returns: an instance for managing the bucket's IAM configuration.
2181        """
2182        info = self._properties.get("iamConfiguration", {})
2183        return IAMConfiguration.from_api_repr(info, self)
2184
    @property
    def lifecycle_rules(self):
        """Retrieve or set lifecycle rules configured for this bucket.

        See https://cloud.google.com/storage/docs/lifecycle and
             https://cloud.google.com/storage/docs/json_api/v1/buckets

        .. note::

           The getter for this property returns a *generator* which yields
           helper objects built from the bucket's stored lifecycle
           configuration.  Mutating the output has no effect unless you
           materialize it into a list and re-assign via the setter.  E.g.:

           >>> rules = list(bucket.lifecycle_rules)
           >>> rules.append({'origin': '/foo', ...})
           >>> rules[1]['rule']['action']['type'] = 'Delete'
           >>> del rules[0]
           >>> bucket.lifecycle_rules = rules
           >>> bucket.update()

        :setter: Set lifecycle rules for this bucket.
        :getter: Gets the lifecycle rules for this bucket.

        :rtype: generator(dict)
        :returns: A sequence of mappings describing each lifecycle rule.
        """
        info = self._properties.get("lifecycle", {})
        for rule in info.get("rule", ()):
            # Dispatch on the rule's action type to the matching helper class.
            action_type = rule["action"]["type"]
            if action_type == "Delete":
                yield LifecycleRuleDelete.from_api_repr(rule)
            elif action_type == "SetStorageClass":
                yield LifecycleRuleSetStorageClass.from_api_repr(rule)
            else:
                # Rule type unknown to this client version: warn instead of
                # silently dropping the rule.
                warnings.warn(
                    "Unknown lifecycle rule type received: {}. Please upgrade to the latest version of google-cloud-storage.".format(
                        rule
                    ),
                    UserWarning,
                    stacklevel=1,
                )
2227
    @lifecycle_rules.setter
    def lifecycle_rules(self, rules):
        """Set lifecycle rules configured for this bucket.

        See https://cloud.google.com/storage/docs/lifecycle and
             https://cloud.google.com/storage/docs/json_api/v1/buckets

        :type rules: list of dictionaries
        :param rules: A sequence of mappings describing each lifecycle rule.
        """
        # Coerce helper objects to plain dicts for the API payload.
        rules = [dict(rule) for rule in rules]  # Convert helpers if needed
        self._patch_property("lifecycle", {"rule": rules})
2240
2241    def clear_lifecyle_rules(self):
2242        """Set lifestyle rules configured for this bucket.
2243
2244        See https://cloud.google.com/storage/docs/lifecycle and
2245             https://cloud.google.com/storage/docs/json_api/v1/buckets
2246        """
2247        self.lifecycle_rules = []
2248
    def add_lifecycle_delete_rule(self, **kw):
        """Add a "delete" rule to lifecycle rules configured for this bucket.

        See https://cloud.google.com/storage/docs/lifecycle and
             https://cloud.google.com/storage/docs/json_api/v1/buckets

        .. literalinclude:: snippets.py
          :start-after: [START add_lifecycle_delete_rule]
          :end-before: [END add_lifecycle_delete_rule]
          :dedent: 4

        :type kw: dict
        :params kw: arguments passed to :class:`LifecycleRuleConditions`.
        """
        # Materialize the generator, append the new rule, and re-assign so
        # the change goes through the property setter.
        rules = list(self.lifecycle_rules)
        rules.append(LifecycleRuleDelete(**kw))
        self.lifecycle_rules = rules
2266
    def add_lifecycle_set_storage_class_rule(self, storage_class, **kw):
        """Add a "set storage class" rule to lifecycle rules for this bucket.

        See https://cloud.google.com/storage/docs/lifecycle and
             https://cloud.google.com/storage/docs/json_api/v1/buckets

        .. literalinclude:: snippets.py
          :start-after: [START add_lifecycle_set_storage_class_rule]
          :end-before: [END add_lifecycle_set_storage_class_rule]
          :dedent: 4

        :type storage_class: str, one of :attr:`STORAGE_CLASSES`.
        :param storage_class: new storage class to assign to matching items.

        :type kw: dict
        :params kw: arguments passed to :class:`LifecycleRuleConditions`.
        """
        # Materialize the generator, append the new rule, and re-assign so
        # the change goes through the property setter.
        rules = list(self.lifecycle_rules)
        rules.append(LifecycleRuleSetStorageClass(storage_class, **kw))
        self.lifecycle_rules = rules
2287
    # Private read/write accessor for the raw "location" resource field;
    # the public ``location`` property below exposes it read-mostly (its
    # setter is deprecated).
    _location = _scalar_property("location")

    @property
    def location(self):
        """Retrieve location configured for this bucket.

        See https://cloud.google.com/storage/docs/json_api/v1/buckets and
        https://cloud.google.com/storage/docs/bucket-locations

        Returns ``None`` if the property has not been set before creation,
        or if the bucket's resource has not been loaded from the server.

        :rtype: str or ``NoneType``
        """
        return self._location
2302
    @location.setter
    def location(self, value):
        """(Deprecated) Set `Bucket.location`

        This can only be set at bucket **creation** time.

        See https://cloud.google.com/storage/docs/json_api/v1/buckets and
        https://cloud.google.com/storage/docs/bucket-locations

        .. warning::

            Assignment to 'Bucket.location' is deprecated, as it is only
            valid before the bucket is created. Instead, pass the location
            to `Bucket.create`.
        """
        # stacklevel=2 attributes the warning to the caller's assignment
        # statement rather than to this setter.
        warnings.warn(_LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2)
        self._location = value
2320
2321    @property
2322    def location_type(self):
2323        """Retrieve or set the location type for the bucket.
2324
2325        See https://cloud.google.com/storage/docs/storage-classes
2326
2327        :setter: Set the location type for this bucket.
2328        :getter: Gets the the location type for this bucket.
2329
2330        :rtype: str or ``NoneType``
2331        :returns:
2332            If set, one of
2333            :attr:`~google.cloud.storage.constants.MULTI_REGION_LOCATION_TYPE`,
2334            :attr:`~google.cloud.storage.constants.REGION_LOCATION_TYPE`, or
2335            :attr:`~google.cloud.storage.constants.DUAL_REGION_LOCATION_TYPE`,
2336            else ``None``.
2337        """
2338        return self._properties.get("locationType")
2339
2340    def get_logging(self):
2341        """Return info about access logging for this bucket.
2342
2343        See https://cloud.google.com/storage/docs/access-logs#status
2344
2345        :rtype: dict or None
2346        :returns: a dict w/ keys, ``logBucket`` and ``logObjectPrefix``
2347                  (if logging is enabled), or None (if not).
2348        """
2349        info = self._properties.get("logging")
2350        return copy.deepcopy(info)
2351
2352    def enable_logging(self, bucket_name, object_prefix=""):
2353        """Enable access logging for this bucket.
2354
2355        See https://cloud.google.com/storage/docs/access-logs
2356
2357        :type bucket_name: str
2358        :param bucket_name: name of bucket in which to store access logs
2359
2360        :type object_prefix: str
2361        :param object_prefix: prefix for access log filenames
2362        """
2363        info = {"logBucket": bucket_name, "logObjectPrefix": object_prefix}
2364        self._patch_property("logging", info)
2365
    def disable_logging(self):
        """Disable access logging for this bucket.

        See https://cloud.google.com/storage/docs/access-logs#disabling
        """
        # Clearing the "logging" property turns access logging off.
        self._patch_property("logging", None)
2372
2373    @property
2374    def metageneration(self):
2375        """Retrieve the metageneration for the bucket.
2376
2377        See https://cloud.google.com/storage/docs/json_api/v1/buckets
2378
2379        :rtype: int or ``NoneType``
2380        :returns: The metageneration of the bucket or ``None`` if the bucket's
2381                  resource has not been loaded from the server.
2382        """
2383        metageneration = self._properties.get("metageneration")
2384        if metageneration is not None:
2385            return int(metageneration)
2386
2387    @property
2388    def owner(self):
2389        """Retrieve info about the owner of the bucket.
2390
2391        See https://cloud.google.com/storage/docs/json_api/v1/buckets
2392
2393        :rtype: dict or ``NoneType``
2394        :returns: Mapping of owner's role/ID. Returns ``None`` if the bucket's
2395                  resource has not been loaded from the server.
2396        """
2397        return copy.deepcopy(self._properties.get("owner"))
2398
2399    @property
2400    def project_number(self):
2401        """Retrieve the number of the project to which the bucket is assigned.
2402
2403        See https://cloud.google.com/storage/docs/json_api/v1/buckets
2404
2405        :rtype: int or ``NoneType``
2406        :returns: The project number that owns the bucket or ``None`` if
2407                  the bucket's resource has not been loaded from the server.
2408        """
2409        project_number = self._properties.get("projectNumber")
2410        if project_number is not None:
2411            return int(project_number)
2412
2413    @property
2414    def retention_policy_effective_time(self):
2415        """Retrieve the effective time of the bucket's retention policy.
2416
2417        :rtype: datetime.datetime or ``NoneType``
2418        :returns: point-in time at which the bucket's retention policy is
2419                  effective, or ``None`` if the property is not
2420                  set locally.
2421        """
2422        policy = self._properties.get("retentionPolicy")
2423        if policy is not None:
2424            timestamp = policy.get("effectiveTime")
2425            if timestamp is not None:
2426                return _rfc3339_nanos_to_datetime(timestamp)
2427
2428    @property
2429    def retention_policy_locked(self):
2430        """Retrieve whthere the bucket's retention policy is locked.
2431
2432        :rtype: bool
2433        :returns: True if the bucket's policy is locked, or else False
2434                  if the policy is not locked, or the property is not
2435                  set locally.
2436        """
2437        policy = self._properties.get("retentionPolicy")
2438        if policy is not None:
2439            return policy.get("isLocked")
2440
2441    @property
2442    def retention_period(self):
2443        """Retrieve or set the retention period for items in the bucket.
2444
2445        :rtype: int or ``NoneType``
2446        :returns: number of seconds to retain items after upload or release
2447                  from event-based lock, or ``None`` if the property is not
2448                  set locally.
2449        """
2450        policy = self._properties.get("retentionPolicy")
2451        if policy is not None:
2452            period = policy.get("retentionPeriod")
2453            if period is not None:
2454                return int(period)
2455
2456    @retention_period.setter
2457    def retention_period(self, value):
2458        """Set the retention period for items in the bucket.
2459
2460        :type value: int
2461        :param value:
2462            number of seconds to retain items after upload or release from
2463            event-based lock.
2464
2465        :raises ValueError: if the bucket's retention policy is locked.
2466        """
2467        policy = self._properties.setdefault("retentionPolicy", {})
2468        if value is not None:
2469            policy["retentionPeriod"] = str(value)
2470        else:
2471            policy = None
2472        self._patch_property("retentionPolicy", policy)
2473
2474    @property
2475    def self_link(self):
2476        """Retrieve the URI for the bucket.
2477
2478        See https://cloud.google.com/storage/docs/json_api/v1/buckets
2479
2480        :rtype: str or ``NoneType``
2481        :returns: The self link for the bucket or ``None`` if
2482                  the bucket's resource has not been loaded from the server.
2483        """
2484        return self._properties.get("selfLink")
2485
2486    @property
2487    def storage_class(self):
2488        """Retrieve or set the storage class for the bucket.
2489
2490        See https://cloud.google.com/storage/docs/storage-classes
2491
2492        :setter: Set the storage class for this bucket.
2493        :getter: Gets the the storage class for this bucket.
2494
2495        :rtype: str or ``NoneType``
2496        :returns:
2497            If set, one of
2498            :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`,
2499            :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`,
2500            :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`,
2501            :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`,
2502            :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`,
2503            :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`,
2504            or
2505            :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`,
2506            else ``None``.
2507        """
2508        return self._properties.get("storageClass")
2509
2510    @storage_class.setter
2511    def storage_class(self, value):
2512        """Set the storage class for the bucket.
2513
2514        See https://cloud.google.com/storage/docs/storage-classes
2515
2516        :type value: str
2517        :param value:
2518            One of
2519            :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`,
2520            :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`,
2521            :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`,
2522            :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`,
2523            :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`,
2524            :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`,
2525            or
2526            :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`,
2527        """
2528        if value not in self.STORAGE_CLASSES:
2529            raise ValueError("Invalid storage class: %s" % (value,))
2530        self._patch_property("storageClass", value)
2531
2532    @property
2533    def time_created(self):
2534        """Retrieve the timestamp at which the bucket was created.
2535
2536        See https://cloud.google.com/storage/docs/json_api/v1/buckets
2537
2538        :rtype: :class:`datetime.datetime` or ``NoneType``
2539        :returns: Datetime object parsed from RFC3339 valid timestamp, or
2540                  ``None`` if the bucket's resource has not been loaded
2541                  from the server.
2542        """
2543        value = self._properties.get("timeCreated")
2544        if value is not None:
2545            return _rfc3339_nanos_to_datetime(value)
2546
2547    @property
2548    def versioning_enabled(self):
2549        """Is versioning enabled for this bucket?
2550
2551        See  https://cloud.google.com/storage/docs/object-versioning for
2552        details.
2553
2554        :setter: Update whether versioning is enabled for this bucket.
2555        :getter: Query whether versioning is enabled for this bucket.
2556
2557        :rtype: bool
2558        :returns: True if enabled, else False.
2559        """
2560        versioning = self._properties.get("versioning", {})
2561        return versioning.get("enabled", False)
2562
2563    @versioning_enabled.setter
2564    def versioning_enabled(self, value):
2565        """Enable versioning for this bucket.
2566
2567        See  https://cloud.google.com/storage/docs/object-versioning for
2568        details.
2569
2570        :type value: convertible to boolean
2571        :param value: should versioning be enabled for the bucket?
2572        """
2573        self._patch_property("versioning", {"enabled": bool(value)})
2574
2575    @property
2576    def requester_pays(self):
2577        """Does the requester pay for API requests for this bucket?
2578
2579        See https://cloud.google.com/storage/docs/requester-pays for
2580        details.
2581
2582        :setter: Update whether requester pays for this bucket.
2583        :getter: Query whether requester pays for this bucket.
2584
2585        :rtype: bool
2586        :returns: True if requester pays for API requests for the bucket,
2587                  else False.
2588        """
2589        versioning = self._properties.get("billing", {})
2590        return versioning.get("requesterPays", False)
2591
2592    @requester_pays.setter
2593    def requester_pays(self, value):
2594        """Update whether requester pays for API requests for this bucket.
2595
2596        See https://cloud.google.com/storage/docs/using-requester-pays for
2597        details.
2598
2599        :type value: convertible to boolean
2600        :param value: should requester pay for API requests for the bucket?
2601        """
2602        self._patch_property("billing", {"requesterPays": bool(value)})
2603
2604    def configure_website(self, main_page_suffix=None, not_found_page=None):
2605        """Configure website-related properties.
2606
2607        See https://cloud.google.com/storage/docs/hosting-static-website
2608
2609        .. note::
2610          This (apparently) only works
2611          if your bucket name is a domain name
2612          (and to do that, you need to get approved somehow...).
2613
2614        If you want this bucket to host a website, just provide the name
2615        of an index page and a page to use when a blob isn't found:
2616
2617        .. literalinclude:: snippets.py
2618          :start-after: [START configure_website]
2619          :end-before: [END configure_website]
2620          :dedent: 4
2621
2622        You probably should also make the whole bucket public:
2623
2624        .. literalinclude:: snippets.py
2625            :start-after: [START make_public]
2626            :end-before: [END make_public]
2627            :dedent: 4
2628
2629        This says: "Make the bucket public, and all the stuff already in
2630        the bucket, and anything else I add to the bucket.  Just make it
2631        all public."
2632
2633        :type main_page_suffix: str
2634        :param main_page_suffix: The page to use as the main page
2635                                 of a directory.
2636                                 Typically something like index.html.
2637
2638        :type not_found_page: str
2639        :param not_found_page: The file to use when a page isn't found.
2640        """
2641        data = {"mainPageSuffix": main_page_suffix, "notFoundPage": not_found_page}
2642        self._patch_property("website", data)
2643
2644    def disable_website(self):
2645        """Disable the website configuration for this bucket.
2646
2647        This is really just a shortcut for setting the website-related
2648        attributes to ``None``.
2649        """
2650        return self.configure_website(None, None)
2651
2652    def get_iam_policy(
2653        self,
2654        client=None,
2655        requested_policy_version=None,
2656        timeout=_DEFAULT_TIMEOUT,
2657        retry=DEFAULT_RETRY,
2658    ):
2659        """Retrieve the IAM policy for the bucket.
2660
2661        See
2662        https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy
2663
2664        If :attr:`user_project` is set, bills the API request to that project.
2665
2666        :type client: :class:`~google.cloud.storage.client.Client` or
2667                      ``NoneType``
2668        :param client: (Optional) The client to use.  If not passed, falls back
2669                       to the ``client`` stored on the current bucket.
2670
2671        :type requested_policy_version: int or ``NoneType``
2672        :param requested_policy_version: (Optional) The version of IAM policies to request.
2673                                         If a policy with a condition is requested without
2674                                         setting this, the server will return an error.
2675                                         This must be set to a value of 3 to retrieve IAM
2676                                         policies containing conditions. This is to prevent
2677                                         client code that isn't aware of IAM conditions from
2678                                         interpreting and modifying policies incorrectly.
2679                                         The service might return a policy with version lower
2680                                         than the one that was requested, based on the
2681                                         feature syntax in the policy fetched.
2682
2683        :type timeout: float or tuple
2684        :param timeout:
2685            (Optional) The amount of time, in seconds, to wait
2686            for the server response.  See: :ref:`configuring_timeouts`
2687
2688        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2689        :param retry:
2690            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
2691
2692        :rtype: :class:`google.api_core.iam.Policy`
2693        :returns: the policy instance, based on the resource returned from
2694                  the ``getIamPolicy`` API request.
2695
2696        Example:
2697
2698        .. code-block:: python
2699
2700           from google.cloud.storage.iam import STORAGE_OBJECT_VIEWER_ROLE
2701
2702           policy = bucket.get_iam_policy(requested_policy_version=3)
2703
2704           policy.version = 3
2705
2706           # Add a binding to the policy via it's bindings property
2707           policy.bindings.append({
2708               "role": STORAGE_OBJECT_VIEWER_ROLE,
2709               "members": {"serviceAccount:account@project.iam.gserviceaccount.com", ...},
2710               # Optional:
2711               "condition": {
2712                   "title": "prefix"
2713                   "description": "Objects matching prefix"
2714                   "expression": "resource.name.startsWith(\"projects/project-name/buckets/bucket-name/objects/prefix\")"
2715               }
2716           })
2717
2718           bucket.set_iam_policy(policy)
2719        """
2720        client = self._require_client(client)
2721        query_params = {}
2722
2723        if self.user_project is not None:
2724            query_params["userProject"] = self.user_project
2725
2726        if requested_policy_version is not None:
2727            query_params["optionsRequestedPolicyVersion"] = requested_policy_version
2728
2729        info = client._get_resource(
2730            "%s/iam" % (self.path,),
2731            query_params=query_params,
2732            timeout=timeout,
2733            retry=retry,
2734            _target_object=None,
2735        )
2736        return Policy.from_api_repr(info)
2737
2738    def set_iam_policy(
2739        self,
2740        policy,
2741        client=None,
2742        timeout=_DEFAULT_TIMEOUT,
2743        retry=DEFAULT_RETRY_IF_ETAG_IN_JSON,
2744    ):
2745        """Update the IAM policy for the bucket.
2746
2747        See
2748        https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy
2749
2750        If :attr:`user_project` is set, bills the API request to that project.
2751
2752        :type policy: :class:`google.api_core.iam.Policy`
2753        :param policy: policy instance used to update bucket's IAM policy.
2754
2755        :type client: :class:`~google.cloud.storage.client.Client` or
2756                      ``NoneType``
2757        :param client: (Optional) The client to use.  If not passed, falls back
2758                       to the ``client`` stored on the current bucket.
2759
2760        :type timeout: float or tuple
2761        :param timeout:
2762            (Optional) The amount of time, in seconds, to wait
2763            for the server response.  See: :ref:`configuring_timeouts`
2764
2765        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2766        :param retry:
2767            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
2768
2769        :rtype: :class:`google.api_core.iam.Policy`
2770        :returns: the policy instance, based on the resource returned from
2771                  the ``setIamPolicy`` API request.
2772        """
2773        client = self._require_client(client)
2774        query_params = {}
2775
2776        if self.user_project is not None:
2777            query_params["userProject"] = self.user_project
2778
2779        path = "{}/iam".format(self.path)
2780        resource = policy.to_api_repr()
2781        resource["resourceId"] = self.path
2782
2783        info = client._put_resource(
2784            path,
2785            resource,
2786            query_params=query_params,
2787            timeout=timeout,
2788            retry=retry,
2789            _target_object=None,
2790        )
2791
2792        return Policy.from_api_repr(info)
2793
2794    def test_iam_permissions(
2795        self, permissions, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
2796    ):
2797        """API call:  test permissions
2798
2799        See
2800        https://cloud.google.com/storage/docs/json_api/v1/buckets/testIamPermissions
2801
2802        If :attr:`user_project` is set, bills the API request to that project.
2803
2804        :type permissions: list of string
2805        :param permissions: the permissions to check
2806
2807        :type client: :class:`~google.cloud.storage.client.Client` or
2808                      ``NoneType``
2809        :param client: (Optional) The client to use.  If not passed, falls back
2810                       to the ``client`` stored on the current bucket.
2811
2812        :type timeout: float or tuple
2813        :param timeout:
2814            (Optional) The amount of time, in seconds, to wait
2815            for the server response.  See: :ref:`configuring_timeouts`
2816
2817        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
2818        :param retry:
2819            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
2820
2821        :rtype: list of string
2822        :returns: the permissions returned by the ``testIamPermissions`` API
2823                  request.
2824        """
2825        client = self._require_client(client)
2826        query_params = {"permissions": permissions}
2827
2828        if self.user_project is not None:
2829            query_params["userProject"] = self.user_project
2830
2831        path = "%s/iam/testPermissions" % (self.path,)
2832        resp = client._get_resource(
2833            path,
2834            query_params=query_params,
2835            timeout=timeout,
2836            retry=retry,
2837            _target_object=None,
2838        )
2839        return resp.get("permissions", [])
2840
    def make_public(
        self,
        recursive=False,
        future=False,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Update bucket's ACL, granting read access to anonymous users.

        :type recursive: bool
        :param recursive: If True, this will make all blobs inside the bucket
                          public as well.

        :type future: bool
        :param future: If True, this will make all objects created in the
                       future public as well.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use.  If not passed, falls back
                       to the ``client`` stored on the current bucket.
        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response.  See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        blob's current metageneration matches the given value.

        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            blob's current metageneration does not match the given value.

        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :raises ValueError:
            If ``recursive`` is True, and the bucket contains more than 256
            blobs.  This is to prevent extremely long runtime of this
            method.  For such buckets, iterate over the blobs returned by
            :meth:`list_blobs` and call
            :meth:`~google.cloud.storage.blob.Blob.make_public`
            for each blob.
        """
        # Grant anonymous read on the bucket's own ACL and persist it.
        self.acl.all().grant_read()
        self.acl.save(
            client=client,
            timeout=timeout,
            if_metageneration_match=if_metageneration_match,
            if_metageneration_not_match=if_metageneration_not_match,
            retry=retry,
        )

        if future:
            # Also grant read on the default object ACL, so objects created
            # later are public as well.
            doa = self.default_object_acl
            if not doa.loaded:
                doa.reload(client=client, timeout=timeout)
            doa.all().grant_read()
            doa.save(
                client=client,
                timeout=timeout,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
            )

        if recursive:
            # List one more blob than the cap so overflow can be detected
            # without enumerating the entire bucket.
            blobs = list(
                self.list_blobs(
                    projection="full",
                    max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
                    client=client,
                    timeout=timeout,
                )
            )
            if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
                message = (
                    "Refusing to make public recursively with more than "
                    "%d objects. If you actually want to make every object "
                    "in this bucket public, iterate through the blobs "
                    "returned by 'Bucket.list_blobs()' and call "
                    "'make_public' on each one."
                ) % (self._MAX_OBJECTS_FOR_ITERATION,)
                raise ValueError(message)

            # One ACL save per existing blob.
            for blob in blobs:
                blob.acl.all().grant_read()
                blob.acl.save(
                    client=client, timeout=timeout,
                )
2936
    def make_private(
        self,
        recursive=False,
        future=False,
        client=None,
        timeout=_DEFAULT_TIMEOUT,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,
    ):
        """Update bucket's ACL, revoking read access for anonymous users.

        :type recursive: bool
        :param recursive: If True, this will make all blobs inside the bucket
                          private as well.

        :type future: bool
        :param future: If True, this will make all objects created in the
                       future private as well.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: (Optional) The client to use.  If not passed, falls back
                       to the ``client`` stored on the current bucket.

        :type timeout: float or tuple
        :param timeout:
            (Optional) The amount of time, in seconds, to wait
            for the server response.  See: :ref:`configuring_timeouts`

        :type if_metageneration_match: long
        :param if_metageneration_match: (Optional) Make the operation conditional on whether the
                                        blob's current metageneration matches the given value.
        :type if_metageneration_not_match: long
        :param if_metageneration_not_match: (Optional) Make the operation conditional on whether the
                                            blob's current metageneration does not match the given value.
        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
        :param retry:
            (Optional) How to retry the RPC. See: :ref:`configuring_retries`

        :raises ValueError:
            If ``recursive`` is True, and the bucket contains more than 256
            blobs.  This is to prevent extremely long runtime of this
            method.  For such buckets, iterate over the blobs returned by
            :meth:`list_blobs` and call
            :meth:`~google.cloud.storage.blob.Blob.make_private`
            for each blob.
        """
        # Revoke anonymous read on the bucket's own ACL and persist it.
        self.acl.all().revoke_read()
        self.acl.save(
            client=client,
            timeout=timeout,
            if_metageneration_match=if_metageneration_match,
            if_metageneration_not_match=if_metageneration_not_match,
            retry=retry,
        )

        if future:
            # Also revoke read on the default object ACL, so objects created
            # later are private as well.
            doa = self.default_object_acl
            if not doa.loaded:
                doa.reload(client=client, timeout=timeout)
            doa.all().revoke_read()
            doa.save(
                client=client,
                timeout=timeout,
                if_metageneration_match=if_metageneration_match,
                if_metageneration_not_match=if_metageneration_not_match,
                retry=retry,
            )

        if recursive:
            # List one more blob than the cap so overflow can be detected
            # without enumerating the entire bucket.
            blobs = list(
                self.list_blobs(
                    projection="full",
                    max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
                    client=client,
                    timeout=timeout,
                )
            )
            if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
                message = (
                    "Refusing to make private recursively with more than "
                    "%d objects. If you actually want to make every object "
                    "in this bucket private, iterate through the blobs "
                    "returned by 'Bucket.list_blobs()' and call "
                    "'make_private' on each one."
                ) % (self._MAX_OBJECTS_FOR_ITERATION,)
                raise ValueError(message)

            # One ACL save per existing blob.
            for blob in blobs:
                blob.acl.all().revoke_read()
                blob.acl.save(client=client, timeout=timeout)
3029
3030    def generate_upload_policy(self, conditions, expiration=None, client=None):
3031        """Create a signed upload policy for uploading objects.
3032
3033        This method generates and signs a policy document. You can use
3034        `policy documents`_ to allow visitors to a website to upload files to
3035        Google Cloud Storage without giving them direct write access.
3036
3037        For example:
3038
3039        .. literalinclude:: snippets.py
3040            :start-after: [START policy_document]
3041            :end-before: [END policy_document]
3042            :dedent: 4
3043
3044        .. _policy documents:
3045            https://cloud.google.com/storage/docs/xml-api\
3046            /post-object#policydocument
3047
3048        :type expiration: datetime
3049        :param expiration: (Optional) Expiration in UTC. If not specified, the
3050                           policy will expire in 1 hour.
3051
3052        :type conditions: list
3053        :param conditions: A list of conditions as described in the
3054                          `policy documents`_ documentation.
3055
3056        :type client: :class:`~google.cloud.storage.client.Client`
3057        :param client: (Optional) The client to use.  If not passed, falls back
3058                       to the ``client`` stored on the current bucket.
3059
3060        :rtype: dict
3061        :returns: A dictionary of (form field name, form field value) of form
3062                  fields that should be added to your HTML upload form in order
3063                  to attach the signature.
3064        """
3065        client = self._require_client(client)
3066        credentials = client._credentials
3067        _signing.ensure_signed_credentials(credentials)
3068
3069        if expiration is None:
3070            expiration = _NOW() + datetime.timedelta(hours=1)
3071
3072        conditions = conditions + [{"bucket": self.name}]
3073
3074        policy_document = {
3075            "expiration": _datetime_to_rfc3339(expiration),
3076            "conditions": conditions,
3077        }
3078
3079        encoded_policy_document = base64.b64encode(
3080            json.dumps(policy_document).encode("utf-8")
3081        )
3082        signature = base64.b64encode(credentials.sign_bytes(encoded_policy_document))
3083
3084        fields = {
3085            "bucket": self.name,
3086            "GoogleAccessId": credentials.signer_email,
3087            "policy": encoded_policy_document.decode("utf-8"),
3088            "signature": signature.decode("utf-8"),
3089        }
3090
3091        return fields
3092
3093    def lock_retention_policy(
3094        self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
3095    ):
3096        """Lock the bucket's retention policy.
3097
3098        :type client: :class:`~google.cloud.storage.client.Client` or
3099                      ``NoneType``
3100        :param client: (Optional) The client to use.  If not passed, falls back
3101                       to the ``client`` stored on the blob's bucket.
3102
3103        :type timeout: float or tuple
3104        :param timeout:
3105            (Optional) The amount of time, in seconds, to wait
3106            for the server response.  See: :ref:`configuring_timeouts`
3107
3108        :type retry: google.api_core.retry.Retry or google.cloud.storage.retry.ConditionalRetryPolicy
3109        :param retry:
3110            (Optional) How to retry the RPC. See: :ref:`configuring_retries`
3111
3112        :raises ValueError:
3113            if the bucket has no metageneration (i.e., new or never reloaded);
3114            if the bucket has no retention policy assigned;
3115            if the bucket's retention policy is already locked.
3116        """
3117        if "metageneration" not in self._properties:
3118            raise ValueError("Bucket has no retention policy assigned: try 'reload'?")
3119
3120        policy = self._properties.get("retentionPolicy")
3121
3122        if policy is None:
3123            raise ValueError("Bucket has no retention policy assigned: try 'reload'?")
3124
3125        if policy.get("isLocked"):
3126            raise ValueError("Bucket's retention policy is already locked.")
3127
3128        client = self._require_client(client)
3129
3130        query_params = {"ifMetagenerationMatch": self.metageneration}
3131
3132        if self.user_project is not None:
3133            query_params["userProject"] = self.user_project
3134
3135        path = "/b/{}/lockRetentionPolicy".format(self.name)
3136        api_response = client._post_resource(
3137            path,
3138            None,
3139            query_params=query_params,
3140            timeout=timeout,
3141            retry=retry,
3142            _target_object=self,
3143        )
3144        self._set_properties(api_response)
3145
3146    def generate_signed_url(
3147        self,
3148        expiration=None,
3149        api_access_endpoint=_API_ACCESS_ENDPOINT,
3150        method="GET",
3151        headers=None,
3152        query_parameters=None,
3153        client=None,
3154        credentials=None,
3155        version=None,
3156        virtual_hosted_style=False,
3157        bucket_bound_hostname=None,
3158        scheme="http",
3159    ):
3160        """Generates a signed URL for this bucket.
3161
3162        .. note::
3163
3164            If you are on Google Compute Engine, you can't generate a signed
3165            URL using GCE service account. Follow `Issue 50`_ for updates on
3166            this. If you'd like to be able to generate a signed URL from GCE,
3167            you can use a standard service account from a JSON file rather
3168            than a GCE service account.
3169
3170        .. _Issue 50: https://github.com/GoogleCloudPlatform/\
3171                      google-auth-library-python/issues/50
3172
3173        If you have a bucket that you want to allow access to for a set
3174        amount of time, you can use this method to generate a URL that
3175        is only valid within a certain time period.
3176
3177        If ``bucket_bound_hostname`` is set as an argument of :attr:`api_access_endpoint`,
3178        ``https`` works only if using a ``CDN``.
3179
3180        Example:
3181            Generates a signed URL for this bucket using bucket_bound_hostname and scheme.
3182
3183            >>> from google.cloud import storage
3184            >>> client = storage.Client()
3185            >>> bucket = client.get_bucket('my-bucket-name')
3186            >>> url = bucket.generate_signed_url(expiration='url-expiration-time', bucket_bound_hostname='mydomain.tld',
3187            >>>                                  version='v4')
3188            >>> url = bucket.generate_signed_url(expiration='url-expiration-time', bucket_bound_hostname='mydomain.tld',
3189            >>>                                  version='v4',scheme='https')  # If using ``CDN``
3190
3191        This is particularly useful if you don't want publicly
3192        accessible buckets, but don't want to require users to explicitly
3193        log in.
3194
3195        :type expiration: Union[Integer, datetime.datetime, datetime.timedelta]
3196        :param expiration: Point in time when the signed URL should expire. If
3197                           a ``datetime`` instance is passed without an explicit
3198                           ``tzinfo`` set,  it will be assumed to be ``UTC``.
3199
3200        :type api_access_endpoint: str
3201        :param api_access_endpoint: (Optional) URI base.
3202
3203        :type method: str
3204        :param method: The HTTP verb that will be used when requesting the URL.
3205
3206        :type headers: dict
3207        :param headers:
3208            (Optional) Additional HTTP headers to be included as part of the
3209            signed URLs.  See:
3210            https://cloud.google.com/storage/docs/xml-api/reference-headers
3211            Requests using the signed URL *must* pass the specified header
3212            (name and value) with each request for the URL.
3213
3214        :type query_parameters: dict
3215        :param query_parameters:
3216            (Optional) Additional query parameters to be included as part of the
3217            signed URLs.  See:
3218            https://cloud.google.com/storage/docs/xml-api/reference-headers#query
3219
3220        :type client: :class:`~google.cloud.storage.client.Client` or
3221                      ``NoneType``
3222        :param client: (Optional) The client to use.  If not passed, falls back
3223                       to the ``client`` stored on the blob's bucket.
3224
3225
3226        :type credentials: :class:`google.auth.credentials.Credentials` or
3227                           :class:`NoneType`
3228        :param credentials: The authorization credentials to attach to requests.
3229                            These credentials identify this application to the service.
3230                            If none are specified, the client will attempt to ascertain
3231                            the credentials from the environment.
3232
3233        :type version: str
3234        :param version: (Optional) The version of signed credential to create.
3235                        Must be one of 'v2' | 'v4'.
3236
3237        :type virtual_hosted_style: bool
3238        :param virtual_hosted_style:
3239            (Optional) If true, then construct the URL relative the bucket's
3240            virtual hostname, e.g., '<bucket-name>.storage.googleapis.com'.
3241
3242        :type bucket_bound_hostname: str
3243        :param bucket_bound_hostname:
3244            (Optional) If pass, then construct the URL relative to the bucket-bound hostname.
3245            Value cane be a bare or with scheme, e.g., 'example.com' or 'http://example.com'.
3246            See: https://cloud.google.com/storage/docs/request-endpoints#cname
3247
3248        :type scheme: str
3249        :param scheme:
3250            (Optional) If ``bucket_bound_hostname`` is passed as a bare hostname, use
3251            this value as the scheme.  ``https`` will work only when using a CDN.
3252            Defaults to ``"http"``.
3253
3254        :raises: :exc:`ValueError` when version is invalid.
3255        :raises: :exc:`TypeError` when expiration is not a valid type.
3256        :raises: :exc:`AttributeError` if credentials is not an instance
3257                of :class:`google.auth.credentials.Signing`.
3258
3259        :rtype: str
3260        :returns: A signed URL you can use to access the resource
3261                  until expiration.
3262        """
3263        if version is None:
3264            version = "v2"
3265        elif version not in ("v2", "v4"):
3266            raise ValueError("'version' must be either 'v2' or 'v4'")
3267
3268        if virtual_hosted_style:
3269            api_access_endpoint = "https://{bucket_name}.storage.googleapis.com".format(
3270                bucket_name=self.name
3271            )
3272        elif bucket_bound_hostname:
3273            api_access_endpoint = _bucket_bound_hostname_url(
3274                bucket_bound_hostname, scheme
3275            )
3276        else:
3277            resource = "/{bucket_name}".format(bucket_name=self.name)
3278
3279        if virtual_hosted_style or bucket_bound_hostname:
3280            resource = "/"
3281
3282        if credentials is None:
3283            client = self._require_client(client)
3284            credentials = client._credentials
3285
3286        if version == "v2":
3287            helper = generate_signed_url_v2
3288        else:
3289            helper = generate_signed_url_v4
3290
3291        return helper(
3292            credentials,
3293            resource=resource,
3294            expiration=expiration,
3295            api_access_endpoint=api_access_endpoint,
3296            method=method.upper(),
3297            headers=headers,
3298            query_parameters=query_parameters,
3299        )
3300
3301
3302def _raise_if_len_differs(expected_len, **generation_match_args):
3303    """
3304    Raise an error if any generation match argument
3305    is set and its len differs from the given value.
3306
3307    :type expected_len: int
3308    :param expected_len: Expected argument length in case it's set.
3309
3310    :type generation_match_args: dict
3311    :param generation_match_args: Lists, which length must be checked.
3312
3313    :raises: :exc:`ValueError` if any argument set, but has an unexpected length.
3314    """
3315    for name, value in generation_match_args.items():
3316        if value is not None and len(value) != expected_len:
3317            raise ValueError(
3318                "'{}' length must be the same as 'blobs' length".format(name)
3319            )
3320