1# Licensed to the Apache Software Foundation (ASF) under one or more
2# contributor license agreements.  See the NOTICE file distributed with
3# this work for additional information regarding copyright ownership.
4# The ASF licenses this file to You under the Apache License, Version 2.0
5# (the "License"); you may not use this file except in compliance with
6# the License.  You may obtain a copy of the License at
7#
8#     http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15
16from typing import Dict
17from typing import Optional
18
19import base64
20import hmac
21import time
22from hashlib import sha1
23import os
24from datetime import datetime
25
26import libcloud.utils.py3
27
28try:
29    if libcloud.utils.py3.DEFAULT_LXML:
30        from lxml.etree import Element, SubElement
31    else:
32        from xml.etree.ElementTree import Element, SubElement
33except ImportError:
34    from xml.etree.ElementTree import Element, SubElement
35
36from libcloud.utils.py3 import httplib
37from libcloud.utils.py3 import urlquote
38from libcloud.utils.py3 import b
39from libcloud.utils.py3 import tostring
40from libcloud.utils.py3 import urlencode
41
42from libcloud.utils.xml import fixxpath, findtext
43from libcloud.utils.files import read_in_chunks
44from libcloud.common.types import InvalidCredsError, LibcloudError
45from libcloud.common.base import ConnectionUserAndKey, RawResponse
46from libcloud.common.aws import AWSBaseResponse, AWSDriver, \
47    AWSTokenConnection, SignedAWSConnection, UnsignedPayloadSentinel
48
49from libcloud.storage.base import Object, Container, StorageDriver
50from libcloud.storage.types import ContainerError
51from libcloud.storage.types import ContainerIsNotEmptyError
52from libcloud.storage.types import ContainerAlreadyExistsError
53from libcloud.storage.types import InvalidContainerNameError
54from libcloud.storage.types import ContainerDoesNotExistError
55from libcloud.storage.types import ObjectDoesNotExistError
56from libcloud.storage.types import ObjectHashMismatchError
57
# How long before the request signature expires
59EXPIRATION_SECONDS = 15 * 60
60
61S3_US_STANDARD_HOST = 's3.amazonaws.com'
62S3_US_EAST2_HOST = 's3-us-east-2.amazonaws.com'
63S3_US_WEST_HOST = 's3-us-west-1.amazonaws.com'
64S3_US_WEST_OREGON_HOST = 's3-us-west-2.amazonaws.com'
65S3_US_GOV_EAST_HOST = 's3-us-gov-east-1.amazonaws.com'
66S3_US_GOV_WEST_HOST = 's3-us-gov-west-1.amazonaws.com'
67S3_CN_NORTH_HOST = 's3.cn-north-1.amazonaws.com.cn'
68S3_CN_NORTHWEST_HOST = 's3.cn-northwest-1.amazonaws.com.cn'
69S3_EU_WEST_HOST = 's3-eu-west-1.amazonaws.com'
70S3_EU_WEST2_HOST = 's3-eu-west-2.amazonaws.com'
71S3_EU_CENTRAL_HOST = 's3-eu-central-1.amazonaws.com'
72S3_EU_NORTH1_HOST = 's3-eu-north-1.amazonaws.com'
73S3_AP_SOUTH_HOST = 's3-ap-south-1.amazonaws.com'
74S3_AP_SOUTHEAST_HOST = 's3-ap-southeast-1.amazonaws.com'
75S3_AP_SOUTHEAST2_HOST = 's3-ap-southeast-2.amazonaws.com'
76S3_AP_NORTHEAST1_HOST = 's3-ap-northeast-1.amazonaws.com'
77S3_AP_NORTHEAST2_HOST = 's3-ap-northeast-2.amazonaws.com'
78S3_AP_NORTHEAST_HOST = S3_AP_NORTHEAST1_HOST
79S3_SA_EAST_HOST = 's3-sa-east-1.amazonaws.com'
80S3_SA_SOUTHEAST2_HOST = 's3-sa-east-2.amazonaws.com'
81S3_CA_CENTRAL_HOST = 's3-ca-central-1.amazonaws.com'
82
83# Maps AWS region name to connection hostname
84REGION_TO_HOST_MAP = {
85    'us-east-1': S3_US_STANDARD_HOST,
86    'us-east-2': S3_US_EAST2_HOST,
87    'us-west-1': S3_US_WEST_HOST,
88    'us-west-2': S3_US_WEST_OREGON_HOST,
89    'us-gov-east-1': S3_US_GOV_EAST_HOST,
90    'us-gov-west-1': S3_US_GOV_WEST_HOST,
91    'cn-north-1': S3_CN_NORTH_HOST,
92    'cn-northwest-1': S3_CN_NORTHWEST_HOST,
93    'eu-west-1': S3_EU_WEST_HOST,
94    'eu-west-2': S3_EU_WEST2_HOST,
95    'eu-west-3': 's3.eu-west-3.amazonaws.com',
96    'eu-north-1': 's3.eu-north-1.amazonaws.com',
97    'eu-central-1': S3_EU_CENTRAL_HOST,
98    'ap-south-1': S3_AP_SOUTH_HOST,
99    'ap-southeast-1': S3_AP_SOUTHEAST_HOST,
100    'ap-southeast-2': S3_AP_SOUTHEAST2_HOST,
101    'ap-northeast-1': S3_AP_NORTHEAST1_HOST,
102    'ap-northeast-2': S3_AP_NORTHEAST2_HOST,
103    'ap-northeast-3': 's3.ap-northeast-3.amazonaws.com',
104    'sa-east-1': S3_SA_EAST_HOST,
105    'sa-east-2': S3_SA_SOUTHEAST2_HOST,
106    'ca-central-1': S3_CA_CENTRAL_HOST,
107    'me-south-1': 's3.me-south-1.amazonaws.com'
108}
109
110API_VERSION = '2006-03-01'
111NAMESPACE = 'http://s3.amazonaws.com/doc/%s/' % (API_VERSION)
112
# AWS multipart upload parts must be at least 5 MB
114CHUNK_SIZE = 5 * 1024 * 1024
115
116# Desired number of items in each response inside a paginated request in
117# ex_iterate_multipart_uploads.
118RESPONSES_PER_REQUEST = 100
119
120S3_CDN_URL_DATETIME_FORMAT = '%Y%m%dT%H%M%SZ'
121S3_CDN_URL_DATE_FORMAT = '%Y%m%d'
122S3_CDN_URL_EXPIRY_HOURS = float(
123    os.getenv('LIBCLOUD_S3_CDN_URL_EXPIRY_HOURS', '24')
124)
125
126
127class S3Response(AWSBaseResponse):
128    namespace = None
129    valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT,
130                            httplib.BAD_REQUEST, httplib.PARTIAL_CONTENT]
131
132    def success(self):
133        i = int(self.status)
134        return 200 <= i <= 299 or i in self.valid_response_codes
135
136    def parse_error(self):
137        if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]:
138            raise InvalidCredsError(self.body)
139        elif self.status == httplib.MOVED_PERMANENTLY:
140            bucket_region = self.headers.get('x-amz-bucket-region', None)
141            used_region = self.connection.driver.region
142            raise LibcloudError('This bucket is located in a different '
143                                'region. Please use the correct driver. '
144                                'Bucket region "%s", used region "%s".' %
145                                (bucket_region, used_region),
146                                driver=S3StorageDriver)
147        raise LibcloudError('Unknown error. Status code: %d' % (self.status),
148                            driver=S3StorageDriver)
149
150
151class S3RawResponse(S3Response, RawResponse):
152    pass
153
154
155class BaseS3Connection(ConnectionUserAndKey):
156    """
157    Represents a single connection to the S3 Endpoint
158    """
159
160    host = 's3.amazonaws.com'
161    responseCls = S3Response
162    rawResponseCls = S3RawResponse
163
164    @staticmethod
165    def get_auth_signature(method, headers, params, expires, secret_key, path,
166                           vendor_prefix):
167        """
168        Signature = URL-Encode( Base64( HMAC-SHA1( YourSecretAccessKeyID,
169                                    UTF-8-Encoding-Of( StringToSign ) ) ) );
170
171        StringToSign = HTTP-VERB + "\n" +
172            Content-MD5 + "\n" +
173            Content-Type + "\n" +
174            Expires + "\n" +
175            CanonicalizedVendorHeaders +
176            CanonicalizedResource;
177        """
178        special_headers = {'content-md5': '', 'content-type': '', 'date': ''}
179        vendor_headers = {}
180
181        for key, value in list(headers.items()):
182            key_lower = key.lower()
183            if key_lower in special_headers:
184                special_headers[key_lower] = value.strip()
185            elif key_lower.startswith(vendor_prefix):
186                vendor_headers[key_lower] = value.strip()
187
188        if expires:
189            special_headers['date'] = str(expires)
190
191        buf = [method]
192        for _, value in sorted(special_headers.items()):
193            buf.append(value)
194        string_to_sign = '\n'.join(buf)
195
196        buf = []
197        for key, value in sorted(vendor_headers.items()):
198            buf.append('%s:%s' % (key, value))
199        header_string = '\n'.join(buf)
200
201        values_to_sign = []
202        for value in [string_to_sign, header_string, path]:
203            if value:
204                values_to_sign.append(value)
205
206        string_to_sign = '\n'.join(values_to_sign)
207        b64_hmac = base64.b64encode(
208            hmac.new(b(secret_key), b(string_to_sign), digestmod=sha1).digest()
209        )
210        return b64_hmac.decode('utf-8')
211
212    def add_default_params(self, params):
213        expires = str(int(time.time()) + EXPIRATION_SECONDS)
214        params['AWSAccessKeyId'] = self.user_id
215        params['Expires'] = expires
216        return params
217
218    def pre_connect_hook(self, params, headers):
219        # pylint: disable=no-member
220        params['Signature'] = self.get_auth_signature(
221            method=self.method, headers=headers, params=params,
222            expires=params['Expires'], secret_key=self.key, path=self.action,
223            vendor_prefix=self.driver.http_vendor_prefix)
224        return params, headers
225
226
227class S3Connection(AWSTokenConnection, BaseS3Connection):
228    """
229    Represents a single connection to the S3 endpoint, with AWS-specific
230    features.
231    """
232    pass
233
234
235class S3SignatureV4Connection(SignedAWSConnection, BaseS3Connection):
236    service_name = 's3'
237    version = API_VERSION
238
239    def __init__(self, user_id, key, secure=True, host=None, port=None,
240                 url=None, timeout=None, proxy_url=None, token=None,
241                 retry_delay=None, backoff=None):
242        super(S3SignatureV4Connection, self).__init__(
243            user_id, key, secure, host,
244            port, url, timeout, proxy_url,
245            token, retry_delay, backoff,
246            4)  # force version 4
247
248
249class S3MultipartUpload(object):
250    """
    Class representing an Amazon S3 multipart upload
252    """
253
254    def __init__(self, key, id, created_at, initiator, owner):
255        """
        Class representing an Amazon S3 multipart upload
257
258        :param key: The object/key that was being uploaded
259        :type key: ``str``
260
261        :param id: The upload id assigned by amazon
262        :type id: ``str``
263
264        :param created_at: The date/time at which the upload was started
265        :type created_at: ``str``
266
267        :param initiator: The AWS owner/IAM user who initiated this
268        :type initiator: ``str``
269
270        :param owner: The AWS owner/IAM who will own this object
271        :type owner: ``str``
272        """
273        self.key = key
274        self.id = id
275        self.created_at = created_at
276        self.initiator = initiator
277        self.owner = owner
278
279    def __repr__(self):
280        return ('<S3MultipartUpload: key=%s>' % (self.key))
281
282
283class BaseS3StorageDriver(StorageDriver):
284    name = 'Amazon S3 (standard)'
285    website = 'http://aws.amazon.com/s3/'
286    connectionCls = BaseS3Connection
287    hash_type = 'md5'
288    supports_chunked_encoding = False
289    supports_s3_multipart_upload = True
290    ex_location_name = ''
291    namespace = NAMESPACE
292    http_vendor_prefix = 'x-amz'
293
294    def iterate_containers(self):
295        response = self.connection.request('/')
296        if response.status == httplib.OK:
297            containers = self._to_containers(obj=response.object,
298                                             xpath='Buckets/Bucket')
299            return containers
300
301        raise LibcloudError('Unexpected status code: %s' % (response.status),
302                            driver=self)
303
304    def iterate_container_objects(self, container, prefix=None,
305                                  ex_prefix=None):
306        """
307        Return a generator of objects for the given container.
308
309        :param container: Container instance
310        :type container: :class:`Container`
311
312        :param prefix: Only return objects starting with prefix
313        :type prefix: ``str``
314
315        :param ex_prefix: Only return objects starting with ex_prefix
316        :type ex_prefix: ``str``
317
318        :return: A generator of Object instances.
319        :rtype: ``generator`` of :class:`Object`
320        """
321        prefix = self._normalize_prefix_argument(prefix, ex_prefix)
322
323        params = {}
324
325        if prefix:
326            params['prefix'] = prefix
327
328        last_key = None
329        exhausted = False
330        container_path = self._get_container_path(container)
331
332        while not exhausted:
333            if last_key:
334                params['marker'] = last_key
335
336            response = self.connection.request(container_path,
337                                               params=params)
338
339            if response.status != httplib.OK:
340                raise LibcloudError('Unexpected status code: %s' %
341                                    (response.status), driver=self)
342
343            objects = self._to_objs(obj=response.object,
344                                    xpath='Contents', container=container)
345            is_truncated = response.object.findtext(fixxpath(
346                xpath='IsTruncated', namespace=self.namespace)).lower()
347            exhausted = (is_truncated == 'false')
348
349            last_key = None
350            for obj in objects:
351                last_key = obj.name
352                yield obj
353
354    def get_container(self, container_name):
355        try:
356            response = self.connection.request('/%s' % container_name,
357                                               method='HEAD')
358            if response.status == httplib.NOT_FOUND:
359                raise ContainerDoesNotExistError(value=None, driver=self,
360                                                 container_name=container_name)
361        except InvalidCredsError:
362            # This just means the user doesn't have IAM permissions to do a
363            # HEAD request but other requests might work.
364            pass
365        return Container(name=container_name, extra=None, driver=self)
366
367    def get_object(self, container_name, object_name):
368        container = self.get_container(container_name=container_name)
369        object_path = self._get_object_path(container, object_name)
370        response = self.connection.request(object_path, method='HEAD')
371
372        if response.status == httplib.OK:
373            obj = self._headers_to_object(object_name=object_name,
374                                          container=container,
375                                          headers=response.headers)
376            return obj
377
378        raise ObjectDoesNotExistError(value=None, driver=self,
379                                      object_name=object_name)
380
381    def _get_container_path(self, container):
382        """
383        Return a container path
384
385        :param container: Container instance
386        :type  container: :class:`Container`
387
388        :return: A path for this container.
389        :rtype: ``str``
390        """
391        return '/%s' % (container.name)
392
393    def _get_object_path(self, container, object_name):
394        """
        Return an object's path.
396
397        :param container: Container instance
398        :type  container: :class:`Container`
399
400        :param object_name: Object name
401        :type  object_name: :class:`str`
402
        :return: A path for this object.
404        :rtype: ``str``
405        """
406        container_url = self._get_container_path(container)
407        object_name_cleaned = self._clean_object_name(object_name)
408        object_path = '%s/%s' % (container_url, object_name_cleaned)
409        return object_path
410
411    def create_container(self, container_name):
412        if self.ex_location_name:
413            root = Element('CreateBucketConfiguration')
414            child = SubElement(root, 'LocationConstraint')
415            child.text = self.ex_location_name
416
417            data = tostring(root)
418        else:
419            data = ''
420
421        response = self.connection.request('/%s' % (container_name),
422                                           data=data,
423                                           method='PUT')
424
425        if response.status == httplib.OK:
426            container = Container(name=container_name, extra=None, driver=self)
427            return container
428        elif response.status == httplib.CONFLICT:
429            if "BucketAlreadyOwnedByYou" in response.body:
430                raise ContainerAlreadyExistsError(
                    value='Container with this name already exists. The name '
                          'must be unique among all the containers in the '
                          'system.',
433                    container_name=container_name, driver=self)
434
435            raise InvalidContainerNameError(
436                value='Container with this name already exists. The name must '
437                      'be unique among all the containers in the system.',
438                container_name=container_name, driver=self)
439        elif response.status == httplib.BAD_REQUEST:
440            raise ContainerError(
441                value='Bad request when creating container: %s' %
442                      response.body,
443                container_name=container_name, driver=self)
444
445        raise LibcloudError('Unexpected status code: %s' % (response.status),
446                            driver=self)
447
448    def delete_container(self, container):
449        # Note: All the objects in the container must be deleted first
450        response = self.connection.request('/%s' % (container.name),
451                                           method='DELETE')
452        if response.status == httplib.NO_CONTENT:
453            return True
454        elif response.status == httplib.CONFLICT:
455            raise ContainerIsNotEmptyError(
456                value='Container must be empty before it can be deleted.',
457                container_name=container.name, driver=self)
458        elif response.status == httplib.NOT_FOUND:
459            raise ContainerDoesNotExistError(value=None,
460                                             driver=self,
461                                             container_name=container.name)
462
463        return False
464
465    def download_object(self, obj, destination_path, overwrite_existing=False,
466                        delete_on_failure=True):
467        obj_path = self._get_object_path(obj.container, obj.name)
468
469        response = self.connection.request(obj_path, method='GET', raw=True)
470
471        return self._get_object(obj=obj, callback=self._save_object,
472                                response=response,
473                                callback_kwargs={
474                                    'obj': obj,
475                                    'response': response.response,
476                                    'destination_path': destination_path,
477                                    'overwrite_existing': overwrite_existing,
478                                    'delete_on_failure': delete_on_failure},
479                                success_status_code=httplib.OK)
480
481    def download_object_as_stream(self, obj, chunk_size=None):
482        obj_path = self._get_object_path(obj.container, obj.name)
483        response = self.connection.request(obj_path, method='GET',
484                                           stream=True, raw=True)
485
486        return self._get_object(
487            obj=obj, callback=read_in_chunks,
488            response=response,
489            callback_kwargs={'iterator': response.iter_content(CHUNK_SIZE),
490                             'chunk_size': chunk_size},
491            success_status_code=httplib.OK)
492
493    def download_object_range(self, obj, destination_path, start_bytes,
494                              end_bytes=None, overwrite_existing=False,
495                              delete_on_failure=True):
496        self._validate_start_and_end_bytes(start_bytes=start_bytes,
497                                           end_bytes=end_bytes)
498
499        obj_path = self._get_object_path(obj.container, obj.name)
500
501        headers = {'Range': self._get_standard_range_str(start_bytes,
502                                                         end_bytes)}
503        response = self.connection.request(obj_path, method='GET',
504                                           headers=headers, raw=True)
505
506        return self._get_object(obj=obj, callback=self._save_object,
507                                response=response,
508                                callback_kwargs={
509                                    'obj': obj,
510                                    'response': response.response,
511                                    'destination_path': destination_path,
512                                    'overwrite_existing': overwrite_existing,
513                                    'delete_on_failure': delete_on_failure,
514                                    'partial_download': True},
515                                success_status_code=httplib.PARTIAL_CONTENT)
516
517    def download_object_range_as_stream(self, obj, start_bytes, end_bytes=None,
518                                        chunk_size=None):
519        self._validate_start_and_end_bytes(start_bytes=start_bytes,
520                                           end_bytes=end_bytes)
521
522        obj_path = self._get_object_path(obj.container, obj.name)
523
524        headers = {'Range': self._get_standard_range_str(start_bytes,
525                                                         end_bytes)}
526        response = self.connection.request(obj_path, method='GET',
527                                           headers=headers,
528                                           stream=True, raw=True)
529
530        return self._get_object(
531            obj=obj, callback=read_in_chunks,
532            response=response,
533            callback_kwargs={'iterator': response.iter_content(CHUNK_SIZE),
534                             'chunk_size': chunk_size},
535            success_status_code=httplib.PARTIAL_CONTENT)
536
537    def upload_object(self, file_path, container, object_name, extra=None,
538                      verify_hash=True, headers=None, ex_storage_class=None):
539        """
540        @inherits: :class:`StorageDriver.upload_object`
541
542        :param ex_storage_class: Storage class
543        :type ex_storage_class: ``str``
544        """
545        return self._put_object(container=container, object_name=object_name,
546                                extra=extra, file_path=file_path,
547                                verify_hash=verify_hash,
548                                headers=headers,
549                                storage_class=ex_storage_class)
550
551    def _initiate_multipart(self, container, object_name, headers=None):
552        """
553        Initiates a multipart upload to S3
554
555        :param container: The destination container
556        :type container: :class:`Container`
557
558        :param object_name: The name of the object which we are uploading
559        :type object_name: ``str``
560
561        :keyword headers: Additional headers to send with the request
562        :type headers: ``dict``
563
564        :return: The id of the newly created multipart upload
565        :rtype: ``str``
566        """
567        headers = headers or {}
568
569        request_path = self._get_object_path(container, object_name)
570        params = {'uploads': ''}
571
572        response = self.connection.request(request_path, method='POST',
573                                           headers=headers, params=params)
574
575        if response.status != httplib.OK:
576            raise LibcloudError('Error initiating multipart upload',
577                                driver=self)
578
579        return findtext(element=response.object, xpath='UploadId',
580                        namespace=self.namespace)
581
582    def _upload_multipart_chunks(self, container, object_name, upload_id,
583                                 stream, calculate_hash=True):
584        """
585        Uploads data from an iterator in fixed sized chunks to S3
586
587        :param container: The destination container
588        :type container: :class:`Container`
589
590        :param object_name: The name of the object which we are uploading
591        :type object_name: ``str``
592
593        :param upload_id: The upload id allocated for this multipart upload
594        :type upload_id: ``str``
595
596        :param stream: The generator for fetching the upload data
597        :type stream: ``generator``
598
599        :keyword calculate_hash: Indicates if we must calculate the data hash
600        :type calculate_hash: ``bool``
601
602        :return: A tuple of (chunk info, checksum, bytes transferred)
603        :rtype: ``tuple``
604        """
605        data_hash = None
606        if calculate_hash:
607            data_hash = self._get_hash_function()
608
609        bytes_transferred = 0
610        count = 1
611        chunks = []
612        params = {'uploadId': upload_id}
613
614        request_path = self._get_object_path(container, object_name)
615
616        # Read the input data in chunk sizes suitable for AWS
617        for data in read_in_chunks(stream, chunk_size=CHUNK_SIZE,
618                                   fill_size=True, yield_empty=True):
619            bytes_transferred += len(data)
620
621            if calculate_hash:
622                data_hash.update(data)
623
624            chunk_hash = self._get_hash_function()
625            chunk_hash.update(data)
626            chunk_hash = base64.b64encode(chunk_hash.digest()).decode('utf-8')
627
            # The Content-MD5 header provides an extra integrity check and
            # is recommended by Amazon
630            headers = {
631                'Content-Length': len(data),
632                'Content-MD5': chunk_hash,
633            }
634
635            params['partNumber'] = count
636
637            resp = self.connection.request(request_path, method='PUT',
638                                           data=data, headers=headers,
639                                           params=params)
640
641            if resp.status != httplib.OK:
642                raise LibcloudError('Error uploading chunk', driver=self)
643
644            server_hash = resp.headers['etag'].replace('"', '')
645
646            # Keep this data for a later commit
647            chunks.append((count, server_hash))
648            count += 1
649
650        if calculate_hash:
651            data_hash = data_hash.hexdigest()
652
653        return (chunks, data_hash, bytes_transferred)
654
655    def _commit_multipart(self, container, object_name, upload_id, chunks):
656        """
657        Makes a final commit of the data.
658
659        :param container: The destination container
660        :type container: :class:`Container`
661
662        :param object_name: The name of the object which we are uploading
663        :type object_name: ``str``
664
665        :param upload_id: The upload id allocated for this multipart upload
666        :type upload_id: ``str``
667
668        :param chunks: A list of (chunk_number, chunk_hash) tuples.
669        :type chunks: ``list``
670
671        :return: The server side hash of the uploaded data
672        :rtype: ``str``
673        """
674        root = Element('CompleteMultipartUpload')
675
676        for (count, etag) in chunks:
677            part = SubElement(root, 'Part')
678            part_no = SubElement(part, 'PartNumber')
679            part_no.text = str(count)
680
681            etag_id = SubElement(part, 'ETag')
682            etag_id.text = str(etag)
683
684        data = tostring(root)
685
686        headers = {'Content-Length': len(data)}
687        params = {'uploadId': upload_id}
688        request_path = self._get_object_path(container, object_name)
689        response = self.connection.request(request_path, headers=headers,
690                                           params=params, data=data,
691                                           method='POST')
692
693        if response.status != httplib.OK:
694            element = response.object
695            # pylint: disable=maybe-no-member
696            code, message = response._parse_error_details(element=element)
697            msg = 'Error in multipart commit: %s (%s)' % (message, code)
698            raise LibcloudError(msg, driver=self)
699
700        # Get the server's etag to be passed back to the caller
701        body = response.parse_body()
702        server_hash = body.find(fixxpath(xpath='ETag',
703                                         namespace=self.namespace)).text
704        return server_hash
705
706    def _abort_multipart(self, container, object_name, upload_id):
707        """
708        Aborts an already initiated multipart upload
709
710        :param container: The destination container
711        :type container: :class:`Container`
712
713        :param object_name: The name of the object which we are uploading
714        :type object_name: ``str``
715
716        :param upload_id: The upload id allocated for this multipart upload
717        :type upload_id: ``str``
718        """
719
720        params = {'uploadId': upload_id}
721        request_path = self._get_object_path(container, object_name)
722
723        resp = self.connection.request(request_path, method='DELETE',
724                                       params=params)
725
726        if resp.status != httplib.NO_CONTENT:
727            raise LibcloudError('Error in multipart abort. status_code=%d' %
728                                (resp.status), driver=self)
729
730    def upload_object_via_stream(self, iterator, container, object_name,
731                                 extra=None, headers=None,
732                                 ex_storage_class=None):
733        """
734        @inherits: :class:`StorageDriver.upload_object_via_stream`
735
736        :param ex_storage_class: Storage class
737        :type ex_storage_class: ``str``
738        """
739
740        method = 'PUT'
741        params = None
742
        # This driver is also used by other S3 API-compatible drivers.
        # Amazon provides a different, more involved mechanism for multipart
        # uploads
746        if self.supports_s3_multipart_upload:
747            return self._put_object_multipart(container=container,
748                                              object_name=object_name,
749                                              extra=extra,
750                                              stream=iterator,
751                                              verify_hash=False,
752                                              headers=headers,
753                                              storage_class=ex_storage_class)
754        return self._put_object(container=container, object_name=object_name,
755                                extra=extra, method=method, query_args=params,
756                                stream=iterator, verify_hash=False,
757                                headers=headers,
758                                storage_class=ex_storage_class)
759
760    def delete_object(self, obj):
761        object_path = self._get_object_path(obj.container, obj.name)
762        response = self.connection.request(object_path, method='DELETE')
763        if response.status == httplib.NO_CONTENT:
764            return True
765        elif response.status == httplib.NOT_FOUND:
766            raise ObjectDoesNotExistError(value=None, driver=self,
767                                          object_name=obj.name)
768
769        return False
770
771    def ex_iterate_multipart_uploads(self, container, prefix=None,
772                                     delimiter=None):
773        """
774        Extension method for listing all in-progress S3 multipart uploads.
775
776        Each multipart upload which has not been committed or aborted is
777        considered in-progress.
778
779        :param container: The container holding the uploads
780        :type container: :class:`Container`
781
        :keyword prefix: Return only uploads of objects with this prefix
783        :type prefix: ``str``
784
785        :keyword delimiter: The object/key names are grouped based on
786            being split by this delimiter
787        :type delimiter: ``str``
788
789        :return: A generator of S3MultipartUpload instances.
790        :rtype: ``generator`` of :class:`S3MultipartUpload`
791        """
792
793        if not self.supports_s3_multipart_upload:
794            raise LibcloudError('Feature not supported', driver=self)
795
796        # Get the data for a specific container
797        request_path = self._get_container_path(container)
798        params = {'max-uploads': RESPONSES_PER_REQUEST, 'uploads': ''}
799
800        if prefix:
801            params['prefix'] = prefix
802
803        if delimiter:
804            params['delimiter'] = delimiter
805
806        def finder(node, text):
807            return node.findtext(fixxpath(xpath=text,
808                                          namespace=self.namespace))
809
810        while True:
811            response = self.connection.request(request_path, params=params)
812
813            if response.status != httplib.OK:
814                raise LibcloudError('Error fetching multipart uploads. '
815                                    'Got code: %s' % response.status,
816                                    driver=self)
817
818            body = response.parse_body()
819            # pylint: disable=maybe-no-member
820            for node in body.findall(fixxpath(xpath='Upload',
821                                              namespace=self.namespace)):
822                initiator = node.find(fixxpath(xpath='Initiator',
823                                               namespace=self.namespace))
824                owner = node.find(fixxpath(xpath='Owner',
825                                           namespace=self.namespace))
826
827                key = finder(node, 'Key')
828                upload_id = finder(node, 'UploadId')
829                created_at = finder(node, 'Initiated')
830                initiator = finder(initiator, 'DisplayName')
831                owner = finder(owner, 'DisplayName')
832
833                yield S3MultipartUpload(key, upload_id, created_at,
834                                        initiator, owner)
835
836            # Check if this is the last entry in the listing
837            # pylint: disable=maybe-no-member
838            is_truncated = body.findtext(fixxpath(xpath='IsTruncated',
839                                                  namespace=self.namespace))
840
841            if is_truncated.lower() == 'false':
842                break
843
844            # Provide params for the next request
845            upload_marker = body.findtext(fixxpath(xpath='NextUploadIdMarker',
846                                                   namespace=self.namespace))
847            key_marker = body.findtext(fixxpath(xpath='NextKeyMarker',
848                                                namespace=self.namespace))
849
850            params['key-marker'] = key_marker
851            params['upload-id-marker'] = upload_marker
852
853    def ex_cleanup_all_multipart_uploads(self, container, prefix=None):
854        """
855        Extension method for removing all partially completed S3 multipart
856        uploads.
857
858        :param container: The container holding the uploads
859        :type container: :class:`Container`
860
861        :keyword prefix: Delete only uploads of objects with this prefix
862        :type prefix: ``str``
863        """
864
865        # Iterate through the container and delete the upload ids
866        for upload in self.ex_iterate_multipart_uploads(container, prefix,
867                                                        delimiter=None):
868            self._abort_multipart(container, upload.key, upload.id)
869
870    def _clean_object_name(self, name):
871        name = urlquote(name, safe='/~')
872        return name
873
874    def _put_object(self, container, object_name, method='PUT',
875                    query_args=None, extra=None, file_path=None,
876                    stream=None, verify_hash=True, storage_class=None,
877                    headers=None):
878        headers = headers or {}
879        extra = extra or {}
880
881        headers.update(self._to_storage_class_headers(storage_class))
882
883        content_type = extra.get('content_type', None)
884        meta_data = extra.get('meta_data', None)
885        acl = extra.get('acl', None)
886
887        if meta_data:
888            for key, value in list(meta_data.items()):
889                key = self.http_vendor_prefix + '-meta-%s' % (key)
890                headers[key] = value
891
892        if acl:
893            headers[self.http_vendor_prefix + '-acl'] = acl
894
895        request_path = self._get_object_path(container, object_name)
896
897        if query_args:
898            request_path = '?'.join((request_path, query_args))
899
900        result_dict = self._upload_object(
901            object_name=object_name, content_type=content_type,
902            request_path=request_path, request_method=method,
903            headers=headers, file_path=file_path, stream=stream)
904
905        response = result_dict['response']
906        bytes_transferred = result_dict['bytes_transferred']
907        headers = response.headers
909        server_hash = headers.get('etag', '').replace('"', '')
910        server_side_encryption = headers.get('x-amz-server-side-encryption',
911                                             None)
912        aws_kms_encryption = (server_side_encryption == 'aws:kms')
913        hash_matches = (result_dict['data_hash'] == server_hash)
914
915        # NOTE: If AWS KMS server side encryption is enabled, ETag won't
916        # contain object MD5 digest so we skip the checksum check
917        # See https://docs.aws.amazon.com/AmazonS3/latest/API
918        # /RESTCommonResponseHeaders.html
919        # and https://github.com/apache/libcloud/issues/1401
920        # for details
921        if verify_hash and not aws_kms_encryption and not hash_matches:
922            raise ObjectHashMismatchError(
                value='MD5 checksum {0} does not match {1}'.format(
924                    server_hash, result_dict['data_hash']),
925                object_name=object_name, driver=self)
926        elif response.status == httplib.OK:
927            obj = Object(
928                name=object_name, size=bytes_transferred, hash=server_hash,
929                extra={'acl': acl}, meta_data=meta_data, container=container,
930                driver=self)
931
932            return obj
933        else:
934            raise LibcloudError(
935                'Unexpected status code, status_code=%s' % (response.status),
936                driver=self)
937
938    def _put_object_multipart(self, container, object_name, stream,
939                              extra=None, verify_hash=False, headers=None,
940                              storage_class=None):
941        """
942        Uploads an object using the S3 multipart algorithm.
943
944        :param container: The destination container
945        :type container: :class:`Container`
946
947        :param object_name: The name of the object which we are uploading
948        :type object_name: ``str``
949
950        :param stream: The generator for fetching the upload data
951        :type stream: ``generator``
952
953        :keyword verify_hash: Indicates if we must calculate the data hash
954        :type verify_hash: ``bool``
955
956        :keyword extra: Additional options
957        :type extra: ``dict``
958
959        :keyword headers: Additional headers
960        :type headers: ``dict``
961
962        :keyword storage_class: The name of the S3 object's storage class
        :type storage_class: ``str``
964
965        :return: The uploaded object
966        :rtype: :class:`Object`
967        """
968        headers = headers or {}
969        extra = extra or {}
970
971        headers.update(self._to_storage_class_headers(storage_class))
972
973        content_type = extra.get('content_type', None)
974        meta_data = extra.get('meta_data', None)
975        acl = extra.get('acl', None)
976
977        headers['Content-Type'] = self._determine_content_type(
978            content_type, object_name)
979
980        if meta_data:
981            for key, value in list(meta_data.items()):
982                key = self.http_vendor_prefix + '-meta-%s' % (key)
983                headers[key] = value
984
985        if acl:
986            headers[self.http_vendor_prefix + '-acl'] = acl
987
988        upload_id = self._initiate_multipart(container, object_name,
989                                             headers=headers)
990
991        try:
992            result = self._upload_multipart_chunks(container, object_name,
993                                                   upload_id, stream,
994                                                   calculate_hash=verify_hash)
995            chunks, data_hash, bytes_transferred = result
996
997            # Commit the chunk info and complete the upload
998            etag = self._commit_multipart(container, object_name, upload_id,
999                                          chunks)
1000        except Exception:
1001            # Amazon provides a mechanism for aborting an upload.
1002            self._abort_multipart(container, object_name, upload_id)
1003            raise
1004
1005        return Object(
1006            name=object_name, size=bytes_transferred, hash=etag,
1007            extra={'acl': acl}, meta_data=meta_data, container=container,
1008            driver=self)
1009
1010    def _to_storage_class_headers(self, storage_class):
1011        """
1012        Generates request headers given a storage class name.
1013
1014        :keyword storage_class: The name of the S3 object's storage class
        :type storage_class: ``str``

        :return: Headers to include in a request
        :rtype: ``dict``
1019        """
1020        headers = {}
1021        storage_class = storage_class or 'standard'
1022        if storage_class not in ['standard', 'reduced_redundancy']:
1023            raise ValueError(
1024                'Invalid storage class value: %s' % (storage_class))
1025
1026        key = self.http_vendor_prefix + '-storage-class'
1027        headers[key] = storage_class.upper()
1028        return headers
1029
1030    def _to_containers(self, obj, xpath):
1031        for element in obj.findall(fixxpath(xpath=xpath,
1032                                            namespace=self.namespace)):
1033            yield self._to_container(element)
1034
1035    def _to_objs(self, obj, xpath, container):
1036        return [self._to_obj(element, container) for element in
1037                obj.findall(fixxpath(xpath=xpath, namespace=self.namespace))]
1038
1039    def _to_container(self, element):
1040        extra = {
1041            'creation_date': findtext(element=element, xpath='CreationDate',
1042                                      namespace=self.namespace)
1043        }
1044
1045        container = Container(name=findtext(element=element, xpath='Name',
1046                                            namespace=self.namespace),
1047                              extra=extra,
1048                              driver=self
1049                              )
1050
1051        return container
1052
1053    def _get_content_length_from_headers(self,
1054                                         headers: Dict[str, str]
1055                                         ) -> Optional[int]:
1056        """
        Parse the object size from the provided response headers.
1058        """
1059        content_length = headers.get("content-length", None)
        return int(content_length) if content_length is not None else None
1061
1062    def _headers_to_object(self, object_name, container, headers):
1063        hash = headers['etag'].replace('"', '')
1064        extra = {'content_type': headers['content-type'],
1065                 'etag': headers['etag']}
1066        meta_data = {}
1067
1068        if 'content-encoding' in headers:
1069            extra['content_encoding'] = headers['content-encoding']
1070
1071        if 'last-modified' in headers:
1072            extra['last_modified'] = headers['last-modified']
1073
1074        for key, value in headers.items():
1075            if not key.lower().startswith(self.http_vendor_prefix + '-meta-'):
1076                continue
1077
1078            key = key.replace(self.http_vendor_prefix + '-meta-', '')
1079            meta_data[key] = value
1080
1081        content_length = self._get_content_length_from_headers(headers=headers)
1082
1083        if content_length is None:
1084            raise KeyError("Can not deduce object size from headers for "
1085                           "object %s" % (object_name))
1086
1087        obj = Object(name=object_name, size=int(content_length),
1088                     hash=hash, extra=extra,
1089                     meta_data=meta_data,
1090                     container=container,
1091                     driver=self)
1092        return obj
1093
1094    def _to_obj(self, element, container):
1095        owner_id = findtext(element=element, xpath='Owner/ID',
1096                            namespace=self.namespace)
1097        owner_display_name = findtext(element=element,
1098                                      xpath='Owner/DisplayName',
1099                                      namespace=self.namespace)
1100        meta_data = {'owner': {'id': owner_id,
1101                               'display_name': owner_display_name}}
1102        last_modified = findtext(element=element,
1103                                 xpath='LastModified',
1104                                 namespace=self.namespace)
1105        extra = {'last_modified': last_modified}
1106
1107        obj = Object(name=findtext(element=element, xpath='Key',
1108                                   namespace=self.namespace),
1109                     size=int(findtext(element=element, xpath='Size',
1110                                       namespace=self.namespace)),
1111                     hash=findtext(element=element, xpath='ETag',
1112                                   namespace=self.namespace).replace('"', ''),
1113                     extra=extra,
1114                     meta_data=meta_data,
1115                     container=container,
1116                     driver=self
1117                     )
1118
1119        return obj
1120
1121
1122class S3StorageDriver(AWSDriver, BaseS3StorageDriver):
1123    name = 'Amazon S3'
1124    connectionCls = S3SignatureV4Connection
1125    region_name = 'us-east-1'
1126
1127    def __init__(self, key, secret=None, secure=True, host=None, port=None,
1128                 region=None, token=None, **kwargs):
        # Kept for backward compatibility with the old and deprecated
        # per-region driver class approach
1131        if hasattr(self, 'region_name') and not region:
1132            region = self.region_name  # pylint: disable=no-member
1133
1134        self.region_name = region
1135
1136        if region and region not in REGION_TO_HOST_MAP.keys():
1137            raise ValueError('Invalid or unsupported region: %s' % (region))
1138
1139        self.name = 'Amazon S3 (%s)' % (region)
1140
1141        if host is None:
1142            host = REGION_TO_HOST_MAP[region]
1143
1144        super(S3StorageDriver, self).__init__(key=key, secret=secret,
1145                                              secure=secure, host=host,
1146                                              port=port,
1147                                              region=region, token=token,
1148                                              **kwargs)
1149
1150    @classmethod
    def list_regions(cls):
1152        return REGION_TO_HOST_MAP.keys()
1153
1154    def get_object_cdn_url(self, obj,
1155                           ex_expiry=S3_CDN_URL_EXPIRY_HOURS):
1156        """
1157        Return a "presigned URL" for read-only access to object
1158
1159        AWS only - requires AWS signature V4 authenticaiton.
1160
1161        :param obj: Object instance.
1162        :type  obj: :class:`Object`
1163
        :param ex_expiry: The number of hours after which the URL expires.
                          Defaults to 24 hours or the value of the environment
                          variable "LIBCLOUD_S3_CDN_URL_EXPIRY_HOURS",
                          if set.
1168        :type  ex_expiry: ``float``
1169
1170        :return: Presigned URL for the object.
1171        :rtype: ``str``
1172        """
1173
1174        # assemble data for the request we want to pre-sign
1175        # see: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html # noqa
1176        object_path = self._get_object_path(obj.container, obj.name)
1177        now = datetime.utcnow()
1178        duration_seconds = int(ex_expiry * 3600)
1179        credparts = (
1180            self.key,
1181            now.strftime(S3_CDN_URL_DATE_FORMAT),
1182            self.region,
1183            's3',
1184            'aws4_request')
1185        params_to_sign = {
1186            'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
1187            'X-Amz-Credential': '/'.join(credparts),
1188            'X-Amz-Date': now.strftime(S3_CDN_URL_DATETIME_FORMAT),
1189            'X-Amz-Expires': duration_seconds,
1190            'X-Amz-SignedHeaders': 'host'}
1191        headers_to_sign = {'host': self.connection.host}
1192
1193        # generate signature for the pre-signed request
1194        signature = self.connection.signer._get_signature(
1195            params=params_to_sign,
1196            headers=headers_to_sign,
1197            dt=now,
1198            method='GET',
1199            path=object_path,
1200            data=UnsignedPayloadSentinel
1201        )
1202
1203        # Create final params for pre-signed URL
1204        params = params_to_sign.copy()
1205        params['X-Amz-Signature'] = signature
1206
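        # The resulting URL has this general shape (illustrative values):
        #
        #   https://s3.amazonaws.com:443/mybucket/my-object
        #       ?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=...
        #       &X-Amz-Date=20230101T000000Z&X-Amz-Expires=86400
        #       &X-Amz-SignedHeaders=host&X-Amz-Signature=...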
1207        return '{scheme}://{host}:{port}{path}?{params}'.format(
1208            scheme='https' if self.secure else 'http',
1209            host=self.connection.host,
1210            port=self.connection.port,
1211            path=object_path,
1212            params=urlencode(params),
1213        )
1214
1215
1216class S3USEast2Connection(S3SignatureV4Connection):
1217    host = S3_US_EAST2_HOST
1218
1219
1220class S3USEast2StorageDriver(S3StorageDriver):
1221    name = 'Amazon S3 (us-east-2)'
1222    connectionCls = S3USEast2Connection
1223    ex_location_name = 'us-east-2'
1224    region_name = 'us-east-2'
1225
1226
1227class S3USWestConnection(S3SignatureV4Connection):
1228    host = S3_US_WEST_HOST
1229
1230
1231class S3USWestStorageDriver(S3StorageDriver):
1232    name = 'Amazon S3 (us-west-1)'
1233    connectionCls = S3USWestConnection
1234    ex_location_name = 'us-west-1'
1235    region_name = 'us-west-1'
1236
1237
1238class S3USWestOregonConnection(S3SignatureV4Connection):
1239    host = S3_US_WEST_OREGON_HOST
1240
1241
1242class S3USWestOregonStorageDriver(S3StorageDriver):
1243    name = 'Amazon S3 (us-west-2)'
1244    connectionCls = S3USWestOregonConnection
1245    ex_location_name = 'us-west-2'
1246    region_name = 'us-west-2'
1247
1248
1249class S3USGovEastConnection(S3SignatureV4Connection):
1250    host = S3_US_GOV_EAST_HOST
1251
1252
1253class S3USGovEastStorageDriver(S3StorageDriver):
1254    name = 'Amazon S3 (us-gov-east-1)'
1255    connectionCls = S3USGovEastConnection
1256    ex_location_name = 'us-gov-east-1'
1257    region_name = 'us-gov-east-1'
1258
1259
1260class S3USGovWestConnection(S3SignatureV4Connection):
1261    host = S3_US_GOV_WEST_HOST
1262
1263
1264class S3USGovWestStorageDriver(S3StorageDriver):
1265    name = 'Amazon S3 (us-gov-west-1)'
1266    connectionCls = S3USGovWestConnection
1267    ex_location_name = 'us-gov-west-1'
1268    region_name = 'us-gov-west-1'
1269
1270
1271class S3CNNorthWestConnection(S3SignatureV4Connection):
1272    host = S3_CN_NORTHWEST_HOST
1273
1274
1275class S3CNNorthWestStorageDriver(S3StorageDriver):
1276    name = 'Amazon S3 (cn-northwest-1)'
1277    connectionCls = S3CNNorthWestConnection
1278    ex_location_name = 'cn-northwest-1'
1279    region_name = 'cn-northwest-1'
1280
1281
1282class S3CNNorthConnection(S3SignatureV4Connection):
1283    host = S3_CN_NORTH_HOST
1284
1285
1286class S3CNNorthStorageDriver(S3StorageDriver):
1287    name = 'Amazon S3 (cn-north-1)'
1288    connectionCls = S3CNNorthConnection
1289    ex_location_name = 'cn-north-1'
1290    region_name = 'cn-north-1'
1291
1292
1293class S3EUWestConnection(S3SignatureV4Connection):
1294    host = S3_EU_WEST_HOST
1295
1296
1297class S3EUWestStorageDriver(S3StorageDriver):
1298    name = 'Amazon S3 (eu-west-1)'
1299    connectionCls = S3EUWestConnection
1300    ex_location_name = 'EU'
1301    region_name = 'eu-west-1'
1302
1303
1304class S3EUWest2Connection(S3SignatureV4Connection):
1305    host = S3_EU_WEST2_HOST
1306
1307
1308class S3EUWest2StorageDriver(S3StorageDriver):
1309    name = 'Amazon S3 (eu-west-2)'
1310    connectionCls = S3EUWest2Connection
1311    ex_location_name = 'eu-west-2'
1312    region_name = 'eu-west-2'
1313
1314
1315class S3EUCentralConnection(S3SignatureV4Connection):
1316    host = S3_EU_CENTRAL_HOST
1317
1318
1319class S3EUCentralStorageDriver(S3StorageDriver):
1320    name = 'Amazon S3 (eu-central-1)'
1321    connectionCls = S3EUCentralConnection
1322    ex_location_name = 'eu-central-1'
1323    region_name = 'eu-central-1'
1324
1325
1326class S3APSEConnection(S3SignatureV4Connection):
1327    host = S3_AP_SOUTHEAST_HOST
1328
1329
1330class S3EUNorth1Connection(S3SignatureV4Connection):
1331    host = S3_EU_NORTH1_HOST
1332
1333
1334class S3EUNorth1StorageDriver(S3StorageDriver):
1335    name = 'Amazon S3 (eu-north-1)'
1336    connectionCls = S3EUNorth1Connection
1337    ex_location_name = 'eu-north-1'
1338    region_name = 'eu-north-1'
1339
1340
1341class S3APSEStorageDriver(S3StorageDriver):
1342    name = 'Amazon S3 (ap-southeast-1)'
1343    connectionCls = S3APSEConnection
1344    ex_location_name = 'ap-southeast-1'
1345    region_name = 'ap-southeast-1'
1346
1347
1348class S3APSE2Connection(S3SignatureV4Connection):
1349    host = S3_AP_SOUTHEAST2_HOST
1350
1351
1352class S3APSE2StorageDriver(S3StorageDriver):
1353    name = 'Amazon S3 (ap-southeast-2)'
1354    connectionCls = S3APSE2Connection
1355    ex_location_name = 'ap-southeast-2'
1356    region_name = 'ap-southeast-2'
1357
1358
1359class S3APNE1Connection(S3SignatureV4Connection):
1360    host = S3_AP_NORTHEAST1_HOST
1361
1362
1363S3APNEConnection = S3APNE1Connection
1364
1365
1366class S3APNE1StorageDriver(S3StorageDriver):
1367    name = 'Amazon S3 (ap-northeast-1)'
1368    connectionCls = S3APNEConnection
1369    ex_location_name = 'ap-northeast-1'
1370    region_name = 'ap-northeast-1'
1371
1372
1373S3APNEStorageDriver = S3APNE1StorageDriver
1374
1375
1376class S3APNE2Connection(S3SignatureV4Connection):
1377    host = S3_AP_NORTHEAST2_HOST
1378
1379
1380class S3APNE2StorageDriver(S3StorageDriver):
1381    name = 'Amazon S3 (ap-northeast-2)'
1382    connectionCls = S3APNE2Connection
1383    ex_location_name = 'ap-northeast-2'
1384    region_name = 'ap-northeast-2'
1385
1386
1387class S3APSouthConnection(S3SignatureV4Connection):
1388    host = S3_AP_SOUTH_HOST
1389
1390
1391class S3APSouthStorageDriver(S3StorageDriver):
1392    name = 'Amazon S3 (ap-south-1)'
1393    connectionCls = S3APSouthConnection
1394    ex_location_name = 'ap-south-1'
1395    region_name = 'ap-south-1'
1396
1397
1398class S3SAEastConnection(S3SignatureV4Connection):
1399    host = S3_SA_EAST_HOST
1400
1401
1402class S3SAEastStorageDriver(S3StorageDriver):
1403    name = 'Amazon S3 (sa-east-1)'
1404    connectionCls = S3SAEastConnection
1405    ex_location_name = 'sa-east-1'
1406    region_name = 'sa-east-1'
1407
1408
1409class S3CACentralConnection(S3SignatureV4Connection):
1410    host = S3_CA_CENTRAL_HOST
1411
1412
1413class S3CACentralStorageDriver(S3StorageDriver):
1414    name = 'Amazon S3 (ca-central-1)'
1415    connectionCls = S3CACentralConnection
1416    ex_location_name = 'ca-central-1'
1417    region_name = 'ca-central-1'
1418