1#!/usr/bin/python
2#
3# This is a free software: you can redistribute it and/or modify
4# it under the terms of the GNU General Public License as published by
5# the Free Software Foundation, either version 3 of the License, or
6# (at your option) any later version.
7#
8# This Ansible library is distributed in the hope that it will be useful,
9# but WITHOUT ANY WARRANTY; without even the implied warranty of
10# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11# GNU General Public License for more details.
12#
13# You should have received a copy of the GNU General Public License
14# along with this library.  If not, see <http://www.gnu.org/licenses/>.
15
# Release/support metadata consumed by Ansible's module tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
19
20
# Option documentation (YAML inside a string; parsed by ansible-doc).
DOCUMENTATION = '''
---
module: s3_bucket
short_description: Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus and FakeS3
description:
    - Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus and FakeS3
version_added: "2.0"
requirements: [ boto3 ]
author: "Rob White (@wimnat)"
options:
  force:
    description:
      - When trying to delete a bucket, delete all keys (including versions and delete markers)
        in the bucket first (an s3 bucket must be empty for a successful deletion)
    type: bool
    default: 'no'
  name:
    description:
      - Name of the s3 bucket
    required: true
  policy:
    description:
      - The JSON policy as a string.
  s3_url:
    description:
      - S3 URL endpoint for usage with DigitalOcean, Ceph, Eucalyptus and fakes3 etc.
      - Assumes AWS if not specified.
      - For Walrus, use FQDN of the endpoint without scheme nor path.
    aliases: [ S3_URL ]
  ceph:
    description:
      - Enable API compatibility with Ceph. It takes into account the S3 API subset working
        with Ceph in order to provide the same module behaviour where possible.
    type: bool
    version_added: "2.2"
  requester_pays:
    description:
      - With Requester Pays buckets, the requester instead of the bucket owner pays the cost
        of the request and the data download from the bucket.
    type: bool
    default: False
  state:
    description:
      - Create or remove the s3 bucket
    required: false
    default: present
    choices: [ 'present', 'absent' ]
  tags:
    description:
      - tags dict to apply to bucket
  purge_tags:
    description:
      - whether to remove tags that aren't present in the C(tags) parameter
    type: bool
    default: True
    version_added: "2.9"
  versioning:
    description:
      - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
    type: bool
  encryption:
    description:
      - Describes the default server-side encryption to apply to new objects in the bucket.
        In order to remove the server-side encryption, the encryption needs to be set to 'none' explicitly.
    choices: [ 'none', 'AES256', 'aws:kms' ]
    version_added: "2.9"
  encryption_key_id:
    description: KMS master key ID to use for the default encryption. This parameter is allowed if encryption is aws:kms. If
                 not specified then it will default to the AWS provided KMS key.
    version_added: "2.9"
extends_documentation_fragment:
    - aws
    - ec2
notes:
    - If C(requestPayment), C(policy), C(tagging) or C(versioning)
      operations/API aren't implemented by the endpoint, module doesn't fail
      if related parameters I(requester_pays), I(policy), I(tags) or
      I(versioning) are C(None).
'''
100
# Playbook usage examples (YAML inside a string; rendered by ansible-doc).
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.

# Create a simple s3 bucket
- s3_bucket:
    name: mys3bucket
    state: present

# Create a simple s3 bucket on Ceph Rados Gateway
- s3_bucket:
    name: mys3bucket
    s3_url: http://your-ceph-rados-gateway-server.xxx
    ceph: true

# Remove an s3 bucket and any keys it contains
- s3_bucket:
    name: mys3bucket
    state: absent
    force: yes

# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag
- s3_bucket:
    name: mys3bucket
    policy: "{{ lookup('file','policy.json') }}"
    requester_pays: yes
    versioning: yes
    tags:
      example: tag1
      another: tag2

# Create a simple DigitalOcean Spaces bucket using their provided regional endpoint
- s3_bucket:
    name: mydobucket
    s3_url: 'https://nyc3.digitaloceanspaces.com'

'''
137
138import json
139import os
140import time
141
142from ansible.module_utils.six.moves.urllib.parse import urlparse
143from ansible.module_utils.six import string_types
144from ansible.module_utils.basic import to_text
145from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
146from ansible.module_utils.ec2 import compare_policies, ec2_argument_spec, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
147from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, AWSRetry
148
149try:
150    from botocore.exceptions import BotoCoreError, ClientError, EndpointConnectionError, WaiterError
151except ImportError:
152    pass  # handled by AnsibleAWSModule
153
154
def create_or_update_bucket(s3_client, module, location):
    """Ensure the bucket exists and converge versioning, requester-pays,
    policy, tags and default encryption to the requested module parameters.

    Exits the module via ``module.exit_json(changed=..., name=..., **result)``;
    each feature section records its final state in ``result``. Never returns
    normally.
    """

    policy = module.params.get("policy")
    name = module.params.get("name")
    requester_pays = module.params.get("requester_pays")
    tags = module.params.get("tags")
    purge_tags = module.params.get("purge_tags")
    versioning = module.params.get("versioning")
    encryption = module.params.get("encryption")
    encryption_key_id = module.params.get("encryption_key_id")
    changed = False
    result = {}

    try:
        bucket_is_present = bucket_exists(s3_client, name)
    except EndpointConnectionError as e:
        module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Failed to check bucket presence")

    if not bucket_is_present:
        try:
            bucket_changed = create_bucket(s3_client, name, location)
            s3_client.get_waiter('bucket_exists').wait(Bucket=name)
            changed = changed or bucket_changed
        except WaiterError as e:
            module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available')
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Failed while creating bucket")

    # Versioning
    # Non-AWS endpoints may not implement this API; a NotImplemented error is
    # tolerated unless the user actually asked for the feature (see DOCUMENTATION notes).
    try:
        versioning_status = get_bucket_versioning(s3_client, name)
    except BotoCoreError as exp:
        module.fail_json_aws(exp, msg="Failed to get bucket versioning")
    except ClientError as exp:
        if exp.response['Error']['Code'] != 'NotImplemented' or versioning is not None:
            module.fail_json_aws(exp, msg="Failed to get bucket versioning")
    else:
        if versioning is not None:
            required_versioning = None
            if versioning and versioning_status.get('Status') != "Enabled":
                required_versioning = 'Enabled'
            elif not versioning and versioning_status.get('Status') == "Enabled":
                # Once enabled, versioning can only be suspended, never removed
                required_versioning = 'Suspended'

            if required_versioning:
                try:
                    put_bucket_versioning(s3_client, name, required_versioning)
                    changed = True
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(e, msg="Failed to update bucket versioning")

                versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)

        # This output format is there to ensure compatibility with previous versions of the module
        result['versioning'] = {
            'Versioning': versioning_status.get('Status', 'Disabled'),
            'MfaDelete': versioning_status.get('MFADelete', 'Disabled'),
        }

    # Requester pays (same NotImplemented tolerance as versioning above)
    try:
        requester_pays_status = get_bucket_request_payment(s3_client, name)
    except BotoCoreError as exp:
        module.fail_json_aws(exp, msg="Failed to get bucket request payment")
    except ClientError as exp:
        if exp.response['Error']['Code'] != 'NotImplemented' or requester_pays is not None:
            module.fail_json_aws(exp, msg="Failed to get bucket request payment")
    else:
        if requester_pays:
            payer = 'Requester' if requester_pays else 'BucketOwner'
            if requester_pays_status != payer:
                put_bucket_request_payment(s3_client, name, payer)
                requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False)
                if requester_pays_status is None:
                    # We have seen that it happens quite a lot of times that the put request was not taken into
                    # account, so we retry one more time
                    put_bucket_request_payment(s3_client, name, payer)
                    requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
                changed = True

        result['requester_pays'] = requester_pays

    # Policy (same NotImplemented tolerance as versioning above)
    try:
        current_policy = get_bucket_policy(s3_client, name)
    except BotoCoreError as exp:
        module.fail_json_aws(exp, msg="Failed to get bucket policy")
    except ClientError as exp:
        if exp.response['Error']['Code'] != 'NotImplemented' or policy is not None:
            module.fail_json_aws(exp, msg="Failed to get bucket policy")
    else:
        if policy is not None:
            if isinstance(policy, string_types):
                policy = json.loads(policy)

            if not policy and current_policy:
                # An empty desired policy means: remove the existing policy
                try:
                    delete_bucket_policy(s3_client, name)
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(e, msg="Failed to delete bucket policy")
                current_policy = wait_policy_is_applied(module, s3_client, name, policy)
                changed = True
            elif compare_policies(current_policy, policy):
                try:
                    put_bucket_policy(s3_client, name, policy)
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(e, msg="Failed to update bucket policy")
                current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False)
                if current_policy is None:
                    # As for request payment, it happens quite a lot of times that the put request was not taken into
                    # account, so we retry one more time
                    put_bucket_policy(s3_client, name, policy)
                    current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
                changed = True

        result['policy'] = current_policy

    # Tags (same NotImplemented tolerance as versioning above)
    try:
        current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
    except BotoCoreError as exp:
        module.fail_json_aws(exp, msg="Failed to get bucket tags")
    except ClientError as exp:
        if exp.response['Error']['Code'] != 'NotImplemented' or tags is not None:
            module.fail_json_aws(exp, msg="Failed to get bucket tags")
    else:
        if tags is not None:
            # Tags are always returned as text
            tags = dict((to_text(k), to_text(v)) for k, v in tags.items())
            if not purge_tags:
                # Ensure existing tags that aren't updated by desired tags remain
                current_copy = current_tags_dict.copy()
                current_copy.update(tags)
                tags = current_copy
            if current_tags_dict != tags:
                if tags:
                    try:
                        put_bucket_tagging(s3_client, name, tags)
                    except (BotoCoreError, ClientError) as e:
                        module.fail_json_aws(e, msg="Failed to update bucket tags")
                else:
                    if purge_tags:
                        try:
                            delete_bucket_tagging(s3_client, name)
                        except (BotoCoreError, ClientError) as e:
                            module.fail_json_aws(e, msg="Failed to delete bucket tags")
                current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags)
                changed = True

        result['tags'] = current_tags_dict

    # Encryption
    # get_bucket_encryption only exists on new-enough botocore (checked via hasattr)
    if hasattr(s3_client, "get_bucket_encryption"):
        try:
            current_encryption = get_bucket_encryption(s3_client, name)
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to get bucket encryption")
    elif encryption is not None:
        module.fail_json(msg="Using bucket encryption requires botocore version >= 1.7.41")

    if encryption is not None:
        current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None
        current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None
        if encryption == 'none' and current_encryption_algorithm is not None:
            try:
                delete_bucket_encryption(s3_client, name)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Failed to delete bucket encryption")
            current_encryption = wait_encryption_is_applied(module, s3_client, name, None)
            changed = True
        elif encryption != 'none' and (encryption != current_encryption_algorithm) or (encryption == 'aws:kms' and current_encryption_key != encryption_key_id):
            expected_encryption = {'SSEAlgorithm': encryption}
            if encryption == 'aws:kms':
                expected_encryption.update({'KMSMasterKeyID': encryption_key_id})
            try:
                put_bucket_encryption(s3_client, name, expected_encryption)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Failed to set bucket encryption")
            current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption)
            changed = True

        result['encryption'] = current_encryption

    module.exit_json(changed=changed, name=name, **result)
341
342
def bucket_exists(s3_client, bucket_name):
    """Return True if a bucket named *bucket_name* is owned by the caller.

    head_bucket appeared to be really inconsistent, so we list all buckets
    and scan for the name instead, even if we know it's less performant.
    """
    # Bug fix: list_buckets() accepts no parameters; passing Bucket=... makes
    # botocore raise a ParamValidationError before the request is even sent.
    all_buckets = s3_client.list_buckets()['Buckets']
    return any(bucket['Name'] == bucket_name for bucket in all_buckets)
348
349
@AWSRetry.exponential_backoff(max_delay=120)
def create_bucket(s3_client, bucket_name, location):
    """Create the bucket in *location*; return True when a create call was
    issued, False when the bucket already exists and is owned by us."""
    config = {}
    if location not in ('us-east-1', None):
        # us-east-1 (and an unset region) must not send a LocationConstraint
        config['LocationConstraint'] = location
    try:
        if config:
            s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=config)
        else:
            s3_client.create_bucket(Bucket=bucket_name)
    except ClientError as e:
        if e.response['Error']['Code'] == 'BucketAlreadyOwnedByYou':
            # We should never get here since presence is checked before calling
            # create_or_update_bucket, but the AWS API sometimes fails to report
            # bucket presence, so we swallow this particular error.
            return False
        raise e
    return True
369
370
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def put_bucket_tagging(s3_client, bucket_name, tags):
    """Replace the bucket's whole tag set with *tags* (an Ansible-style dict)."""
    s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)})
374
375
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def put_bucket_policy(s3_client, bucket_name, policy):
    """Set the bucket policy; *policy* is a dict and is serialized to JSON here."""
    s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
379
380
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def delete_bucket_policy(s3_client, bucket_name):
    """Remove the bucket policy entirely."""
    s3_client.delete_bucket_policy(Bucket=bucket_name)
384
385
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def get_bucket_policy(s3_client, bucket_name):
    """Return the bucket policy parsed into a dict, or None if no policy is set."""
    try:
        current_policy = json.loads(s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy'))
    except ClientError as e:
        # A bucket without a policy is reported as an error, not as an empty policy
        if e.response['Error']['Code'] == 'NoSuchBucketPolicy':
            current_policy = None
        else:
            raise e
    return current_policy
396
397
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def put_bucket_request_payment(s3_client, bucket_name, payer):
    """Set who pays for requests/downloads: 'Requester' or 'BucketOwner'."""
    s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer})
401
402
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def get_bucket_request_payment(s3_client, bucket_name):
    """Return the current request-payment payer ('Requester' or 'BucketOwner')."""
    return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer')
406
407
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def get_bucket_versioning(s3_client, bucket_name):
    """Return the raw get_bucket_versioning response (keys: Status, MFADelete)."""
    return s3_client.get_bucket_versioning(Bucket=bucket_name)
411
412
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def put_bucket_versioning(s3_client, bucket_name, required_versioning):
    """Set the bucket versioning Status ('Enabled' or 'Suspended')."""
    s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': required_versioning})
416
417
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def get_bucket_encryption(s3_client, bucket_name):
    """Return the first default-encryption rule's ApplyServerSideEncryptionByDefault
    dict, or None when no default encryption is configured."""
    try:
        result = s3_client.get_bucket_encryption(Bucket=bucket_name)
        return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('ApplyServerSideEncryptionByDefault')
    except ClientError as e:
        # Unconfigured encryption comes back as an error, not an empty config
        if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
            return None
        else:
            raise e
    except (IndexError, KeyError):
        # An empty/odd Rules list also means "no default encryption"
        return None
430
431
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def put_bucket_encryption(s3_client, bucket_name, encryption):
    """Set the bucket's default encryption; *encryption* is an
    ApplyServerSideEncryptionByDefault dict (SSEAlgorithm, optional KMSMasterKeyID)."""
    server_side_encryption_configuration = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]}
    s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration)
436
437
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def delete_bucket_tagging(s3_client, bucket_name):
    """Remove the bucket's entire tag set."""
    s3_client.delete_bucket_tagging(Bucket=bucket_name)
441
442
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def delete_bucket_encryption(s3_client, bucket_name):
    """Remove the bucket's default server-side encryption configuration."""
    s3_client.delete_bucket_encryption(Bucket=bucket_name)
446
447
@AWSRetry.exponential_backoff(max_delay=120)
def delete_bucket(s3_client, bucket_name):
    """Delete the bucket, ignoring the case where it is already gone."""
    try:
        s3_client.delete_bucket(Bucket=bucket_name)
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchBucket':
            # This means the bucket should have been in a deleting state when we
            # checked its existence. We just ignore the error.
            pass
        else:
            raise e
459
460
def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
    """Poll (12 tries, 5s apart) until the bucket policy matches
    *expected_policy*; return it, or None / fail the module on timeout."""
    attempts_left = 12
    while attempts_left:
        attempts_left -= 1
        try:
            current_policy = get_bucket_policy(s3_client, bucket_name)
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to get bucket policy")
        # compare_policies returns True when the two policies differ
        if not compare_policies(current_policy, expected_policy):
            return current_policy
        time.sleep(5)
    if should_fail:
        module.fail_json(msg="Bucket policy failed to apply in the expected time")
    return None
476
477
def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
    """Poll (12 tries, 5s apart) until the request-payment payer equals
    *expected_payer*; return it, or None / fail the module on timeout."""
    attempts_left = 12
    while attempts_left:
        attempts_left -= 1
        try:
            payer_now = get_bucket_request_payment(s3_client, bucket_name)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Failed to get bucket request payment")
        if payer_now == expected_payer:
            return payer_now
        time.sleep(5)
    if should_fail:
        module.fail_json(msg="Bucket request payment failed to apply in the expected time")
    return None
492
493
def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption):
    """Poll (12 tries, 5s apart) until the bucket's default encryption equals
    *expected_encryption*; return it, or fail the module on timeout."""
    for _attempt in range(12):
        try:
            current = get_bucket_encryption(s3_client, bucket_name)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Failed to get updated encryption for bucket")
        if current == expected_encryption:
            return current
        time.sleep(5)
    module.fail_json(msg="Bucket encryption failed to apply in the expected time")
505
506
def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
    """Poll (24 tries, 8s apart) until the bucket's versioning Status equals
    *required_versioning*; return the response, or fail the module on timeout."""
    for _attempt in range(24):
        try:
            status = get_bucket_versioning(s3_client, bucket_name)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Failed to get updated versioning for bucket")
        if status.get('Status') == required_versioning:
            return status
        time.sleep(8)
    module.fail_json(msg="Bucket versioning failed to apply in the expected time")
518
519
def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
    """Poll (12 tries, 5s apart) until the bucket's tags equal
    *expected_tags_dict*; return them, or fail the module on timeout."""
    for dummy in range(0, 12):
        try:
            current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
        except (ClientError, BotoCoreError) as e:
            # Bug fix: this message used to say "bucket policy" (copy/paste
            # from wait_policy_is_applied) even though tags are being fetched.
            module.fail_json_aws(e, msg="Failed to get bucket tags")
        if current_tags_dict != expected_tags_dict:
            time.sleep(5)
        else:
            return current_tags_dict
    module.fail_json(msg="Bucket tags failed to apply in the expected time")
531
532
def get_current_bucket_tags_dict(s3_client, bucket_name):
    """Return the bucket's tags as a plain dict; empty dict when no tag set exists."""
    try:
        tag_set = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet')
    except ClientError as e:
        # S3 reports a missing tag set as an error rather than an empty list
        if e.response['Error']['Code'] != 'NoSuchTagSet':
            raise e
        return {}

    return boto3_tag_list_to_ansible_dict(tag_set)
542
543
def paginated_list(s3_client, **pagination_params):
    """Yield, page by page, the lists of object keys from list_objects_v2."""
    paginator = s3_client.get_paginator('list_objects_v2')
    for page in paginator.paginate(**pagination_params):
        yield [item['Key'] for item in page.get('Contents', [])]
548
549
def paginated_versions_list(s3_client, **pagination_params):
    """Yield pages of (key, version_id) tuples covering every object version
    and delete marker; yields a single empty page if the bucket is gone."""
    try:
        paginator = s3_client.get_paginator('list_object_versions')
        for page in paginator.paginate(**pagination_params):
            # We have to merge the Versions and DeleteMarkers lists here, as
            # DeleteMarkers can still prevent a bucket deletion
            entries = page.get('Versions', []) + page.get('DeleteMarkers', [])
            yield [(entry['Key'], entry['VersionId']) for entry in entries]
    except is_boto3_error_code('NoSuchBucket'):
        yield []
558
559
def destroy_bucket(s3_client, module):
    """Delete the bucket, optionally force-emptying it (all versions and
    delete markers) first. Exits the module; never returns normally."""

    force = module.params.get("force")
    name = module.params.get("name")
    try:
        bucket_is_present = bucket_exists(s3_client, name)
    except EndpointConnectionError as e:
        module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Failed to check bucket presence")

    if not bucket_is_present:
        module.exit_json(changed=False)

    if force:
        # if there are contents then we need to delete them (including versions) before we can delete the bucket
        try:
            for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
                formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs]
                for fk in formatted_keys:
                    # remove VersionId from cases where they are `None` so that
                    # unversioned objects are deleted using `DeleteObject`
                    # rather than `DeleteObjectVersion`, improving backwards
                    # compatibility with older IAM policies.
                    if not fk.get('VersionId'):
                        fk.pop('VersionId')

                if formatted_keys:
                    resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys})
                    if resp.get('Errors'):
                        # partial failure: report exactly which keys could not be deleted
                        module.fail_json(
                            msg='Could not empty bucket before deleting. Could not delete objects: {0}'.format(
                                ', '.join([k['Key'] for k in resp['Errors']])
                            ),
                            errors=resp['Errors'], response=resp
                        )
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Failed while deleting bucket")

    try:
        delete_bucket(s3_client, name)
        # wait up to 5 minutes (60 attempts x 5s) for the deletion to propagate
        s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60))
    except WaiterError as e:
        module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.')
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Failed to delete bucket")

    module.exit_json(changed=True)
608
609
def is_fakes3(s3_url):
    """Return True when *s3_url* uses the fakes3:// or fakes3s:// scheme."""
    if s3_url is None:
        return False
    return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
616
617
def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url):
    """Build a boto3 S3 client, adjusting endpoint/SSL settings for Ceph RGW
    and fakes3 flavours; plain AWS (or a custom endpoint) otherwise."""
    if s3_url and ceph:  # TODO - test this
        parsed = urlparse(s3_url)
        conn_params = dict(module=module, conn_type='client', resource='s3', use_ssl=parsed.scheme == 'https',
                           region=location, endpoint=s3_url, **aws_connect_kwargs)
    elif is_fakes3(s3_url):
        parsed = urlparse(s3_url)
        if parsed.scheme == 'fakes3s':
            protocol = "https"
            port = parsed.port if parsed.port is not None else 443
        else:
            protocol = "http"
            port = parsed.port if parsed.port is not None else 80
        endpoint = "%s://%s:%s" % (protocol, parsed.hostname, to_text(port))
        conn_params = dict(module=module, conn_type='client', resource='s3', region=location,
                           endpoint=endpoint,
                           use_ssl=parsed.scheme == 'fakes3s', **aws_connect_kwargs)
    else:
        conn_params = dict(module=module, conn_type='client', resource='s3', region=location,
                           endpoint=s3_url, **aws_connect_kwargs)
    return boto3_conn(**conn_params)
639
640
def main():
    """Module entry point: parse arguments, create the S3 client (AWS, Ceph
    RGW or fakes3) and dispatch to create/update or destroy."""

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            force=dict(default=False, type='bool'),
            policy=dict(type='json'),
            name=dict(required=True),
            requester_pays=dict(default=False, type='bool'),
            s3_url=dict(aliases=['S3_URL']),
            state=dict(default='present', choices=['present', 'absent']),
            tags=dict(type='dict'),
            purge_tags=dict(type='bool', default=True),
            versioning=dict(type='bool'),
            ceph=dict(default=False, type='bool'),
            encryption=dict(choices=['none', 'AES256', 'aws:kms']),
            encryption_key_id=dict()
        )
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[['encryption', 'aws:kms', ['encryption_key_id']]]
    )

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)

    if region in ('us-east-1', '', None):
        # default to US Standard region
        location = 'us-east-1'
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    s3_url = module.params.get('s3_url')
    ceph = module.params.get('ceph')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    if ceph and not s3_url:
        module.fail_json(msg='ceph flavour requires s3_url')

    # Look at s3_url and tweak connection settings
    # if connecting to Ceph RGW, Walrus or fakes3
    if s3_url:
        # NOTE(review): presumably these kwargs are not accepted when a custom
        # endpoint is in use — confirm against boto3_conn before changing.
        for key in ['validate_certs', 'security_token', 'profile_name']:
            aws_connect_kwargs.pop(key, None)
    s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url)

    if s3_client is None:  # this should never happen
        module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')

    state = module.params.get("state")
    encryption = module.params.get("encryption")
    encryption_key_id = module.params.get("encryption_key_id")

    # Parameter validation
    if encryption_key_id is not None and encryption is None:
        module.fail_json(msg="You must specify encryption parameter along with encryption_key_id.")
    elif encryption_key_id is not None and encryption != 'aws:kms':
        module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")

    if state == 'present':
        create_or_update_bucket(s3_client, module, location)
    elif state == 'absent':
        destroy_bucket(s3_client, module)
710
711
# Run only when executed as a module by Ansible, not when imported.
if __name__ == '__main__':
    main()
714