1# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2# 3# Licensed under the Apache License, Version 2.0 (the "License"). You 4# may not use this file except in compliance with the License. A copy of 5# the License is located at 6# 7# http://aws.amazon.com/apache2.0/ 8# 9# or in the "license" file accompanying this file. This file is 10# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11# ANY KIND, either express or implied. See the License for the specific 12# language governing permissions and limitations under the License. 13import os 14import logging 15import sys 16 17from botocore.client import Config 18from dateutil.parser import parse 19from dateutil.tz import tzlocal 20 21from awscli.compat import six 22from awscli.compat import queue 23from awscli.customizations.commands import BasicCommand 24from awscli.customizations.s3.comparator import Comparator 25from awscli.customizations.s3.fileinfobuilder import FileInfoBuilder 26from awscli.customizations.s3.fileformat import FileFormat 27from awscli.customizations.s3.filegenerator import FileGenerator 28from awscli.customizations.s3.fileinfo import FileInfo 29from awscli.customizations.s3.filters import create_filter 30from awscli.customizations.s3.s3handler import S3TransferHandlerFactory 31from awscli.customizations.s3.utils import find_bucket_key, AppendFilter, \ 32 find_dest_path_comp_key, human_readable_size, \ 33 RequestParamsMapper, split_s3_bucket_key, block_unsupported_resources 34from awscli.customizations.utils import uni_print 35from awscli.customizations.s3.syncstrategy.base import MissingFileSync, \ 36 SizeAndLastModifiedSync, NeverSync 37from awscli.customizations.s3 import transferconfig 38 39 40LOGGER = logging.getLogger(__name__) 41 42 43RECURSIVE = {'name': 'recursive', 'action': 'store_true', 'dest': 'dir_op', 44 'help_text': ( 45 "Command is performed on all files or objects " 46 "under the specified directory or prefix.")} 47 48 49HUMAN_READABLE = {'name': 'human-readable', 'action': 'store_true', 50 'help_text': "Displays file sizes in human readable format."} 51 52 53SUMMARIZE = {'name': 'summarize', 'action': 'store_true', 54 'help_text': ( 55 "Displays summary information " 56 "(number of objects, total size).")} 57 58 59DRYRUN = {'name': 'dryrun', 'action': 'store_true', 60 'help_text': ( 61 "Displays the operations that would be performed using the " 62 "specified command without actually running them.")} 63 64 65QUIET = {'name': 'quiet', 'action': 'store_true', 66 'help_text': ( 67 "Does not display the operations performed from the specified " 68 "command.")} 69 70 71FORCE = {'name': 'force', 'action': 'store_true', 72 'help_text': ( 73 "Deletes all objects in the bucket including the bucket itself. " 74 "Note that versioned objects will not be deleted in this " 75 "process which would cause the bucket deletion to fail because " 76 "the bucket would not be empty. To delete versioned " 77 "objects use the ``s3api delete-object`` command with " 78 "the ``--version-id`` parameter.")} 79 80 81FOLLOW_SYMLINKS = {'name': 'follow-symlinks', 'action': 'store_true', 82 'default': True, 'group_name': 'follow_symlinks', 83 'help_text': ( 84 "Symbolic links are followed " 85 "only when uploading to S3 from the local filesystem. " 86 "Note that S3 does not support symbolic links, so the " 87 "contents of the link target are uploaded under the " 88 "name of the link. When neither ``--follow-symlinks`` " 89 "nor ``--no-follow-symlinks`` is specified, the default " 90 "is to follow symlinks.")} 91 92 93NO_FOLLOW_SYMLINKS = {'name': 'no-follow-symlinks', 'action': 'store_false', 94 'dest': 'follow_symlinks', 'default': True, 95 'group_name': 'follow_symlinks'} 96 97 98NO_GUESS_MIME_TYPE = {'name': 'no-guess-mime-type', 'action': 'store_false', 99 'dest': 'guess_mime_type', 'default': True, 100 'help_text': ( 101 "Do not try to guess the mime type for " 102 "uploaded files. By default the mime type of a " 103 "file is guessed when it is uploaded.")} 104 105 106CONTENT_TYPE = {'name': 'content-type', 107 'help_text': ( 108 "Specify an explicit content type for this operation. " 109 "This value overrides any guessed mime types.")} 110 111 112EXCLUDE = {'name': 'exclude', 'action': AppendFilter, 'nargs': 1, 113 'dest': 'filters', 114 'help_text': ( 115 "Exclude all files or objects from the command that matches " 116 "the specified pattern.")} 117 118 119INCLUDE = {'name': 'include', 'action': AppendFilter, 'nargs': 1, 120 'dest': 'filters', 121 'help_text': ( 122 "Don't exclude files or objects " 123 "in the command that match the specified pattern. " 124 'See <a href="http://docs.aws.amazon.com/cli/latest/reference' 125 '/s3/index.html#use-of-exclude-and-include-filters">Use of ' 126 'Exclude and Include Filters</a> for details.')} 127 128 129ACL = {'name': 'acl', 130 'choices': ['private', 'public-read', 'public-read-write', 131 'authenticated-read', 'aws-exec-read', 'bucket-owner-read', 132 'bucket-owner-full-control', 'log-delivery-write'], 133 'help_text': ( 134 "Sets the ACL for the object when the command is " 135 "performed. If you use this parameter you must have the " 136 '"s3:PutObjectAcl" permission included in the list of actions ' 137 "for your IAM policy. " 138 "Only accepts values of ``private``, ``public-read``, " 139 "``public-read-write``, ``authenticated-read``, ``aws-exec-read``, " 140 "``bucket-owner-read``, ``bucket-owner-full-control`` and " 141 "``log-delivery-write``. " 142 'See <a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/' 143 'acl-overview.html#canned-acl">Canned ACL</a> for details')} 144 145 146GRANTS = { 147 'name': 'grants', 'nargs': '+', 148 'help_text': ( 149 '<p>Grant specific permissions to individual users or groups. You ' 150 'can supply a list of grants of the form</p><codeblock>--grants ' 151 'Permission=Grantee_Type=Grantee_ID [Permission=Grantee_Type=' 152 'Grantee_ID ...]</codeblock>To specify the same permission type ' 153 'for multiple ' 154 'grantees, specify the permission as such as <codeblock>--grants ' 155 'Permission=Grantee_Type=Grantee_ID,Grantee_Type=Grantee_ID,...' 156 '</codeblock>Each value contains the following elements:' 157 '<ul><li><code>Permission</code> - Specifies ' 158 'the granted permissions, and can be set to read, readacl, ' 159 'writeacl, or full.</li><li><code>Grantee_Type</code> - ' 160 'Specifies how the grantee is to be identified, and can be set ' 161 'to uri or id.</li><li><code>Grantee_ID</code> - ' 162 'Specifies the grantee based on Grantee_Type. The ' 163 '<code>Grantee_ID</code> value can be one of:<ul><li><b>uri</b> ' 164 '- The group\'s URI. For more information, see ' 165 '<a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/' 166 'ACLOverview.html#SpecifyingGrantee">' 167 'Who Is a Grantee?</a></li>' 168 '<li><b>id</b> - The account\'s canonical ID</li></ul>' 169 '</li></ul>' 170 'For more information on Amazon S3 access control, see ' 171 '<a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/' 172 'UsingAuthAccess.html">Access Control</a>')} 173 174 175SSE = { 176 'name': 'sse', 'nargs': '?', 'const': 'AES256', 177 'choices': ['AES256', 'aws:kms'], 178 'help_text': ( 179 'Specifies server-side encryption of the object in S3. ' 180 'Valid values are ``AES256`` and ``aws:kms``. If the parameter is ' 181 'specified but no value is provided, ``AES256`` is used.' 182 ) 183} 184 185 186SSE_C = { 187 'name': 'sse-c', 'nargs': '?', 'const': 'AES256', 'choices': ['AES256'], 188 'help_text': ( 189 'Specifies server-side encryption using customer provided keys ' 190 'of the the object in S3. ``AES256`` is the only valid value. ' 191 'If the parameter is specified but no value is provided, ' 192 '``AES256`` is used. If you provide this value, ``--sse-c-key`` ' 193 'must be specified as well.' 194 ) 195} 196 197 198SSE_C_KEY = { 199 'name': 'sse-c-key', 'cli_type_name': 'blob', 200 'help_text': ( 201 'The customer-provided encryption key to use to server-side ' 202 'encrypt the object in S3. If you provide this value, ' 203 '``--sse-c`` must be specified as well. The key provided should ' 204 '**not** be base64 encoded.' 205 ) 206} 207 208 209SSE_KMS_KEY_ID = { 210 'name': 'sse-kms-key-id', 211 'help_text': ( 212 'The customer-managed AWS Key Management Service (KMS) key ID that ' 213 'should be used to server-side encrypt the object in S3. You should ' 214 'only provide this parameter if you are using a customer managed ' 215 'customer master key (CMK) and not the AWS managed KMS CMK.' 216 ) 217} 218 219 220SSE_C_COPY_SOURCE = { 221 'name': 'sse-c-copy-source', 'nargs': '?', 222 'const': 'AES256', 'choices': ['AES256'], 223 'help_text': ( 224 'This parameter should only be specified when copying an S3 object ' 225 'that was encrypted server-side with a customer-provided ' 226 'key. It specifies the algorithm to use when decrypting the source ' 227 'object. ``AES256`` is the only valid ' 228 'value. If the parameter is specified but no value is provided, ' 229 '``AES256`` is used. If you provide this value, ' 230 '``--sse-c-copy-source-key`` must be specified as well. ' 231 ) 232} 233 234 235SSE_C_COPY_SOURCE_KEY = { 236 'name': 'sse-c-copy-source-key', 'cli_type_name': 'blob', 237 'help_text': ( 238 'This parameter should only be specified when copying an S3 object ' 239 'that was encrypted server-side with a customer-provided ' 240 'key. Specifies the customer-provided encryption key for Amazon S3 ' 241 'to use to decrypt the source object. The encryption key provided ' 242 'must be one that was used when the source object was created. ' 243 'If you provide this value, ``--sse-c-copy-source`` be specified as ' 244 'well. The key provided should **not** be base64 encoded.' 245 ) 246} 247 248 249STORAGE_CLASS = {'name': 'storage-class', 250 'choices': ['STANDARD', 'REDUCED_REDUNDANCY', 'STANDARD_IA', 251 'ONEZONE_IA', 'INTELLIGENT_TIERING', 'GLACIER', 252 'DEEP_ARCHIVE'], 253 'help_text': ( 254 "The type of storage to use for the object. " 255 "Valid choices are: STANDARD | REDUCED_REDUNDANCY " 256 "| STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING " 257 "| GLACIER | DEEP_ARCHIVE. " 258 "Defaults to 'STANDARD'")} 259 260 261WEBSITE_REDIRECT = {'name': 'website-redirect', 262 'help_text': ( 263 "If the bucket is configured as a website, " 264 "redirects requests for this object to another object " 265 "in the same bucket or to an external URL. Amazon S3 " 266 "stores the value of this header in the object " 267 "metadata.")} 268 269 270CACHE_CONTROL = {'name': 'cache-control', 271 'help_text': ( 272 "Specifies caching behavior along the " 273 "request/reply chain.")} 274 275 276CONTENT_DISPOSITION = {'name': 'content-disposition', 277 'help_text': ( 278 "Specifies presentational information " 279 "for the object.")} 280 281 282CONTENT_ENCODING = {'name': 'content-encoding', 283 'help_text': ( 284 "Specifies what content encodings have been " 285 "applied to the object and thus what decoding " 286 "mechanisms must be applied to obtain the media-type " 287 "referenced by the Content-Type header field.")} 288 289 290CONTENT_LANGUAGE = {'name': 'content-language', 291 'help_text': ("The language the content is in.")} 292 293 294SOURCE_REGION = {'name': 'source-region', 295 'help_text': ( 296 "When transferring objects from an s3 bucket to an s3 " 297 "bucket, this specifies the region of the source bucket." 298 " Note the region specified by ``--region`` or through " 299 "configuration of the CLI refers to the region of the " 300 "destination bucket. If ``--source-region`` is not " 301 "specified the region of the source will be the same " 302 "as the region of the destination bucket.")} 303 304 305EXPIRES = { 306 'name': 'expires', 307 'help_text': ( 308 "The date and time at which the object is no longer cacheable.") 309} 310 311 312METADATA = { 313 'name': 'metadata', 'cli_type_name': 'map', 314 'schema': { 315 'type': 'map', 316 'key': {'type': 'string'}, 317 'value': {'type': 'string'} 318 }, 319 'help_text': ( 320 "A map of metadata to store with the objects in S3. This will be " 321 "applied to every object which is part of this request. In a sync, this " 322 "means that files which haven't changed won't receive the new metadata. " 323 "When copying between two s3 locations, the metadata-directive " 324 "argument will default to 'REPLACE' unless otherwise specified." 325 ) 326} 327 328 329METADATA_DIRECTIVE = { 330 'name': 'metadata-directive', 'choices': ['COPY', 'REPLACE'], 331 'help_text': ( 332 'Specifies whether the metadata is copied from the source object ' 333 'or replaced with metadata provided when copying S3 objects. ' 334 'Note that if the object is copied over in parts, the source ' 335 'object\'s metadata will not be copied over, no matter the value for ' 336 '``--metadata-directive``, and instead the desired metadata values ' 337 'must be specified as parameters on the command line. ' 338 'Valid values are ``COPY`` and ``REPLACE``. If this parameter is not ' 339 'specified, ``COPY`` will be used by default. If ``REPLACE`` is used, ' 340 'the copied object will only have the metadata values that were' 341 ' specified by the CLI command. Note that if you are ' 342 'using any of the following parameters: ``--content-type``, ' 343 '``content-language``, ``--content-encoding``, ' 344 '``--content-disposition``, ``--cache-control``, or ``--expires``, you ' 345 'will need to specify ``--metadata-directive REPLACE`` for ' 346 'non-multipart copies if you want the copied objects to have the ' 347 'specified metadata values.') 348} 349 350 351INDEX_DOCUMENT = {'name': 'index-document', 352 'help_text': ( 353 'A suffix that is appended to a request that is for ' 354 'a directory on the website endpoint (e.g. if the ' 355 'suffix is index.html and you make a request to ' 356 'samplebucket/images/ the data that is returned ' 357 'will be for the object with the key name ' 358 'images/index.html) The suffix must not be empty and ' 359 'must not include a slash character.')} 360 361 362ERROR_DOCUMENT = {'name': 'error-document', 363 'help_text': ( 364 'The object key name to use when ' 365 'a 4XX class error occurs.')} 366 367 368ONLY_SHOW_ERRORS = {'name': 'only-show-errors', 'action': 'store_true', 369 'help_text': ( 370 'Only errors and warnings are displayed. All other ' 371 'output is suppressed.')} 372 373 374NO_PROGRESS = {'name': 'no-progress', 375 'action': 'store_false', 376 'dest': 'progress', 377 'help_text': ( 378 'File transfer progress is not displayed. This flag ' 379 'is only applied when the quiet and only-show-errors ' 380 'flags are not provided.')} 381 382 383EXPECTED_SIZE = {'name': 'expected-size', 384 'help_text': ( 385 'This argument specifies the expected size of a stream ' 386 'in terms of bytes. Note that this argument is needed ' 387 'only when a stream is being uploaded to s3 and the size ' 388 'is larger than 50GB. Failure to include this argument ' 389 'under these conditions may result in a failed upload ' 390 'due to too many parts in upload.')} 391 392 393PAGE_SIZE = {'name': 'page-size', 'cli_type_name': 'integer', 394 'help_text': ( 395 'The number of results to return in each response to a list ' 396 'operation. The default value is 1000 (the maximum allowed). ' 397 'Using a lower value may help if an operation times out.')} 398 399 400IGNORE_GLACIER_WARNINGS = { 401 'name': 'ignore-glacier-warnings', 'action': 'store_true', 402 'help_text': ( 403 'Turns off glacier warnings. Warnings about an operation that cannot ' 404 'be performed because it involves copying, downloading, or moving ' 405 'a glacier object will no longer be printed to standard error and ' 406 'will no longer cause the return code of the command to be ``2``.' 407 ) 408} 409 410 411FORCE_GLACIER_TRANSFER = { 412 'name': 'force-glacier-transfer', 'action': 'store_true', 413 'help_text': ( 414 'Forces a transfer request on all Glacier objects in a sync or ' 415 'recursive copy.' 416 ) 417} 418 419REQUEST_PAYER = { 420 'name': 'request-payer', 'choices': ['requester'], 421 'nargs': '?', 'const': 'requester', 422 'help_text': ( 423 'Confirms that the requester knows that they will be charged ' 424 'for the request. Bucket owners need not specify this parameter in ' 425 'their requests. Documentation on downloading objects from requester ' 426 'pays buckets can be found at ' 427 'http://docs.aws.amazon.com/AmazonS3/latest/dev/' 428 'ObjectsinRequesterPaysBuckets.html' 429 ) 430} 431 432TRANSFER_ARGS = [DRYRUN, QUIET, INCLUDE, EXCLUDE, ACL, 433 FOLLOW_SYMLINKS, NO_FOLLOW_SYMLINKS, NO_GUESS_MIME_TYPE, 434 SSE, SSE_C, SSE_C_KEY, SSE_KMS_KEY_ID, SSE_C_COPY_SOURCE, 435 SSE_C_COPY_SOURCE_KEY, STORAGE_CLASS, GRANTS, 436 WEBSITE_REDIRECT, CONTENT_TYPE, CACHE_CONTROL, 437 CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LANGUAGE, 438 EXPIRES, SOURCE_REGION, ONLY_SHOW_ERRORS, NO_PROGRESS, 439 PAGE_SIZE, IGNORE_GLACIER_WARNINGS, FORCE_GLACIER_TRANSFER, 440 REQUEST_PAYER] 441 442 443def get_client(session, region, endpoint_url, verify, config=None): 444 return session.create_client('s3', region_name=region, 445 endpoint_url=endpoint_url, verify=verify, 446 config=config) 447 448 449class S3Command(BasicCommand): 450 def _run_main(self, parsed_args, parsed_globals): 451 self.client = get_client(self._session, parsed_globals.region, 452 parsed_globals.endpoint_url, 453 parsed_globals.verify_ssl) 454 455 456class ListCommand(S3Command): 457 NAME = 'ls' 458 DESCRIPTION = ("List S3 objects and common prefixes under a prefix or " 459 "all S3 buckets. Note that the --output and --no-paginate " 460 "arguments are ignored for this command.") 461 USAGE = "<S3Uri> or NONE" 462 ARG_TABLE = [{'name': 'paths', 'nargs': '?', 'default': 's3://', 463 'positional_arg': True, 'synopsis': USAGE}, RECURSIVE, 464 PAGE_SIZE, HUMAN_READABLE, SUMMARIZE, REQUEST_PAYER] 465 466 def _run_main(self, parsed_args, parsed_globals): 467 super(ListCommand, self)._run_main(parsed_args, parsed_globals) 468 self._empty_result = False 469 self._at_first_page = True 470 self._size_accumulator = 0 471 self._total_objects = 0 472 self._human_readable = parsed_args.human_readable 473 path = parsed_args.paths 474 if path.startswith('s3://'): 475 path = path[5:] 476 bucket, key = find_bucket_key(path) 477 if not bucket: 478 self._list_all_buckets() 479 elif parsed_args.dir_op: 480 # Then --recursive was specified. 481 self._list_all_objects_recursive( 482 bucket, key, parsed_args.page_size, parsed_args.request_payer) 483 else: 484 self._list_all_objects( 485 bucket, key, parsed_args.page_size, parsed_args.request_payer) 486 if parsed_args.summarize: 487 self._print_summary() 488 if key: 489 # User specified a key to look for. We should return an rc of one 490 # if there are no matching keys and/or prefixes or return an rc 491 # of zero if there are matching keys or prefixes. 492 return self._check_no_objects() 493 else: 494 # This covers the case when user is trying to list all of of 495 # the buckets or is trying to list the objects of a bucket 496 # (without specifying a key). For both situations, a rc of 0 497 # should be returned because applicable errors are supplied by 498 # the server (i.e. bucket not existing). These errors will be 499 # thrown before reaching the automatic return of rc of zero. 500 return 0 501 502 def _list_all_objects(self, bucket, key, page_size=None, 503 request_payer=None): 504 paginator = self.client.get_paginator('list_objects_v2') 505 paging_args = { 506 'Bucket': bucket, 'Prefix': key, 'Delimiter': '/', 507 'PaginationConfig': {'PageSize': page_size} 508 } 509 if request_payer is not None: 510 paging_args['RequestPayer'] = request_payer 511 iterator = paginator.paginate(**paging_args) 512 for response_data in iterator: 513 self._display_page(response_data) 514 515 def _display_page(self, response_data, use_basename=True): 516 common_prefixes = response_data.get('CommonPrefixes', []) 517 contents = response_data.get('Contents', []) 518 if not contents and not common_prefixes: 519 self._empty_result = True 520 return 521 for common_prefix in common_prefixes: 522 prefix_components = common_prefix['Prefix'].split('/') 523 prefix = prefix_components[-2] 524 pre_string = "PRE".rjust(30, " ") 525 print_str = pre_string + ' ' + prefix + '/\n' 526 uni_print(print_str) 527 for content in contents: 528 last_mod_str = self._make_last_mod_str(content['LastModified']) 529 self._size_accumulator += int(content['Size']) 530 self._total_objects += 1 531 size_str = self._make_size_str(content['Size']) 532 if use_basename: 533 filename_components = content['Key'].split('/') 534 filename = filename_components[-1] 535 else: 536 filename = content['Key'] 537 print_str = last_mod_str + ' ' + size_str + ' ' + \ 538 filename + '\n' 539 uni_print(print_str) 540 self._at_first_page = False 541 542 def _list_all_buckets(self): 543 response_data = self.client.list_buckets() 544 buckets = response_data['Buckets'] 545 for bucket in buckets: 546 last_mod_str = self._make_last_mod_str(bucket['CreationDate']) 547 print_str = last_mod_str + ' ' + bucket['Name'] + '\n' 548 uni_print(print_str) 549 550 def _list_all_objects_recursive(self, bucket, key, page_size=None, 551 request_payer=None): 552 paginator = self.client.get_paginator('list_objects_v2') 553 paging_args = { 554 'Bucket': bucket, 'Prefix': key, 555 'PaginationConfig': {'PageSize': page_size} 556 } 557 if request_payer is not None: 558 paging_args['RequestPayer'] = request_payer 559 iterator = paginator.paginate(**paging_args) 560 for response_data in iterator: 561 self._display_page(response_data, use_basename=False) 562 563 def _check_no_objects(self): 564 if self._empty_result and self._at_first_page: 565 # Nothing was returned in the first page of results when listing 566 # the objects. 567 return 1 568 return 0 569 570 def _make_last_mod_str(self, last_mod): 571 """ 572 This function creates the last modified time string whenever objects 573 or buckets are being listed 574 """ 575 last_mod = parse(last_mod) 576 last_mod = last_mod.astimezone(tzlocal()) 577 last_mod_tup = (str(last_mod.year), str(last_mod.month).zfill(2), 578 str(last_mod.day).zfill(2), 579 str(last_mod.hour).zfill(2), 580 str(last_mod.minute).zfill(2), 581 str(last_mod.second).zfill(2)) 582 last_mod_str = "%s-%s-%s %s:%s:%s" % last_mod_tup 583 return last_mod_str.ljust(19, ' ') 584 585 def _make_size_str(self, size): 586 """ 587 This function creates the size string when objects are being listed. 588 """ 589 if self._human_readable: 590 size_str = human_readable_size(size) 591 else: 592 size_str = str(size) 593 return size_str.rjust(10, ' ') 594 595 def _print_summary(self): 596 """ 597 This function prints a summary of total objects and total bytes 598 """ 599 print_str = str(self._total_objects) 600 uni_print("\nTotal Objects: ".rjust(15, ' ') + print_str + "\n") 601 if self._human_readable: 602 print_str = human_readable_size(self._size_accumulator) 603 else: 604 print_str = str(self._size_accumulator) 605 uni_print("Total Size: ".rjust(15, ' ') + print_str + "\n") 606 607 608class WebsiteCommand(S3Command): 609 NAME = 'website' 610 DESCRIPTION = 'Set the website configuration for a bucket.' 611 USAGE = '<S3Uri>' 612 ARG_TABLE = [{'name': 'paths', 'nargs': 1, 'positional_arg': True, 613 'synopsis': USAGE}, INDEX_DOCUMENT, ERROR_DOCUMENT] 614 615 def _run_main(self, parsed_args, parsed_globals): 616 super(WebsiteCommand, self)._run_main(parsed_args, parsed_globals) 617 bucket = self._get_bucket_name(parsed_args.paths[0]) 618 website_configuration = self._build_website_configuration(parsed_args) 619 self.client.put_bucket_website( 620 Bucket=bucket, WebsiteConfiguration=website_configuration) 621 return 0 622 623 def _build_website_configuration(self, parsed_args): 624 website_config = {} 625 if parsed_args.index_document is not None: 626 website_config['IndexDocument'] = \ 627 {'Suffix': parsed_args.index_document} 628 if parsed_args.error_document is not None: 629 website_config['ErrorDocument'] = \ 630 {'Key': parsed_args.error_document} 631 return website_config 632 633 def _get_bucket_name(self, path): 634 # We support either: 635 # s3://bucketname 636 # bucketname 637 # 638 # We also strip off the trailing slash if a user 639 # accidentally appends a slash. 640 if path.startswith('s3://'): 641 path = path[5:] 642 if path.endswith('/'): 643 path = path[:-1] 644 block_unsupported_resources(path) 645 return path 646 647 648class PresignCommand(S3Command): 649 NAME = 'presign' 650 DESCRIPTION = ( 651 "Generate a pre-signed URL for an Amazon S3 object. This allows " 652 "anyone who receives the pre-signed URL to retrieve the S3 object " 653 "with an HTTP GET request. For sigv4 requests the region needs to be " 654 "configured explicitly." 655 ) 656 USAGE = "<S3Uri>" 657 ARG_TABLE = [{'name': 'path', 658 'positional_arg': True, 'synopsis': USAGE}, 659 {'name': 'expires-in', 'default': 3600, 660 'cli_type_name': 'integer', 661 'help_text': ( 662 'Number of seconds until the pre-signed ' 663 'URL expires. Default is 3600 seconds.')}] 664 665 def _run_main(self, parsed_args, parsed_globals): 666 super(PresignCommand, self)._run_main(parsed_args, parsed_globals) 667 path = parsed_args.path 668 if path.startswith('s3://'): 669 path = path[5:] 670 bucket, key = find_bucket_key(path) 671 url = self.client.generate_presigned_url( 672 'get_object', 673 {'Bucket': bucket, 'Key': key}, 674 ExpiresIn=parsed_args.expires_in 675 ) 676 uni_print(url) 677 uni_print('\n') 678 return 0 679 680 681class S3TransferCommand(S3Command): 682 def _run_main(self, parsed_args, parsed_globals): 683 super(S3TransferCommand, self)._run_main(parsed_args, parsed_globals) 684 self._convert_path_args(parsed_args) 685 params = self._build_call_parameters(parsed_args, {}) 686 cmd_params = CommandParameters(self.NAME, params, 687 self.USAGE) 688 cmd_params.add_region(parsed_globals) 689 cmd_params.add_endpoint_url(parsed_globals) 690 cmd_params.add_verify_ssl(parsed_globals) 691 cmd_params.add_page_size(parsed_args) 692 cmd_params.add_paths(parsed_args.paths) 693 694 runtime_config = transferconfig.RuntimeConfig().build_config( 695 **self._session.get_scoped_config().get('s3', {})) 696 cmd = CommandArchitecture(self._session, self.NAME, 697 cmd_params.parameters, 698 runtime_config) 699 cmd.set_clients() 700 cmd.create_instructions() 701 return cmd.run() 702 703 def _build_call_parameters(self, args, command_params): 704 """ 705 This takes all of the commands in the name space and puts them 706 into a dictionary 707 """ 708 for name, value in vars(args).items(): 709 command_params[name] = value 710 return command_params 711 712 def _convert_path_args(self, parsed_args): 713 if not isinstance(parsed_args.paths, list): 714 parsed_args.paths = [parsed_args.paths] 715 for i in range(len(parsed_args.paths)): 716 path = parsed_args.paths[i] 717 if isinstance(path, six.binary_type): 718 dec_path = path.decode(sys.getfilesystemencoding()) 719 enc_path = dec_path.encode('utf-8') 720 new_path = enc_path.decode('utf-8') 721 parsed_args.paths[i] = new_path 722 723 724class CpCommand(S3TransferCommand): 725 NAME = 'cp' 726 DESCRIPTION = "Copies a local file or S3 object to another location " \ 727 "locally or in S3." 728 USAGE = "<LocalPath> <S3Uri> or <S3Uri> <LocalPath> " \ 729 "or <S3Uri> <S3Uri>" 730 ARG_TABLE = [{'name': 'paths', 'nargs': 2, 'positional_arg': True, 731 'synopsis': USAGE}] + TRANSFER_ARGS + \ 732 [METADATA, METADATA_DIRECTIVE, EXPECTED_SIZE, RECURSIVE] 733 734 735class MvCommand(S3TransferCommand): 736 NAME = 'mv' 737 DESCRIPTION = "Moves a local file or S3 object to " \ 738 "another location locally or in S3." 739 USAGE = "<LocalPath> <S3Uri> or <S3Uri> <LocalPath> " \ 740 "or <S3Uri> <S3Uri>" 741 ARG_TABLE = [{'name': 'paths', 'nargs': 2, 'positional_arg': True, 742 'synopsis': USAGE}] + TRANSFER_ARGS +\ 743 [METADATA, METADATA_DIRECTIVE, RECURSIVE] 744 745class RmCommand(S3TransferCommand): 746 NAME = 'rm' 747 DESCRIPTION = "Deletes an S3 object." 748 USAGE = "<S3Uri>" 749 ARG_TABLE = [{'name': 'paths', 'nargs': 1, 'positional_arg': True, 750 'synopsis': USAGE}, DRYRUN, QUIET, RECURSIVE, REQUEST_PAYER, 751 INCLUDE, EXCLUDE, ONLY_SHOW_ERRORS, PAGE_SIZE] 752 753 754class SyncCommand(S3TransferCommand): 755 NAME = 'sync' 756 DESCRIPTION = "Syncs directories and S3 prefixes. Recursively copies " \ 757 "new and updated files from the source directory to " \ 758 "the destination. Only creates folders in the destination " \ 759 "if they contain one or more files." 760 USAGE = "<LocalPath> <S3Uri> or <S3Uri> " \ 761 "<LocalPath> or <S3Uri> <S3Uri>" 762 ARG_TABLE = [{'name': 'paths', 'nargs': 2, 'positional_arg': True, 763 'synopsis': USAGE}] + TRANSFER_ARGS + \ 764 [METADATA, METADATA_DIRECTIVE] 765 766 767class MbCommand(S3Command): 768 NAME = 'mb' 769 DESCRIPTION = "Creates an S3 bucket." 770 USAGE = "<S3Uri>" 771 ARG_TABLE = [{'name': 'path', 'positional_arg': True, 'synopsis': USAGE}] 772 773 def _run_main(self, parsed_args, parsed_globals): 774 super(MbCommand, self)._run_main(parsed_args, parsed_globals) 775 776 if not parsed_args.path.startswith('s3://'): 777 raise TypeError("%s\nError: Invalid argument type" % self.USAGE) 778 bucket, _ = split_s3_bucket_key(parsed_args.path) 779 780 bucket_config = {'LocationConstraint': self.client.meta.region_name} 781 params = {'Bucket': bucket} 782 if self.client.meta.region_name != 'us-east-1': 783 params['CreateBucketConfiguration'] = bucket_config 784 785 # TODO: Consolidate how we handle return codes and errors 786 try: 787 self.client.create_bucket(**params) 788 uni_print("make_bucket: %s\n" % bucket) 789 return 0 790 except Exception as e: 791 uni_print( 792 "make_bucket failed: %s %s\n" % (parsed_args.path, e), 793 sys.stderr 794 ) 795 return 1 796 797 798class RbCommand(S3Command): 799 NAME = 'rb' 800 DESCRIPTION = ( 801 "Deletes an empty S3 bucket. A bucket must be completely empty " 802 "of objects and versioned objects before it can be deleted. " 803 "However, the ``--force`` parameter can be used to delete " 804 "the non-versioned objects in the bucket before the bucket is " 805 "deleted." 806 ) 807 USAGE = "<S3Uri>" 808 ARG_TABLE = [{'name': 'path', 'positional_arg': True, 809 'synopsis': USAGE}, FORCE] 810 811 def _run_main(self, parsed_args, parsed_globals): 812 super(RbCommand, self)._run_main(parsed_args, parsed_globals) 813 814 if not parsed_args.path.startswith('s3://'): 815 raise TypeError("%s\nError: Invalid argument type" % self.USAGE) 816 bucket, key = split_s3_bucket_key(parsed_args.path) 817 818 if key: 819 raise ValueError('Please specify a valid bucket name only.' 820 ' E.g. s3://%s' % bucket) 821 822 if parsed_args.force: 823 self._force(parsed_args.path, parsed_globals) 824 825 try: 826 self.client.delete_bucket(Bucket=bucket) 827 uni_print("remove_bucket: %s\n" % bucket) 828 return 0 829 except Exception as e: 830 uni_print( 831 "remove_bucket failed: %s %s\n" % (parsed_args.path, e), 832 sys.stderr 833 ) 834 return 1 835 836 def _force(self, path, parsed_globals): 837 """Calls rm --recursive on the given path.""" 838 rm = RmCommand(self._session) 839 rc = rm([path, '--recursive'], parsed_globals) 840 if rc != 0: 841 raise RuntimeError( 842 "remove_bucket failed: Unable to delete all objects in the " 843 "bucket, bucket will not be deleted.") 844 845 846class CommandArchitecture(object): 847 """ 848 This class drives the actual command. A command is performed in two 849 steps. First a list of instructions is generated. This list of 850 instructions identifies which type of components are required based on the 851 name of the command and the parameters passed to the command line. After 852 the instructions are generated the second step involves using the 853 list of instructions to wire together an assortment of generators to 854 perform the command. 855 """ 856 def __init__(self, session, cmd, parameters, runtime_config=None): 857 self.session = session 858 self.cmd = cmd 859 self.parameters = parameters 860 self.instructions = [] 861 self._runtime_config = runtime_config 862 self._endpoint = None 863 self._source_endpoint = None 864 self._client = None 865 self._source_client = None 866 867 def set_clients(self): 868 client_config = None 869 if self.parameters.get('sse') == 'aws:kms': 870 client_config = Config(signature_version='s3v4') 871 self._client = get_client( 872 self.session, 873 region=self.parameters['region'], 874 endpoint_url=self.parameters['endpoint_url'], 875 verify=self.parameters['verify_ssl'], 876 config=client_config 877 ) 878 self._source_client = get_client( 879 self.session, 880 region=self.parameters['region'], 881 endpoint_url=self.parameters['endpoint_url'], 882 verify=self.parameters['verify_ssl'], 883 config=client_config 884 ) 885 if self.parameters['source_region']: 886 if self.parameters['paths_type'] == 's3s3': 887 self._source_client = get_client( 888 self.session, 889 region=self.parameters['source_region'], 890 endpoint_url=None, 891 verify=self.parameters['verify_ssl'], 892 config=client_config 893 ) 894 895 def create_instructions(self): 896 """ 897 This function creates the instructions based on the command name and 898 extra parameters. Note that all commands must have an s3_handler 899 instruction in the instructions and must be at the end of the 900 instruction list because it sends the request to S3 and does not 901 yield anything. 902 """ 903 if self.needs_filegenerator(): 904 self.instructions.append('file_generator') 905 if self.parameters.get('filters'): 906 self.instructions.append('filters') 907 if self.cmd == 'sync': 908 self.instructions.append('comparator') 909 self.instructions.append('file_info_builder') 910 self.instructions.append('s3_handler') 911 912 def needs_filegenerator(self): 913 return not self.parameters['is_stream'] 914 915 def choose_sync_strategies(self): 916 """Determines the sync strategy for the command. 917 918 It defaults to the default sync strategies but a customizable sync 919 strategy can override the default strategy if it returns the instance 920 of its self when the event is emitted. 921 """ 922 sync_strategies = {} 923 # Set the default strategies. 924 sync_strategies['file_at_src_and_dest_sync_strategy'] = \ 925 SizeAndLastModifiedSync() 926 sync_strategies['file_not_at_dest_sync_strategy'] = MissingFileSync() 927 sync_strategies['file_not_at_src_sync_strategy'] = NeverSync() 928 929 # Determine what strategies to override if any. 930 responses = self.session.emit( 931 'choosing-s3-sync-strategy', params=self.parameters) 932 if responses is not None: 933 for response in responses: 934 override_sync_strategy = response[1] 935 if override_sync_strategy is not None: 936 sync_type = override_sync_strategy.sync_type 937 sync_type += '_sync_strategy' 938 sync_strategies[sync_type] = override_sync_strategy 939 940 return sync_strategies 941 942 def run(self): 943 """ 944 This function wires together all of the generators and completes 945 the command. First a dictionary is created that is indexed first by 946 the command name. Then using the instruction, another dictionary 947 can be indexed to obtain the objects corresponding to the 948 particular instruction for that command. To begin the wiring, 949 either a ``FileFormat`` or ``TaskInfo`` object, depending on the 950 command, is put into a list. Then the function enters a while loop 951 that pops off an instruction. It then determines the object needed 952 and calls the call function of the object using the list as the input. 953 Depending on the number of objects in the input list and the number 954 of components in the list corresponding to the instruction, the call 955 method of the component can be called two different ways. If the 956 number of inputs is equal to the number of components a 1:1 mapping of 957 inputs to components is used when calling the call function. If the 958 there are more inputs than components, then a 2:1 mapping of inputs to 959 components is used where the component call method takes two inputs 960 instead of one. Whatever files are yielded from the call function 961 is appended to a list and used as the input for the next repetition 962 of the while loop until there are no more instructions. 963 """ 964 src = self.parameters['src'] 965 dest = self.parameters['dest'] 966 paths_type = self.parameters['paths_type'] 967 files = FileFormat().format(src, dest, self.parameters) 968 rev_files = FileFormat().format(dest, src, self.parameters) 969 970 cmd_translation = { 971 'locals3': 'upload', 972 's3s3': 'copy', 973 's3local': 'download', 974 's3': 'delete' 975 } 976 result_queue = queue.Queue() 977 operation_name = cmd_translation[paths_type] 978 979 fgen_kwargs = { 980 'client': self._source_client, 'operation_name': operation_name, 981 'follow_symlinks': self.parameters['follow_symlinks'], 982 'page_size': self.parameters['page_size'], 983 'result_queue': result_queue, 984 } 985 rgen_kwargs = { 986 'client': self._client, 'operation_name': '', 987 'follow_symlinks': self.parameters['follow_symlinks'], 988 'page_size': self.parameters['page_size'], 989 'result_queue': result_queue, 990 } 991 992 fgen_request_parameters = \ 993 self._get_file_generator_request_parameters_skeleton() 994 self._map_request_payer_params(fgen_request_parameters) 995 self._map_sse_c_params(fgen_request_parameters, paths_type) 996 fgen_kwargs['request_parameters'] = fgen_request_parameters 997 998 rgen_request_parameters = \ 999 self._get_file_generator_request_parameters_skeleton() 1000 self._map_request_payer_params(rgen_request_parameters) 1001 rgen_kwargs['request_parameters'] = rgen_request_parameters 1002 1003 file_generator = FileGenerator(**fgen_kwargs) 1004 rev_generator = FileGenerator(**rgen_kwargs) 1005 stream_dest_path, stream_compare_key = find_dest_path_comp_key(files) 1006 stream_file_info = [FileInfo(src=files['src']['path'], 1007 dest=stream_dest_path, 1008 compare_key=stream_compare_key, 1009 src_type=files['src']['type'], 1010 dest_type=files['dest']['type'], 1011 operation_name=operation_name, 1012 client=self._client, 1013 is_stream=True)] 1014 file_info_builder = FileInfoBuilder( 1015 self._client, self._source_client, self.parameters) 1016 1017 s3_transfer_handler = S3TransferHandlerFactory( 1018 self.parameters, self._runtime_config)( 1019 self._client, result_queue) 1020 1021 sync_strategies = self.choose_sync_strategies() 1022 1023 command_dict = {} 1024 if self.cmd == 'sync': 1025 command_dict = {'setup': [files, rev_files], 1026 'file_generator': [file_generator, 1027 rev_generator], 1028 'filters': [create_filter(self.parameters), 1029 create_filter(self.parameters)], 1030 'comparator': [Comparator(**sync_strategies)], 1031 'file_info_builder': [file_info_builder], 1032 's3_handler': [s3_transfer_handler]} 1033 elif self.cmd == 'cp' and self.parameters['is_stream']: 1034 command_dict = {'setup': [stream_file_info], 1035 's3_handler': [s3_transfer_handler]} 1036 elif self.cmd == 'cp': 1037 command_dict = {'setup': [files], 1038 'file_generator': [file_generator], 1039 'filters': [create_filter(self.parameters)], 1040 'file_info_builder': [file_info_builder], 1041 's3_handler': [s3_transfer_handler]} 1042 elif self.cmd == 'rm': 1043 command_dict = {'setup': [files], 1044 'file_generator': [file_generator], 1045 'filters': [create_filter(self.parameters)], 1046 'file_info_builder': [file_info_builder], 1047 's3_handler': [s3_transfer_handler]} 1048 elif self.cmd == 'mv': 1049 command_dict = {'setup': [files], 1050 'file_generator': [file_generator], 1051 'filters': [create_filter(self.parameters)], 1052 'file_info_builder': [file_info_builder], 1053 's3_handler': [s3_transfer_handler]} 1054 1055 files = command_dict['setup'] 1056 while self.instructions: 1057 instruction = self.instructions.pop(0) 1058 file_list = [] 1059 components = command_dict[instruction] 1060 for i in range(len(components)): 1061 if len(files) > len(components): 1062 file_list.append(components[i].call(*files)) 1063 else: 1064 file_list.append(components[i].call(files[i])) 1065 files = file_list 1066 # This is kinda quirky, but each call through the instructions 1067 # will replaces the files attr with the return value of the 1068 # file_list. The very last call is a single list of 1069 # [s3_handler], and the s3_handler returns the number of 1070 # tasks failed and the number of tasks warned. 1071 # This means that files[0] now contains a namedtuple with 1072 # the number of failed tasks and the number of warned tasks. 1073 # In terms of the RC, we're keeping it simple and saying 1074 # that > 0 failed tasks will give a 1 RC and > 0 warned 1075 # tasks will give a 2 RC. Otherwise a RC of zero is returned. 1076 rc = 0 1077 if files[0].num_tasks_failed > 0: 1078 rc = 1 1079 elif files[0].num_tasks_warned > 0: 1080 rc = 2 1081 return rc 1082 1083 def _get_file_generator_request_parameters_skeleton(self): 1084 return { 1085 'HeadObject': {}, 1086 'ListObjects': {}, 1087 'ListObjectsV2': {} 1088 } 1089 1090 def _map_request_payer_params(self, request_parameters): 1091 RequestParamsMapper.map_head_object_params( 1092 request_parameters['HeadObject'], { 1093 'request_payer': self.parameters.get('request_payer') 1094 } 1095 ) 1096 RequestParamsMapper.map_list_objects_v2_params( 1097 request_parameters['ListObjectsV2'], { 1098 'request_payer': self.parameters.get('request_payer') 1099 } 1100 ) 1101 1102 def _map_sse_c_params(self, request_parameters, paths_type): 1103 # SSE-C may be needed for HeadObject for copies/downloads/deletes 1104 # If the operation is s3 to s3, the FileGenerator should use the 1105 # copy source key and algorithm. Otherwise, use the regular 1106 # SSE-C key and algorithm. Note the reverse FileGenerator does 1107 # not need any of these because it is used only for sync operations 1108 # which only use ListObjects which does not require HeadObject. 1109 RequestParamsMapper.map_head_object_params( 1110 request_parameters['HeadObject'], self.parameters) 1111 if paths_type == 's3s3': 1112 RequestParamsMapper.map_head_object_params( 1113 request_parameters['HeadObject'], { 1114 'sse_c': self.parameters.get('sse_c_copy_source'), 1115 'sse_c_key': self.parameters.get('sse_c_copy_source_key') 1116 } 1117 ) 1118 1119 1120class CommandParameters(object): 1121 """ 1122 This class is used to do some initial error based on the 1123 parameters and arguments passed to the command line. 1124 """ 1125 def __init__(self, cmd, parameters, usage): 1126 """ 1127 Stores command name and parameters. Ensures that the ``dir_op`` flag 1128 is true if a certain command is being used. 1129 1130 :param cmd: The name of the command, e.g. "rm". 1131 :param parameters: A dictionary of parameters. 1132 :param usage: A usage string 1133 1134 """ 1135 self.cmd = cmd 1136 self.parameters = parameters 1137 self.usage = usage 1138 if 'dir_op' not in parameters: 1139 self.parameters['dir_op'] = False 1140 if 'follow_symlinks' not in parameters: 1141 self.parameters['follow_symlinks'] = True 1142 if 'source_region' not in parameters: 1143 self.parameters['source_region'] = None 1144 if self.cmd in ['sync', 'mb', 'rb']: 1145 self.parameters['dir_op'] = True 1146 if self.cmd == 'mv': 1147 self.parameters['is_move'] = True 1148 else: 1149 self.parameters['is_move'] = False 1150 1151 def add_paths(self, paths): 1152 """ 1153 Reformats the parameters dictionary by including a key and 1154 value for the source and the destination. If a destination is 1155 not used the destination is the same as the source to ensure 1156 the destination always have some value. 1157 """ 1158 self.check_path_type(paths) 1159 self._normalize_s3_trailing_slash(paths) 1160 src_path = paths[0] 1161 self.parameters['src'] = src_path 1162 if len(paths) == 2: 1163 self.parameters['dest'] = paths[1] 1164 elif len(paths) == 1: 1165 self.parameters['dest'] = paths[0] 1166 self._validate_streaming_paths() 1167 self._validate_path_args() 1168 self._validate_sse_c_args() 1169 1170 def _validate_streaming_paths(self): 1171 self.parameters['is_stream'] = False 1172 if self.parameters['src'] == '-' or self.parameters['dest'] == '-': 1173 if self.cmd != 'cp' or self.parameters.get('dir_op'): 1174 raise ValueError( 1175 "Streaming currently is only compatible with " 1176 "non-recursive cp commands" 1177 ) 1178 self.parameters['is_stream'] = True 1179 self.parameters['dir_op'] = False 1180 self.parameters['only_show_errors'] = True 1181 1182 def _validate_path_args(self): 1183 # If we're using a mv command, you can't copy the object onto itself. 1184 params = self.parameters 1185 if self.cmd == 'mv' and self._same_path(params['src'], params['dest']): 1186 raise ValueError("Cannot mv a file onto itself: '%s' - '%s'" % ( 1187 params['src'], params['dest'])) 1188 1189 # If the user provided local path does not exist, hard fail because 1190 # we know that we will not be able to upload the file. 1191 if 'locals3' == params['paths_type'] and not params['is_stream']: 1192 if not os.path.exists(params['src']): 1193 raise RuntimeError( 1194 'The user-provided path %s does not exist.' % 1195 params['src']) 1196 # If the operation is downloading to a directory that does not exist, 1197 # create the directories so no warnings are thrown during the syncing 1198 # process. 1199 elif 's3local' == params['paths_type'] and params['dir_op']: 1200 if not os.path.exists(params['dest']): 1201 os.makedirs(params['dest']) 1202 1203 def _same_path(self, src, dest): 1204 if not self.parameters['paths_type'] == 's3s3': 1205 return False 1206 elif src == dest: 1207 return True 1208 elif dest.endswith('/'): 1209 src_base = os.path.basename(src) 1210 return src == os.path.join(dest, src_base) 1211 1212 def _normalize_s3_trailing_slash(self, paths): 1213 for i, path in enumerate(paths): 1214 if path.startswith('s3://'): 1215 bucket, key = find_bucket_key(path[5:]) 1216 if not key and not path.endswith('/'): 1217 # If only a bucket was specified, we need 1218 # to normalize the path and ensure it ends 1219 # with a '/', s3://bucket -> s3://bucket/ 1220 path += '/' 1221 paths[i] = path 1222 1223 def check_path_type(self, paths): 1224 """ 1225 This initial check ensures that the path types for the specified 1226 command is correct. 1227 """ 1228 template_type = {'s3s3': ['cp', 'sync', 'mv'], 1229 's3local': ['cp', 'sync', 'mv'], 1230 'locals3': ['cp', 'sync', 'mv'], 1231 's3': ['mb', 'rb', 'rm'], 1232 'local': [], 'locallocal': []} 1233 paths_type = '' 1234 usage = "usage: aws s3 %s %s" % (self.cmd, 1235 self.usage) 1236 for i in range(len(paths)): 1237 if paths[i].startswith('s3://'): 1238 paths_type = paths_type + 's3' 1239 else: 1240 paths_type = paths_type + 'local' 1241 if self.cmd in template_type[paths_type]: 1242 self.parameters['paths_type'] = paths_type 1243 else: 1244 raise TypeError("%s\nError: Invalid argument type" % usage) 1245 1246 def add_region(self, parsed_globals): 1247 self.parameters['region'] = parsed_globals.region 1248 1249 def add_endpoint_url(self, parsed_globals): 1250 """ 1251 Adds endpoint_url to the parameters. 1252 """ 1253 if 'endpoint_url' in parsed_globals: 1254 self.parameters['endpoint_url'] = getattr(parsed_globals, 1255 'endpoint_url') 1256 else: 1257 self.parameters['endpoint_url'] = None 1258 1259 def add_verify_ssl(self, parsed_globals): 1260 self.parameters['verify_ssl'] = parsed_globals.verify_ssl 1261 1262 def add_page_size(self, parsed_args): 1263 self.parameters['page_size'] = getattr(parsed_args, 'page_size', None) 1264 1265 def _validate_sse_c_args(self): 1266 self._validate_sse_c_arg() 1267 self._validate_sse_c_arg('sse_c_copy_source') 1268 self._validate_sse_c_copy_source_for_paths() 1269 1270 def _validate_sse_c_arg(self, sse_c_type='sse_c'): 1271 sse_c_key_type = sse_c_type + '_key' 1272 sse_c_type_param = '--' + sse_c_type.replace('_', '-') 1273 sse_c_key_type_param = '--' + sse_c_key_type.replace('_', '-') 1274 if self.parameters.get(sse_c_type): 1275 if not self.parameters.get(sse_c_key_type): 1276 raise ValueError( 1277 'It %s is specified, %s must be specified ' 1278 'as well.' % (sse_c_type_param, sse_c_key_type_param) 1279 ) 1280 if self.parameters.get(sse_c_key_type): 1281 if not self.parameters.get(sse_c_type): 1282 raise ValueError( 1283 'It %s is specified, %s must be specified ' 1284 'as well.' % (sse_c_key_type_param, sse_c_type_param) 1285 ) 1286 1287 def _validate_sse_c_copy_source_for_paths(self): 1288 if self.parameters.get('sse_c_copy_source'): 1289 if self.parameters['paths_type'] != 's3s3': 1290 raise ValueError( 1291 '--sse-c-copy-source is only supported for ' 1292 'copy operations.' 1293 ) 1294