/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#pragma once
#include <aws/dms/DatabaseMigrationService_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/dms/model/CompressionTypeValue.h>
#include <aws/dms/model/EncryptionModeValue.h>
#include <aws/dms/model/DataFormatValue.h>
#include <aws/dms/model/EncodingTypeValue.h>
#include <aws/dms/model/ParquetVersionValue.h>
#include <aws/dms/model/DatePartitionSequenceValue.h>
#include <aws/dms/model/DatePartitionDelimiterValue.h>
#include <aws/dms/model/CannedAclForObjectsValue.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace DatabaseMigrationService
{
namespace Model
{

  /**
   * <p>Settings for exporting data to Amazon S3. </p><p><h3>See Also:</h3>   <a
   * href="http://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/S3Settings">AWS API
   * Reference</a></p>
   */
  class AWS_DATABASEMIGRATIONSERVICE_API S3Settings
  {
  public:
    S3Settings();
    S3Settings(Aws::Utils::Json::JsonView jsonValue);
    S3Settings& operator=(Aws::Utils::Json::JsonView jsonValue);
    Aws::Utils::Json::JsonValue Jsonize() const;


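    /*
     * Illustrative usage sketch (not part of the generated API surface). It shows
     * how the fluent With* accessors declared below can be chained to populate the
     * settings for an S3 target endpoint; the role ARN and bucket names are
     * hypothetical placeholders.
     *
     *   S3Settings settings;
     *   settings.WithServiceAccessRoleArn("arn:aws:iam::111122223333:role/example-dms-s3-role")
     *           .WithBucketName("example-dms-target-bucket")
     *           .WithBucketFolder("migrated");
     */
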
    /**
     * <p> The Amazon Resource Name (ARN) used by the service to access the IAM role.
     * The role must allow the <code>iam:PassRole</code> action. It is a required
     * parameter that enables DMS to write and read objects from an S3 bucket.</p>
     */
    inline const Aws::String& GetServiceAccessRoleArn() const{ return m_serviceAccessRoleArn; }

    /**
     * <p> The Amazon Resource Name (ARN) used by the service to access the IAM role.
     * The role must allow the <code>iam:PassRole</code> action. It is a required
     * parameter that enables DMS to write and read objects from an S3 bucket.</p>
     */
    inline bool ServiceAccessRoleArnHasBeenSet() const { return m_serviceAccessRoleArnHasBeenSet; }

    /**
     * <p> The Amazon Resource Name (ARN) used by the service to access the IAM role.
     * The role must allow the <code>iam:PassRole</code> action. It is a required
     * parameter that enables DMS to write and read objects from an S3 bucket.</p>
     */
    inline void SetServiceAccessRoleArn(const Aws::String& value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn = value; }

    /**
     * <p> The Amazon Resource Name (ARN) used by the service to access the IAM role.
     * The role must allow the <code>iam:PassRole</code> action. It is a required
     * parameter that enables DMS to write and read objects from an S3 bucket.</p>
     */
    inline void SetServiceAccessRoleArn(Aws::String&& value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn = std::move(value); }

    /**
     * <p> The Amazon Resource Name (ARN) used by the service to access the IAM role.
     * The role must allow the <code>iam:PassRole</code> action. It is a required
     * parameter that enables DMS to write and read objects from an S3 bucket.</p>
     */
    inline void SetServiceAccessRoleArn(const char* value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn.assign(value); }

    /**
     * <p> The Amazon Resource Name (ARN) used by the service to access the IAM role.
     * The role must allow the <code>iam:PassRole</code> action. It is a required
     * parameter that enables DMS to write and read objects from an S3 bucket.</p>
     */
    inline S3Settings& WithServiceAccessRoleArn(const Aws::String& value) { SetServiceAccessRoleArn(value); return *this;}

    /**
     * <p> The Amazon Resource Name (ARN) used by the service to access the IAM role.
     * The role must allow the <code>iam:PassRole</code> action. It is a required
     * parameter that enables DMS to write and read objects from an S3 bucket.</p>
     */
    inline S3Settings& WithServiceAccessRoleArn(Aws::String&& value) { SetServiceAccessRoleArn(std::move(value)); return *this;}

    /**
     * <p> The Amazon Resource Name (ARN) used by the service to access the IAM role.
     * The role must allow the <code>iam:PassRole</code> action. It is a required
     * parameter that enables DMS to write and read objects from an S3 bucket.</p>
     */
    inline S3Settings& WithServiceAccessRoleArn(const char* value) { SetServiceAccessRoleArn(value); return *this;}


    /**
     * <p> Specifies how tables are defined in the S3 source files only. </p>
     */
    inline const Aws::String& GetExternalTableDefinition() const{ return m_externalTableDefinition; }

    /**
     * <p> Specifies how tables are defined in the S3 source files only. </p>
     */
    inline bool ExternalTableDefinitionHasBeenSet() const { return m_externalTableDefinitionHasBeenSet; }

    /**
     * <p> Specifies how tables are defined in the S3 source files only. </p>
     */
    inline void SetExternalTableDefinition(const Aws::String& value) { m_externalTableDefinitionHasBeenSet = true; m_externalTableDefinition = value; }

    /**
     * <p> Specifies how tables are defined in the S3 source files only. </p>
     */
    inline void SetExternalTableDefinition(Aws::String&& value) { m_externalTableDefinitionHasBeenSet = true; m_externalTableDefinition = std::move(value); }

    /**
     * <p> Specifies how tables are defined in the S3 source files only. </p>
     */
    inline void SetExternalTableDefinition(const char* value) { m_externalTableDefinitionHasBeenSet = true; m_externalTableDefinition.assign(value); }

    /**
     * <p> Specifies how tables are defined in the S3 source files only. </p>
     */
    inline S3Settings& WithExternalTableDefinition(const Aws::String& value) { SetExternalTableDefinition(value); return *this;}

    /**
     * <p> Specifies how tables are defined in the S3 source files only. </p>
     */
    inline S3Settings& WithExternalTableDefinition(Aws::String&& value) { SetExternalTableDefinition(std::move(value)); return *this;}

    /**
     * <p> Specifies how tables are defined in the S3 source files only. </p>
     */
    inline S3Settings& WithExternalTableDefinition(const char* value) { SetExternalTableDefinition(value); return *this;}


    /**
     * <p> The delimiter used to separate rows in the .csv file for both source and
     * target. The default is a carriage return (<code>\n</code>). </p>
     */
    inline const Aws::String& GetCsvRowDelimiter() const{ return m_csvRowDelimiter; }

    /**
     * <p> The delimiter used to separate rows in the .csv file for both source and
     * target. The default is a carriage return (<code>\n</code>). </p>
     */
    inline bool CsvRowDelimiterHasBeenSet() const { return m_csvRowDelimiterHasBeenSet; }

    /**
     * <p> The delimiter used to separate rows in the .csv file for both source and
     * target. The default is a carriage return (<code>\n</code>). </p>
     */
    inline void SetCsvRowDelimiter(const Aws::String& value) { m_csvRowDelimiterHasBeenSet = true; m_csvRowDelimiter = value; }

    /**
     * <p> The delimiter used to separate rows in the .csv file for both source and
     * target. The default is a carriage return (<code>\n</code>). </p>
     */
    inline void SetCsvRowDelimiter(Aws::String&& value) { m_csvRowDelimiterHasBeenSet = true; m_csvRowDelimiter = std::move(value); }

    /**
     * <p> The delimiter used to separate rows in the .csv file for both source and
     * target. The default is a carriage return (<code>\n</code>). </p>
     */
    inline void SetCsvRowDelimiter(const char* value) { m_csvRowDelimiterHasBeenSet = true; m_csvRowDelimiter.assign(value); }

    /**
     * <p> The delimiter used to separate rows in the .csv file for both source and
     * target. The default is a carriage return (<code>\n</code>). </p>
     */
    inline S3Settings& WithCsvRowDelimiter(const Aws::String& value) { SetCsvRowDelimiter(value); return *this;}

    /**
     * <p> The delimiter used to separate rows in the .csv file for both source and
     * target. The default is a carriage return (<code>\n</code>). </p>
     */
    inline S3Settings& WithCsvRowDelimiter(Aws::String&& value) { SetCsvRowDelimiter(std::move(value)); return *this;}

    /**
     * <p> The delimiter used to separate rows in the .csv file for both source and
     * target. The default is a carriage return (<code>\n</code>). </p>
     */
    inline S3Settings& WithCsvRowDelimiter(const char* value) { SetCsvRowDelimiter(value); return *this;}


    /**
     * <p> The delimiter used to separate columns in the .csv file for both source and
     * target. The default is a comma. </p>
     */
    inline const Aws::String& GetCsvDelimiter() const{ return m_csvDelimiter; }

    /**
     * <p> The delimiter used to separate columns in the .csv file for both source and
     * target. The default is a comma. </p>
     */
    inline bool CsvDelimiterHasBeenSet() const { return m_csvDelimiterHasBeenSet; }

    /**
     * <p> The delimiter used to separate columns in the .csv file for both source and
     * target. The default is a comma. </p>
     */
    inline void SetCsvDelimiter(const Aws::String& value) { m_csvDelimiterHasBeenSet = true; m_csvDelimiter = value; }

    /**
     * <p> The delimiter used to separate columns in the .csv file for both source and
     * target. The default is a comma. </p>
     */
    inline void SetCsvDelimiter(Aws::String&& value) { m_csvDelimiterHasBeenSet = true; m_csvDelimiter = std::move(value); }

    /**
     * <p> The delimiter used to separate columns in the .csv file for both source and
     * target. The default is a comma. </p>
     */
    inline void SetCsvDelimiter(const char* value) { m_csvDelimiterHasBeenSet = true; m_csvDelimiter.assign(value); }

    /**
     * <p> The delimiter used to separate columns in the .csv file for both source and
     * target. The default is a comma. </p>
     */
    inline S3Settings& WithCsvDelimiter(const Aws::String& value) { SetCsvDelimiter(value); return *this;}

    /**
     * <p> The delimiter used to separate columns in the .csv file for both source and
     * target. The default is a comma. </p>
     */
    inline S3Settings& WithCsvDelimiter(Aws::String&& value) { SetCsvDelimiter(std::move(value)); return *this;}

    /**
     * <p> The delimiter used to separate columns in the .csv file for both source and
     * target. The default is a comma. </p>
     */
    inline S3Settings& WithCsvDelimiter(const char* value) { SetCsvDelimiter(value); return *this;}

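    /*
     * Illustrative sketch of overriding the .csv defaults documented above,
     * assuming a pipe-delimited export with Windows-style line endings is wanted
     * (the delimiter choices here are hypothetical examples):
     *
     *   S3Settings csvSettings;
     *   csvSettings.WithCsvDelimiter("|")
     *              .WithCsvRowDelimiter("\r\n");
     */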

    /**
     * <p> An optional parameter to set a folder name in the S3 bucket. If provided,
     * tables are created in the path <code>
     * <i>bucketFolder</i>/<i>schema_name</i>/<i>table_name</i>/</code>. If this
     * parameter isn't specified, then the path used is <code>
     * <i>schema_name</i>/<i>table_name</i>/</code>. </p>
     */
    inline const Aws::String& GetBucketFolder() const{ return m_bucketFolder; }

    /**
     * <p> An optional parameter to set a folder name in the S3 bucket. If provided,
     * tables are created in the path <code>
     * <i>bucketFolder</i>/<i>schema_name</i>/<i>table_name</i>/</code>. If this
     * parameter isn't specified, then the path used is <code>
     * <i>schema_name</i>/<i>table_name</i>/</code>. </p>
     */
    inline bool BucketFolderHasBeenSet() const { return m_bucketFolderHasBeenSet; }

    /**
     * <p> An optional parameter to set a folder name in the S3 bucket. If provided,
     * tables are created in the path <code>
     * <i>bucketFolder</i>/<i>schema_name</i>/<i>table_name</i>/</code>. If this
     * parameter isn't specified, then the path used is <code>
     * <i>schema_name</i>/<i>table_name</i>/</code>. </p>
     */
    inline void SetBucketFolder(const Aws::String& value) { m_bucketFolderHasBeenSet = true; m_bucketFolder = value; }

    /**
     * <p> An optional parameter to set a folder name in the S3 bucket. If provided,
     * tables are created in the path <code>
     * <i>bucketFolder</i>/<i>schema_name</i>/<i>table_name</i>/</code>. If this
     * parameter isn't specified, then the path used is <code>
     * <i>schema_name</i>/<i>table_name</i>/</code>. </p>
     */
    inline void SetBucketFolder(Aws::String&& value) { m_bucketFolderHasBeenSet = true; m_bucketFolder = std::move(value); }

    /**
     * <p> An optional parameter to set a folder name in the S3 bucket. If provided,
     * tables are created in the path <code>
     * <i>bucketFolder</i>/<i>schema_name</i>/<i>table_name</i>/</code>. If this
     * parameter isn't specified, then the path used is <code>
     * <i>schema_name</i>/<i>table_name</i>/</code>. </p>
     */
    inline void SetBucketFolder(const char* value) { m_bucketFolderHasBeenSet = true; m_bucketFolder.assign(value); }

    /**
     * <p> An optional parameter to set a folder name in the S3 bucket. If provided,
     * tables are created in the path <code>
     * <i>bucketFolder</i>/<i>schema_name</i>/<i>table_name</i>/</code>. If this
     * parameter isn't specified, then the path used is <code>
     * <i>schema_name</i>/<i>table_name</i>/</code>. </p>
     */
    inline S3Settings& WithBucketFolder(const Aws::String& value) { SetBucketFolder(value); return *this;}

    /**
     * <p> An optional parameter to set a folder name in the S3 bucket. If provided,
     * tables are created in the path <code>
     * <i>bucketFolder</i>/<i>schema_name</i>/<i>table_name</i>/</code>. If this
     * parameter isn't specified, then the path used is <code>
     * <i>schema_name</i>/<i>table_name</i>/</code>. </p>
     */
    inline S3Settings& WithBucketFolder(Aws::String&& value) { SetBucketFolder(std::move(value)); return *this;}

    /**
     * <p> An optional parameter to set a folder name in the S3 bucket. If provided,
     * tables are created in the path <code>
     * <i>bucketFolder</i>/<i>schema_name</i>/<i>table_name</i>/</code>. If this
     * parameter isn't specified, then the path used is <code>
     * <i>schema_name</i>/<i>table_name</i>/</code>. </p>
     */
    inline S3Settings& WithBucketFolder(const char* value) { SetBucketFolder(value); return *this;}


    /**
     * <p> The name of the S3 bucket. </p>
     */
    inline const Aws::String& GetBucketName() const{ return m_bucketName; }

    /**
     * <p> The name of the S3 bucket. </p>
     */
    inline bool BucketNameHasBeenSet() const { return m_bucketNameHasBeenSet; }

    /**
     * <p> The name of the S3 bucket. </p>
     */
    inline void SetBucketName(const Aws::String& value) { m_bucketNameHasBeenSet = true; m_bucketName = value; }

    /**
     * <p> The name of the S3 bucket. </p>
     */
    inline void SetBucketName(Aws::String&& value) { m_bucketNameHasBeenSet = true; m_bucketName = std::move(value); }

    /**
     * <p> The name of the S3 bucket. </p>
     */
    inline void SetBucketName(const char* value) { m_bucketNameHasBeenSet = true; m_bucketName.assign(value); }

    /**
     * <p> The name of the S3 bucket. </p>
     */
    inline S3Settings& WithBucketName(const Aws::String& value) { SetBucketName(value); return *this;}

    /**
     * <p> The name of the S3 bucket. </p>
     */
    inline S3Settings& WithBucketName(Aws::String&& value) { SetBucketName(std::move(value)); return *this;}

    /**
     * <p> The name of the S3 bucket. </p>
     */
    inline S3Settings& WithBucketName(const char* value) { SetBucketName(value); return *this;}


    /**
     * <p>An optional parameter to use GZIP to compress the target files. Set to GZIP
     * to compress the target files. Either set this parameter to NONE (the default) or
     * don't use it to leave the files uncompressed. This parameter applies to both
     * .csv and .parquet file formats. </p>
     */
    inline const CompressionTypeValue& GetCompressionType() const{ return m_compressionType; }

    /**
     * <p>An optional parameter to use GZIP to compress the target files. Set to GZIP
     * to compress the target files. Either set this parameter to NONE (the default) or
     * don't use it to leave the files uncompressed. This parameter applies to both
     * .csv and .parquet file formats. </p>
     */
    inline bool CompressionTypeHasBeenSet() const { return m_compressionTypeHasBeenSet; }

    /**
     * <p>An optional parameter to use GZIP to compress the target files. Set to GZIP
     * to compress the target files. Either set this parameter to NONE (the default) or
     * don't use it to leave the files uncompressed. This parameter applies to both
     * .csv and .parquet file formats. </p>
     */
    inline void SetCompressionType(const CompressionTypeValue& value) { m_compressionTypeHasBeenSet = true; m_compressionType = value; }

    /**
     * <p>An optional parameter to use GZIP to compress the target files. Set to GZIP
     * to compress the target files. Either set this parameter to NONE (the default) or
     * don't use it to leave the files uncompressed. This parameter applies to both
     * .csv and .parquet file formats. </p>
     */
    inline void SetCompressionType(CompressionTypeValue&& value) { m_compressionTypeHasBeenSet = true; m_compressionType = std::move(value); }

    /**
     * <p>An optional parameter to use GZIP to compress the target files. Set to GZIP
     * to compress the target files. Either set this parameter to NONE (the default) or
     * don't use it to leave the files uncompressed. This parameter applies to both
     * .csv and .parquet file formats. </p>
     */
    inline S3Settings& WithCompressionType(const CompressionTypeValue& value) { SetCompressionType(value); return *this;}

    /**
     * <p>An optional parameter to use GZIP to compress the target files. Set to GZIP
     * to compress the target files. Either set this parameter to NONE (the default) or
     * don't use it to leave the files uncompressed. This parameter applies to both
     * .csv and .parquet file formats. </p>
     */
    inline S3Settings& WithCompressionType(CompressionTypeValue&& value) { SetCompressionType(std::move(value)); return *this;}


    /**
     * <p>The type of server-side encryption that you want to use for your data. This
     * encryption type is part of the endpoint settings or the extra connections
     * attributes for Amazon S3. You can choose either <code>SSE_S3</code> (the
     * default) or <code>SSE_KMS</code>. </p>  <p>For the
     * <code>ModifyEndpoint</code> operation, you can change the existing value of the
     * <code>EncryptionMode</code> parameter from <code>SSE_KMS</code> to
     * <code>SSE_S3</code>. But you can’t change the existing value from
     * <code>SSE_S3</code> to <code>SSE_KMS</code>.</p>  <p>To use
     * <code>SSE_S3</code>, you need an Identity and Access Management (IAM) role with
     * permission to allow <code>"arn:aws:s3:::dms-*"</code> to use the following
     * actions:</p> <ul> <li> <p> <code>s3:CreateBucket</code> </p> </li> <li> <p>
     * <code>s3:ListBucket</code> </p> </li> <li> <p> <code>s3:DeleteBucket</code> </p>
     * </li> <li> <p> <code>s3:GetBucketLocation</code> </p> </li> <li> <p>
     * <code>s3:GetObject</code> </p> </li> <li> <p> <code>s3:PutObject</code> </p>
     * </li> <li> <p> <code>s3:DeleteObject</code> </p> </li> <li> <p>
     * <code>s3:GetObjectVersion</code> </p> </li> <li> <p>
     * <code>s3:GetBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:PutBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:DeleteBucketPolicy</code> </p> </li> </ul>
     */
    inline const EncryptionModeValue& GetEncryptionMode() const{ return m_encryptionMode; }

    /**
     * <p>The type of server-side encryption that you want to use for your data. This
     * encryption type is part of the endpoint settings or the extra connections
     * attributes for Amazon S3. You can choose either <code>SSE_S3</code> (the
     * default) or <code>SSE_KMS</code>. </p>  <p>For the
     * <code>ModifyEndpoint</code> operation, you can change the existing value of the
     * <code>EncryptionMode</code> parameter from <code>SSE_KMS</code> to
     * <code>SSE_S3</code>. But you can’t change the existing value from
     * <code>SSE_S3</code> to <code>SSE_KMS</code>.</p>  <p>To use
     * <code>SSE_S3</code>, you need an Identity and Access Management (IAM) role with
     * permission to allow <code>"arn:aws:s3:::dms-*"</code> to use the following
     * actions:</p> <ul> <li> <p> <code>s3:CreateBucket</code> </p> </li> <li> <p>
     * <code>s3:ListBucket</code> </p> </li> <li> <p> <code>s3:DeleteBucket</code> </p>
     * </li> <li> <p> <code>s3:GetBucketLocation</code> </p> </li> <li> <p>
     * <code>s3:GetObject</code> </p> </li> <li> <p> <code>s3:PutObject</code> </p>
     * </li> <li> <p> <code>s3:DeleteObject</code> </p> </li> <li> <p>
     * <code>s3:GetObjectVersion</code> </p> </li> <li> <p>
     * <code>s3:GetBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:PutBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:DeleteBucketPolicy</code> </p> </li> </ul>
     */
    inline bool EncryptionModeHasBeenSet() const { return m_encryptionModeHasBeenSet; }

    /**
     * <p>The type of server-side encryption that you want to use for your data. This
     * encryption type is part of the endpoint settings or the extra connections
     * attributes for Amazon S3. You can choose either <code>SSE_S3</code> (the
     * default) or <code>SSE_KMS</code>. </p>  <p>For the
     * <code>ModifyEndpoint</code> operation, you can change the existing value of the
     * <code>EncryptionMode</code> parameter from <code>SSE_KMS</code> to
     * <code>SSE_S3</code>. But you can’t change the existing value from
     * <code>SSE_S3</code> to <code>SSE_KMS</code>.</p>  <p>To use
     * <code>SSE_S3</code>, you need an Identity and Access Management (IAM) role with
     * permission to allow <code>"arn:aws:s3:::dms-*"</code> to use the following
     * actions:</p> <ul> <li> <p> <code>s3:CreateBucket</code> </p> </li> <li> <p>
     * <code>s3:ListBucket</code> </p> </li> <li> <p> <code>s3:DeleteBucket</code> </p>
     * </li> <li> <p> <code>s3:GetBucketLocation</code> </p> </li> <li> <p>
     * <code>s3:GetObject</code> </p> </li> <li> <p> <code>s3:PutObject</code> </p>
     * </li> <li> <p> <code>s3:DeleteObject</code> </p> </li> <li> <p>
     * <code>s3:GetObjectVersion</code> </p> </li> <li> <p>
     * <code>s3:GetBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:PutBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:DeleteBucketPolicy</code> </p> </li> </ul>
     */
    inline void SetEncryptionMode(const EncryptionModeValue& value) { m_encryptionModeHasBeenSet = true; m_encryptionMode = value; }

    /**
     * <p>The type of server-side encryption that you want to use for your data. This
     * encryption type is part of the endpoint settings or the extra connections
     * attributes for Amazon S3. You can choose either <code>SSE_S3</code> (the
     * default) or <code>SSE_KMS</code>. </p>  <p>For the
     * <code>ModifyEndpoint</code> operation, you can change the existing value of the
     * <code>EncryptionMode</code> parameter from <code>SSE_KMS</code> to
     * <code>SSE_S3</code>. But you can’t change the existing value from
     * <code>SSE_S3</code> to <code>SSE_KMS</code>.</p>  <p>To use
     * <code>SSE_S3</code>, you need an Identity and Access Management (IAM) role with
     * permission to allow <code>"arn:aws:s3:::dms-*"</code> to use the following
     * actions:</p> <ul> <li> <p> <code>s3:CreateBucket</code> </p> </li> <li> <p>
     * <code>s3:ListBucket</code> </p> </li> <li> <p> <code>s3:DeleteBucket</code> </p>
     * </li> <li> <p> <code>s3:GetBucketLocation</code> </p> </li> <li> <p>
     * <code>s3:GetObject</code> </p> </li> <li> <p> <code>s3:PutObject</code> </p>
     * </li> <li> <p> <code>s3:DeleteObject</code> </p> </li> <li> <p>
     * <code>s3:GetObjectVersion</code> </p> </li> <li> <p>
     * <code>s3:GetBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:PutBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:DeleteBucketPolicy</code> </p> </li> </ul>
     */
    inline void SetEncryptionMode(EncryptionModeValue&& value) { m_encryptionModeHasBeenSet = true; m_encryptionMode = std::move(value); }

    /**
     * <p>The type of server-side encryption that you want to use for your data. This
     * encryption type is part of the endpoint settings or the extra connections
     * attributes for Amazon S3. You can choose either <code>SSE_S3</code> (the
     * default) or <code>SSE_KMS</code>. </p>  <p>For the
     * <code>ModifyEndpoint</code> operation, you can change the existing value of the
     * <code>EncryptionMode</code> parameter from <code>SSE_KMS</code> to
     * <code>SSE_S3</code>. But you can’t change the existing value from
     * <code>SSE_S3</code> to <code>SSE_KMS</code>.</p>  <p>To use
     * <code>SSE_S3</code>, you need an Identity and Access Management (IAM) role with
     * permission to allow <code>"arn:aws:s3:::dms-*"</code> to use the following
     * actions:</p> <ul> <li> <p> <code>s3:CreateBucket</code> </p> </li> <li> <p>
     * <code>s3:ListBucket</code> </p> </li> <li> <p> <code>s3:DeleteBucket</code> </p>
     * </li> <li> <p> <code>s3:GetBucketLocation</code> </p> </li> <li> <p>
     * <code>s3:GetObject</code> </p> </li> <li> <p> <code>s3:PutObject</code> </p>
     * </li> <li> <p> <code>s3:DeleteObject</code> </p> </li> <li> <p>
     * <code>s3:GetObjectVersion</code> </p> </li> <li> <p>
     * <code>s3:GetBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:PutBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:DeleteBucketPolicy</code> </p> </li> </ul>
     */
    inline S3Settings& WithEncryptionMode(const EncryptionModeValue& value) { SetEncryptionMode(value); return *this;}

    /**
     * <p>The type of server-side encryption that you want to use for your data. This
     * encryption type is part of the endpoint settings or the extra connections
     * attributes for Amazon S3. You can choose either <code>SSE_S3</code> (the
     * default) or <code>SSE_KMS</code>. </p>  <p>For the
     * <code>ModifyEndpoint</code> operation, you can change the existing value of the
     * <code>EncryptionMode</code> parameter from <code>SSE_KMS</code> to
     * <code>SSE_S3</code>. But you can’t change the existing value from
     * <code>SSE_S3</code> to <code>SSE_KMS</code>.</p>  <p>To use
     * <code>SSE_S3</code>, you need an Identity and Access Management (IAM) role with
     * permission to allow <code>"arn:aws:s3:::dms-*"</code> to use the following
     * actions:</p> <ul> <li> <p> <code>s3:CreateBucket</code> </p> </li> <li> <p>
     * <code>s3:ListBucket</code> </p> </li> <li> <p> <code>s3:DeleteBucket</code> </p>
     * </li> <li> <p> <code>s3:GetBucketLocation</code> </p> </li> <li> <p>
     * <code>s3:GetObject</code> </p> </li> <li> <p> <code>s3:PutObject</code> </p>
     * </li> <li> <p> <code>s3:DeleteObject</code> </p> </li> <li> <p>
     * <code>s3:GetObjectVersion</code> </p> </li> <li> <p>
     * <code>s3:GetBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:PutBucketPolicy</code> </p> </li> <li> <p>
     * <code>s3:DeleteBucketPolicy</code> </p> </li> </ul>
     */
    inline S3Settings& WithEncryptionMode(EncryptionModeValue&& value) { SetEncryptionMode(std::move(value)); return *this;}


    /**
     * <p>If you are using <code>SSE_KMS</code> for the <code>EncryptionMode</code>,
     * provide the KMS key ID. The key that you use needs an attached policy that
     * enables Identity and Access Management (IAM) user permissions and allows use of
     * the key.</p> <p>Here is a CLI example: <code>aws dms create-endpoint
     * --endpoint-identifier <i>value</i> --endpoint-type target --engine-name s3
     * --s3-settings
     * ServiceAccessRoleArn=<i>value</i>,BucketFolder=<i>value</i>,BucketName=<i>value</i>,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=<i>value</i>
     * </code> </p>
     */
    inline const Aws::String& GetServerSideEncryptionKmsKeyId() const{ return m_serverSideEncryptionKmsKeyId; }

    /**
     * <p>If you are using <code>SSE_KMS</code> for the <code>EncryptionMode</code>,
     * provide the KMS key ID. The key that you use needs an attached policy that
     * enables Identity and Access Management (IAM) user permissions and allows use of
     * the key.</p> <p>Here is a CLI example: <code>aws dms create-endpoint
     * --endpoint-identifier <i>value</i> --endpoint-type target --engine-name s3
     * --s3-settings
     * ServiceAccessRoleArn=<i>value</i>,BucketFolder=<i>value</i>,BucketName=<i>value</i>,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=<i>value</i>
     * </code> </p>
     */
    inline bool ServerSideEncryptionKmsKeyIdHasBeenSet() const { return m_serverSideEncryptionKmsKeyIdHasBeenSet; }

    /**
     * <p>If you are using <code>SSE_KMS</code> for the <code>EncryptionMode</code>,
     * provide the KMS key ID. The key that you use needs an attached policy that
     * enables Identity and Access Management (IAM) user permissions and allows use of
     * the key.</p> <p>Here is a CLI example: <code>aws dms create-endpoint
     * --endpoint-identifier <i>value</i> --endpoint-type target --engine-name s3
     * --s3-settings
     * ServiceAccessRoleArn=<i>value</i>,BucketFolder=<i>value</i>,BucketName=<i>value</i>,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=<i>value</i>
     * </code> </p>
     */
    inline void SetServerSideEncryptionKmsKeyId(const Aws::String& value) { m_serverSideEncryptionKmsKeyIdHasBeenSet = true; m_serverSideEncryptionKmsKeyId = value; }

    /**
     * <p>If you are using <code>SSE_KMS</code> for the <code>EncryptionMode</code>,
     * provide the KMS key ID. The key that you use needs an attached policy that
     * enables Identity and Access Management (IAM) user permissions and allows use of
     * the key.</p> <p>Here is a CLI example: <code>aws dms create-endpoint
     * --endpoint-identifier <i>value</i> --endpoint-type target --engine-name s3
     * --s3-settings
     * ServiceAccessRoleArn=<i>value</i>,BucketFolder=<i>value</i>,BucketName=<i>value</i>,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=<i>value</i>
     * </code> </p>
     */
    inline void SetServerSideEncryptionKmsKeyId(Aws::String&& value) { m_serverSideEncryptionKmsKeyIdHasBeenSet = true; m_serverSideEncryptionKmsKeyId = std::move(value); }

    /**
     * <p>If you are using <code>SSE_KMS</code> for the <code>EncryptionMode</code>,
     * provide the KMS key ID. The key that you use needs an attached policy that
     * enables Identity and Access Management (IAM) user permissions and allows use of
     * the key.</p> <p>Here is a CLI example: <code>aws dms create-endpoint
     * --endpoint-identifier <i>value</i> --endpoint-type target --engine-name s3
     * --s3-settings
     * ServiceAccessRoleArn=<i>value</i>,BucketFolder=<i>value</i>,BucketName=<i>value</i>,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=<i>value</i>
     * </code> </p>
     */
    inline void SetServerSideEncryptionKmsKeyId(const char* value) { m_serverSideEncryptionKmsKeyIdHasBeenSet = true; m_serverSideEncryptionKmsKeyId.assign(value); }

    /**
     * <p>If you are using <code>SSE_KMS</code> for the <code>EncryptionMode</code>,
     * provide the KMS key ID. The key that you use needs an attached policy that
     * enables Identity and Access Management (IAM) user permissions and allows use of
     * the key.</p> <p>Here is a CLI example: <code>aws dms create-endpoint
     * --endpoint-identifier <i>value</i> --endpoint-type target --engine-name s3
     * --s3-settings
     * ServiceAccessRoleArn=<i>value</i>,BucketFolder=<i>value</i>,BucketName=<i>value</i>,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=<i>value</i>
     * </code> </p>
     */
    inline S3Settings& WithServerSideEncryptionKmsKeyId(const Aws::String& value) { SetServerSideEncryptionKmsKeyId(value); return *this;}

    /**
     * <p>If you are using <code>SSE_KMS</code> for the <code>EncryptionMode</code>,
     * provide the KMS key ID. The key that you use needs an attached policy that
     * enables Identity and Access Management (IAM) user permissions and allows use of
     * the key.</p> <p>Here is a CLI example: <code>aws dms create-endpoint
     * --endpoint-identifier <i>value</i> --endpoint-type target --engine-name s3
     * --s3-settings
     * ServiceAccessRoleArn=<i>value</i>,BucketFolder=<i>value</i>,BucketName=<i>value</i>,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=<i>value</i>
     * </code> </p>
     */
    inline S3Settings& WithServerSideEncryptionKmsKeyId(Aws::String&& value) { SetServerSideEncryptionKmsKeyId(std::move(value)); return *this;}

    /**
     * <p>If you are using <code>SSE_KMS</code> for the <code>EncryptionMode</code>,
     * provide the KMS key ID. The key that you use needs an attached policy that
     * enables Identity and Access Management (IAM) user permissions and allows use of
     * the key.</p> <p>Here is a CLI example: <code>aws dms create-endpoint
     * --endpoint-identifier <i>value</i> --endpoint-type target --engine-name s3
     * --s3-settings
     * ServiceAccessRoleArn=<i>value</i>,BucketFolder=<i>value</i>,BucketName=<i>value</i>,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=<i>value</i>
     * </code> </p>
     */
    inline S3Settings& WithServerSideEncryptionKmsKeyId(const char* value) { SetServerSideEncryptionKmsKeyId(value); return *this;}

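    /*
     * Illustrative SSE-KMS sketch tying together the two settings above. The key
     * ARN is a hypothetical placeholder, and the exact enumerator spelling is
     * assumed to follow EncryptionModeValue.h:
     *
     *   S3Settings encrypted;
     *   encrypted.WithEncryptionMode(EncryptionModeValue::sse_kms)   // assumed enumerator name
     *            .WithServerSideEncryptionKmsKeyId(
     *                "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID");
     */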

    /**
     * <p>The format of the data that you want to use for output. You can choose one of
     * the following: </p> <ul> <li> <p> <code>csv</code> : This is a row-based file
     * format with comma-separated values (.csv). </p> </li> <li> <p>
     * <code>parquet</code> : Apache Parquet (.parquet) is a columnar storage file
     * format that features efficient compression and provides faster query response.
     * </p> </li> </ul>
     */
    inline const DataFormatValue& GetDataFormat() const{ return m_dataFormat; }

    /**
     * <p>The format of the data that you want to use for output. You can choose one of
     * the following: </p> <ul> <li> <p> <code>csv</code> : This is a row-based file
     * format with comma-separated values (.csv). </p> </li> <li> <p>
     * <code>parquet</code> : Apache Parquet (.parquet) is a columnar storage file
     * format that features efficient compression and provides faster query response.
     * </p> </li> </ul>
     */
    inline bool DataFormatHasBeenSet() const { return m_dataFormatHasBeenSet; }

    /**
     * <p>The format of the data that you want to use for output. You can choose one of
     * the following: </p> <ul> <li> <p> <code>csv</code> : This is a row-based file
     * format with comma-separated values (.csv). </p> </li> <li> <p>
     * <code>parquet</code> : Apache Parquet (.parquet) is a columnar storage file
     * format that features efficient compression and provides faster query response.
     * </p> </li> </ul>
     */
    inline void SetDataFormat(const DataFormatValue& value) { m_dataFormatHasBeenSet = true; m_dataFormat = value; }

    /**
     * <p>The format of the data that you want to use for output. You can choose one of
     * the following: </p> <ul> <li> <p> <code>csv</code> : This is a row-based file
     * format with comma-separated values (.csv). </p> </li> <li> <p>
     * <code>parquet</code> : Apache Parquet (.parquet) is a columnar storage file
     * format that features efficient compression and provides faster query response.
     * </p> </li> </ul>
     */
    inline void SetDataFormat(DataFormatValue&& value) { m_dataFormatHasBeenSet = true; m_dataFormat = std::move(value); }

    /**
     * <p>The format of the data that you want to use for output. You can choose one of
     * the following: </p> <ul> <li> <p> <code>csv</code> : This is a row-based file
     * format with comma-separated values (.csv). </p> </li> <li> <p>
     * <code>parquet</code> : Apache Parquet (.parquet) is a columnar storage file
     * format that features efficient compression and provides faster query response.
     * </p> </li> </ul>
     */
    inline S3Settings& WithDataFormat(const DataFormatValue& value) { SetDataFormat(value); return *this;}

    /**
     * <p>The format of the data that you want to use for output. You can choose one of
     * the following: </p> <ul> <li> <p> <code>csv</code> : This is a row-based file
     * format with comma-separated values (.csv). </p> </li> <li> <p>
     * <code>parquet</code> : Apache Parquet (.parquet) is a columnar storage file
     * format that features efficient compression and provides faster query response.
     * </p> </li> </ul>
     */
    inline S3Settings& WithDataFormat(DataFormatValue&& value) { SetDataFormat(std::move(value)); return *this;}


    /**
     * <p>The type of encoding you are using: </p> <ul> <li> <p>
     * <code>RLE_DICTIONARY</code> uses a combination of bit-packing and run-length
     * encoding to store repeated values more efficiently. This is the default.</p>
     * </li> <li> <p> <code>PLAIN</code> doesn't use encoding at all. Values are stored
     * as they are.</p> </li> <li> <p> <code>PLAIN_DICTIONARY</code> builds a
     * dictionary of the values encountered in a given column. The dictionary is stored
     * in a dictionary page for each column chunk.</p> </li> </ul>
     */
    inline const EncodingTypeValue& GetEncodingType() const{ return m_encodingType; }

    /**
     * <p>The type of encoding you are using: </p> <ul> <li> <p>
     * <code>RLE_DICTIONARY</code> uses a combination of bit-packing and run-length
     * encoding to store repeated values more efficiently. This is the default.</p>
     * </li> <li> <p> <code>PLAIN</code> doesn't use encoding at all. Values are stored
     * as they are.</p> </li> <li> <p> <code>PLAIN_DICTIONARY</code> builds a
     * dictionary of the values encountered in a given column. The dictionary is stored
     * in a dictionary page for each column chunk.</p> </li> </ul>
     */
    inline bool EncodingTypeHasBeenSet() const { return m_encodingTypeHasBeenSet; }

    /**
     * <p>The type of encoding you are using: </p> <ul> <li> <p>
     * <code>RLE_DICTIONARY</code> uses a combination of bit-packing and run-length
     * encoding to store repeated values more efficiently. This is the default.</p>
     * </li> <li> <p> <code>PLAIN</code> doesn't use encoding at all. Values are stored
     * as they are.</p> </li> <li> <p> <code>PLAIN_DICTIONARY</code> builds a
     * dictionary of the values encountered in a given column. The dictionary is stored
     * in a dictionary page for each column chunk.</p> </li> </ul>
     */
    inline void SetEncodingType(const EncodingTypeValue& value) { m_encodingTypeHasBeenSet = true; m_encodingType = value; }

    /**
     * <p>The type of encoding you are using: </p> <ul> <li> <p>
     * <code>RLE_DICTIONARY</code> uses a combination of bit-packing and run-length
     * encoding to store repeated values more efficiently. This is the default.</p>
     * </li> <li> <p> <code>PLAIN</code> doesn't use encoding at all. Values are stored
     * as they are.</p> </li> <li> <p> <code>PLAIN_DICTIONARY</code> builds a
     * dictionary of the values encountered in a given column. The dictionary is stored
     * in a dictionary page for each column chunk.</p> </li> </ul>
     */
    inline void SetEncodingType(EncodingTypeValue&& value) { m_encodingTypeHasBeenSet = true; m_encodingType = std::move(value); }

    /**
     * <p>The type of encoding you are using: </p> <ul> <li> <p>
     * <code>RLE_DICTIONARY</code> uses a combination of bit-packing and run-length
     * encoding to store repeated values more efficiently. This is the default.</p>
     * </li> <li> <p> <code>PLAIN</code> doesn't use encoding at all. Values are stored
     * as they are.</p> </li> <li> <p> <code>PLAIN_DICTIONARY</code> builds a
     * dictionary of the values encountered in a given column. The dictionary is stored
     * in a dictionary page for each column chunk.</p> </li> </ul>
     */
    inline S3Settings& WithEncodingType(const EncodingTypeValue& value) { SetEncodingType(value); return *this;}

    /**
     * <p>The type of encoding you are using: </p> <ul> <li> <p>
     * <code>RLE_DICTIONARY</code> uses a combination of bit-packing and run-length
     * encoding to store repeated values more efficiently. This is the default.</p>
     * </li> <li> <p> <code>PLAIN</code> doesn't use encoding at all. Values are stored
     * as they are.</p> </li> <li> <p> <code>PLAIN_DICTIONARY</code> builds a
     * dictionary of the values encountered in a given column. The dictionary is stored
     * in a dictionary page for each column chunk.</p> </li> </ul>
     */
    inline S3Settings& WithEncodingType(EncodingTypeValue&& value) { SetEncodingType(std::move(value)); return *this;}


    /**
     * <p>The maximum size of an encoded dictionary page of a column. If the dictionary
     * page exceeds this, this column is stored using an encoding type of
     * <code>PLAIN</code>. This parameter defaults to 1024 * 1024 bytes (1 MiB), the
     * maximum size of a dictionary page before it reverts to <code>PLAIN</code>
     * encoding. This size is used for .parquet file format only. </p>
     */
    inline int GetDictPageSizeLimit() const{ return m_dictPageSizeLimit; }

    /**
     * <p>The maximum size of an encoded dictionary page of a column. If the dictionary
     * page exceeds this, this column is stored using an encoding type of
     * <code>PLAIN</code>. This parameter defaults to 1024 * 1024 bytes (1 MiB), the
     * maximum size of a dictionary page before it reverts to <code>PLAIN</code>
     * encoding. This size is used for .parquet file format only. </p>
     */
    inline bool DictPageSizeLimitHasBeenSet() const { return m_dictPageSizeLimitHasBeenSet; }

    /**
     * <p>The maximum size of an encoded dictionary page of a column. If the dictionary
     * page exceeds this, this column is stored using an encoding type of
     * <code>PLAIN</code>. This parameter defaults to 1024 * 1024 bytes (1 MiB), the
     * maximum size of a dictionary page before it reverts to <code>PLAIN</code>
     * encoding. This size is used for .parquet file format only. </p>
     */
    inline void SetDictPageSizeLimit(int value) { m_dictPageSizeLimitHasBeenSet = true; m_dictPageSizeLimit = value; }

    /**
     * <p>The maximum size of an encoded dictionary page of a column. If the dictionary
     * page exceeds this, this column is stored using an encoding type of
     * <code>PLAIN</code>. This parameter defaults to 1024 * 1024 bytes (1 MiB), the
     * maximum size of a dictionary page before it reverts to <code>PLAIN</code>
     * encoding. This size is used for .parquet file format only. </p>
     */
    inline S3Settings& WithDictPageSizeLimit(int value) { SetDictPageSizeLimit(value); return *this;}


    /**
     * <p>The number of rows in a row group. A smaller row group size provides faster
     * reads. But as the number of row groups grows, writes become slower. This
     * parameter defaults to 10,000 rows. This number is used for .parquet file format
     * only. </p> <p>If you choose a value larger than the maximum,
     * <code>RowGroupLength</code> is set to the max row group length in bytes (64 *
     * 1024 * 1024). </p>
     */
    inline int GetRowGroupLength() const{ return m_rowGroupLength; }

    /**
     * <p>The number of rows in a row group. A smaller row group size provides faster
     * reads. But as the number of row groups grows, writes become slower. This
     * parameter defaults to 10,000 rows. This number is used for .parquet file format
     * only. </p> <p>If you choose a value larger than the maximum,
     * <code>RowGroupLength</code> is set to the max row group length in bytes (64 *
     * 1024 * 1024). </p>
     */
    inline bool RowGroupLengthHasBeenSet() const { return m_rowGroupLengthHasBeenSet; }

    /**
     * <p>The number of rows in a row group. A smaller row group size provides faster
     * reads. But as the number of row groups grows, writes become slower. This
     * parameter defaults to 10,000 rows. This number is used for .parquet file format
     * only. </p> <p>If you choose a value larger than the maximum,
     * <code>RowGroupLength</code> is set to the max row group length in bytes (64 *
     * 1024 * 1024). </p>
     */
    inline void SetRowGroupLength(int value) { m_rowGroupLengthHasBeenSet = true; m_rowGroupLength = value; }

    /**
     * <p>The number of rows in a row group. A smaller row group size provides faster
     * reads. But as the number of row groups grows, writes become slower. This
     * parameter defaults to 10,000 rows. This number is used for .parquet file format
     * only. </p> <p>If you choose a value larger than the maximum,
     * <code>RowGroupLength</code> is set to the max row group length in bytes (64 *
     * 1024 * 1024). </p>
     */
    inline S3Settings& WithRowGroupLength(int value) { SetRowGroupLength(value); return *this;}
847 
848 
849     /**
850      * <p>The size of one data page in bytes. This parameter defaults to 1024 * 1024
851      * bytes (1 MiB). This number is used for .parquet file format only. </p>
852      */
GetDataPageSize()853     inline int GetDataPageSize() const{ return m_dataPageSize; }
854 
855     /**
856      * <p>The size of one data page in bytes. This parameter defaults to 1024 * 1024
857      * bytes (1 MiB). This number is used for .parquet file format only. </p>
858      */
DataPageSizeHasBeenSet()859     inline bool DataPageSizeHasBeenSet() const { return m_dataPageSizeHasBeenSet; }
860 
861     /**
862      * <p>The size of one data page in bytes. This parameter defaults to 1024 * 1024
863      * bytes (1 MiB). This number is used for .parquet file format only. </p>
864      */
SetDataPageSize(int value)865     inline void SetDataPageSize(int value) { m_dataPageSizeHasBeenSet = true; m_dataPageSize = value; }
866 
867     /**
868      * <p>The size of one data page in bytes. This parameter defaults to 1024 * 1024
869      * bytes (1 MiB). This number is used for .parquet file format only. </p>
870      */
WithDataPageSize(int value)871     inline S3Settings& WithDataPageSize(int value) { SetDataPageSize(value); return *this;}
872 
873 
874     /**
875      * <p>The version of the Apache Parquet format that you want to use:
876      * <code>parquet_1_0</code> (the default) or <code>parquet_2_0</code>.</p>
877      */
GetParquetVersion()878     inline const ParquetVersionValue& GetParquetVersion() const{ return m_parquetVersion; }
879 
880     /**
881      * <p>The version of the Apache Parquet format that you want to use:
882      * <code>parquet_1_0</code> (the default) or <code>parquet_2_0</code>.</p>
883      */
ParquetVersionHasBeenSet()884     inline bool ParquetVersionHasBeenSet() const { return m_parquetVersionHasBeenSet; }
885 
886     /**
887      * <p>The version of the Apache Parquet format that you want to use:
888      * <code>parquet_1_0</code> (the default) or <code>parquet_2_0</code>.</p>
889      */
SetParquetVersion(const ParquetVersionValue & value)890     inline void SetParquetVersion(const ParquetVersionValue& value) { m_parquetVersionHasBeenSet = true; m_parquetVersion = value; }
891 
892     /**
893      * <p>The version of the Apache Parquet format that you want to use:
894      * <code>parquet_1_0</code> (the default) or <code>parquet_2_0</code>.</p>
895      */
SetParquetVersion(ParquetVersionValue && value)896     inline void SetParquetVersion(ParquetVersionValue&& value) { m_parquetVersionHasBeenSet = true; m_parquetVersion = std::move(value); }
897 
898     /**
899      * <p>The version of the Apache Parquet format that you want to use:
900      * <code>parquet_1_0</code> (the default) or <code>parquet_2_0</code>.</p>
901      */
WithParquetVersion(const ParquetVersionValue & value)902     inline S3Settings& WithParquetVersion(const ParquetVersionValue& value) { SetParquetVersion(value); return *this;}
903 
904     /**
905      * <p>The version of the Apache Parquet format that you want to use:
906      * <code>parquet_1_0</code> (the default) or <code>parquet_2_0</code>.</p>
907      */
WithParquetVersion(ParquetVersionValue && value)908     inline S3Settings& WithParquetVersion(ParquetVersionValue&& value) { SetParquetVersion(std::move(value)); return *this;}
909 
910 
911     /**
912      * <p>A value that enables statistics for Parquet pages and row groups. Choose
913      * <code>true</code> to enable statistics, <code>false</code> to disable.
914      * Statistics include <code>NULL</code>, <code>DISTINCT</code>, <code>MAX</code>,
915      * and <code>MIN</code> values. This parameter defaults to <code>true</code>. This
916      * value is used for .parquet file format only.</p>
917      */
GetEnableStatistics()918     inline bool GetEnableStatistics() const{ return m_enableStatistics; }
919 
920     /**
921      * <p>A value that enables statistics for Parquet pages and row groups. Choose
922      * <code>true</code> to enable statistics, <code>false</code> to disable.
923      * Statistics include <code>NULL</code>, <code>DISTINCT</code>, <code>MAX</code>,
924      * and <code>MIN</code> values. This parameter defaults to <code>true</code>. This
925      * value is used for .parquet file format only.</p>
926      */
EnableStatisticsHasBeenSet()927     inline bool EnableStatisticsHasBeenSet() const { return m_enableStatisticsHasBeenSet; }
928 
929     /**
930      * <p>A value that enables statistics for Parquet pages and row groups. Choose
931      * <code>true</code> to enable statistics, <code>false</code> to disable.
932      * Statistics include <code>NULL</code>, <code>DISTINCT</code>, <code>MAX</code>,
933      * and <code>MIN</code> values. This parameter defaults to <code>true</code>. This
934      * value is used for .parquet file format only.</p>
935      */
SetEnableStatistics(bool value)936     inline void SetEnableStatistics(bool value) { m_enableStatisticsHasBeenSet = true; m_enableStatistics = value; }
937 
938     /**
939      * <p>A value that enables statistics for Parquet pages and row groups. Choose
940      * <code>true</code> to enable statistics, <code>false</code> to disable.
941      * Statistics include <code>NULL</code>, <code>DISTINCT</code>, <code>MAX</code>,
942      * and <code>MIN</code> values. This parameter defaults to <code>true</code>. This
943      * value is used for .parquet file format only.</p>
944      */
WithEnableStatistics(bool value)945     inline S3Settings& WithEnableStatistics(bool value) { SetEnableStatistics(value); return *this;}
946 
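    /*
     * Illustrative sketch: disabling the Parquet page and row-group statistics
     * (NULL, DISTINCT, MAX, MIN) that are otherwise collected by default.
     *
     *   S3Settings settings;
     *   settings.WithEnableStatistics(false);
     */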
947 
948     /**
949      * <p>A value that enables a full load to write INSERT operations to the
950      * comma-separated value (.csv) output files only to indicate how the rows were
951      * added to the source database.</p>  <p>DMS supports the
952      * <code>IncludeOpForFullLoad</code> parameter in versions 3.1.4 and later.</p>
953      *  <p>For full load, records can only be inserted. By default (the
954      * <code>false</code> setting), no information is recorded in these output files
955      * for a full load to indicate that the rows were inserted at the source database.
956      * If <code>IncludeOpForFullLoad</code> is set to <code>true</code> or
957      * <code>y</code>, the INSERT is recorded as an I annotation in the first field of
958      * the .csv file. This allows the format of your target records from a full load to
959      * be consistent with the target records from a CDC load.</p>  <p>This
960      * setting works together with the <code>CdcInsertsOnly</code> and the
961      * <code>CdcInsertsAndUpdates</code> parameters for output to .csv files only. For
962      * more information about how these settings work together, see <a
963      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps">Indicating
964      * Source DB Operations in Migrated S3 Data</a> in the <i>Database Migration
965      * Service User Guide</i>.</p>
966      */
GetIncludeOpForFullLoad()967     inline bool GetIncludeOpForFullLoad() const{ return m_includeOpForFullLoad; }
968 
969     /**
970      * <p>A value that enables a full load to write INSERT operations to the
971      * comma-separated value (.csv) output files only to indicate how the rows were
972      * added to the source database.</p>  <p>DMS supports the
973      * <code>IncludeOpForFullLoad</code> parameter in versions 3.1.4 and later.</p>
974      *  <p>For full load, records can only be inserted. By default (the
975      * <code>false</code> setting), no information is recorded in these output files
976      * for a full load to indicate that the rows were inserted at the source database.
977      * If <code>IncludeOpForFullLoad</code> is set to <code>true</code> or
978      * <code>y</code>, the INSERT is recorded as an I annotation in the first field of
979      * the .csv file. This allows the format of your target records from a full load to
980      * be consistent with the target records from a CDC load.</p>  <p>This
981      * setting works together with the <code>CdcInsertsOnly</code> and the
982      * <code>CdcInsertsAndUpdates</code> parameters for output to .csv files only. For
983      * more information about how these settings work together, see <a
984      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps">Indicating
985      * Source DB Operations in Migrated S3 Data</a> in the <i>Database Migration
986      * Service User Guide</i>.</p>
987      */
IncludeOpForFullLoadHasBeenSet()988     inline bool IncludeOpForFullLoadHasBeenSet() const { return m_includeOpForFullLoadHasBeenSet; }
989 
990     /**
991      * <p>A value that enables a full load to write INSERT operations to the
992      * comma-separated value (.csv) output files only to indicate how the rows were
993      * added to the source database.</p>  <p>DMS supports the
994      * <code>IncludeOpForFullLoad</code> parameter in versions 3.1.4 and later.</p>
995      *  <p>For full load, records can only be inserted. By default (the
996      * <code>false</code> setting), no information is recorded in these output files
997      * for a full load to indicate that the rows were inserted at the source database.
998      * If <code>IncludeOpForFullLoad</code> is set to <code>true</code> or
999      * <code>y</code>, the INSERT is recorded as an I annotation in the first field of
1000      * the .csv file. This allows the format of your target records from a full load to
1001      * be consistent with the target records from a CDC load.</p>  <p>This
1002      * setting works together with the <code>CdcInsertsOnly</code> and the
1003      * <code>CdcInsertsAndUpdates</code> parameters for output to .csv files only. For
1004      * more information about how these settings work together, see <a
1005      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps">Indicating
1006      * Source DB Operations in Migrated S3 Data</a> in the <i>Database Migration
1007      * Service User Guide</i>.</p>
1008      */
SetIncludeOpForFullLoad(bool value)1009     inline void SetIncludeOpForFullLoad(bool value) { m_includeOpForFullLoadHasBeenSet = true; m_includeOpForFullLoad = value; }
1010 
1011     /**
1012      * <p>A value that enables a full load to write INSERT operations to the
1013      * comma-separated value (.csv) output files only to indicate how the rows were
1014      * added to the source database.</p>  <p>DMS supports the
1015      * <code>IncludeOpForFullLoad</code> parameter in versions 3.1.4 and later.</p>
1016      *  <p>For full load, records can only be inserted. By default (the
1017      * <code>false</code> setting), no information is recorded in these output files
1018      * for a full load to indicate that the rows were inserted at the source database.
1019      * If <code>IncludeOpForFullLoad</code> is set to <code>true</code> or
1020      * <code>y</code>, the INSERT is recorded as an I annotation in the first field of
1021      * the .csv file. This allows the format of your target records from a full load to
1022      * be consistent with the target records from a CDC load.</p>  <p>This
1023      * setting works together with the <code>CdcInsertsOnly</code> and the
1024      * <code>CdcInsertsAndUpdates</code> parameters for output to .csv files only. For
1025      * more information about how these settings work together, see <a
1026      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps">Indicating
1027      * Source DB Operations in Migrated S3 Data</a> in the <i>Database Migration
1028      * Service User Guide</i>.</p>
1029      */
WithIncludeOpForFullLoad(bool value)1030     inline S3Settings& WithIncludeOpForFullLoad(bool value) { SetIncludeOpForFullLoad(value); return *this;}
1031 
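    /*
     * Illustrative sketch: annotating full-load .csv rows with a leading "I"
     * operation field so full-load and CDC records share the same layout.
     *
     *   S3Settings settings;
     *   settings.WithIncludeOpForFullLoad(true);
     */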
1032 
1033     /**
1034      * <p>A value that enables a change data capture (CDC) load to write only INSERT
1035      * operations to .csv or columnar storage (.parquet) output files. By default (the
1036      * <code>false</code> setting), the first field in a .csv or .parquet record
1037      * contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate
1038      * whether the row was inserted, updated, or deleted at the source database for a
1039      * CDC load to the target.</p> <p>If <code>CdcInsertsOnly</code> is set to
1040      * <code>true</code> or <code>y</code>, only INSERTs from the source database are
1041      * migrated to the .csv or .parquet file. For .csv format only, how these INSERTs
1042      * are recorded depends on the value of <code>IncludeOpForFullLoad</code>. If
1043      * <code>IncludeOpForFullLoad</code> is set to <code>true</code>, the first field
1044      * of every CDC record is set to I to indicate the INSERT operation at the source.
1045      * If <code>IncludeOpForFullLoad</code> is set to <code>false</code>, every CDC
1046      * record is written without a first field to indicate the INSERT operation at the
1047      * source. For more information about how these settings work together, see <a
1048      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps">Indicating
1049      * Source DB Operations in Migrated S3 Data</a> in the <i>Database Migration
1050      * Service User Guide</i>.</p>  <p>DMS supports the preceding interaction
1051      * between the <code>CdcInsertsOnly</code> and
1052      * <code>IncludeOpForFullLoad</code> parameters in versions 3.1.4 and later. </p>
1053      * <p> <code>CdcInsertsOnly</code> and <code>CdcInsertsAndUpdates</code> can't both
1054      * be set to <code>true</code> for the same endpoint. Set either
1055      * <code>CdcInsertsOnly</code> or <code>CdcInsertsAndUpdates</code> to
1056      * <code>true</code> for the same endpoint, but not both.</p>
1057      */
GetCdcInsertsOnly()1058     inline bool GetCdcInsertsOnly() const{ return m_cdcInsertsOnly; }
1059 
1060     /**
1061      * <p>A value that enables a change data capture (CDC) load to write only INSERT
1062      * operations to .csv or columnar storage (.parquet) output files. By default (the
1063      * <code>false</code> setting), the first field in a .csv or .parquet record
1064      * contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate
1065      * whether the row was inserted, updated, or deleted at the source database for a
1066      * CDC load to the target.</p> <p>If <code>CdcInsertsOnly</code> is set to
1067      * <code>true</code> or <code>y</code>, only INSERTs from the source database are
1068      * migrated to the .csv or .parquet file. For .csv format only, how these INSERTs
1069      * are recorded depends on the value of <code>IncludeOpForFullLoad</code>. If
1070      * <code>IncludeOpForFullLoad</code> is set to <code>true</code>, the first field
1071      * of every CDC record is set to I to indicate the INSERT operation at the source.
1072      * If <code>IncludeOpForFullLoad</code> is set to <code>false</code>, every CDC
1073      * record is written without a first field to indicate the INSERT operation at the
1074      * source. For more information about how these settings work together, see <a
1075      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps">Indicating
1076      * Source DB Operations in Migrated S3 Data</a> in the <i>Database Migration
1077      * Service User Guide</i>.</p>  <p>DMS supports the preceding interaction
1078      * between the <code>CdcInsertsOnly</code> and
1079      * <code>IncludeOpForFullLoad</code> parameters in versions 3.1.4 and later. </p>
1080      * <p> <code>CdcInsertsOnly</code> and <code>CdcInsertsAndUpdates</code> can't both
1081      * be set to <code>true</code> for the same endpoint. Set either
1082      * <code>CdcInsertsOnly</code> or <code>CdcInsertsAndUpdates</code> to
1083      * <code>true</code> for the same endpoint, but not both.</p>
1084      */
CdcInsertsOnlyHasBeenSet()1085     inline bool CdcInsertsOnlyHasBeenSet() const { return m_cdcInsertsOnlyHasBeenSet; }
1086 
1087     /**
1088      * <p>A value that enables a change data capture (CDC) load to write only INSERT
1089      * operations to .csv or columnar storage (.parquet) output files. By default (the
1090      * <code>false</code> setting), the first field in a .csv or .parquet record
1091      * contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate
1092      * whether the row was inserted, updated, or deleted at the source database for a
1093      * CDC load to the target.</p> <p>If <code>CdcInsertsOnly</code> is set to
1094      * <code>true</code> or <code>y</code>, only INSERTs from the source database are
1095      * migrated to the .csv or .parquet file. For .csv format only, how these INSERTs
1096      * are recorded depends on the value of <code>IncludeOpForFullLoad</code>. If
1097      * <code>IncludeOpForFullLoad</code> is set to <code>true</code>, the first field
1098      * of every CDC record is set to I to indicate the INSERT operation at the source.
1099      * If <code>IncludeOpForFullLoad</code> is set to <code>false</code>, every CDC
1100      * record is written without a first field to indicate the INSERT operation at the
1101      * source. For more information about how these settings work together, see <a
1102      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps">Indicating
1103      * Source DB Operations in Migrated S3 Data</a> in the <i>Database Migration
1104      * Service User Guide</i>.</p>  <p>DMS supports the preceding interaction
1105      * between the <code>CdcInsertsOnly</code> and
1106      * <code>IncludeOpForFullLoad</code> parameters in versions 3.1.4 and later. </p>
1107      * <p> <code>CdcInsertsOnly</code> and <code>CdcInsertsAndUpdates</code> can't both
1108      * be set to <code>true</code> for the same endpoint. Set either
1109      * <code>CdcInsertsOnly</code> or <code>CdcInsertsAndUpdates</code> to
1110      * <code>true</code> for the same endpoint, but not both.</p>
1111      */
SetCdcInsertsOnly(bool value)1112     inline void SetCdcInsertsOnly(bool value) { m_cdcInsertsOnlyHasBeenSet = true; m_cdcInsertsOnly = value; }
1113 
1114     /**
1115      * <p>A value that enables a change data capture (CDC) load to write only INSERT
1116      * operations to .csv or columnar storage (.parquet) output files. By default (the
1117      * <code>false</code> setting), the first field in a .csv or .parquet record
1118      * contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate
1119      * whether the row was inserted, updated, or deleted at the source database for a
1120      * CDC load to the target.</p> <p>If <code>CdcInsertsOnly</code> is set to
1121      * <code>true</code> or <code>y</code>, only INSERTs from the source database are
1122      * migrated to the .csv or .parquet file. For .csv format only, how these INSERTs
1123      * are recorded depends on the value of <code>IncludeOpForFullLoad</code>. If
1124      * <code>IncludeOpForFullLoad</code> is set to <code>true</code>, the first field
1125      * of every CDC record is set to I to indicate the INSERT operation at the source.
1126      * If <code>IncludeOpForFullLoad</code> is set to <code>false</code>, every CDC
1127      * record is written without a first field to indicate the INSERT operation at the
1128      * source. For more information about how these settings work together, see <a
1129      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps">Indicating
1130      * Source DB Operations in Migrated S3 Data</a> in the <i>Database Migration
1131      * Service User Guide</i>.</p>  <p>DMS supports the preceding interaction
1132      * between the <code>CdcInsertsOnly</code> and
1133      * <code>IncludeOpForFullLoad</code> parameters in versions 3.1.4 and later. </p>
1134      * <p> <code>CdcInsertsOnly</code> and <code>CdcInsertsAndUpdates</code> can't both
1135      * be set to <code>true</code> for the same endpoint. Set either
1136      * <code>CdcInsertsOnly</code> or <code>CdcInsertsAndUpdates</code> to
1137      * <code>true</code> for the same endpoint, but not both.</p>
1138      */
WithCdcInsertsOnly(bool value)1139     inline S3Settings& WithCdcInsertsOnly(bool value) { SetCdcInsertsOnly(value); return *this;}
1140 
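    /*
     * Illustrative sketch: migrating only INSERTs during CDC while keeping the
     * leading "I" column in .csv output, per the interaction described above.
     * CdcInsertsAndUpdates is deliberately left unset.
     *
     *   S3Settings settings;
     *   settings.WithIncludeOpForFullLoad(true).WithCdcInsertsOnly(true);
     */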
1141 
1142     /**
1143      * <p>A value that, when nonblank, causes DMS to add a column with timestamp
1144      * information to the endpoint data for an Amazon S3 target.</p>  <p>DMS
1145      * supports the <code>TimestampColumnName</code> parameter in versions 3.1.4 and
1146      * later.</p>  <p>DMS includes an additional <code>STRING</code> column in
1147      * the .csv or .parquet object files of your migrated data when you set
1148      * <code>TimestampColumnName</code> to a nonblank value.</p> <p>For a full load,
1149      * each row of this timestamp column contains a timestamp for when the data was
1150      * transferred from the source to the target by DMS. </p> <p>For a change data
1151      * capture (CDC) load, each row of the timestamp column contains the timestamp for
1152      * the commit of that row in the source database.</p> <p>The string format for this
1153      * timestamp column value is <code>yyyy-MM-dd HH:mm:ss.SSSSSS</code>. By default,
1154      * the precision of this value is in microseconds. For a CDC load, the rounding of
1155      * the precision depends on the commit timestamp supported by DMS for the source
1156      * database.</p> <p>When the <code>AddColumnName</code> parameter is set to
1157      * <code>true</code>, DMS also includes a name for the timestamp column that you
1158      * set with <code>TimestampColumnName</code>.</p>
1159      */
GetTimestampColumnName()1160     inline const Aws::String& GetTimestampColumnName() const{ return m_timestampColumnName; }
1161 
1162     /**
1163      * <p>A value that, when nonblank, causes DMS to add a column with timestamp
1164      * information to the endpoint data for an Amazon S3 target.</p>  <p>DMS
1165      * supports the <code>TimestampColumnName</code> parameter in versions 3.1.4 and
1166      * later.</p>  <p>DMS includes an additional <code>STRING</code> column in
1167      * the .csv or .parquet object files of your migrated data when you set
1168      * <code>TimestampColumnName</code> to a nonblank value.</p> <p>For a full load,
1169      * each row of this timestamp column contains a timestamp for when the data was
1170      * transferred from the source to the target by DMS. </p> <p>For a change data
1171      * capture (CDC) load, each row of the timestamp column contains the timestamp for
1172      * the commit of that row in the source database.</p> <p>The string format for this
1173      * timestamp column value is <code>yyyy-MM-dd HH:mm:ss.SSSSSS</code>. By default,
1174      * the precision of this value is in microseconds. For a CDC load, the rounding of
1175      * the precision depends on the commit timestamp supported by DMS for the source
1176      * database.</p> <p>When the <code>AddColumnName</code> parameter is set to
1177      * <code>true</code>, DMS also includes a name for the timestamp column that you
1178      * set with <code>TimestampColumnName</code>.</p>
1179      */
TimestampColumnNameHasBeenSet()1180     inline bool TimestampColumnNameHasBeenSet() const { return m_timestampColumnNameHasBeenSet; }
1181 
1182     /**
1183      * <p>A value that, when nonblank, causes DMS to add a column with timestamp
1184      * information to the endpoint data for an Amazon S3 target.</p>  <p>DMS
1185      * supports the <code>TimestampColumnName</code> parameter in versions 3.1.4 and
1186      * later.</p>  <p>DMS includes an additional <code>STRING</code> column in
1187      * the .csv or .parquet object files of your migrated data when you set
1188      * <code>TimestampColumnName</code> to a nonblank value.</p> <p>For a full load,
1189      * each row of this timestamp column contains a timestamp for when the data was
1190      * transferred from the source to the target by DMS. </p> <p>For a change data
1191      * capture (CDC) load, each row of the timestamp column contains the timestamp for
1192      * the commit of that row in the source database.</p> <p>The string format for this
1193      * timestamp column value is <code>yyyy-MM-dd HH:mm:ss.SSSSSS</code>. By default,
1194      * the precision of this value is in microseconds. For a CDC load, the rounding of
1195      * the precision depends on the commit timestamp supported by DMS for the source
1196      * database.</p> <p>When the <code>AddColumnName</code> parameter is set to
1197      * <code>true</code>, DMS also includes a name for the timestamp column that you
1198      * set with <code>TimestampColumnName</code>.</p>
1199      */
SetTimestampColumnName(const Aws::String & value)1200     inline void SetTimestampColumnName(const Aws::String& value) { m_timestampColumnNameHasBeenSet = true; m_timestampColumnName = value; }
1201 
1202     /**
1203      * <p>A value that, when nonblank, causes DMS to add a column with timestamp
1204      * information to the endpoint data for an Amazon S3 target.</p>  <p>DMS
1205      * supports the <code>TimestampColumnName</code> parameter in versions 3.1.4 and
1206      * later.</p>  <p>DMS includes an additional <code>STRING</code> column in
1207      * the .csv or .parquet object files of your migrated data when you set
1208      * <code>TimestampColumnName</code> to a nonblank value.</p> <p>For a full load,
1209      * each row of this timestamp column contains a timestamp for when the data was
1210      * transferred from the source to the target by DMS. </p> <p>For a change data
1211      * capture (CDC) load, each row of the timestamp column contains the timestamp for
1212      * the commit of that row in the source database.</p> <p>The string format for this
1213      * timestamp column value is <code>yyyy-MM-dd HH:mm:ss.SSSSSS</code>. By default,
1214      * the precision of this value is in microseconds. For a CDC load, the rounding of
1215      * the precision depends on the commit timestamp supported by DMS for the source
1216      * database.</p> <p>When the <code>AddColumnName</code> parameter is set to
1217      * <code>true</code>, DMS also includes a name for the timestamp column that you
1218      * set with <code>TimestampColumnName</code>.</p>
1219      */
SetTimestampColumnName(Aws::String && value)1220     inline void SetTimestampColumnName(Aws::String&& value) { m_timestampColumnNameHasBeenSet = true; m_timestampColumnName = std::move(value); }
1221 
1222     /**
1223      * <p>A value that, when nonblank, causes DMS to add a column with timestamp
1224      * information to the endpoint data for an Amazon S3 target.</p>  <p>DMS
1225      * supports the <code>TimestampColumnName</code> parameter in versions 3.1.4 and
1226      * later.</p>  <p>DMS includes an additional <code>STRING</code> column in
1227      * the .csv or .parquet object files of your migrated data when you set
1228      * <code>TimestampColumnName</code> to a nonblank value.</p> <p>For a full load,
1229      * each row of this timestamp column contains a timestamp for when the data was
1230      * transferred from the source to the target by DMS. </p> <p>For a change data
1231      * capture (CDC) load, each row of the timestamp column contains the timestamp for
1232      * the commit of that row in the source database.</p> <p>The string format for this
1233      * timestamp column value is <code>yyyy-MM-dd HH:mm:ss.SSSSSS</code>. By default,
1234      * the precision of this value is in microseconds. For a CDC load, the rounding of
1235      * the precision depends on the commit timestamp supported by DMS for the source
1236      * database.</p> <p>When the <code>AddColumnName</code> parameter is set to
1237      * <code>true</code>, DMS also includes a name for the timestamp column that you
1238      * set with <code>TimestampColumnName</code>.</p>
1239      */
SetTimestampColumnName(const char * value)1240     inline void SetTimestampColumnName(const char* value) { m_timestampColumnNameHasBeenSet = true; m_timestampColumnName.assign(value); }
1241 
1242     /**
1243      * <p>A value that, when nonblank, causes DMS to add a column with timestamp
1244      * information to the endpoint data for an Amazon S3 target.</p>  <p>DMS
1245      * supports the <code>TimestampColumnName</code> parameter in versions 3.1.4 and
1246      * later.</p>  <p>DMS includes an additional <code>STRING</code> column in
1247      * the .csv or .parquet object files of your migrated data when you set
1248      * <code>TimestampColumnName</code> to a nonblank value.</p> <p>For a full load,
1249      * each row of this timestamp column contains a timestamp for when the data was
1250      * transferred from the source to the target by DMS. </p> <p>For a change data
1251      * capture (CDC) load, each row of the timestamp column contains the timestamp for
1252      * the commit of that row in the source database.</p> <p>The string format for this
1253      * timestamp column value is <code>yyyy-MM-dd HH:mm:ss.SSSSSS</code>. By default,
1254      * the precision of this value is in microseconds. For a CDC load, the rounding of
1255      * the precision depends on the commit timestamp supported by DMS for the source
1256      * database.</p> <p>When the <code>AddColumnName</code> parameter is set to
1257      * <code>true</code>, DMS also includes a name for the timestamp column that you
1258      * set with <code>TimestampColumnName</code>.</p>
1259      */
WithTimestampColumnName(const Aws::String & value)1260     inline S3Settings& WithTimestampColumnName(const Aws::String& value) { SetTimestampColumnName(value); return *this;}
1261 
1262     /**
1263      * <p>A value that, when nonblank, causes DMS to add a column with timestamp
1264      * information to the endpoint data for an Amazon S3 target.</p>  <p>DMS
1265      * supports the <code>TimestampColumnName</code> parameter in versions 3.1.4 and
1266      * later.</p>  <p>DMS includes an additional <code>STRING</code> column in
1267      * the .csv or .parquet object files of your migrated data when you set
1268      * <code>TimestampColumnName</code> to a nonblank value.</p> <p>For a full load,
1269      * each row of this timestamp column contains a timestamp for when the data was
1270      * transferred from the source to the target by DMS. </p> <p>For a change data
1271      * capture (CDC) load, each row of the timestamp column contains the timestamp for
1272      * the commit of that row in the source database.</p> <p>The string format for this
1273      * timestamp column value is <code>yyyy-MM-dd HH:mm:ss.SSSSSS</code>. By default,
1274      * the precision of this value is in microseconds. For a CDC load, the rounding of
1275      * the precision depends on the commit timestamp supported by DMS for the source
1276      * database.</p> <p>When the <code>AddColumnName</code> parameter is set to
1277      * <code>true</code>, DMS also includes a name for the timestamp column that you
1278      * set with <code>TimestampColumnName</code>.</p>
1279      */
WithTimestampColumnName(Aws::String && value)1280     inline S3Settings& WithTimestampColumnName(Aws::String&& value) { SetTimestampColumnName(std::move(value)); return *this;}
1281 
1282     /**
1283      * <p>A value that, when nonblank, causes DMS to add a column with timestamp
1284      * information to the endpoint data for an Amazon S3 target.</p>  <p>DMS
1285      * supports the <code>TimestampColumnName</code> parameter in versions 3.1.4 and
1286      * later.</p>  <p>DMS includes an additional <code>STRING</code> column in
1287      * the .csv or .parquet object files of your migrated data when you set
1288      * <code>TimestampColumnName</code> to a nonblank value.</p> <p>For a full load,
1289      * each row of this timestamp column contains a timestamp for when the data was
1290      * transferred from the source to the target by DMS. </p> <p>For a change data
1291      * capture (CDC) load, each row of the timestamp column contains the timestamp for
1292      * the commit of that row in the source database.</p> <p>The string format for this
1293      * timestamp column value is <code>yyyy-MM-dd HH:mm:ss.SSSSSS</code>. By default,
1294      * the precision of this value is in microseconds. For a CDC load, the rounding of
1295      * the precision depends on the commit timestamp supported by DMS for the source
1296      * database.</p> <p>When the <code>AddColumnName</code> parameter is set to
1297      * <code>true</code>, DMS also includes a name for the timestamp column that you
1298      * set with <code>TimestampColumnName</code>.</p>
1299      */
WithTimestampColumnName(const char * value)1300     inline S3Settings& WithTimestampColumnName(const char* value) { SetTimestampColumnName(value); return *this;}
1301 
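    /*
     * Illustrative sketch: adding a timestamp column to every migrated record;
     * "dms_commit_ts" is an arbitrary example column name.
     *
     *   S3Settings settings;
     *   settings.WithTimestampColumnName("dms_commit_ts");
     */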
1302 
1303     /**
1304      * <p>A value that specifies the precision of any <code>TIMESTAMP</code> column
1305      * values that are written to an Amazon S3 object file in .parquet format.</p>
1306      *  <p>DMS supports the <code>ParquetTimestampInMillisecond</code> parameter
1307      * in versions 3.1.4 and later.</p>  <p>When
1308      * <code>ParquetTimestampInMillisecond</code> is set to <code>true</code> or
1309      * <code>y</code>, DMS writes all <code>TIMESTAMP</code> columns in a .parquet
1310      * formatted file with millisecond precision. Otherwise, DMS writes them with
1311      * microsecond precision.</p> <p>Currently, Amazon Athena and Glue can handle only
1312      * millisecond precision for <code>TIMESTAMP</code> values. Set this parameter to
1313      * <code>true</code> for S3 endpoint object files that are .parquet formatted only
1314      * if you plan to query or process the data with Athena or Glue.</p>  <p>DMS
1315      * writes any <code>TIMESTAMP</code> column values written to an S3 file in .csv
1316      * format with microsecond precision.</p> <p>Setting
1317      * <code>ParquetTimestampInMillisecond</code> has no effect on the string format of
1318      * the timestamp column value that is inserted by setting the
1319      * <code>TimestampColumnName</code> parameter.</p>
1320      */
GetParquetTimestampInMillisecond()1321     inline bool GetParquetTimestampInMillisecond() const{ return m_parquetTimestampInMillisecond; }
1322 
1323     /**
1324      * <p>A value that specifies the precision of any <code>TIMESTAMP</code> column
1325      * values that are written to an Amazon S3 object file in .parquet format.</p>
1326      *  <p>DMS supports the <code>ParquetTimestampInMillisecond</code> parameter
1327      * in versions 3.1.4 and later.</p>  <p>When
1328      * <code>ParquetTimestampInMillisecond</code> is set to <code>true</code> or
1329      * <code>y</code>, DMS writes all <code>TIMESTAMP</code> columns in a .parquet
1330      * formatted file with millisecond precision. Otherwise, DMS writes them with
1331      * microsecond precision.</p> <p>Currently, Amazon Athena and Glue can handle only
1332      * millisecond precision for <code>TIMESTAMP</code> values. Set this parameter to
1333      * <code>true</code> for S3 endpoint object files that are .parquet formatted only
1334      * if you plan to query or process the data with Athena or Glue.</p>  <p>DMS
1335      * writes any <code>TIMESTAMP</code> column values written to an S3 file in .csv
1336      * format with microsecond precision.</p> <p>Setting
1337      * <code>ParquetTimestampInMillisecond</code> has no effect on the string format of
1338      * the timestamp column value that is inserted by setting the
1339      * <code>TimestampColumnName</code> parameter.</p>
1340      */
ParquetTimestampInMillisecondHasBeenSet()1341     inline bool ParquetTimestampInMillisecondHasBeenSet() const { return m_parquetTimestampInMillisecondHasBeenSet; }
1342 
1343     /**
1344      * <p>A value that specifies the precision of any <code>TIMESTAMP</code> column
1345      * values that are written to an Amazon S3 object file in .parquet format.</p>
1346      *  <p>DMS supports the <code>ParquetTimestampInMillisecond</code> parameter
1347      * in versions 3.1.4 and later.</p>  <p>When
1348      * <code>ParquetTimestampInMillisecond</code> is set to <code>true</code> or
1349      * <code>y</code>, DMS writes all <code>TIMESTAMP</code> columns in a .parquet
1350      * formatted file with millisecond precision. Otherwise, DMS writes them with
1351      * microsecond precision.</p> <p>Currently, Amazon Athena and Glue can handle only
1352      * millisecond precision for <code>TIMESTAMP</code> values. Set this parameter to
1353      * <code>true</code> for S3 endpoint object files that are .parquet formatted only
1354      * if you plan to query or process the data with Athena or Glue.</p>  <p>DMS
1355      * writes any <code>TIMESTAMP</code> column values written to an S3 file in .csv
1356      * format with microsecond precision.</p> <p>Setting
1357      * <code>ParquetTimestampInMillisecond</code> has no effect on the string format of
1358      * the timestamp column value that is inserted by setting the
1359      * <code>TimestampColumnName</code> parameter.</p>
1360      */
SetParquetTimestampInMillisecond(bool value)1361     inline void SetParquetTimestampInMillisecond(bool value) { m_parquetTimestampInMillisecondHasBeenSet = true; m_parquetTimestampInMillisecond = value; }
1362 
1363     /**
1364      * <p>A value that specifies the precision of any <code>TIMESTAMP</code> column
1365      * values that are written to an Amazon S3 object file in .parquet format.</p>
1366      *  <p>DMS supports the <code>ParquetTimestampInMillisecond</code> parameter
1367      * in versions 3.1.4 and later.</p>  <p>When
1368      * <code>ParquetTimestampInMillisecond</code> is set to <code>true</code> or
1369      * <code>y</code>, DMS writes all <code>TIMESTAMP</code> columns in a .parquet
1370      * formatted file with millisecond precision. Otherwise, DMS writes them with
1371      * microsecond precision.</p> <p>Currently, Amazon Athena and Glue can handle only
1372      * millisecond precision for <code>TIMESTAMP</code> values. Set this parameter to
1373      * <code>true</code> for S3 endpoint object files that are .parquet formatted only
1374      * if you plan to query or process the data with Athena or Glue.</p>  <p>DMS
1375      * writes any <code>TIMESTAMP</code> column values written to an S3 file in .csv
1376      * format with microsecond precision.</p> <p>Setting
1377      * <code>ParquetTimestampInMillisecond</code> has no effect on the string format of
1378      * the timestamp column value that is inserted by setting the
1379      * <code>TimestampColumnName</code> parameter.</p>
1380      */
WithParquetTimestampInMillisecond(bool value)1381     inline S3Settings& WithParquetTimestampInMillisecond(bool value) { SetParquetTimestampInMillisecond(value); return *this;}
1382 
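    /*
     * Illustrative sketch: writing TIMESTAMP columns with millisecond precision
     * in .parquet output, for example when the files will be queried with Athena
     * or Glue.
     *
     *   S3Settings settings;
     *   settings.WithParquetTimestampInMillisecond(true);
     */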
1383 
1384     /**
1385      * <p>A value that enables a change data capture (CDC) load to write INSERT and
1386      * UPDATE operations to .csv or .parquet (columnar storage) output files. The
1387      * default setting is <code>false</code>, but when
1388      * <code>CdcInsertsAndUpdates</code> is set to <code>true</code> or <code>y</code>,
1389      * only INSERTs and UPDATEs from the source database are migrated to the .csv or
1390      * .parquet file. </p> <p>For .csv file format only, how these INSERTs and UPDATEs
1391      * are recorded depends on the value of the <code>IncludeOpForFullLoad</code>
1392      * parameter. If <code>IncludeOpForFullLoad</code> is set to <code>true</code>, the
1393      * first field of every CDC record is set to either <code>I</code> or
1394      * <code>U</code> to indicate INSERT and UPDATE operations at the source. But if
1395      * <code>IncludeOpForFullLoad</code> is set to <code>false</code>, CDC records are
1396      * written without an indication of INSERT or UPDATE operations at the source. For
1397      * more information about how these settings work together, see <a
1398      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps">Indicating
1399      * Source DB Operations in Migrated S3 Data</a> in the <i>Database Migration
1400      * Service User Guide</i>.</p>  <p>DMS supports the use of the
1401      * <code>CdcInsertsAndUpdates</code> parameter in versions 3.3.1 and later.</p> <p>
1402      * <code>CdcInsertsOnly</code> and <code>CdcInsertsAndUpdates</code> can't both be
1403      * set to <code>true</code> for the same endpoint. Set either
1404      * <code>CdcInsertsOnly</code> or <code>CdcInsertsAndUpdates</code> to
1405      * <code>true</code> for the same endpoint, but not both.</p>
1406      */
GetCdcInsertsAndUpdates()1407     inline bool GetCdcInsertsAndUpdates() const{ return m_cdcInsertsAndUpdates; }
1408 
1409     /**
1410      * <p>A value that enables a change data capture (CDC) load to write INSERT and
1411      * UPDATE operations to .csv or .parquet (columnar storage) output files. The
1412      * default setting is <code>false</code>, but when
1413      * <code>CdcInsertsAndUpdates</code> is set to <code>true</code> or <code>y</code>,
1414      * only INSERTs and UPDATEs from the source database are migrated to the .csv or
1415      * .parquet file. </p> <p>For .csv file format only, how these INSERTs and UPDATEs
1416      * are recorded depends on the value of the <code>IncludeOpForFullLoad</code>
1417      * parameter. If <code>IncludeOpForFullLoad</code> is set to <code>true</code>, the
1418      * first field of every CDC record is set to either <code>I</code> or
1419      * <code>U</code> to indicate INSERT and UPDATE operations at the source. But if
1420      * <code>IncludeOpForFullLoad</code> is set to <code>false</code>, CDC records are
1421      * written without an indication of INSERT or UPDATE operations at the source. For
1422      * more information about how these settings work together, see <a
1423      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps">Indicating
1424      * Source DB Operations in Migrated S3 Data</a> in the <i>Database Migration
1425      * Service User Guide</i>.</p>  <p>DMS supports the use of the
1426      * <code>CdcInsertsAndUpdates</code> parameter in versions 3.3.1 and later.</p> <p>
1427      * <code>CdcInsertsOnly</code> and <code>CdcInsertsAndUpdates</code> can't both be
1428      * set to <code>true</code> for the same endpoint. Set either
1429      * <code>CdcInsertsOnly</code> or <code>CdcInsertsAndUpdates</code> to
1430      * <code>true</code> for the same endpoint, but not both.</p>
1431      */
CdcInsertsAndUpdatesHasBeenSet()1432     inline bool CdcInsertsAndUpdatesHasBeenSet() const { return m_cdcInsertsAndUpdatesHasBeenSet; }
1433 
1434     /**
1435      * <p>A value that enables a change data capture (CDC) load to write INSERT and
1436      * UPDATE operations to .csv or .parquet (columnar storage) output files. The
1437      * default setting is <code>false</code>, but when
1438      * <code>CdcInsertsAndUpdates</code> is set to <code>true</code> or <code>y</code>,
1439      * only INSERTs and UPDATEs from the source database are migrated to the .csv or
1440      * .parquet file. </p> <p>For .csv file format only, how these INSERTs and UPDATEs
1441      * are recorded depends on the value of the <code>IncludeOpForFullLoad</code>
1442      * parameter. If <code>IncludeOpForFullLoad</code> is set to <code>true</code>, the
1443      * first field of every CDC record is set to either <code>I</code> or
1444      * <code>U</code> to indicate INSERT and UPDATE operations at the source. But if
1445      * <code>IncludeOpForFullLoad</code> is set to <code>false</code>, CDC records are
1446      * written without an indication of INSERT or UPDATE operations at the source. For
1447      * more information about how these settings work together, see <a
1448      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps">Indicating
1449      * Source DB Operations in Migrated S3 Data</a> in the <i>Database Migration
1450      * Service User Guide</i>.</p>  <p>DMS supports the use of the
1451      * <code>CdcInsertsAndUpdates</code> parameter in versions 3.3.1 and later.</p> <p>
1452      * <code>CdcInsertsOnly</code> and <code>CdcInsertsAndUpdates</code> can't both be
1453      * set to <code>true</code> for the same endpoint. Set either
1454      * <code>CdcInsertsOnly</code> or <code>CdcInsertsAndUpdates</code> to
1455      * <code>true</code> for the same endpoint, but not both.</p>
1456      */
SetCdcInsertsAndUpdates(bool value)1457     inline void SetCdcInsertsAndUpdates(bool value) { m_cdcInsertsAndUpdatesHasBeenSet = true; m_cdcInsertsAndUpdates = value; }
1458 
1459     /**
1460      * <p>A value that enables a change data capture (CDC) load to write INSERT and
1461      * UPDATE operations to .csv or .parquet (columnar storage) output files. The
1462      * default setting is <code>false</code>, but when
1463      * <code>CdcInsertsAndUpdates</code> is set to <code>true</code> or <code>y</code>,
1464      * only INSERTs and UPDATEs from the source database are migrated to the .csv or
1465      * .parquet file. </p> <p>For .csv file format only, how these INSERTs and UPDATEs
1466      * are recorded depends on the value of the <code>IncludeOpForFullLoad</code>
1467      * parameter. If <code>IncludeOpForFullLoad</code> is set to <code>true</code>, the
1468      * first field of every CDC record is set to either <code>I</code> or
1469      * <code>U</code> to indicate INSERT and UPDATE operations at the source. But if
1470      * <code>IncludeOpForFullLoad</code> is set to <code>false</code>, CDC records are
1471      * written without an indication of INSERT or UPDATE operations at the source. For
1472      * more information about how these settings work together, see <a
1473      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps">Indicating
1474      * Source DB Operations in Migrated S3 Data</a> in the <i>Database Migration
1475      * Service User Guide</i>.</p>  <p>DMS supports the use of the
1476      * <code>CdcInsertsAndUpdates</code> parameter in versions 3.3.1 and later.</p> <p>
1477      * <code>CdcInsertsOnly</code> and <code>CdcInsertsAndUpdates</code> can't both be
1478      * set to <code>true</code> for the same endpoint. Set either
1479      * <code>CdcInsertsOnly</code> or <code>CdcInsertsAndUpdates</code> to
1480      * <code>true</code> for the same endpoint, but not both.</p>
1481      */
WithCdcInsertsAndUpdates(bool value)1482     inline S3Settings& WithCdcInsertsAndUpdates(bool value) { SetCdcInsertsAndUpdates(value); return *this;}
1483 
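    /*
     * Illustrative sketch: capturing INSERTs and UPDATEs (but not DELETEs) during
     * CDC. CdcInsertsOnly is left unset because the two flags can't both be true.
     *
     *   S3Settings settings;
     *   settings.WithCdcInsertsAndUpdates(true);
     */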
1484 
1485     /**
1486      * <p>When set to <code>true</code>, this parameter partitions S3 bucket folders
1487      * based on transaction commit dates. The default value is <code>false</code>. For
1488      * more information about date-based folder partitioning, see <a
1489      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.DatePartitioning">Using
1490      * date-based folder partitioning</a>.</p>
1491      */
GetDatePartitionEnabled()1492     inline bool GetDatePartitionEnabled() const{ return m_datePartitionEnabled; }
1493 
1494     /**
1495      * <p>When set to <code>true</code>, this parameter partitions S3 bucket folders
1496      * based on transaction commit dates. The default value is <code>false</code>. For
1497      * more information about date-based folder partitioning, see <a
1498      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.DatePartitioning">Using
1499      * date-based folder partitioning</a>.</p>
1500      */
DatePartitionEnabledHasBeenSet()1501     inline bool DatePartitionEnabledHasBeenSet() const { return m_datePartitionEnabledHasBeenSet; }
1502 
1503     /**
1504      * <p>When set to <code>true</code>, this parameter partitions S3 bucket folders
1505      * based on transaction commit dates. The default value is <code>false</code>. For
1506      * more information about date-based folder partitioning, see <a
1507      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.DatePartitioning">Using
1508      * date-based folder partitioning</a>.</p>
1509      */
SetDatePartitionEnabled(bool value)1510     inline void SetDatePartitionEnabled(bool value) { m_datePartitionEnabledHasBeenSet = true; m_datePartitionEnabled = value; }
1511 
1512     /**
1513      * <p>When set to <code>true</code>, this parameter partitions S3 bucket folders
1514      * based on transaction commit dates. The default value is <code>false</code>. For
1515      * more information about date-based folder partitioning, see <a
1516      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.DatePartitioning">Using
1517      * date-based folder partitioning</a>.</p>
1518      */
WithDatePartitionEnabled(bool value)1519     inline S3Settings& WithDatePartitionEnabled(bool value) { SetDatePartitionEnabled(value); return *this;}
1520 
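    /*
     * Illustrative sketch: partitioning target bucket folders by transaction
     * commit date.
     *
     *   S3Settings settings;
     *   settings.WithDatePartitionEnabled(true);
     */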
1521 
1522     /**
1523      * <p>Identifies the sequence of the date format to use during folder partitioning.
1524      * The default value is <code>YYYYMMDD</code>. Use this parameter when
1525      * <code>DatePartitionEnabled</code> is set to <code>true</code>.</p>
1526      */
GetDatePartitionSequence()1527     inline const DatePartitionSequenceValue& GetDatePartitionSequence() const{ return m_datePartitionSequence; }
1528 
1529     /**
1530      * <p>Identifies the sequence of the date format to use during folder partitioning.
1531      * The default value is <code>YYYYMMDD</code>. Use this parameter when
1532      * <code>DatePartitionEnabled</code> is set to <code>true</code>.</p>
1533      */
DatePartitionSequenceHasBeenSet()1534     inline bool DatePartitionSequenceHasBeenSet() const { return m_datePartitionSequenceHasBeenSet; }
1535 
1536     /**
1537      * <p>Identifies the sequence of the date format to use during folder partitioning.
1538      * The default value is <code>YYYYMMDD</code>. Use this parameter when
1539      * <code>DatePartitionEnabled</code> is set to <code>true</code>.</p>
1540      */
SetDatePartitionSequence(const DatePartitionSequenceValue & value)1541     inline void SetDatePartitionSequence(const DatePartitionSequenceValue& value) { m_datePartitionSequenceHasBeenSet = true; m_datePartitionSequence = value; }
1542 
1543     /**
1544      * <p>Identifies the sequence of the date format to use during folder partitioning.
1545      * The default value is <code>YYYYMMDD</code>. Use this parameter when
1546      * <code>DatePartitionEnabled</code> is set to <code>true</code>.</p>
1547      */
SetDatePartitionSequence(DatePartitionSequenceValue && value)1548     inline void SetDatePartitionSequence(DatePartitionSequenceValue&& value) { m_datePartitionSequenceHasBeenSet = true; m_datePartitionSequence = std::move(value); }
1549 
1550     /**
1551      * <p>Identifies the sequence of the date format to use during folder partitioning.
1552      * The default value is <code>YYYYMMDD</code>. Use this parameter when
1553      * <code>DatePartitionEnabled</code> is set to <code>true</code>.</p>
1554      */
WithDatePartitionSequence(const DatePartitionSequenceValue & value)1555     inline S3Settings& WithDatePartitionSequence(const DatePartitionSequenceValue& value) { SetDatePartitionSequence(value); return *this;}
1556 
1557     /**
1558      * <p>Identifies the sequence of the date format to use during folder partitioning.
1559      * The default value is <code>YYYYMMDD</code>. Use this parameter when
1560      * <code>DatePartitionEnabled</code> is set to <code>true</code>.</p>
1561      */
WithDatePartitionSequence(DatePartitionSequenceValue && value)1562     inline S3Settings& WithDatePartitionSequence(DatePartitionSequenceValue&& value) { SetDatePartitionSequence(std::move(value)); return *this;}
1563 
1564 
1565     /**
1566      * <p>Specifies a date separating delimiter to use during folder partitioning. The
1567      * default value is <code>SLASH</code>. Use this parameter when
1568      * <code>DatePartitionEnabled</code> is set to <code>true</code>.</p>
1569      */
GetDatePartitionDelimiter()1570     inline const DatePartitionDelimiterValue& GetDatePartitionDelimiter() const{ return m_datePartitionDelimiter; }
1571 
1572     /**
1573      * <p>Specifies a date separating delimiter to use during folder partitioning. The
1574      * default value is <code>SLASH</code>. Use this parameter when
1575      * <code>DatePartitionEnabled</code> is set to <code>true</code>.</p>
1576      */
DatePartitionDelimiterHasBeenSet()1577     inline bool DatePartitionDelimiterHasBeenSet() const { return m_datePartitionDelimiterHasBeenSet; }
1578 
1579     /**
1580      * <p>Specifies a date separating delimiter to use during folder partitioning. The
1581      * default value is <code>SLASH</code>. Use this parameter when
1582      * <code>DatePartitionEnabled</code> is set to <code>true</code>.</p>
1583      */
SetDatePartitionDelimiter(const DatePartitionDelimiterValue & value)1584     inline void SetDatePartitionDelimiter(const DatePartitionDelimiterValue& value) { m_datePartitionDelimiterHasBeenSet = true; m_datePartitionDelimiter = value; }
1585 
1586     /**
1587      * <p>Specifies a date separating delimiter to use during folder partitioning. The
1588      * default value is <code>SLASH</code>. Use this parameter when
1589      * <code>DatePartitionEnabled</code> is set to <code>true</code>.</p>
1590      */
SetDatePartitionDelimiter(DatePartitionDelimiterValue && value)1591     inline void SetDatePartitionDelimiter(DatePartitionDelimiterValue&& value) { m_datePartitionDelimiterHasBeenSet = true; m_datePartitionDelimiter = std::move(value); }
1592 
1593     /**
1594      * <p>Specifies a date separating delimiter to use during folder partitioning. The
1595      * default value is <code>SLASH</code>. Use this parameter when
1596      * <code>DatePartitionEnabled</code> is set to <code>true</code>.</p>
1597      */
WithDatePartitionDelimiter(const DatePartitionDelimiterValue & value)1598     inline S3Settings& WithDatePartitionDelimiter(const DatePartitionDelimiterValue& value) { SetDatePartitionDelimiter(value); return *this;}
1599 
1600     /**
1601      * <p>Specifies a date separating delimiter to use during folder partitioning. The
1602      * default value is <code>SLASH</code>. Use this parameter when
1603      * <code>DatePartitionEnabled</code> is set to <code>true</code>.</p>
1604      */
WithDatePartitionDelimiter(DatePartitionDelimiterValue && value)1605     inline S3Settings& WithDatePartitionDelimiter(DatePartitionDelimiterValue&& value) { SetDatePartitionDelimiter(std::move(value)); return *this;}
1606 
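    /*
     * Illustrative sketch: choosing the folder date format and separator; these
     * take effect together with DatePartitionEnabled(true) shown above. The enum
     * members mirror the documented default values (YYYYMMDD and SLASH).
     *
     *   S3Settings settings;
     *   settings.WithDatePartitionEnabled(true)
     *           .WithDatePartitionSequence(DatePartitionSequenceValue::YYYYMMDD)
     *           .WithDatePartitionDelimiter(DatePartitionDelimiterValue::SLASH);
     */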
1607 
1608     /**
1609      * <p>This setting applies if the S3 output files during a change data capture
1610      * (CDC) load are written in .csv format. If set to <code>true</code> for columns
1611      * not included in the supplemental log, DMS uses the value specified by <a
1612      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CsvNoSupValue">
1613      * <code>CsvNoSupValue</code> </a>. If not set or set to <code>false</code>, DMS
1614      * uses the null value for these columns.</p>  <p>This setting is supported
1615      * in DMS versions 3.4.1 and later.</p>
1616      */
GetUseCsvNoSupValue()1617     inline bool GetUseCsvNoSupValue() const{ return m_useCsvNoSupValue; }
1618 
1619     /**
1620      * <p>This setting applies if the S3 output files during a change data capture
1621      * (CDC) load are written in .csv format. If set to <code>true</code> for columns
1622      * not included in the supplemental log, DMS uses the value specified by <a
1623      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CsvNoSupValue">
1624      * <code>CsvNoSupValue</code> </a>. If not set or set to <code>false</code>, DMS
1625      * uses the null value for these columns.</p>  <p>This setting is supported
1626      * in DMS versions 3.4.1 and later.</p>
1627      */
UseCsvNoSupValueHasBeenSet()1628     inline bool UseCsvNoSupValueHasBeenSet() const { return m_useCsvNoSupValueHasBeenSet; }
1629 
1630     /**
1631      * <p>This setting applies if the S3 output files during a change data capture
1632      * (CDC) load are written in .csv format. If set to <code>true</code> for columns
1633      * not included in the supplemental log, DMS uses the value specified by <a
1634      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CsvNoSupValue">
1635      * <code>CsvNoSupValue</code> </a>. If not set or set to <code>false</code>, DMS
1636      * uses the null value for these columns.</p>  <p>This setting is supported
1637      * in DMS versions 3.4.1 and later.</p>
1638      */
SetUseCsvNoSupValue(bool value)1639     inline void SetUseCsvNoSupValue(bool value) { m_useCsvNoSupValueHasBeenSet = true; m_useCsvNoSupValue = value; }
1640 
1641     /**
1642      * <p>This setting applies if the S3 output files during a change data capture
1643      * (CDC) load are written in .csv format. If set to <code>true</code> for columns
1644      * not included in the supplemental log, DMS uses the value specified by <a
1645      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CsvNoSupValue">
1646      * <code>CsvNoSupValue</code> </a>. If not set or set to <code>false</code>, DMS
1647      * uses the null value for these columns.</p>  <p>This setting is supported
1648      * in DMS versions 3.4.1 and later.</p>
1649      */
WithUseCsvNoSupValue(bool value)1650     inline S3Settings& WithUseCsvNoSupValue(bool value) { SetUseCsvNoSupValue(value); return *this;}
1651 
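    /*
     * Illustrative sketch: substituting a marker string for columns that are
     * missing from the supplemental log in CDC .csv output. "NO_SUP_LOG" is an
     * arbitrary example value for the CsvNoSupValue setting documented below.
     *
     *   S3Settings settings;
     *   settings.WithUseCsvNoSupValue(true).WithCsvNoSupValue("NO_SUP_LOG");
     */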
1652 
1653     /**
1654      * <p>This setting only applies if your Amazon S3 output files during a change data
1655      * capture (CDC) load are written in .csv format. If <a
1656      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-UseCsvNoSupValue">
1657      * <code>UseCsvNoSupValue</code> </a> is set to true, specify a string value that
1658      * you want DMS to use for all columns not included in the supplemental log. If you
1659      * do not specify a string value, DMS uses the null value for these columns
1660      * regardless of the <code>UseCsvNoSupValue</code> setting.</p>  <p>This
1661      * setting is supported in DMS versions 3.4.1 and later.</p>
1662      */
GetCsvNoSupValue()1663     inline const Aws::String& GetCsvNoSupValue() const{ return m_csvNoSupValue; }
1664 
1665     /**
1666      * <p>This setting only applies if your Amazon S3 output files during a change data
1667      * capture (CDC) load are written in .csv format. If <a
1668      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-UseCsvNoSupValue">
1669      * <code>UseCsvNoSupValue</code> </a> is set to true, specify a string value that
1670      * you want DMS to use for all columns not included in the supplemental log. If you
1671      * do not specify a string value, DMS uses the null value for these columns
1672      * regardless of the <code>UseCsvNoSupValue</code> setting.</p>  <p>This
1673      * setting is supported in DMS versions 3.4.1 and later.</p>
1674      */
CsvNoSupValueHasBeenSet()1675     inline bool CsvNoSupValueHasBeenSet() const { return m_csvNoSupValueHasBeenSet; }
1676 
1677     /**
1678      * <p>This setting only applies if your Amazon S3 output files during a change data
1679      * capture (CDC) load are written in .csv format. If <a
1680      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-UseCsvNoSupValue">
1681      * <code>UseCsvNoSupValue</code> </a> is set to true, specify a string value that
1682      * you want DMS to use for all columns not included in the supplemental log. If you
1683      * do not specify a string value, DMS uses the null value for these columns
1684      * regardless of the <code>UseCsvNoSupValue</code> setting.</p>  <p>This
1685      * setting is supported in DMS versions 3.4.1 and later.</p>
1686      */
SetCsvNoSupValue(const Aws::String & value)1687     inline void SetCsvNoSupValue(const Aws::String& value) { m_csvNoSupValueHasBeenSet = true; m_csvNoSupValue = value; }
1688 
1689     /**
1690      * <p>This setting only applies if your Amazon S3 output files during a change data
1691      * capture (CDC) load are written in .csv format. If <a
1692      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-UseCsvNoSupValue">
1693      * <code>UseCsvNoSupValue</code> </a> is set to true, specify a string value that
1694      * you want DMS to use for all columns not included in the supplemental log. If you
1695      * do not specify a string value, DMS uses the null value for these columns
1696      * regardless of the <code>UseCsvNoSupValue</code> setting.</p>  <p>This
1697      * setting is supported in DMS versions 3.4.1 and later.</p>
1698      */
SetCsvNoSupValue(Aws::String && value)1699     inline void SetCsvNoSupValue(Aws::String&& value) { m_csvNoSupValueHasBeenSet = true; m_csvNoSupValue = std::move(value); }
1700 
1701     /**
1702      * <p>This setting only applies if your Amazon S3 output files during a change data
1703      * capture (CDC) load are written in .csv format. If <a
1704      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-UseCsvNoSupValue">
1705      * <code>UseCsvNoSupValue</code> </a> is set to true, specify a string value that
1706      * you want DMS to use for all columns not included in the supplemental log. If you
1707      * do not specify a string value, DMS uses the null value for these columns
1708      * regardless of the <code>UseCsvNoSupValue</code> setting.</p>  <p>This
1709      * setting is supported in DMS versions 3.4.1 and later.</p>
1710      */
SetCsvNoSupValue(const char * value)1711     inline void SetCsvNoSupValue(const char* value) { m_csvNoSupValueHasBeenSet = true; m_csvNoSupValue.assign(value); }
1712 
1713     /**
1714      * <p>This setting only applies if your Amazon S3 output files during a change data
1715      * capture (CDC) load are written in .csv format. If <a
1716      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-UseCsvNoSupValue">
1717      * <code>UseCsvNoSupValue</code> </a> is set to true, specify a string value that
1718      * you want DMS to use for all columns not included in the supplemental log. If you
1719      * do not specify a string value, DMS uses the null value for these columns
1720      * regardless of the <code>UseCsvNoSupValue</code> setting.</p>  <p>This
1721      * setting is supported in DMS versions 3.4.1 and later.</p>
1722      */
WithCsvNoSupValue(const Aws::String & value)1723     inline S3Settings& WithCsvNoSupValue(const Aws::String& value) { SetCsvNoSupValue(value); return *this;}
1724 
1725     /**
1726      * <p>This setting only applies if your Amazon S3 output files during a change data
1727      * capture (CDC) load are written in .csv format. If <a
1728      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-UseCsvNoSupValue">
1729      * <code>UseCsvNoSupValue</code> </a> is set to true, specify a string value that
1730      * you want DMS to use for all columns not included in the supplemental log. If you
1731      * do not specify a string value, DMS uses the null value for these columns
1732      * regardless of the <code>UseCsvNoSupValue</code> setting.</p>  <p>This
1733      * setting is supported in DMS versions 3.4.1 and later.</p>
1734      */
WithCsvNoSupValue(Aws::String && value)1735     inline S3Settings& WithCsvNoSupValue(Aws::String&& value) { SetCsvNoSupValue(std::move(value)); return *this;}
1736 
1737     /**
1738      * <p>This setting only applies if your Amazon S3 output files during a change data
1739      * capture (CDC) load are written in .csv format. If <a
1740      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-UseCsvNoSupValue">
1741      * <code>UseCsvNoSupValue</code> </a> is set to true, specify a string value that
1742      * you want DMS to use for all columns not included in the supplemental log. If you
1743      * do not specify a string value, DMS uses the null value for these columns
1744      * regardless of the <code>UseCsvNoSupValue</code> setting.</p>  <p>This
1745      * setting is supported in DMS versions 3.4.1 and later.</p>
1746      */
WithCsvNoSupValue(const char * value)1747     inline S3Settings& WithCsvNoSupValue(const char* value) { SetCsvNoSupValue(value); return *this;}
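
    /*
     * Editorial usage sketch (not generated API documentation): a plausible way
     * to combine the two settings above on an S3 target endpoint so that columns
     * missing from the supplemental log are written as a marker string rather
     * than as null during a CDC load. The "NOSUP" marker is a hypothetical
     * placeholder value.
     *
     *   S3Settings s3;
     *   s3.WithUseCsvNoSupValue(true)
     *     .WithCsvNoSupValue("NOSUP");  // written for columns not in the supplemental log
     */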
1748 
1749 
1750     /**
1751      * <p>If set to <code>true</code>, DMS saves the transaction order for a change
1752      * data capture (CDC) load on the Amazon S3 target specified by <a
1753      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CdcPath">
1754      * <code>CdcPath</code> </a>. For more information, see <a
1755      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath">Capturing
1756      * data changes (CDC) including transaction order on the S3 target</a>.</p>
1757      * <p>This setting is supported in DMS versions 3.4.2 and later.</p>
1758      */
GetPreserveTransactions()1759     inline bool GetPreserveTransactions() const{ return m_preserveTransactions; }
1760 
1761     /**
1762      * <p>If set to <code>true</code>, DMS saves the transaction order for a change
1763      * data capture (CDC) load on the Amazon S3 target specified by <a
1764      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CdcPath">
1765      * <code>CdcPath</code> </a>. For more information, see <a
1766      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath">Capturing
1767      * data changes (CDC) including transaction order on the S3 target</a>.</p>
1768      * <p>This setting is supported in DMS versions 3.4.2 and later.</p>
1769      */
PreserveTransactionsHasBeenSet()1770     inline bool PreserveTransactionsHasBeenSet() const { return m_preserveTransactionsHasBeenSet; }
1771 
1772     /**
1773      * <p>If set to <code>true</code>, DMS saves the transaction order for a change
1774      * data capture (CDC) load on the Amazon S3 target specified by <a
1775      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CdcPath">
1776      * <code>CdcPath</code> </a>. For more information, see <a
1777      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath">Capturing
1778      * data changes (CDC) including transaction order on the S3 target</a>.</p>
1779      * <p>This setting is supported in DMS versions 3.4.2 and later.</p>
1780      */
SetPreserveTransactions(bool value)1781     inline void SetPreserveTransactions(bool value) { m_preserveTransactionsHasBeenSet = true; m_preserveTransactions = value; }
1782 
1783     /**
1784      * <p>If set to <code>true</code>, DMS saves the transaction order for a change
1785      * data capture (CDC) load on the Amazon S3 target specified by <a
1786      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CdcPath">
1787      * <code>CdcPath</code> </a>. For more information, see <a
1788      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath">Capturing
1789      * data changes (CDC) including transaction order on the S3 target</a>.</p>
1790      * <p>This setting is supported in DMS versions 3.4.2 and later.</p>
1791      */
WithPreserveTransactions(bool value)1792     inline S3Settings& WithPreserveTransactions(bool value) { SetPreserveTransactions(value); return *this;}
1793 
1794 
1795     /**
1796      * <p>Specifies the folder path of CDC files. For an S3 source, this setting is
1797      * required if a task captures change data; otherwise, it's optional. If
1798      * <code>CdcPath</code> is set, DMS reads CDC files from this path and replicates
1799      * the data changes to the target endpoint. For an S3 target, if you set <a
1800      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-PreserveTransactions">
1801      * <code>PreserveTransactions</code> </a> to <code>true</code>, DMS verifies that
1802      * you have set this parameter to a folder path on your S3 target where DMS can
1803      * save the transaction order for the CDC load. DMS creates this CDC folder path in
1804      * either your S3 target working directory or the S3 target location specified by
1805      * <a
1806      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketFolder">
1807      * <code>BucketFolder</code> </a> and <a
1808      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketName">
1809      * <code>BucketName</code> </a>.</p> <p>For example, if you specify
1810      * <code>CdcPath</code> as <code>MyChangedData</code>, and you specify
1811      * <code>BucketName</code> as <code>MyTargetBucket</code> but do not specify
1812      * <code>BucketFolder</code>, DMS creates the following CDC folder path:
1813      * <code>MyTargetBucket/MyChangedData</code>.</p> <p>If you specify the same
1814      * <code>CdcPath</code>, and you specify <code>BucketName</code> as
1815      * <code>MyTargetBucket</code> and <code>BucketFolder</code> as
1816      * <code>MyTargetData</code>, DMS creates the following CDC folder path:
1817      * <code>MyTargetBucket/MyTargetData/MyChangedData</code>.</p> <p>For more
1818      * information on CDC including transaction order on an S3 target, see <a
1819      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath">Capturing
1820      * data changes (CDC) including transaction order on the S3 target</a>.</p>
1821      * <p>This setting is supported in DMS versions 3.4.2 and later.</p>
1822      */
GetCdcPath()1823     inline const Aws::String& GetCdcPath() const{ return m_cdcPath; }
1824 
1825     /**
1826      * <p>Specifies the folder path of CDC files. For an S3 source, this setting is
1827      * required if a task captures change data; otherwise, it's optional. If
1828      * <code>CdcPath</code> is set, DMS reads CDC files from this path and replicates
1829      * the data changes to the target endpoint. For an S3 target, if you set <a
1830      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-PreserveTransactions">
1831      * <code>PreserveTransactions</code> </a> to <code>true</code>, DMS verifies that
1832      * you have set this parameter to a folder path on your S3 target where DMS can
1833      * save the transaction order for the CDC load. DMS creates this CDC folder path in
1834      * either your S3 target working directory or the S3 target location specified by
1835      * <a
1836      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketFolder">
1837      * <code>BucketFolder</code> </a> and <a
1838      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketName">
1839      * <code>BucketName</code> </a>.</p> <p>For example, if you specify
1840      * <code>CdcPath</code> as <code>MyChangedData</code>, and you specify
1841      * <code>BucketName</code> as <code>MyTargetBucket</code> but do not specify
1842      * <code>BucketFolder</code>, DMS creates the following CDC folder path:
1843      * <code>MyTargetBucket/MyChangedData</code>.</p> <p>If you specify the same
1844      * <code>CdcPath</code>, and you specify <code>BucketName</code> as
1845      * <code>MyTargetBucket</code> and <code>BucketFolder</code> as
1846      * <code>MyTargetData</code>, DMS creates the following CDC folder path:
1847      * <code>MyTargetBucket/MyTargetData/MyChangedData</code>.</p> <p>For more
1848      * information on CDC including transaction order on an S3 target, see <a
1849      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath">Capturing
1850      * data changes (CDC) including transaction order on the S3 target</a>.</p>
1851      * <p>This setting is supported in DMS versions 3.4.2 and later.</p>
1852      */
CdcPathHasBeenSet()1853     inline bool CdcPathHasBeenSet() const { return m_cdcPathHasBeenSet; }
1854 
1855     /**
1856      * <p>Specifies the folder path of CDC files. For an S3 source, this setting is
1857      * required if a task captures change data; otherwise, it's optional. If
1858      * <code>CdcPath</code> is set, DMS reads CDC files from this path and replicates
1859      * the data changes to the target endpoint. For an S3 target, if you set <a
1860      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-PreserveTransactions">
1861      * <code>PreserveTransactions</code> </a> to <code>true</code>, DMS verifies that
1862      * you have set this parameter to a folder path on your S3 target where DMS can
1863      * save the transaction order for the CDC load. DMS creates this CDC folder path in
1864      * either your S3 target working directory or the S3 target location specified by
1865      * <a
1866      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketFolder">
1867      * <code>BucketFolder</code> </a> and <a
1868      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketName">
1869      * <code>BucketName</code> </a>.</p> <p>For example, if you specify
1870      * <code>CdcPath</code> as <code>MyChangedData</code>, and you specify
1871      * <code>BucketName</code> as <code>MyTargetBucket</code> but do not specify
1872      * <code>BucketFolder</code>, DMS creates the following CDC folder path:
1873      * <code>MyTargetBucket/MyChangedData</code>.</p> <p>If you specify the same
1874      * <code>CdcPath</code>, and you specify <code>BucketName</code> as
1875      * <code>MyTargetBucket</code> and <code>BucketFolder</code> as
1876      * <code>MyTargetData</code>, DMS creates the following CDC folder path:
1877      * <code>MyTargetBucket/MyTargetData/MyChangedData</code>.</p> <p>For more
1878      * information on CDC including transaction order on an S3 target, see <a
1879      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath">Capturing
1880      * data changes (CDC) including transaction order on the S3 target</a>.</p>
1881      * <p>This setting is supported in DMS versions 3.4.2 and later.</p>
1882      */
SetCdcPath(const Aws::String & value)1883     inline void SetCdcPath(const Aws::String& value) { m_cdcPathHasBeenSet = true; m_cdcPath = value; }
1884 
1885     /**
1886      * <p>Specifies the folder path of CDC files. For an S3 source, this setting is
1887      * required if a task captures change data; otherwise, it's optional. If
1888      * <code>CdcPath</code> is set, DMS reads CDC files from this path and replicates
1889      * the data changes to the target endpoint. For an S3 target, if you set <a
1890      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-PreserveTransactions">
1891      * <code>PreserveTransactions</code> </a> to <code>true</code>, DMS verifies that
1892      * you have set this parameter to a folder path on your S3 target where DMS can
1893      * save the transaction order for the CDC load. DMS creates this CDC folder path in
1894      * either your S3 target working directory or the S3 target location specified by
1895      * <a
1896      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketFolder">
1897      * <code>BucketFolder</code> </a> and <a
1898      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketName">
1899      * <code>BucketName</code> </a>.</p> <p>For example, if you specify
1900      * <code>CdcPath</code> as <code>MyChangedData</code>, and you specify
1901      * <code>BucketName</code> as <code>MyTargetBucket</code> but do not specify
1902      * <code>BucketFolder</code>, DMS creates the following CDC folder path:
1903      * <code>MyTargetBucket/MyChangedData</code>.</p> <p>If you specify the same
1904      * <code>CdcPath</code>, and you specify <code>BucketName</code> as
1905      * <code>MyTargetBucket</code> and <code>BucketFolder</code> as
1906      * <code>MyTargetData</code>, DMS creates the following CDC folder path:
1907      * <code>MyTargetBucket/MyTargetData/MyChangedData</code>.</p> <p>For more
1908      * information on CDC including transaction order on an S3 target, see <a
1909      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath">Capturing
1910      * data changes (CDC) including transaction order on the S3 target</a>.</p>
1911      * <p>This setting is supported in DMS versions 3.4.2 and later.</p>
1912      */
SetCdcPath(Aws::String && value)1913     inline void SetCdcPath(Aws::String&& value) { m_cdcPathHasBeenSet = true; m_cdcPath = std::move(value); }
1914 
1915     /**
1916      * <p>Specifies the folder path of CDC files. For an S3 source, this setting is
1917      * required if a task captures change data; otherwise, it's optional. If
1918      * <code>CdcPath</code> is set, DMS reads CDC files from this path and replicates
1919      * the data changes to the target endpoint. For an S3 target, if you set <a
1920      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-PreserveTransactions">
1921      * <code>PreserveTransactions</code> </a> to <code>true</code>, DMS verifies that
1922      * you have set this parameter to a folder path on your S3 target where DMS can
1923      * save the transaction order for the CDC load. DMS creates this CDC folder path in
1924      * either your S3 target working directory or the S3 target location specified by
1925      * <a
1926      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketFolder">
1927      * <code>BucketFolder</code> </a> and <a
1928      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketName">
1929      * <code>BucketName</code> </a>.</p> <p>For example, if you specify
1930      * <code>CdcPath</code> as <code>MyChangedData</code>, and you specify
1931      * <code>BucketName</code> as <code>MyTargetBucket</code> but do not specify
1932      * <code>BucketFolder</code>, DMS creates the following CDC folder path:
1933      * <code>MyTargetBucket/MyChangedData</code>.</p> <p>If you specify the same
1934      * <code>CdcPath</code>, and you specify <code>BucketName</code> as
1935      * <code>MyTargetBucket</code> and <code>BucketFolder</code> as
1936      * <code>MyTargetData</code>, DMS creates the following CDC folder path:
1937      * <code>MyTargetBucket/MyTargetData/MyChangedData</code>.</p> <p>For more
1938      * information on CDC including transaction order on an S3 target, see <a
1939      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath">Capturing
1940      * data changes (CDC) including transaction order on the S3 target</a>.</p>
1941      * <p>This setting is supported in DMS versions 3.4.2 and later.</p>
1942      */
SetCdcPath(const char * value)1943     inline void SetCdcPath(const char* value) { m_cdcPathHasBeenSet = true; m_cdcPath.assign(value); }
1944 
1945     /**
1946      * <p>Specifies the folder path of CDC files. For an S3 source, this setting is
1947      * required if a task captures change data; otherwise, it's optional. If
1948      * <code>CdcPath</code> is set, DMS reads CDC files from this path and replicates
1949      * the data changes to the target endpoint. For an S3 target, if you set <a
1950      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-PreserveTransactions">
1951      * <code>PreserveTransactions</code> </a> to <code>true</code>, DMS verifies that
1952      * you have set this parameter to a folder path on your S3 target where DMS can
1953      * save the transaction order for the CDC load. DMS creates this CDC folder path in
1954      * either your S3 target working directory or the S3 target location specified by
1955      * <a
1956      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketFolder">
1957      * <code>BucketFolder</code> </a> and <a
1958      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketName">
1959      * <code>BucketName</code> </a>.</p> <p>For example, if you specify
1960      * <code>CdcPath</code> as <code>MyChangedData</code>, and you specify
1961      * <code>BucketName</code> as <code>MyTargetBucket</code> but do not specify
1962      * <code>BucketFolder</code>, DMS creates the following CDC folder path:
1963      * <code>MyTargetBucket/MyChangedData</code>.</p> <p>If you specify the same
1964      * <code>CdcPath</code>, and you specify <code>BucketName</code> as
1965      * <code>MyTargetBucket</code> and <code>BucketFolder</code> as
1966      * <code>MyTargetData</code>, DMS creates the following CDC folder path:
1967      * <code>MyTargetBucket/MyTargetData/MyChangedData</code>.</p> <p>For more
1968      * information on CDC including transaction order on an S3 target, see <a
1969      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath">Capturing
1970      * data changes (CDC) including transaction order on the S3 target</a>.</p>
1971      * <p>This setting is supported in DMS versions 3.4.2 and later.</p>
1972      */
WithCdcPath(const Aws::String & value)1973     inline S3Settings& WithCdcPath(const Aws::String& value) { SetCdcPath(value); return *this;}
1974 
1975     /**
1976      * <p>Specifies the folder path of CDC files. For an S3 source, this setting is
1977      * required if a task captures change data; otherwise, it's optional. If
1978      * <code>CdcPath</code> is set, DMS reads CDC files from this path and replicates
1979      * the data changes to the target endpoint. For an S3 target, if you set <a
1980      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-PreserveTransactions">
1981      * <code>PreserveTransactions</code> </a> to <code>true</code>, DMS verifies that
1982      * you have set this parameter to a folder path on your S3 target where DMS can
1983      * save the transaction order for the CDC load. DMS creates this CDC folder path in
1984      * either your S3 target working directory or the S3 target location specified by
1985      * <a
1986      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketFolder">
1987      * <code>BucketFolder</code> </a> and <a
1988      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketName">
1989      * <code>BucketName</code> </a>.</p> <p>For example, if you specify
1990      * <code>CdcPath</code> as <code>MyChangedData</code>, and you specify
1991      * <code>BucketName</code> as <code>MyTargetBucket</code> but do not specify
1992      * <code>BucketFolder</code>, DMS creates the following CDC folder path:
1993      * <code>MyTargetBucket/MyChangedData</code>.</p> <p>If you specify the same
1994      * <code>CdcPath</code>, and you specify <code>BucketName</code> as
1995      * <code>MyTargetBucket</code> and <code>BucketFolder</code> as
1996      * <code>MyTargetData</code>, DMS creates the following CDC folder path:
1997      * <code>MyTargetBucket/MyTargetData/MyChangedData</code>.</p> <p>For more
1998      * information on CDC including transaction order on an S3 target, see <a
1999      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath">Capturing
2000      * data changes (CDC) including transaction order on the S3 target</a>.</p>
2001      * <p>This setting is supported in DMS versions 3.4.2 and later.</p>
2002      */
WithCdcPath(Aws::String && value)2003     inline S3Settings& WithCdcPath(Aws::String&& value) { SetCdcPath(std::move(value)); return *this;}
2004 
2005     /**
2006      * <p>Specifies the folder path of CDC files. For an S3 source, this setting is
2007      * required if a task captures change data; otherwise, it's optional. If
2008      * <code>CdcPath</code> is set, DMS reads CDC files from this path and replicates
2009      * the data changes to the target endpoint. For an S3 target, if you set <a
2010      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-PreserveTransactions">
2011      * <code>PreserveTransactions</code> </a> to <code>true</code>, DMS verifies that
2012      * you have set this parameter to a folder path on your S3 target where DMS can
2013      * save the transaction order for the CDC load. DMS creates this CDC folder path in
2014      * either your S3 target working directory or the S3 target location specified by
2015      * <a
2016      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketFolder">
2017      * <code>BucketFolder</code> </a> and <a
2018      * href="https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketName">
2019      * <code>BucketName</code> </a>.</p> <p>For example, if you specify
2020      * <code>CdcPath</code> as <code>MyChangedData</code>, and you specify
2021      * <code>BucketName</code> as <code>MyTargetBucket</code> but do not specify
2022      * <code>BucketFolder</code>, DMS creates the following CDC folder path:
2023      * <code>MyTargetBucket/MyChangedData</code>.</p> <p>If you specify the same
2024      * <code>CdcPath</code>, and you specify <code>BucketName</code> as
2025      * <code>MyTargetBucket</code> and <code>BucketFolder</code> as
2026      * <code>MyTargetData</code>, DMS creates the following CDC folder path:
2027      * <code>MyTargetBucket/MyTargetData/MyChangedData</code>.</p> <p>For more
2028      * information on CDC including transaction order on an S3 target, see <a
2029      * href="https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath">Capturing
2030      * data changes (CDC) including transaction order on the S3 target</a>.</p>
2031      * <p>This setting is supported in DMS versions 3.4.2 and later.</p>
2032      */
WithCdcPath(const char * value)2033     inline S3Settings& WithCdcPath(const char* value) { SetCdcPath(value); return *this;}
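
    /*
     * Editorial usage sketch: preserving CDC transaction order on an S3 target.
     * With the hypothetical values below, DMS would create the CDC folder path
     * MyTargetBucket/MyTargetData/MyChangedData, matching the worked example in
     * the comments above. WithBucketName and WithBucketFolder are the fluent
     * setters for the BucketName and BucketFolder members declared in this class.
     *
     *   S3Settings s3;
     *   s3.WithBucketName("MyTargetBucket")
     *     .WithBucketFolder("MyTargetData")
     *     .WithPreserveTransactions(true)
     *     .WithCdcPath("MyChangedData");
     */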
2034 
2035 
2036     /**
2037      * <p>A value that enables DMS to specify a predefined (canned) access control list
2038      * for objects created in an Amazon S3 bucket as .csv or .parquet files. For more
2039      * information about Amazon S3 canned ACLs, see <a
2040      * href="http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl">Canned
2041      * ACL</a> in the <i>Amazon S3 Developer Guide.</i> </p> <p>The default value is
2042      * NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE,
2043      * AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and
2044      * BUCKET_OWNER_FULL_CONTROL.</p>
2045      */
GetCannedAclForObjects()2046     inline const CannedAclForObjectsValue& GetCannedAclForObjects() const{ return m_cannedAclForObjects; }
2047 
2048     /**
2049      * <p>A value that enables DMS to specify a predefined (canned) access control list
2050      * for objects created in an Amazon S3 bucket as .csv or .parquet files. For more
2051      * information about Amazon S3 canned ACLs, see <a
2052      * href="http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl">Canned
2053      * ACL</a> in the <i>Amazon S3 Developer Guide.</i> </p> <p>The default value is
2054      * NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE,
2055      * AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and
2056      * BUCKET_OWNER_FULL_CONTROL.</p>
2057      */
CannedAclForObjectsHasBeenSet()2058     inline bool CannedAclForObjectsHasBeenSet() const { return m_cannedAclForObjectsHasBeenSet; }
2059 
2060     /**
2061      * <p>A value that enables DMS to specify a predefined (canned) access control list
2062      * for objects created in an Amazon S3 bucket as .csv or .parquet files. For more
2063      * information about Amazon S3 canned ACLs, see <a
2064      * href="http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl">Canned
2065      * ACL</a> in the <i>Amazon S3 Developer Guide.</i> </p> <p>The default value is
2066      * NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE,
2067      * AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and
2068      * BUCKET_OWNER_FULL_CONTROL.</p>
2069      */
SetCannedAclForObjects(const CannedAclForObjectsValue & value)2070     inline void SetCannedAclForObjects(const CannedAclForObjectsValue& value) { m_cannedAclForObjectsHasBeenSet = true; m_cannedAclForObjects = value; }
2071 
2072     /**
2073      * <p>A value that enables DMS to specify a predefined (canned) access control list
2074      * for objects created in an Amazon S3 bucket as .csv or .parquet files. For more
2075      * information about Amazon S3 canned ACLs, see <a
2076      * href="http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl">Canned
2077      * ACL</a> in the <i>Amazon S3 Developer Guide.</i> </p> <p>The default value is
2078      * NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE,
2079      * AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and
2080      * BUCKET_OWNER_FULL_CONTROL.</p>
2081      */
SetCannedAclForObjects(CannedAclForObjectsValue && value)2082     inline void SetCannedAclForObjects(CannedAclForObjectsValue&& value) { m_cannedAclForObjectsHasBeenSet = true; m_cannedAclForObjects = std::move(value); }
2083 
2084     /**
2085      * <p>A value that enables DMS to specify a predefined (canned) access control list
2086      * for objects created in an Amazon S3 bucket as .csv or .parquet files. For more
2087      * information about Amazon S3 canned ACLs, see <a
2088      * href="http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl">Canned
2089      * ACL</a> in the <i>Amazon S3 Developer Guide.</i> </p> <p>The default value is
2090      * NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE,
2091      * AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and
2092      * BUCKET_OWNER_FULL_CONTROL.</p>
2093      */
WithCannedAclForObjects(const CannedAclForObjectsValue & value)2094     inline S3Settings& WithCannedAclForObjects(const CannedAclForObjectsValue& value) { SetCannedAclForObjects(value); return *this;}
2095 
2096     /**
2097      * <p>A value that enables DMS to specify a predefined (canned) access control list
2098      * for objects created in an Amazon S3 bucket as .csv or .parquet files. For more
2099      * information about Amazon S3 canned ACLs, see <a
2100      * href="http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl">Canned
2101      * ACL</a> in the <i>Amazon S3 Developer Guide.</i> </p> <p>The default value is
2102      * NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE,
2103      * AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and
2104      * BUCKET_OWNER_FULL_CONTROL.</p>
2105      */
WithCannedAclForObjects(CannedAclForObjectsValue && value)2106     inline S3Settings& WithCannedAclForObjects(CannedAclForObjectsValue&& value) { SetCannedAclForObjects(std::move(value)); return *this;}
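
    /*
     * Editorial usage sketch: requesting a canned ACL for the .csv or .parquet
     * objects that DMS writes. The enum member spelling below is an assumption;
     * check CannedAclForObjectsValue.h for the exact member corresponding to
     * BUCKET_OWNER_FULL_CONTROL.
     *
     *   S3Settings s3;
     *   s3.WithCannedAclForObjects(CannedAclForObjectsValue::bucket_owner_full_control);
     */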
2107 
2108 
2109     /**
2110      * <p>An optional parameter that, when set to <code>true</code> or <code>y</code>,
2111      * adds column name information to the .csv output file.</p> <p>The
2112      * default value is <code>false</code>. Valid values are <code>true</code>,
2113      * <code>false</code>, <code>y</code>, and <code>n</code>.</p>
2114      */
GetAddColumnName()2115     inline bool GetAddColumnName() const{ return m_addColumnName; }
2116 
2117     /**
2118      * <p>An optional parameter that, when set to <code>true</code> or <code>y</code>,
2119      * adds column name information to the .csv output file.</p> <p>The
2120      * default value is <code>false</code>. Valid values are <code>true</code>,
2121      * <code>false</code>, <code>y</code>, and <code>n</code>.</p>
2122      */
AddColumnNameHasBeenSet()2123     inline bool AddColumnNameHasBeenSet() const { return m_addColumnNameHasBeenSet; }
2124 
2125     /**
2126      * <p>An optional parameter that, when set to <code>true</code> or <code>y</code>,
2127      * adds column name information to the .csv output file.</p> <p>The
2128      * default value is <code>false</code>. Valid values are <code>true</code>,
2129      * <code>false</code>, <code>y</code>, and <code>n</code>.</p>
2130      */
SetAddColumnName(bool value)2131     inline void SetAddColumnName(bool value) { m_addColumnNameHasBeenSet = true; m_addColumnName = value; }
2132 
2133     /**
2134      * <p>An optional parameter that, when set to <code>true</code> or <code>y</code>,
2135      * adds column name information to the .csv output file.</p> <p>The
2136      * default value is <code>false</code>. Valid values are <code>true</code>,
2137      * <code>false</code>, <code>y</code>, and <code>n</code>.</p>
2138      */
WithAddColumnName(bool value)2139     inline S3Settings& WithAddColumnName(bool value) { SetAddColumnName(value); return *this;}
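
    /*
     * Editorial usage sketch: emitting column names as the first row of each
     * .csv file written to the S3 target.
     *
     *   S3Settings s3;
     *   s3.WithAddColumnName(true);
     */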
2140 
2141 
2142     /**
2143      * <p>Maximum length of the interval, defined in seconds, after which to output a
2144      * file to Amazon S3.</p> <p>When <code>CdcMaxBatchInterval</code> and
2145      * <code>CdcMinFileSize</code> are both specified, the file write is triggered by
2146      * whichever parameter condition is met first within a DMS CloudFormation
2147      * template.</p> <p>The default value is 60 seconds.</p>
2148      */
GetCdcMaxBatchInterval()2149     inline int GetCdcMaxBatchInterval() const{ return m_cdcMaxBatchInterval; }
2150 
2151     /**
2152      * <p>Maximum length of the interval, defined in seconds, after which to output a
2153      * file to Amazon S3.</p> <p>When <code>CdcMaxBatchInterval</code> and
2154      * <code>CdcMinFileSize</code> are both specified, the file write is triggered by
2155      * whichever parameter condition is met first within a DMS CloudFormation
2156      * template.</p> <p>The default value is 60 seconds.</p>
2157      */
CdcMaxBatchIntervalHasBeenSet()2158     inline bool CdcMaxBatchIntervalHasBeenSet() const { return m_cdcMaxBatchIntervalHasBeenSet; }
2159 
2160     /**
2161      * <p>Maximum length of the interval, defined in seconds, after which to output a
2162      * file to Amazon S3.</p> <p>When <code>CdcMaxBatchInterval</code> and
2163      * <code>CdcMinFileSize</code> are both specified, the file write is triggered by
2164      * whichever parameter condition is met first within a DMS CloudFormation
2165      * template.</p> <p>The default value is 60 seconds.</p>
2166      */
SetCdcMaxBatchInterval(int value)2167     inline void SetCdcMaxBatchInterval(int value) { m_cdcMaxBatchIntervalHasBeenSet = true; m_cdcMaxBatchInterval = value; }
2168 
2169     /**
2170      * <p>Maximum length of the interval, defined in seconds, after which to output a
2171      * file to Amazon S3.</p> <p>When <code>CdcMaxBatchInterval</code> and
2172      * <code>CdcMinFileSize</code> are both specified, the file write is triggered by
2173      * whichever parameter condition is met first within a DMS CloudFormation
2174      * template.</p> <p>The default value is 60 seconds.</p>
2175      */
WithCdcMaxBatchInterval(int value)2176     inline S3Settings& WithCdcMaxBatchInterval(int value) { SetCdcMaxBatchInterval(value); return *this;}
2177 
2178 
2179     /**
2180      * <p>Minimum file size, defined in megabytes, to reach for a file output to Amazon
2181      * S3.</p> <p>When <code>CdcMinFileSize</code> and <code>CdcMaxBatchInterval</code>
2182      * are both specified, the file write is triggered by whichever parameter condition
2183      * is met first within a DMS CloudFormation template.</p> <p>The default value is
2184      * 32 MB.</p>
2185      */
GetCdcMinFileSize()2186     inline int GetCdcMinFileSize() const{ return m_cdcMinFileSize; }
2187 
2188     /**
2189      * <p>Minimum file size, defined in megabytes, to reach for a file output to Amazon
2190      * S3.</p> <p>When <code>CdcMinFileSize</code> and <code>CdcMaxBatchInterval</code>
2191      * are both specified, the file write is triggered by whichever parameter condition
2192      * is met first within a DMS CloudFormation template.</p> <p>The default value is
2193      * 32 MB.</p>
2194      */
CdcMinFileSizeHasBeenSet()2195     inline bool CdcMinFileSizeHasBeenSet() const { return m_cdcMinFileSizeHasBeenSet; }
2196 
2197     /**
2198      * <p>Minimum file size, defined in megabytes, to reach for a file output to Amazon
2199      * S3.</p> <p>When <code>CdcMinFileSize</code> and <code>CdcMaxBatchInterval</code>
2200      * are both specified, the file write is triggered by whichever parameter condition
2201      * is met first within a DMS CloudFormation template.</p> <p>The default value is
2202      * 32 MB.</p>
2203      */
SetCdcMinFileSize(int value)2204     inline void SetCdcMinFileSize(int value) { m_cdcMinFileSizeHasBeenSet = true; m_cdcMinFileSize = value; }
2205 
2206     /**
2207      * <p>Minimum file size, defined in megabytes, to reach for a file output to Amazon
2208      * S3.</p> <p>When <code>CdcMinFileSize</code> and <code>CdcMaxBatchInterval</code>
2209      * are both specified, the file write is triggered by whichever parameter condition
2210      * is met first within a DMS CloudFormation template.</p> <p>The default value is
2211      * 32 MB.</p>
2212      */
WithCdcMinFileSize(int value)2213     inline S3Settings& WithCdcMinFileSize(int value) { SetCdcMinFileSize(value); return *this;}
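
    /*
     * Editorial usage sketch: with the hypothetical values below, a CDC file is
     * written to S3 as soon as either 120 seconds have elapsed or the batch
     * reaches 64 MB, whichever condition is met first.
     *
     *   S3Settings s3;
     *   s3.WithCdcMaxBatchInterval(120)  // seconds
     *     .WithCdcMinFileSize(64);       // megabytes
     */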
2214 
2215 
2216     /**
2217      * <p>An optional parameter that specifies how DMS treats null values. While
2218      * handling the null value, you can use this parameter to pass a user-defined
2219      * string as null when writing to the target. For example, when target columns are
2220      * not nullable, you can use this option to differentiate between the empty string
2221      * value and the null value. So, if you set this parameter value to the empty
2222      * string ("" or ''), DMS treats the empty string as the null value instead of
2223      * <code>NULL</code>.</p> <p>The default value is <code>NULL</code>. Valid values
2224      * include any valid string.</p>
2225      */
GetCsvNullValue()2226     inline const Aws::String& GetCsvNullValue() const{ return m_csvNullValue; }
2227 
2228     /**
2229      * <p>An optional parameter that specifies how DMS treats null values. While
2230      * handling the null value, you can use this parameter to pass a user-defined
2231      * string as null when writing to the target. For example, when target columns are
2232      * not nullable, you can use this option to differentiate between the empty string
2233      * value and the null value. So, if you set this parameter value to the empty
2234      * string ("" or ''), DMS treats the empty string as the null value instead of
2235      * <code>NULL</code>.</p> <p>The default value is <code>NULL</code>. Valid values
2236      * include any valid string.</p>
2237      */
CsvNullValueHasBeenSet()2238     inline bool CsvNullValueHasBeenSet() const { return m_csvNullValueHasBeenSet; }
2239 
2240     /**
2241      * <p>An optional parameter that specifies how DMS treats null values. While
2242      * handling the null value, you can use this parameter to pass a user-defined
2243      * string as null when writing to the target. For example, when target columns are
2244      * not nullable, you can use this option to differentiate between the empty string
2245      * value and the null value. So, if you set this parameter value to the empty
2246      * string ("" or ''), DMS treats the empty string as the null value instead of
2247      * <code>NULL</code>.</p> <p>The default value is <code>NULL</code>. Valid values
2248      * include any valid string.</p>
2249      */
SetCsvNullValue(const Aws::String & value)2250     inline void SetCsvNullValue(const Aws::String& value) { m_csvNullValueHasBeenSet = true; m_csvNullValue = value; }
2251 
2252     /**
2253      * <p>An optional parameter that specifies how DMS treats null values. While
2254      * handling the null value, you can use this parameter to pass a user-defined
2255      * string as null when writing to the target. For example, when target columns are
2256      * not nullable, you can use this option to differentiate between the empty string
2257      * value and the null value. So, if you set this parameter value to the empty
2258      * string ("" or ''), DMS treats the empty string as the null value instead of
2259      * <code>NULL</code>.</p> <p>The default value is <code>NULL</code>. Valid values
2260      * include any valid string.</p>
2261      */
SetCsvNullValue(Aws::String && value)2262     inline void SetCsvNullValue(Aws::String&& value) { m_csvNullValueHasBeenSet = true; m_csvNullValue = std::move(value); }
2263 
2264     /**
2265      * <p>An optional parameter that specifies how DMS treats null values. While
2266      * handling the null value, you can use this parameter to pass a user-defined
2267      * string as null when writing to the target. For example, when target columns are
2268      * not nullable, you can use this option to differentiate between the empty string
2269      * value and the null value. So, if you set this parameter value to the empty
2270      * string ("" or ''), DMS treats the empty string as the null value instead of
2271      * <code>NULL</code>.</p> <p>The default value is <code>NULL</code>. Valid values
2272      * include any valid string.</p>
2273      */
SetCsvNullValue(const char * value)2274     inline void SetCsvNullValue(const char* value) { m_csvNullValueHasBeenSet = true; m_csvNullValue.assign(value); }
2275 
2276     /**
2277      * <p>An optional parameter that specifies how DMS treats null values. While
2278      * handling the null value, you can use this parameter to pass a user-defined
2279      * string as null when writing to the target. For example, when target columns are
2280      * not nullable, you can use this option to differentiate between the empty string
2281      * value and the null value. So, if you set this parameter value to the empty
2282      * string ("" or ''), DMS treats the empty string as the null value instead of
2283      * <code>NULL</code>.</p> <p>The default value is <code>NULL</code>. Valid values
2284      * include any valid string.</p>
2285      */
WithCsvNullValue(const Aws::String & value)2286     inline S3Settings& WithCsvNullValue(const Aws::String& value) { SetCsvNullValue(value); return *this;}
2287 
2288     /**
2289      * <p>An optional parameter that specifies how DMS treats null values. While
2290      * handling the null value, you can use this parameter to pass a user-defined
2291      * string as null when writing to the target. For example, when target columns are
2292      * not nullable, you can use this option to differentiate between the empty string
2293      * value and the null value. So, if you set this parameter value to the empty
2294      * string ("" or ''), DMS treats the empty string as the null value instead of
2295      * <code>NULL</code>.</p> <p>The default value is <code>NULL</code>. Valid values
2296      * include any valid string.</p>
2297      */
WithCsvNullValue(Aws::String && value)2298     inline S3Settings& WithCsvNullValue(Aws::String&& value) { SetCsvNullValue(std::move(value)); return *this;}
2299 
2300     /**
2301      * <p>An optional parameter that specifies how DMS treats null values. While
2302      * handling the null value, you can use this parameter to pass a user-defined
2303      * string as null when writing to the target. For example, when target columns are
2304      * not nullable, you can use this option to differentiate between the empty string
2305      * value and the null value. So, if you set this parameter value to the empty
2306      * string ("" or ''), DMS treats the empty string as the null value instead of
2307      * <code>NULL</code>.</p> <p>The default value is <code>NULL</code>. Valid values
2308      * include any valid string.</p>
2309      */
WithCsvNullValue(const char * value)2310     inline S3Settings& WithCsvNullValue(const char* value) { SetCsvNullValue(value); return *this;}
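
    /*
     * Editorial usage sketch: passing the empty string as the null marker so
     * that, per the description above, DMS treats the empty string rather than
     * the literal NULL as the null value in the .csv output.
     *
     *   S3Settings s3;
     *   s3.WithCsvNullValue("");
     */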
2311 
2312 
2313     /**
2314      * <p>When this value is set to 1, DMS ignores the first row header in a .csv file.
2315      * A value of 1 turns on the feature; a value of 0 turns off the feature.</p>
2316      * <p>The default is 0.</p>
2317      */
GetIgnoreHeaderRows()2318     inline int GetIgnoreHeaderRows() const{ return m_ignoreHeaderRows; }
2319 
2320     /**
2321      * <p>When this value is set to 1, DMS ignores the first row header in a .csv file.
2322      * A value of 1 turns on the feature; a value of 0 turns off the feature.</p>
2323      * <p>The default is 0.</p>
2324      */
IgnoreHeaderRowsHasBeenSet()2325     inline bool IgnoreHeaderRowsHasBeenSet() const { return m_ignoreHeaderRowsHasBeenSet; }
2326 
2327     /**
2328      * <p>When this value is set to 1, DMS ignores the first row header in a .csv file.
2329      * A value of 1 turns on the feature; a value of 0 turns off the feature.</p>
2330      * <p>The default is 0.</p>
2331      */
SetIgnoreHeaderRows(int value)2332     inline void SetIgnoreHeaderRows(int value) { m_ignoreHeaderRowsHasBeenSet = true; m_ignoreHeaderRows = value; }
2333 
2334     /**
2335      * <p>When this value is set to 1, DMS ignores the first row header in a .csv file.
2336      * A value of 1 turns on the feature; a value of 0 turns off the feature.</p>
2337      * <p>The default is 0.</p>
2338      */
WithIgnoreHeaderRows(int value)2339     inline S3Settings& WithIgnoreHeaderRows(int value) { SetIgnoreHeaderRows(value); return *this;}
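
    /*
     * Editorial usage sketch: skipping the header row of .csv files read from
     * an S3 source (1 turns the behavior on, 0 turns it off).
     *
     *   S3Settings s3;
     *   s3.WithIgnoreHeaderRows(1);
     */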
2340 
2341 
2342     /**
2343      * <p>A value that specifies the maximum size (in KB) of any .csv file to be
2344      * created while migrating to an S3 target during full load.</p> <p>The default
2345      * value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.</p>
2346      */
GetMaxFileSize()2347     inline int GetMaxFileSize() const{ return m_maxFileSize; }
2348 
2349     /**
2350      * <p>A value that specifies the maximum size (in KB) of any .csv file to be
2351      * created while migrating to an S3 target during full load.</p> <p>The default
2352      * value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.</p>
2353      */
MaxFileSizeHasBeenSet()2354     inline bool MaxFileSizeHasBeenSet() const { return m_maxFileSizeHasBeenSet; }
2355 
2356     /**
2357      * <p>A value that specifies the maximum size (in KB) of any .csv file to be
2358      * created while migrating to an S3 target during full load.</p> <p>The default
2359      * value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.</p>
2360      */
SetMaxFileSize(int value)2361     inline void SetMaxFileSize(int value) { m_maxFileSizeHasBeenSet = true; m_maxFileSize = value; }
2362 
2363     /**
2364      * <p>A value that specifies the maximum size (in KB) of any .csv file to be
2365      * created while migrating to an S3 target during full load.</p> <p>The default
2366      * value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.</p>
2367      */
WithMaxFileSize(int value)2368     inline S3Settings& WithMaxFileSize(int value) { SetMaxFileSize(value); return *this;}
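
    /*
     * Editorial usage sketch: capping full-load .csv files at roughly 100 MB.
     * The value is expressed in KB, so 102400 is a hypothetical choice within
     * the documented range of 1 to 1,048,576.
     *
     *   S3Settings s3;
     *   s3.WithMaxFileSize(102400);
     */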
2369 
2370 
2371     /**
2372      * <p>For an S3 source, when this value is set to <code>true</code> or
2373      * <code>y</code>, each leading double quotation mark has to be followed by an
2374      * ending double quotation mark. This formatting complies with RFC 4180. When this
2375      * value is set to <code>false</code> or <code>n</code>, string literals are copied
2376      * to the target as is. In this case, a delimiter (row or column) signals the end
2377      * of the field. Thus, you can't use a delimiter as part of the string, because it
2378      * signals the end of the value.</p> <p>For an S3 target, an optional parameter
2379      * used to set behavior to comply with RFC 4180 for data migrated to Amazon S3
2380      * using .csv file format only. When this value is set to <code>true</code> or
2381      * <code>y</code> using Amazon S3 as a target, if the data has quotation marks or
2382      * newline characters in it, DMS encloses the entire column with an additional pair
2383      * of double quotation marks ("). Every quotation mark within the data is repeated
2384      * twice.</p> <p>The default value is <code>true</code>. Valid values include
2385      * <code>true</code>, <code>false</code>, <code>y</code>, and <code>n</code>.</p>
2386      */
GetRfc4180()2387     inline bool GetRfc4180() const{ return m_rfc4180; }
2388 
2389     /**
2390      * <p>For an S3 source, when this value is set to <code>true</code> or
2391      * <code>y</code>, each leading double quotation mark has to be followed by an
2392      * ending double quotation mark. This formatting complies with RFC 4180. When this
2393      * value is set to <code>false</code> or <code>n</code>, string literals are copied
2394      * to the target as is. In this case, a delimiter (row or column) signals the end
2395      * of the field. Thus, you can't use a delimiter as part of the string, because it
2396      * signals the end of the value.</p> <p>For an S3 target, an optional parameter
2397      * used to set behavior to comply with RFC 4180 for data migrated to Amazon S3
2398      * using .csv file format only. When this value is set to <code>true</code> or
2399      * <code>y</code> using Amazon S3 as a target, if the data has quotation marks or
2400      * newline characters in it, DMS encloses the entire column with an additional pair
2401      * of double quotation marks ("). Every quotation mark within the data is repeated
2402      * twice.</p> <p>The default value is <code>true</code>. Valid values include
2403      * <code>true</code>, <code>false</code>, <code>y</code>, and <code>n</code>.</p>
2404      */
Rfc4180HasBeenSet()2405     inline bool Rfc4180HasBeenSet() const { return m_rfc4180HasBeenSet; }
2406 
2407     /**
2408      * <p>For an S3 source, when this value is set to <code>true</code> or
2409      * <code>y</code>, each leading double quotation mark has to be followed by an
2410      * ending double quotation mark. This formatting complies with RFC 4180. When this
2411      * value is set to <code>false</code> or <code>n</code>, string literals are copied
2412      * to the target as is. In this case, a delimiter (row or column) signals the end
2413      * of the field. Thus, you can't use a delimiter as part of the string, because it
2414      * signals the end of the value.</p> <p>For an S3 target, an optional parameter
2415      * used to set behavior to comply with RFC 4180 for data migrated to Amazon S3
2416      * using .csv file format only. When this value is set to <code>true</code> or
2417      * <code>y</code> using Amazon S3 as a target, if the data has quotation marks or
2418      * newline characters in it, DMS encloses the entire column with an additional pair
2419      * of double quotation marks ("). Every quotation mark within the data is repeated
2420      * twice.</p> <p>The default value is <code>true</code>. Valid values include
2421      * <code>true</code>, <code>false</code>, <code>y</code>, and <code>n</code>.</p>
2422      */
SetRfc4180(bool value)2423     inline void SetRfc4180(bool value) { m_rfc4180HasBeenSet = true; m_rfc4180 = value; }
2424 
2425     /**
2426      * <p>For an S3 source, when this value is set to <code>true</code> or
2427      * <code>y</code>, each leading double quotation mark has to be followed by an
2428      * ending double quotation mark. This formatting complies with RFC 4180. When this
2429      * value is set to <code>false</code> or <code>n</code>, string literals are copied
2430      * to the target as is. In this case, a delimiter (row or column) signals the end
2431      * of the field. Thus, you can't use a delimiter as part of the string, because it
2432      * signals the end of the value.</p> <p>For an S3 target, an optional parameter
2433      * used to set behavior to comply with RFC 4180 for data migrated to Amazon S3
2434      * using .csv file format only. When this value is set to <code>true</code> or
2435      * <code>y</code> using Amazon S3 as a target, if the data has quotation marks or
2436      * newline characters in it, DMS encloses the entire column with an additional pair
2437      * of double quotation marks ("). Every quotation mark within the data is repeated
2438      * twice.</p> <p>The default value is <code>true</code>. Valid values include
2439      * <code>true</code>, <code>false</code>, <code>y</code>, and <code>n</code>.</p>
2440      */
WithRfc4180(bool value)2441     inline S3Settings& WithRfc4180(bool value) { SetRfc4180(value); return *this;}
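
    /*
     * Editorial usage sketch: turning off RFC 4180 quoting for an S3 target so
     * that string literals are copied as is; in that mode a row or column
     * delimiter inside the data ends the field, as described above.
     *
     *   S3Settings s3;
     *   s3.WithRfc4180(false);
     */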
2442 
2443   private:
2444 
2445     Aws::String m_serviceAccessRoleArn;
2446     bool m_serviceAccessRoleArnHasBeenSet;
2447 
2448     Aws::String m_externalTableDefinition;
2449     bool m_externalTableDefinitionHasBeenSet;
2450 
2451     Aws::String m_csvRowDelimiter;
2452     bool m_csvRowDelimiterHasBeenSet;
2453 
2454     Aws::String m_csvDelimiter;
2455     bool m_csvDelimiterHasBeenSet;
2456 
2457     Aws::String m_bucketFolder;
2458     bool m_bucketFolderHasBeenSet;
2459 
2460     Aws::String m_bucketName;
2461     bool m_bucketNameHasBeenSet;
2462 
2463     CompressionTypeValue m_compressionType;
2464     bool m_compressionTypeHasBeenSet;
2465 
2466     EncryptionModeValue m_encryptionMode;
2467     bool m_encryptionModeHasBeenSet;
2468 
2469     Aws::String m_serverSideEncryptionKmsKeyId;
2470     bool m_serverSideEncryptionKmsKeyIdHasBeenSet;
2471 
2472     DataFormatValue m_dataFormat;
2473     bool m_dataFormatHasBeenSet;
2474 
2475     EncodingTypeValue m_encodingType;
2476     bool m_encodingTypeHasBeenSet;
2477 
2478     int m_dictPageSizeLimit;
2479     bool m_dictPageSizeLimitHasBeenSet;
2480 
2481     int m_rowGroupLength;
2482     bool m_rowGroupLengthHasBeenSet;
2483 
2484     int m_dataPageSize;
2485     bool m_dataPageSizeHasBeenSet;
2486 
2487     ParquetVersionValue m_parquetVersion;
2488     bool m_parquetVersionHasBeenSet;
2489 
2490     bool m_enableStatistics;
2491     bool m_enableStatisticsHasBeenSet;
2492 
2493     bool m_includeOpForFullLoad;
2494     bool m_includeOpForFullLoadHasBeenSet;
2495 
2496     bool m_cdcInsertsOnly;
2497     bool m_cdcInsertsOnlyHasBeenSet;
2498 
2499     Aws::String m_timestampColumnName;
2500     bool m_timestampColumnNameHasBeenSet;
2501 
2502     bool m_parquetTimestampInMillisecond;
2503     bool m_parquetTimestampInMillisecondHasBeenSet;
2504 
2505     bool m_cdcInsertsAndUpdates;
2506     bool m_cdcInsertsAndUpdatesHasBeenSet;
2507 
2508     bool m_datePartitionEnabled;
2509     bool m_datePartitionEnabledHasBeenSet;
2510 
2511     DatePartitionSequenceValue m_datePartitionSequence;
2512     bool m_datePartitionSequenceHasBeenSet;
2513 
2514     DatePartitionDelimiterValue m_datePartitionDelimiter;
2515     bool m_datePartitionDelimiterHasBeenSet;
2516 
2517     bool m_useCsvNoSupValue;
2518     bool m_useCsvNoSupValueHasBeenSet;
2519 
2520     Aws::String m_csvNoSupValue;
2521     bool m_csvNoSupValueHasBeenSet;
2522 
2523     bool m_preserveTransactions;
2524     bool m_preserveTransactionsHasBeenSet;
2525 
2526     Aws::String m_cdcPath;
2527     bool m_cdcPathHasBeenSet;
2528 
2529     CannedAclForObjectsValue m_cannedAclForObjects;
2530     bool m_cannedAclForObjectsHasBeenSet;
2531 
2532     bool m_addColumnName;
2533     bool m_addColumnNameHasBeenSet;
2534 
2535     int m_cdcMaxBatchInterval;
2536     bool m_cdcMaxBatchIntervalHasBeenSet;
2537 
2538     int m_cdcMinFileSize;
2539     bool m_cdcMinFileSizeHasBeenSet;
2540 
2541     Aws::String m_csvNullValue;
2542     bool m_csvNullValueHasBeenSet;
2543 
2544     int m_ignoreHeaderRows;
2545     bool m_ignoreHeaderRowsHasBeenSet;
2546 
2547     int m_maxFileSize;
2548     bool m_maxFileSizeHasBeenSet;
2549 
2550     bool m_rfc4180;
2551     bool m_rfc4180HasBeenSet;
2552   };
2553 
2554 } // namespace Model
2555 } // namespace DatabaseMigrationService
2556 } // namespace Aws
2557