/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/dms/model/CompressionTypeValue.h>
#include <utility>

/**
 * Settings for exporting data to Amazon S3.
 *
 * See Also:
 *   AWS API Reference
 */
  /**
   * The Amazon Resource Name (ARN) used by the service access IAM role. It is a
   * required parameter that enables DMS to write and read objects from an S3
   * bucket.
   */
  inline const Aws::String& GetServiceAccessRoleArn() const{ return m_serviceAccessRoleArn; }
  inline bool ServiceAccessRoleArnHasBeenSet() const { return m_serviceAccessRoleArnHasBeenSet; }
  inline void SetServiceAccessRoleArn(const Aws::String& value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn = value; }
  inline void SetServiceAccessRoleArn(Aws::String&& value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn = std::move(value); }
  inline void SetServiceAccessRoleArn(const char* value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn.assign(value); }
  inline S3Settings& WithServiceAccessRoleArn(const Aws::String& value) { SetServiceAccessRoleArn(value); return *this;}
  inline S3Settings& WithServiceAccessRoleArn(Aws::String&& value) { SetServiceAccessRoleArn(std::move(value)); return *this;}
  inline S3Settings& WithServiceAccessRoleArn(const char* value) { SetServiceAccessRoleArn(value); return *this;}
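  /**
   * Illustrative usage sketch (not part of the generated API): configuring the
   * service access role with the fluent setter above. The role ARN and the
   * Aws::DatabaseMigrationService::Model namespace shown here are assumptions
   * for the example only.
   *
   * @code
   *   Aws::DatabaseMigrationService::Model::S3Settings s3Settings;
   *   s3Settings.WithServiceAccessRoleArn("arn:aws:iam::123456789012:role/dms-s3-access-role");  // placeholder ARN
   * @endcode
   */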
  /**
   * Specifies how tables are defined in the S3 source files only.
   */
  inline const Aws::String& GetExternalTableDefinition() const{ return m_externalTableDefinition; }
  inline bool ExternalTableDefinitionHasBeenSet() const { return m_externalTableDefinitionHasBeenSet; }
  inline void SetExternalTableDefinition(const Aws::String& value) { m_externalTableDefinitionHasBeenSet = true; m_externalTableDefinition = value; }
  inline void SetExternalTableDefinition(Aws::String&& value) { m_externalTableDefinitionHasBeenSet = true; m_externalTableDefinition = std::move(value); }
  inline void SetExternalTableDefinition(const char* value) { m_externalTableDefinitionHasBeenSet = true; m_externalTableDefinition.assign(value); }
  inline S3Settings& WithExternalTableDefinition(const Aws::String& value) { SetExternalTableDefinition(value); return *this;}
  inline S3Settings& WithExternalTableDefinition(Aws::String&& value) { SetExternalTableDefinition(std::move(value)); return *this;}
  inline S3Settings& WithExternalTableDefinition(const char* value) { SetExternalTableDefinition(value); return *this;}

  /**
   * The delimiter used to separate rows in the .csv file for both source and
   * target. The default is a carriage return (\n).
   */
  /**
   * The delimiter used to separate columns in the .csv file for both source and
   * target. The default is a comma.
   */
  inline const Aws::String& GetCsvDelimiter() const{ return m_csvDelimiter; }
  inline bool CsvDelimiterHasBeenSet() const { return m_csvDelimiterHasBeenSet; }
  inline void SetCsvDelimiter(const Aws::String& value) { m_csvDelimiterHasBeenSet = true; m_csvDelimiter = value; }
  inline void SetCsvDelimiter(Aws::String&& value) { m_csvDelimiterHasBeenSet = true; m_csvDelimiter = std::move(value); }
  inline void SetCsvDelimiter(const char* value) { m_csvDelimiterHasBeenSet = true; m_csvDelimiter.assign(value); }
  inline S3Settings& WithCsvDelimiter(const Aws::String& value) { SetCsvDelimiter(value); return *this;}
  inline S3Settings& WithCsvDelimiter(Aws::String&& value) { SetCsvDelimiter(std::move(value)); return *this;}
  inline S3Settings& WithCsvDelimiter(const char* value) { SetCsvDelimiter(value); return *this;}
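  /**
   * Illustrative sketch: overriding the default column delimiter for .csv output.
   * Only the CsvDelimiter accessors appear in this excerpt; the row delimiter
   * described above would be set through the corresponding generated
   * CsvRowDelimiter accessors (an assumption here), with "\n" remaining the
   * default.
   *
   * @code
   *   s3Settings.WithCsvDelimiter("|");  // pipe-separated columns instead of commas
   * @endcode
   */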
  /**
   * An optional parameter to set a folder name in the S3 bucket. If provided,
   * tables are created in the path bucketFolder/schema_name/table_name/. If this
   * parameter isn't specified, then the path used is schema_name/table_name/.
   */
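  /**
   * Illustrative sketch of how BucketFolder affects the object path. Assuming the
   * generated WithBucketFolder accessor (not shown in this excerpt) and example
   * names, a table "employees" in schema "hr" would be written under:
   *
   *   bucketFolder/schema_name/table_name/  ->  migration-data/hr/employees/
   *
   * @code
   *   s3Settings.WithBucketName("my-dms-target-bucket")   // hypothetical bucket name
   *             .WithBucketFolder("migration-data");      // assumed generated accessor
   * @endcode
   */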
  /**
   * The name of the S3 bucket.
   */
  inline const Aws::String& GetBucketName() const{ return m_bucketName; }
  inline bool BucketNameHasBeenSet() const { return m_bucketNameHasBeenSet; }
  inline void SetBucketName(const Aws::String& value) { m_bucketNameHasBeenSet = true; m_bucketName = value; }
  inline void SetBucketName(Aws::String&& value) { m_bucketNameHasBeenSet = true; m_bucketName = std::move(value); }
  inline void SetBucketName(const char* value) { m_bucketNameHasBeenSet = true; m_bucketName.assign(value); }
  inline S3Settings& WithBucketName(const Aws::String& value) { SetBucketName(value); return *this;}
  inline S3Settings& WithBucketName(Aws::String&& value) { SetBucketName(std::move(value)); return *this;}
  inline S3Settings& WithBucketName(const char* value) { SetBucketName(value); return *this;}
  /**
   * An optional parameter to use GZIP to compress the target files. Set to GZIP to
   * compress the target files. Either set this parameter to NONE (the default) or
   * don't use it to leave the files uncompressed. This parameter applies to both
   * .csv and .parquet file formats.
   */
  inline const CompressionTypeValue& GetCompressionType() const{ return m_compressionType; }
  inline bool CompressionTypeHasBeenSet() const { return m_compressionTypeHasBeenSet; }
  inline void SetCompressionType(const CompressionTypeValue& value) { m_compressionTypeHasBeenSet = true; m_compressionType = value; }
  inline void SetCompressionType(CompressionTypeValue&& value) { m_compressionTypeHasBeenSet = true; m_compressionType = std::move(value); }
  inline S3Settings& WithCompressionType(const CompressionTypeValue& value) { SetCompressionType(value); return *this;}
  inline S3Settings& WithCompressionType(CompressionTypeValue&& value) { SetCompressionType(std::move(value)); return *this;}
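  /**
   * Illustrative sketch: enabling GZIP compression for the target files using the
   * CompressionTypeValue enumeration included above. The GZIP enumerator name is
   * assumed to match the API value.
   *
   * @code
   *   s3Settings.WithCompressionType(Aws::DatabaseMigrationService::Model::CompressionTypeValue::GZIP);
   * @endcode
   */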
  /**
   * The type of server-side encryption that you want to use for your data. This
   * encryption type is part of the endpoint settings or the extra connections
   * attributes for Amazon S3. You can choose either SSE_S3 (the default) or
   * SSE_KMS.
   *
   * For the ModifyEndpoint operation, you can change the existing value of the
   * EncryptionMode parameter from SSE_KMS to SSE_S3. But you can't change the
   * existing value from SSE_S3 to SSE_KMS.
   *
   * To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with
   * permission to allow "arn:aws:s3:::dms-*" to use the following actions:
   *
   *   - s3:CreateBucket
   *   - s3:ListBucket
   *   - s3:DeleteBucket
   *   - s3:GetBucketLocation
   *   - s3:GetObject
   *   - s3:PutObject
   *   - s3:DeleteObject
   *   - s3:GetObjectVersion
   *   - s3:GetBucketPolicy
   *   - s3:PutBucketPolicy
   *   - s3:DeleteBucketPolicy
   */
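  /**
   * Illustrative sketch: selecting SSE_KMS server-side encryption. The
   * EncryptionModeValue enumeration and WithEncryptionMode accessor are assumed
   * from the pattern of the other enum-typed settings; they are not shown in this
   * excerpt.
   *
   * @code
   *   s3Settings.WithEncryptionMode(Aws::DatabaseMigrationService::Model::EncryptionModeValue::SSE_KMS);
   * @endcode
   */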
  /**
   * If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID.
   * The key that you use needs an attached policy that enables AWS Identity and
   * Access Management (IAM) user permissions and allows use of the key.
   *
   * Here is a CLI example:
   *
   *   aws dms create-endpoint --endpoint-identifier value --endpoint-type target
   *   --engine-name s3 --s3-settings
   *   ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
   */
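  /**
   * Illustrative C++ counterpart to the CLI example above (a sketch, not verified
   * against a specific SDK version): creating an S3 target endpoint that uses
   * SSE_KMS encryption. CreateEndpointRequest, WithEndpointType, and the
   * ReplicationEndpointTypeValue enumeration are assumptions based on the DMS API
   * shape; all identifier, role, bucket, and key values are placeholders.
   *
   * @code
   *   Aws::DatabaseMigrationService::Model::CreateEndpointRequest request;
   *   request.WithEndpointIdentifier("s3-target-endpoint")
   *          .WithEndpointType(Aws::DatabaseMigrationService::Model::ReplicationEndpointTypeValue::target)
   *          .WithEngineName("s3")
   *          .WithS3Settings(s3Settings);  // s3Settings carries the role ARN, bucket, EncryptionMode, and KMS key ID
   * @endcode
   */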
  /**
   * The format of the data that you want to use for output. You can choose one of
   * the following:
   *
   *   - csv: This is a row-based file format with comma-separated values (.csv).
   *   - parquet: Apache Parquet (.parquet) is a columnar storage file format that
   *     features efficient compression and provides faster query response.
   */
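  /**
   * Illustrative sketch: choosing Parquet output. The DataFormatValue enumeration
   * and WithDataFormat accessor are assumed to follow the same generated pattern
   * as CompressionType, with enumerator names mirroring the API values.
   *
   * @code
   *   s3Settings.WithDataFormat(Aws::DatabaseMigrationService::Model::DataFormatValue::parquet);
   * @endcode
   */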
  /**
   * The type of encoding you are using:
   *
   *   - RLE_DICTIONARY uses a combination of bit-packing and run-length encoding
   *     to store repeated values more efficiently. This is the default.
   *   - PLAIN doesn't use encoding at all. Values are stored as they are.
   *   - PLAIN_DICTIONARY builds a dictionary of the values encountered in a given
   *     column. The dictionary is stored in a dictionary page for each column
   *     chunk.
   */
  /**
   * The maximum size of an encoded dictionary page of a column. If the dictionary
   * page exceeds this, this column is stored using an encoding type of PLAIN.
   * This parameter defaults to 1024 * 1024 bytes (1 MiB), the maximum size of a
   * dictionary page before it reverts to PLAIN encoding. This size is used for
   * .parquet file format only.
   */
  /**
   * The number of rows in a row group. A smaller row group size provides faster
   * reads, but the more row groups there are, the slower writes become. This
   * parameter defaults to 10,000 rows. This number is used for .parquet file
   * format only.
   *
   * If you choose a value larger than the maximum, RowGroupLength is set to the
   * max row group length in bytes (64 * 1024 * 1024).
   */
  /**
   * The size of one data page in bytes. This parameter defaults to 1024 * 1024
   * bytes (1 MiB). This number is used for .parquet file format only.
   */
  inline int GetDataPageSize() const{ return m_dataPageSize; }
  inline bool DataPageSizeHasBeenSet() const { return m_dataPageSizeHasBeenSet; }
  inline void SetDataPageSize(int value) { m_dataPageSizeHasBeenSet = true; m_dataPageSize = value; }
  inline S3Settings& WithDataPageSize(int value) { SetDataPageSize(value); return *this;}
  /**
   * The version of the Apache Parquet format that you want to use: parquet_1_0
   * (the default) or parquet_2_0.
   */
  /**
   * A value that enables statistics for Parquet pages and row groups. Choose true
   * to enable statistics, false to disable. Statistics include NULL, DISTINCT,
   * MAX, and MIN values. This parameter defaults to true. This value is used for
   * .parquet file format only.
   */
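  /**
   * Illustrative sketch combining the Parquet tuning settings described above.
   * WithDataPageSize is shown in this excerpt; the other setters and the
   * ParquetVersionValue enumerator are assumed from the same generated pattern.
   * The values simply restate the documented defaults.
   *
   * @code
   *   s3Settings.WithDataPageSize(1024 * 1024)        // 1 MiB data pages
   *             .WithDictPageSizeLimit(1024 * 1024)   // revert to PLAIN above 1 MiB (assumed accessor)
   *             .WithRowGroupLength(10000)            // rows per row group (assumed accessor)
   *             .WithEnableStatistics(true)           // keep page/row-group statistics (assumed accessor)
   *             .WithParquetVersion(Aws::DatabaseMigrationService::Model::ParquetVersionValue::parquet_1_0);
   * @endcode
   */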
  /**
   * A value that enables a full load to write INSERT operations to the
   * comma-separated value (.csv) output files only to indicate how the rows were
   * added to the source database.
   *
   * AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and
   * later.
   *
   * For full load, records can only be inserted. By default (the false setting),
   * no information is recorded in these output files for a full load to indicate
   * that the rows were inserted at the source database. If IncludeOpForFullLoad
   * is set to true or y, the INSERT is recorded as an I annotation in the first
   * field of the .csv file. This allows the format of your target records from a
   * full load to be consistent with the target records from a CDC load.
   *
   * This setting works together with the CdcInsertsOnly and the
   * CdcInsertsAndUpdates parameters for output to .csv files only. For more
   * information about how these settings work together, see Indicating Source DB
   * Operations in Migrated S3 Data in the AWS Database Migration Service User
   * Guide.
   */
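  /**
   * Illustrative sketch of the effect of IncludeOpForFullLoad on .csv output,
   * assuming the generated WithIncludeOpForFullLoad accessor (not shown in this
   * excerpt) and hypothetical row data:
   *
   *   IncludeOpForFullLoad = false:   101,Smith,2014-06-09
   *   IncludeOpForFullLoad = true:    I,101,Smith,2014-06-09
   *
   * @code
   *   s3Settings.WithIncludeOpForFullLoad(true);  // annotate full-load rows with "I"
   * @endcode
   */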
  /**
   * A value that enables a change data capture (CDC) load to write only INSERT
   * operations to .csv or columnar storage (.parquet) output files. By default
   * (the false setting), the first field in a .csv or .parquet record contains
   * the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate
   * whether the row was inserted, updated, or deleted at the source database for
   * a CDC load to the target.
   *
   * If CdcInsertsOnly is set to true or y, only INSERTs from the source database
   * are migrated to the .csv or .parquet file. For .csv format only, how these
   * INSERTs are recorded depends on the value of IncludeOpForFullLoad. If
   * IncludeOpForFullLoad is set to true, the first field of every CDC record is
   * set to I to indicate the INSERT operation at the source. If
   * IncludeOpForFullLoad is set to false, every CDC record is written without a
   * first field to indicate the INSERT operation at the source. For more
   * information about how these settings work together, see Indicating Source DB
   * Operations in Migrated S3 Data in the AWS Database Migration Service User
   * Guide.
   *
   * AWS DMS supports this interaction between the CdcInsertsOnly and
   * IncludeOpForFullLoad parameters in versions 3.1.4 and later.
   *
   * CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the
   * same endpoint. Set either CdcInsertsOnly or CdcInsertsAndUpdates to true for
   * the same endpoint, but not both.
   */
  /**
   * A value that, when nonblank, causes AWS DMS to add a column with timestamp
   * information to the endpoint data for an Amazon S3 target.
   *
   * AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and
   * later.
   *
   * DMS includes an additional STRING column in the .csv or .parquet object files
   * of your migrated data when you set TimestampColumnName to a nonblank value.
   *
   * For a full load, each row of this timestamp column contains a timestamp for
   * when the data was transferred from the source to the target by DMS.
   *
   * For a change data capture (CDC) load, each row of the timestamp column
   * contains the timestamp for the commit of that row in the source database.
   *
   * The string format for this timestamp column value is
   * yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of this value is in
   * microseconds. For a CDC load, the rounding of the precision depends on the
   * commit timestamp supported by DMS for the source database.
   *
   * When the AddColumnName parameter is set to true, DMS also includes a name for
   * the timestamp column that you set with TimestampColumnName.
   */
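  /**
   * Illustrative sketch: adding a timestamp column named "dms_commit_ts" to the
   * target objects, assuming the generated WithTimestampColumnName accessor (not
   * shown in this excerpt). Values follow the documented
   * yyyy-MM-dd HH:mm:ss.SSSSSS format, for example 2024-01-15 10:32:45.123456
   * (hypothetical value).
   *
   * @code
   *   s3Settings.WithTimestampColumnName("dms_commit_ts");  // column name is a placeholder
   * @endcode
   */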
  /**
   * A value that specifies the precision of any TIMESTAMP column values that are
   * written to an Amazon S3 object file in .parquet format.
   *
   * AWS DMS supports the ParquetTimestampInMillisecond parameter in versions
   * 3.1.4 and later.
   *
   * When ParquetTimestampInMillisecond is set to true or y, AWS DMS writes all
   * TIMESTAMP columns in a .parquet formatted file with millisecond precision.
   * Otherwise, DMS writes them with microsecond precision.
   *
   * Currently, Amazon Athena and AWS Glue can handle only millisecond precision
   * for TIMESTAMP values. Set this parameter to true for S3 endpoint object files
   * that are .parquet formatted only if you plan to query or process the data
   * with Athena or AWS Glue.
   *
   * AWS DMS writes any TIMESTAMP column values written to an S3 file in .csv
   * format with microsecond precision.
   *
   * Setting ParquetTimestampInMillisecond has no effect on the string format of
   * the timestamp column value that is inserted by setting the
   * TimestampColumnName parameter.
   */
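  /**
   * Illustrative sketch: switching TIMESTAMP precision to milliseconds for
   * .parquet output that will be queried with Athena or AWS Glue, assuming the
   * generated WithParquetTimestampInMillisecond accessor (not shown in this
   * excerpt).
   *
   * @code
   *   s3Settings.WithParquetTimestampInMillisecond(true);
   * @endcode
   */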
  /**
   * A value that enables a change data capture (CDC) load to write INSERT and
   * UPDATE operations to .csv or .parquet (columnar storage) output files. The
   * default setting is false, but when CdcInsertsAndUpdates is set to true or y,
   * only INSERTs and UPDATEs from the source database are migrated to the .csv
   * or .parquet file.
   *
   * For .csv file format only, how these INSERTs and UPDATEs are recorded depends
   * on the value of the IncludeOpForFullLoad parameter. If IncludeOpForFullLoad
   * is set to true, the first field of every CDC record is set to either I or U
   * to indicate INSERT and UPDATE operations at the source. But if
   * IncludeOpForFullLoad is set to false, CDC records are written without an
   * indication of INSERT or UPDATE operations at the source. For more information
   * about how these settings work together, see Indicating Source DB Operations
   * in Migrated S3 Data in the AWS Database Migration Service User Guide.
   *
   * AWS DMS supports the use of the CdcInsertsAndUpdates parameter in versions
   * 3.3.1 and later.
   *
   * CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the
   * same endpoint. Set either CdcInsertsOnly or CdcInsertsAndUpdates to true for
   * the same endpoint, but not both.
   */
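  /**
   * Illustrative sketch: capturing INSERTs and UPDATEs (but not DELETEs) from a
   * CDC load, assuming the generated WithCdcInsertsAndUpdates and
   * WithCdcInsertsOnly accessors (not shown in this excerpt). Because the two
   * flags are mutually exclusive, only one of them is set to true.
   *
   * @code
   *   s3Settings.WithCdcInsertsAndUpdates(true);  // do not also set CdcInsertsOnly to true
   * @endcode
   */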