/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#pragma once
#include <aws/dms/DatabaseMigrationService_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/dms/model/CompressionTypeValue.h>
#include <aws/dms/model/EncryptionModeValue.h>
#include <aws/dms/model/DataFormatValue.h>
#include <aws/dms/model/EncodingTypeValue.h>
#include <aws/dms/model/ParquetVersionValue.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace DatabaseMigrationService
{
namespace Model
{

  /**

Settings for exporting data to Amazon S3.

See Also: AWS API Reference

   */
  class AWS_DATABASEMIGRATIONSERVICE_API S3Settings
  {
  public:
    S3Settings();
    S3Settings(Aws::Utils::Json::JsonView jsonValue);
    S3Settings& operator=(Aws::Utils::Json::JsonView jsonValue);
    Aws::Utils::Json::JsonValue Jsonize() const;

    /**

The Amazon Resource Name (ARN) used by the service access IAM role. It is a required parameter that enables DMS to write and read objects from an S3 bucket.
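As a brief illustration, the fluent With setters on this class can be chained, and Jsonize() yields the JSON document that the endpoint APIs consume. The ARN and bucket name below are placeholders:

@code
// Minimal sketch: point DMS at an S3 target (placeholder values).
Aws::DatabaseMigrationService::Model::S3Settings settings;
settings.WithServiceAccessRoleArn("arn:aws:iam::123456789012:role/dms-s3-access") // hypothetical role
        .WithBucketName("my-dms-bucket");                                         // hypothetical bucket
Aws::Utils::Json::JsonValue payload = settings.Jsonize();
@endcode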

*/ inline const Aws::String& GetServiceAccessRoleArn() const{ return m_serviceAccessRoleArn; } /** *

The Amazon Resource Name (ARN) used by the service access IAM role. It is a required parameter that enables DMS to write and read objects from an S3 bucket.

*/ inline bool ServiceAccessRoleArnHasBeenSet() const { return m_serviceAccessRoleArnHasBeenSet; } /** *

The Amazon Resource Name (ARN) used by the service access IAM role. It is a required parameter that enables DMS to write and read objects from an S3 bucket.

*/ inline void SetServiceAccessRoleArn(const Aws::String& value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn = value; } /** *

The Amazon Resource Name (ARN) used by the service access IAM role. It is a required parameter that enables DMS to write and read objects from an S3 bucket.

*/ inline void SetServiceAccessRoleArn(Aws::String&& value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn = std::move(value); } /** *

The Amazon Resource Name (ARN) used by the service access IAM role. It is a required parameter that enables DMS to write and read objects from an S3 bucket.

*/ inline void SetServiceAccessRoleArn(const char* value) { m_serviceAccessRoleArnHasBeenSet = true; m_serviceAccessRoleArn.assign(value); } /** *

The Amazon Resource Name (ARN) used by the service access IAM role. It is a required parameter that enables DMS to write and read objects from an S3 bucket.

*/ inline S3Settings& WithServiceAccessRoleArn(const Aws::String& value) { SetServiceAccessRoleArn(value); return *this;} /** *

The Amazon Resource Name (ARN) used by the service access IAM role. It is a required parameter that enables DMS to write and read objects from an S3 bucket.

*/ inline S3Settings& WithServiceAccessRoleArn(Aws::String&& value) { SetServiceAccessRoleArn(std::move(value)); return *this;} /** *

The Amazon Resource Name (ARN) used by the service access IAM role. It is a required parameter that enables DMS to write and read objects from an S3 bucket.

*/ inline S3Settings& WithServiceAccessRoleArn(const char* value) { SetServiceAccessRoleArn(value); return *this;} /** *

Specifies how tables are defined in the S3 source files only.
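A sketch of supplying a definition when S3 is the migration source; the JSON shape here is only an illustrative placeholder, not a verified schema:

@code
// Sketch: attach an external table definition (JSON document) for an S3 source.
Aws::DatabaseMigrationService::Model::S3Settings settings;
settings.SetExternalTableDefinition(R"({"TableCount": "1", "Tables": []})"); // illustrative JSON only
@endcode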

*/ inline const Aws::String& GetExternalTableDefinition() const{ return m_externalTableDefinition; } /** *

Specifies how tables are defined in the S3 source files only.

*/ inline bool ExternalTableDefinitionHasBeenSet() const { return m_externalTableDefinitionHasBeenSet; } /** *

Specifies how tables are defined in the S3 source files only.

*/ inline void SetExternalTableDefinition(const Aws::String& value) { m_externalTableDefinitionHasBeenSet = true; m_externalTableDefinition = value; } /** *

Specifies how tables are defined in the S3 source files only.

*/ inline void SetExternalTableDefinition(Aws::String&& value) { m_externalTableDefinitionHasBeenSet = true; m_externalTableDefinition = std::move(value); } /** *

Specifies how tables are defined in the S3 source files only.

*/ inline void SetExternalTableDefinition(const char* value) { m_externalTableDefinitionHasBeenSet = true; m_externalTableDefinition.assign(value); } /** *

Specifies how tables are defined in the S3 source files only.

*/ inline S3Settings& WithExternalTableDefinition(const Aws::String& value) { SetExternalTableDefinition(value); return *this;} /** *

Specifies how tables are defined in the S3 source files only.

*/ inline S3Settings& WithExternalTableDefinition(Aws::String&& value) { SetExternalTableDefinition(std::move(value)); return *this;} /** *

Specifies how tables are defined in the S3 source files only.

*/ inline S3Settings& WithExternalTableDefinition(const char* value) { SetExternalTableDefinition(value); return *this;} /** *

The delimiter used to separate rows in the .csv file for both source and target. The default is a newline (\n).
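For example, restating the default row delimiter and choosing a nondefault column delimiter (CsvDelimiter is described below):

@code
// Sketch: newline-separated rows, semicolon-separated columns.
Aws::DatabaseMigrationService::Model::S3Settings settings;
settings.WithCsvRowDelimiter("\n")
        .WithCsvDelimiter(";");
@endcode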

*/ inline const Aws::String& GetCsvRowDelimiter() const{ return m_csvRowDelimiter; } /** *

The delimiter used to separate rows in the .csv file for both source and target. The default is a newline (\n).

*/ inline bool CsvRowDelimiterHasBeenSet() const { return m_csvRowDelimiterHasBeenSet; } /** *

The delimiter used to separate rows in the .csv file for both source and target. The default is a newline (\n).

*/ inline void SetCsvRowDelimiter(const Aws::String& value) { m_csvRowDelimiterHasBeenSet = true; m_csvRowDelimiter = value; } /** *

The delimiter used to separate rows in the .csv file for both source and target. The default is a newline (\n).

*/ inline void SetCsvRowDelimiter(Aws::String&& value) { m_csvRowDelimiterHasBeenSet = true; m_csvRowDelimiter = std::move(value); } /** *

The delimiter used to separate rows in the .csv file for both source and target. The default is a newline (\n).

*/ inline void SetCsvRowDelimiter(const char* value) { m_csvRowDelimiterHasBeenSet = true; m_csvRowDelimiter.assign(value); } /** *

The delimiter used to separate rows in the .csv file for both source and target. The default is a newline (\n).

*/ inline S3Settings& WithCsvRowDelimiter(const Aws::String& value) { SetCsvRowDelimiter(value); return *this;} /** *

The delimiter used to separate rows in the .csv file for both source and target. The default is a newline (\n).

*/ inline S3Settings& WithCsvRowDelimiter(Aws::String&& value) { SetCsvRowDelimiter(std::move(value)); return *this;} /** *

The delimiter used to separate rows in the .csv file for both source and target. The default is a newline (\n).

*/ inline S3Settings& WithCsvRowDelimiter(const char* value) { SetCsvRowDelimiter(value); return *this;} /** *

The delimiter used to separate columns in the .csv file for both source and target. The default is a comma.

*/ inline const Aws::String& GetCsvDelimiter() const{ return m_csvDelimiter; } /** *

The delimiter used to separate columns in the .csv file for both source and target. The default is a comma.

*/ inline bool CsvDelimiterHasBeenSet() const { return m_csvDelimiterHasBeenSet; } /** *

The delimiter used to separate columns in the .csv file for both source and target. The default is a comma.

*/ inline void SetCsvDelimiter(const Aws::String& value) { m_csvDelimiterHasBeenSet = true; m_csvDelimiter = value; } /** *

The delimiter used to separate columns in the .csv file for both source and target. The default is a comma.

*/ inline void SetCsvDelimiter(Aws::String&& value) { m_csvDelimiterHasBeenSet = true; m_csvDelimiter = std::move(value); } /** *

The delimiter used to separate columns in the .csv file for both source and target. The default is a comma.

*/ inline void SetCsvDelimiter(const char* value) { m_csvDelimiterHasBeenSet = true; m_csvDelimiter.assign(value); } /** *

The delimiter used to separate columns in the .csv file for both source and target. The default is a comma.

*/ inline S3Settings& WithCsvDelimiter(const Aws::String& value) { SetCsvDelimiter(value); return *this;} /** *

The delimiter used to separate columns in the .csv file for both source and target. The default is a comma.

*/ inline S3Settings& WithCsvDelimiter(Aws::String&& value) { SetCsvDelimiter(std::move(value)); return *this;} /** *

The delimiter used to separate columns in the .csv file for both source and target. The default is a comma.

*/ inline S3Settings& WithCsvDelimiter(const char* value) { SetCsvDelimiter(value); return *this;} /** *

An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path bucketFolder/schema_name/table_name/. If this parameter isn't specified, then the path used is schema_name/table_name/.
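For example, with hypothetical names, the settings below place objects under my-dms-bucket/migrated/schema_name/table_name/:

@code
// Sketch: write under a folder prefix inside the bucket (placeholder names).
Aws::DatabaseMigrationService::Model::S3Settings settings;
settings.WithBucketName("my-dms-bucket")
        .WithBucketFolder("migrated");
@endcode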

*/ inline const Aws::String& GetBucketFolder() const{ return m_bucketFolder; } /** *

An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path bucketFolder/schema_name/table_name/. If this parameter isn't specified, then the path used is schema_name/table_name/.

*/ inline bool BucketFolderHasBeenSet() const { return m_bucketFolderHasBeenSet; } /** *

An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path bucketFolder/schema_name/table_name/. If this parameter isn't specified, then the path used is schema_name/table_name/.

*/ inline void SetBucketFolder(const Aws::String& value) { m_bucketFolderHasBeenSet = true; m_bucketFolder = value; } /** *

An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path bucketFolder/schema_name/table_name/. If this parameter isn't specified, then the path used is schema_name/table_name/.

*/ inline void SetBucketFolder(Aws::String&& value) { m_bucketFolderHasBeenSet = true; m_bucketFolder = std::move(value); } /** *

An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path bucketFolder/schema_name/table_name/. If this parameter isn't specified, then the path used is schema_name/table_name/.

*/ inline void SetBucketFolder(const char* value) { m_bucketFolderHasBeenSet = true; m_bucketFolder.assign(value); } /** *

An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path bucketFolder/schema_name/table_name/. If this parameter isn't specified, then the path used is schema_name/table_name/.

*/ inline S3Settings& WithBucketFolder(const Aws::String& value) { SetBucketFolder(value); return *this;} /** *

An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path bucketFolder/schema_name/table_name/. If this parameter isn't specified, then the path used is schema_name/table_name/.

*/ inline S3Settings& WithBucketFolder(Aws::String&& value) { SetBucketFolder(std::move(value)); return *this;} /** *

An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path bucketFolder/schema_name/table_name/. If this parameter isn't specified, then the path used is schema_name/table_name/.

*/ inline S3Settings& WithBucketFolder(const char* value) { SetBucketFolder(value); return *this;} /** *

The name of the S3 bucket.

*/ inline const Aws::String& GetBucketName() const{ return m_bucketName; } /** *

The name of the S3 bucket.

*/ inline bool BucketNameHasBeenSet() const { return m_bucketNameHasBeenSet; } /** *

The name of the S3 bucket.

*/ inline void SetBucketName(const Aws::String& value) { m_bucketNameHasBeenSet = true; m_bucketName = value; } /** *

The name of the S3 bucket.

*/ inline void SetBucketName(Aws::String&& value) { m_bucketNameHasBeenSet = true; m_bucketName = std::move(value); } /** *

The name of the S3 bucket.

*/ inline void SetBucketName(const char* value) { m_bucketNameHasBeenSet = true; m_bucketName.assign(value); } /** *

The name of the S3 bucket.

*/ inline S3Settings& WithBucketName(const Aws::String& value) { SetBucketName(value); return *this;} /** *

The name of the S3 bucket.

*/ inline S3Settings& WithBucketName(Aws::String&& value) { SetBucketName(std::move(value)); return *this;} /** *

The name of the S3 bucket.

*/ inline S3Settings& WithBucketName(const char* value) { SetBucketName(value); return *this;} /** *

An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the default) to leave them uncompressed. This parameter applies to both .csv and .parquet file formats.
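A sketch, assuming the generated enum follows the service's lowercase wire values (CompressionTypeValue::gzip):

@code
// Sketch: GZIP-compress target files; the enum member name is an assumption.
Aws::DatabaseMigrationService::Model::S3Settings settings;
settings.SetCompressionType(Aws::DatabaseMigrationService::Model::CompressionTypeValue::gzip);
@endcode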

*/ inline const CompressionTypeValue& GetCompressionType() const{ return m_compressionType; } /** *

An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the default) to leave them uncompressed. This parameter applies to both .csv and .parquet file formats.

*/ inline bool CompressionTypeHasBeenSet() const { return m_compressionTypeHasBeenSet; } /** *

An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the default) to leave them uncompressed. This parameter applies to both .csv and .parquet file formats.

*/ inline void SetCompressionType(const CompressionTypeValue& value) { m_compressionTypeHasBeenSet = true; m_compressionType = value; } /** *

An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the default) to leave them uncompressed. This parameter applies to both .csv and .parquet file formats.

*/ inline void SetCompressionType(CompressionTypeValue&& value) { m_compressionTypeHasBeenSet = true; m_compressionType = std::move(value); } /** *

An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the default) to leave them uncompressed. This parameter applies to both .csv and .parquet file formats.

*/ inline S3Settings& WithCompressionType(const CompressionTypeValue& value) { SetCompressionType(value); return *this;} /** *

An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the default) to leave them uncompressed. This parameter applies to both .csv and .parquet file formats.

*/ inline S3Settings& WithCompressionType(CompressionTypeValue&& value) { SetCompressionType(std::move(value)); return *this;} /** *

The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connection attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS.

For the ModifyEndpoint operation, you can change the existing value of the EncryptionMode parameter from SSE_KMS to SSE_S3. But you can't change the existing value from SSE_S3 to SSE_KMS.

To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow "arn:aws:s3:::dms-*" to use the following actions:

  • s3:CreateBucket
  • s3:ListBucket
  • s3:DeleteBucket
  • s3:GetBucketLocation
  • s3:GetObject
  • s3:PutObject
  • s3:DeleteObject
  • s3:GetObjectVersion
  • s3:GetBucketPolicy
  • s3:PutBucketPolicy
  • s3:DeleteBucketPolicy
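A sketch of selecting SSE_KMS together with a key, using the generated value mapper; the wire value "sse-kms" and the key ARN are assumptions/placeholders:

@code
// Sketch: KMS-managed server-side encryption (assumed wire value, placeholder key).
using namespace Aws::DatabaseMigrationService::Model;
S3Settings settings;
settings.SetEncryptionMode(EncryptionModeValueMapper::GetEncryptionModeValueForName("sse-kms"));
settings.SetServerSideEncryptionKmsKeyId("arn:aws:kms:us-east-1:123456789012:key/placeholder");
@endcode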

*/ inline const EncryptionModeValue& GetEncryptionMode() const{ return m_encryptionMode; } /** *

The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connection attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS.

For the ModifyEndpoint operation, you can change the existing value of the EncryptionMode parameter from SSE_KMS to SSE_S3. But you can't change the existing value from SSE_S3 to SSE_KMS.

To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow "arn:aws:s3:::dms-*" to use the following actions:

  • s3:CreateBucket
  • s3:ListBucket
  • s3:DeleteBucket
  • s3:GetBucketLocation
  • s3:GetObject
  • s3:PutObject
  • s3:DeleteObject
  • s3:GetObjectVersion
  • s3:GetBucketPolicy
  • s3:PutBucketPolicy
  • s3:DeleteBucketPolicy

*/ inline bool EncryptionModeHasBeenSet() const { return m_encryptionModeHasBeenSet; } /** *

The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connection attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS.

For the ModifyEndpoint operation, you can change the existing value of the EncryptionMode parameter from SSE_KMS to SSE_S3. But you can't change the existing value from SSE_S3 to SSE_KMS.

To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow "arn:aws:s3:::dms-*" to use the following actions:

  • s3:CreateBucket
  • s3:ListBucket
  • s3:DeleteBucket
  • s3:GetBucketLocation
  • s3:GetObject
  • s3:PutObject
  • s3:DeleteObject
  • s3:GetObjectVersion
  • s3:GetBucketPolicy
  • s3:PutBucketPolicy
  • s3:DeleteBucketPolicy

*/ inline void SetEncryptionMode(const EncryptionModeValue& value) { m_encryptionModeHasBeenSet = true; m_encryptionMode = value; } /** *

The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connection attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS.

For the ModifyEndpoint operation, you can change the existing value of the EncryptionMode parameter from SSE_KMS to SSE_S3. But you can't change the existing value from SSE_S3 to SSE_KMS.

To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow "arn:aws:s3:::dms-*" to use the following actions:

  • s3:CreateBucket
  • s3:ListBucket
  • s3:DeleteBucket
  • s3:GetBucketLocation
  • s3:GetObject
  • s3:PutObject
  • s3:DeleteObject
  • s3:GetObjectVersion
  • s3:GetBucketPolicy
  • s3:PutBucketPolicy
  • s3:DeleteBucketPolicy

*/ inline void SetEncryptionMode(EncryptionModeValue&& value) { m_encryptionModeHasBeenSet = true; m_encryptionMode = std::move(value); } /** *

The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connection attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS.

For the ModifyEndpoint operation, you can change the existing value of the EncryptionMode parameter from SSE_KMS to SSE_S3. But you can't change the existing value from SSE_S3 to SSE_KMS.

To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow "arn:aws:s3:::dms-*" to use the following actions:

  • s3:CreateBucket
  • s3:ListBucket
  • s3:DeleteBucket
  • s3:GetBucketLocation
  • s3:GetObject
  • s3:PutObject
  • s3:DeleteObject
  • s3:GetObjectVersion
  • s3:GetBucketPolicy
  • s3:PutBucketPolicy
  • s3:DeleteBucketPolicy

*/ inline S3Settings& WithEncryptionMode(const EncryptionModeValue& value) { SetEncryptionMode(value); return *this;} /** *

The type of server-side encryption that you want to use for your data. This encryption type is part of the endpoint settings or the extra connection attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS.

For the ModifyEndpoint operation, you can change the existing value of the EncryptionMode parameter from SSE_KMS to SSE_S3. But you can't change the existing value from SSE_S3 to SSE_KMS.

To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow "arn:aws:s3:::dms-*" to use the following actions:

  • s3:CreateBucket
  • s3:ListBucket
  • s3:DeleteBucket
  • s3:GetBucketLocation
  • s3:GetObject
  • s3:PutObject
  • s3:DeleteObject
  • s3:GetObjectVersion
  • s3:GetBucketPolicy
  • s3:PutBucketPolicy
  • s3:DeleteBucketPolicy

*/ inline S3Settings& WithEncryptionMode(EncryptionModeValue&& value) { SetEncryptionMode(std::move(value)); return *this;} /** *

If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.

Here is a CLI example: aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value

*/ inline const Aws::String& GetServerSideEncryptionKmsKeyId() const{ return m_serverSideEncryptionKmsKeyId; } /** *

If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.

Here is a CLI example: aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value

*/ inline bool ServerSideEncryptionKmsKeyIdHasBeenSet() const { return m_serverSideEncryptionKmsKeyIdHasBeenSet; } /** *

If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.

Here is a CLI example: aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value

*/ inline void SetServerSideEncryptionKmsKeyId(const Aws::String& value) { m_serverSideEncryptionKmsKeyIdHasBeenSet = true; m_serverSideEncryptionKmsKeyId = value; } /** *

If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.

Here is a CLI example: aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value

*/ inline void SetServerSideEncryptionKmsKeyId(Aws::String&& value) { m_serverSideEncryptionKmsKeyIdHasBeenSet = true; m_serverSideEncryptionKmsKeyId = std::move(value); } /** *

If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.

Here is a CLI example: aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value

*/ inline void SetServerSideEncryptionKmsKeyId(const char* value) { m_serverSideEncryptionKmsKeyIdHasBeenSet = true; m_serverSideEncryptionKmsKeyId.assign(value); } /** *

If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.

Here is a CLI example: aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value

*/ inline S3Settings& WithServerSideEncryptionKmsKeyId(const Aws::String& value) { SetServerSideEncryptionKmsKeyId(value); return *this;} /** *

If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.

Here is a CLI example: aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value

*/ inline S3Settings& WithServerSideEncryptionKmsKeyId(Aws::String&& value) { SetServerSideEncryptionKmsKeyId(std::move(value)); return *this;} /** *

If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.

Here is a CLI example: aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value

*/ inline S3Settings& WithServerSideEncryptionKmsKeyId(const char* value) { SetServerSideEncryptionKmsKeyId(value); return *this;} /** *

The format of the data that you want to use for output. You can choose one of the following:

  • csv : This is a row-based file format with comma-separated values (.csv).
  • parquet : Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and provides faster query response.
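A sketch of choosing Parquet output, assuming the generated member is DataFormatValue::parquet:

@code
// Sketch: columnar Parquet output instead of the default .csv.
Aws::DatabaseMigrationService::Model::S3Settings settings;
settings.SetDataFormat(Aws::DatabaseMigrationService::Model::DataFormatValue::parquet);
@endcode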

*/ inline const DataFormatValue& GetDataFormat() const{ return m_dataFormat; } /** *

The format of the data that you want to use for output. You can choose one of the following:

  • csv : This is a row-based file format with comma-separated values (.csv).
  • parquet : Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and provides faster query response.

*/ inline bool DataFormatHasBeenSet() const { return m_dataFormatHasBeenSet; } /** *

The format of the data that you want to use for output. You can choose one of the following:

  • csv : This is a row-based file format with comma-separated values (.csv).
  • parquet : Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and provides faster query response.

*/ inline void SetDataFormat(const DataFormatValue& value) { m_dataFormatHasBeenSet = true; m_dataFormat = value; } /** *

The format of the data that you want to use for output. You can choose one of the following:

  • csv : This is a row-based file format with comma-separated values (.csv).
  • parquet : Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and provides faster query response.

*/ inline void SetDataFormat(DataFormatValue&& value) { m_dataFormatHasBeenSet = true; m_dataFormat = std::move(value); } /** *

The format of the data that you want to use for output. You can choose one of the following:

  • csv : This is a row-based file format with comma-separated values (.csv).
  • parquet : Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and provides faster query response.

*/ inline S3Settings& WithDataFormat(const DataFormatValue& value) { SetDataFormat(value); return *this;} /** *

The format of the data that you want to use for output. You can choose one of the following:

  • csv : This is a row-based file format with comma-separated values (.csv).
  • parquet : Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and provides faster query response.

*/ inline S3Settings& WithDataFormat(DataFormatValue&& value) { SetDataFormat(std::move(value)); return *this;} /** *

The type of encoding you are using:

  • RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more efficiently. This is the default.
  • PLAIN doesn't use encoding at all. Values are stored as they are.
  • PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in a dictionary page for each column chunk.
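A sketch, assuming the generated member name EncodingTypeValue::plain_dictionary follows the usual hyphen-to-underscore mapping of the wire values:

@code
// Sketch: dictionary-encode Parquet columns (enum member name is an assumption).
Aws::DatabaseMigrationService::Model::S3Settings settings;
settings.SetEncodingType(Aws::DatabaseMigrationService::Model::EncodingTypeValue::plain_dictionary);
@endcode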

*/ inline const EncodingTypeValue& GetEncodingType() const{ return m_encodingType; } /** *

The type of encoding you are using:

  • RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more efficiently. This is the default.
  • PLAIN doesn't use encoding at all. Values are stored as they are.
  • PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in a dictionary page for each column chunk.

*/ inline bool EncodingTypeHasBeenSet() const { return m_encodingTypeHasBeenSet; } /** *

The type of encoding you are using:

  • RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more efficiently. This is the default.
  • PLAIN doesn't use encoding at all. Values are stored as they are.
  • PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in a dictionary page for each column chunk.

*/ inline void SetEncodingType(const EncodingTypeValue& value) { m_encodingTypeHasBeenSet = true; m_encodingType = value; } /** *

The type of encoding you are using:

  • RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more efficiently. This is the default.
  • PLAIN doesn't use encoding at all. Values are stored as they are.
  • PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in a dictionary page for each column chunk.

*/ inline void SetEncodingType(EncodingTypeValue&& value) { m_encodingTypeHasBeenSet = true; m_encodingType = std::move(value); } /** *

The type of encoding you are using:

  • RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more efficiently. This is the default.
  • PLAIN doesn't use encoding at all. Values are stored as they are.
  • PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in a dictionary page for each column chunk.

*/ inline S3Settings& WithEncodingType(const EncodingTypeValue& value) { SetEncodingType(value); return *this;} /** *

The type of encoding you are using:

  • RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more efficiently. This is the default.
  • PLAIN doesn't use encoding at all. Values are stored as they are.
  • PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in a dictionary page for each column chunk.

*/ inline S3Settings& WithEncodingType(EncodingTypeValue&& value) { SetEncodingType(std::move(value)); return *this;} /** *

The maximum size of an encoded dictionary page of a column. If a dictionary page exceeds this size, the column is stored using the PLAIN encoding type instead. This parameter defaults to 1024 * 1024 bytes (1 MiB) and applies to the .parquet file format only.

*/ inline int GetDictPageSizeLimit() const{ return m_dictPageSizeLimit; } /** *

The maximum size of an encoded dictionary page of a column. If a dictionary page exceeds this size, the column is stored using the PLAIN encoding type instead. This parameter defaults to 1024 * 1024 bytes (1 MiB) and applies to the .parquet file format only.

*/ inline bool DictPageSizeLimitHasBeenSet() const { return m_dictPageSizeLimitHasBeenSet; } /** *

The maximum size of an encoded dictionary page of a column. If a dictionary page exceeds this size, the column is stored using the PLAIN encoding type instead. This parameter defaults to 1024 * 1024 bytes (1 MiB) and applies to the .parquet file format only.

*/ inline void SetDictPageSizeLimit(int value) { m_dictPageSizeLimitHasBeenSet = true; m_dictPageSizeLimit = value; } /** *

The maximum size of an encoded dictionary page of a column. If a dictionary page exceeds this size, the column is stored using the PLAIN encoding type instead. This parameter defaults to 1024 * 1024 bytes (1 MiB) and applies to the .parquet file format only.

*/ inline S3Settings& WithDictPageSizeLimit(int value) { SetDictPageSizeLimit(value); return *this;} /** *

The number of rows in a row group. A smaller row group size provides faster reads, but writes become slower as the number of row groups grows. This parameter defaults to 10,000 rows and applies to the .parquet file format only.

If you choose a value larger than the maximum, RowGroupLength is set to the maximum row group length in bytes (64 * 1024 * 1024).

*/ inline int GetRowGroupLength() const{ return m_rowGroupLength; } /** *

The number of rows in a row group. A smaller row group size provides faster reads, but writes become slower as the number of row groups grows. This parameter defaults to 10,000 rows and applies to the .parquet file format only.

If you choose a value larger than the maximum, RowGroupLength is set to the maximum row group length in bytes (64 * 1024 * 1024).

*/ inline bool RowGroupLengthHasBeenSet() const { return m_rowGroupLengthHasBeenSet; } /** *

The number of rows in a row group. A smaller row group size provides faster reads, but writes become slower as the number of row groups grows. This parameter defaults to 10,000 rows and applies to the .parquet file format only.

If you choose a value larger than the maximum, RowGroupLength is set to the maximum row group length in bytes (64 * 1024 * 1024).

*/ inline void SetRowGroupLength(int value) { m_rowGroupLengthHasBeenSet = true; m_rowGroupLength = value; } /** *

The number of rows in a row group. A smaller row group size provides faster reads, but writes become slower as the number of row groups grows. This parameter defaults to 10,000 rows and applies to the .parquet file format only.

If you choose a value larger than the maximum, RowGroupLength is set to the maximum row group length in bytes (64 * 1024 * 1024).

*/ inline S3Settings& WithRowGroupLength(int value) { SetRowGroupLength(value); return *this;} /** *

The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for the .parquet file format only.
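Taken together with DictPageSizeLimit and RowGroupLength above, a sketch that simply restates the documented defaults:

@code
// Sketch: Parquet sizing knobs set to their documented default values.
Aws::DatabaseMigrationService::Model::S3Settings settings;
settings.WithDictPageSizeLimit(1024 * 1024) // 1 MiB dictionary page cap
        .WithRowGroupLength(10000)          // 10,000 rows per row group
        .WithDataPageSize(1024 * 1024);     // 1 MiB data pages
@endcode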

*/ inline int GetDataPageSize() const{ return m_dataPageSize; } /** *

The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for the .parquet file format only.

*/ inline bool DataPageSizeHasBeenSet() const { return m_dataPageSizeHasBeenSet; } /** *

The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for the .parquet file format only.

*/ inline void SetDataPageSize(int value) { m_dataPageSizeHasBeenSet = true; m_dataPageSize = value; } /** *

The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for the .parquet file format only.

*/ inline S3Settings& WithDataPageSize(int value) { SetDataPageSize(value); return *this;} /** *

The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.

*/ inline const ParquetVersionValue& GetParquetVersion() const{ return m_parquetVersion; } /** *

The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.

*/ inline bool ParquetVersionHasBeenSet() const { return m_parquetVersionHasBeenSet; } /** *

The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.

*/ inline void SetParquetVersion(const ParquetVersionValue& value) { m_parquetVersionHasBeenSet = true; m_parquetVersion = value; } /** *

The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.

*/ inline void SetParquetVersion(ParquetVersionValue&& value) { m_parquetVersionHasBeenSet = true; m_parquetVersion = std::move(value); } /** *

The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.

*/ inline S3Settings& WithParquetVersion(const ParquetVersionValue& value) { SetParquetVersion(value); return *this;} /** *

The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.

*/ inline S3Settings& WithParquetVersion(ParquetVersionValue&& value) { SetParquetVersion(std::move(value)); return *this;} /** *

A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is used for the .parquet file format only.

*/ inline bool GetEnableStatistics() const{ return m_enableStatistics; } /** *

A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is used for the .parquet file format only.

*/ inline bool EnableStatisticsHasBeenSet() const { return m_enableStatisticsHasBeenSet; } /** *

A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is used for the .parquet file format only.

*/ inline void SetEnableStatistics(bool value) { m_enableStatisticsHasBeenSet = true; m_enableStatistics = value; } /** *

A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is used for the .parquet file format only.

*/ inline S3Settings& WithEnableStatistics(bool value) { SetEnableStatistics(value); return *this;} /** *

A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files only to indicate how the rows were added to the source database.

AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.

For full load, records can only be inserted. By default (the false setting), no information is recorded in these output files for a full load to indicate that the rows were inserted at the source database. If IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the .csv file. This allows the format of your target records from a full load to be consistent with the target records from a CDC load.

This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv files only. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.

*/ inline bool GetIncludeOpForFullLoad() const{ return m_includeOpForFullLoad; } /** *

A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files only to indicate how the rows were added to the source database.

AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.

For full load, records can only be inserted. By default (the false setting), no information is recorded in these output files for a full load to indicate that the rows were inserted at the source database. If IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the .csv file. This allows the format of your target records from a full load to be consistent with the target records from a CDC load.

This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv files only. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.

*/ inline bool IncludeOpForFullLoadHasBeenSet() const { return m_includeOpForFullLoadHasBeenSet; } /** *

A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files only to indicate how the rows were added to the source database.

AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.

For full load, records can only be inserted. By default (the false setting), no information is recorded in these output files for a full load to indicate that the rows were inserted at the source database. If IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the .csv file. This allows the format of your target records from a full load to be consistent with the target records from a CDC load.

This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv files only. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.

*/ inline void SetIncludeOpForFullLoad(bool value) { m_includeOpForFullLoadHasBeenSet = true; m_includeOpForFullLoad = value; } /** *

A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files only to indicate how the rows were added to the source database.

AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.

For full load, records can only be inserted. By default (the false setting), no information is recorded in these output files for a full load to indicate that the rows were inserted at the source database. If IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the .csv file. This allows the format of your target records from a full load to be consistent with the target records from a CDC load.

This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv files only. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.

*/ inline S3Settings& WithIncludeOpForFullLoad(bool value) { SetIncludeOpForFullLoad(value); return *this;} /** *

A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or deleted at the source database for a CDC load to the target.

If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first field to indicate the INSERT operation at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.

AWS DMS supports this interaction between the CdcInsertsOnly and IncludeOpForFullLoad parameters in versions 3.1.4 and later.

CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
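A sketch of the combination discussed above, keeping full-load and CDC records consistent while migrating only INSERTs:

@code
// Sketch: annotate full-load rows with "I" and restrict CDC output to INSERTs.
Aws::DatabaseMigrationService::Model::S3Settings settings;
settings.WithIncludeOpForFullLoad(true)
        .WithCdcInsertsOnly(true);
@endcode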

*/ inline bool GetCdcInsertsOnly() const{ return m_cdcInsertsOnly; } /** *

A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or deleted at the source database for a CDC load to the target.

If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first field to indicate the INSERT operation at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.

AWS DMS supports this interaction between the CdcInsertsOnly and IncludeOpForFullLoad parameters in versions 3.1.4 and later.

CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.

*/ inline bool CdcInsertsOnlyHasBeenSet() const { return m_cdcInsertsOnlyHasBeenSet; } /** *

A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or deleted at the source database for a CDC load to the target.

If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first field to indicate the INSERT operation at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.

AWS DMS supports this interaction between the CdcInsertsOnly and IncludeOpForFullLoad parameters in versions 3.1.4 and later.

CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.

*/ inline void SetCdcInsertsOnly(bool value) { m_cdcInsertsOnlyHasBeenSet = true; m_cdcInsertsOnly = value; } /** *

A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or deleted at the source database for a CDC load to the target.

If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first field to indicate the INSERT operation at the source. For more information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.

AWS DMS supports this interaction between the CdcInsertsOnly and IncludeOpForFullLoad parameters in versions 3.1.4 and later.

CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.

*/ inline S3Settings& WithCdcInsertsOnly(bool value) { SetCdcInsertsOnly(value); return *this;} /** *

A value that when nonblank causes AWS DMS to add a column with timestamp information to the endpoint data for an Amazon S3 target.

AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.

DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set TimestampColumnName to a nonblank value.

For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from the source to the target by DMS.

For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of that row in the source database.

The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp supported by DMS for the source database.

When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set with TimestampColumnName.
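For example, with an arbitrary (hypothetical) column name:

@code
// Sketch: request the extra timestamp column in migrated objects.
Aws::DatabaseMigrationService::Model::S3Settings settings;
settings.SetTimestampColumnName("dms_ingest_ts"); // column name chosen by the caller
@endcode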

*/ inline const Aws::String& GetTimestampColumnName() const{ return m_timestampColumnName; } /** *

A value that when nonblank causes AWS DMS to add a column with timestamp information to the endpoint data for an Amazon S3 target.

AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.

DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set TimestampColumnName to a nonblank value.

For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from the source to the target by DMS.

For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of that row in the source database.

The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp supported by DMS for the source database.

When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set with TimestampColumnName.

*/ inline bool TimestampColumnNameHasBeenSet() const { return m_timestampColumnNameHasBeenSet; } /** *

A value that when nonblank causes AWS DMS to add a column with timestamp information to the endpoint data for an Amazon S3 target.

AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.

DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set TimestampColumnName to a nonblank value.

For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from the source to the target by DMS.

For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of that row in the source database.

The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp supported by DMS for the source database.

When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set with TimestampColumnName.

*/ inline void SetTimestampColumnName(const Aws::String& value) { m_timestampColumnNameHasBeenSet = true; m_timestampColumnName = value; } /** *

A value that when nonblank causes AWS DMS to add a column with timestamp information to the endpoint data for an Amazon S3 target.

AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.

DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set TimestampColumnName to a nonblank value.

For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from the source to the target by DMS.

For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of that row in the source database.

The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp supported by DMS for the source database.

When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set with TimestampColumnName.

*/ inline void SetTimestampColumnName(Aws::String&& value) { m_timestampColumnNameHasBeenSet = true; m_timestampColumnName = std::move(value); } /** *

A value that when nonblank causes AWS DMS to add a column with timestamp information to the endpoint data for an Amazon S3 target.

AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.

DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set TimestampColumnName to a nonblank value.

For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from the source to the target by DMS.

For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of that row in the source database.

The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp supported by DMS for the source database.

When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set with TimestampColumnName.

*/ inline void SetTimestampColumnName(const char* value) { m_timestampColumnNameHasBeenSet = true; m_timestampColumnName.assign(value); } /** *

A value that when nonblank causes AWS DMS to add a column with timestamp information to the endpoint data for an Amazon S3 target.

AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.

DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set TimestampColumnName to a nonblank value.

For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from the source to the target by DMS.

For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of that row in the source database.

The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp supported by DMS for the source database.

When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set with TimestampColumnName.

*/ inline S3Settings& WithTimestampColumnName(const Aws::String& value) { SetTimestampColumnName(value); return *this;} /** *

A value that, when nonblank, causes AWS DMS to add a column with timestamp * information to the endpoint data for an Amazon S3 target.

AWS DMS * supports the TimestampColumnName parameter in versions 3.1.4 and * later.

DMS includes an additional STRING column in * the .csv or .parquet object files of your migrated data when you set * TimestampColumnName to a nonblank value.

For a full load, * each row of this timestamp column contains a timestamp for when the data was * transferred from the source to the target by DMS.

For a change data * capture (CDC) load, each row of the timestamp column contains the timestamp for * the commit of that row in the source database.

The string format for this * timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, * the precision of this value is in microseconds. For a CDC load, the rounding of * the precision depends on the commit timestamp supported by DMS for the source * database.

When the AddColumnName parameter is set to * true, DMS also includes a name for the timestamp column that you * set with TimestampColumnName.

*/ inline S3Settings& WithTimestampColumnName(Aws::String&& value) { SetTimestampColumnName(std::move(value)); return *this;} /** *

A value that, when nonblank, causes AWS DMS to add a column with timestamp * information to the endpoint data for an Amazon S3 target.

AWS DMS * supports the TimestampColumnName parameter in versions 3.1.4 and * later.

DMS includes an additional STRING column in * the .csv or .parquet object files of your migrated data when you set * TimestampColumnName to a nonblank value.

For a full load, * each row of this timestamp column contains a timestamp for when the data was * transferred from the source to the target by DMS.

For a change data * capture (CDC) load, each row of the timestamp column contains the timestamp for * the commit of that row in the source database.

The string format for this * timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, * the precision of this value is in microseconds. For a CDC load, the rounding of * the precision depends on the commit timestamp supported by DMS for the source * database.

When the AddColumnName parameter is set to * true, DMS also includes a name for the timestamp column that you * set with TimestampColumnName.
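
* A minimal usage sketch (an editorial illustration, not generated
* documentation; the column name "dms_commit_ts" is an arbitrary
* placeholder):
*
*   Aws::DatabaseMigrationService::Model::S3Settings settings;
*   // Any nonblank value causes DMS to emit the extra STRING column.
*   settings.WithTimestampColumnName("dms_commit_ts");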

*/ inline S3Settings& WithTimestampColumnName(const char* value) { SetTimestampColumnName(value); return *this;} /** *

A value that specifies the precision of any TIMESTAMP column * values that are written to an Amazon S3 object file in .parquet format.

AWS DMS supports the ParquetTimestampInMillisecond * parameter in versions 3.1.4 and later.

When * ParquetTimestampInMillisecond is set to true or * y, AWS DMS writes all TIMESTAMP columns in a * .parquet-formatted file with millisecond precision. Otherwise, DMS writes * them with microsecond precision.

Currently, Amazon Athena and AWS Glue can handle * only millisecond precision for TIMESTAMP values. Set this parameter * to true for .parquet-formatted S3 endpoint object files only if you * plan to query or process the data with Athena or AWS Glue.

AWS DMS writes any TIMESTAMP column values to an S3 file * in .csv format with microsecond precision.

Setting * ParquetTimestampInMillisecond has no effect on the string format of * the timestamp column value that is inserted by setting the * TimestampColumnName parameter.

*/ inline bool GetParquetTimestampInMillisecond() const{ return m_parquetTimestampInMillisecond; } /** *

A value that specifies the precision of any TIMESTAMP column * values that are written to an Amazon S3 object file in .parquet format.

AWS DMS supports the ParquetTimestampInMillisecond * parameter in versions 3.1.4 and later.

When * ParquetTimestampInMillisecond is set to true or * y, AWS DMS writes all TIMESTAMP columns in a * .parquet-formatted file with millisecond precision. Otherwise, DMS writes * them with microsecond precision.

Currently, Amazon Athena and AWS Glue can handle * only millisecond precision for TIMESTAMP values. Set this parameter * to true for .parquet-formatted S3 endpoint object files only if you * plan to query or process the data with Athena or AWS Glue.

AWS DMS writes any TIMESTAMP column values to an S3 file * in .csv format with microsecond precision.

Setting * ParquetTimestampInMillisecond has no effect on the string format of * the timestamp column value that is inserted by setting the * TimestampColumnName parameter.

*/ inline bool ParquetTimestampInMillisecondHasBeenSet() const { return m_parquetTimestampInMillisecondHasBeenSet; } /** *

A value that specifies the precision of any TIMESTAMP column * values that are written to an Amazon S3 object file in .parquet format.

AWS DMS supports the ParquetTimestampInMillisecond * parameter in versions 3.1.4 and later.

When * ParquetTimestampInMillisecond is set to true or * y, AWS DMS writes all TIMESTAMP columns in a * .parquet-formatted file with millisecond precision. Otherwise, DMS writes * them with microsecond precision.

Currently, Amazon Athena and AWS Glue can handle * only millisecond precision for TIMESTAMP values. Set this parameter * to true for .parquet-formatted S3 endpoint object files only if you * plan to query or process the data with Athena or AWS Glue.

AWS DMS writes any TIMESTAMP column values to an S3 file * in .csv format with microsecond precision.

Setting * ParquetTimestampInMillisecond has no effect on the string format of * the timestamp column value that is inserted by setting the * TimestampColumnName parameter.

*/ inline void SetParquetTimestampInMillisecond(bool value) { m_parquetTimestampInMillisecondHasBeenSet = true; m_parquetTimestampInMillisecond = value; } /** *

A value that specifies the precision of any TIMESTAMP column * values that are written to an Amazon S3 object file in .parquet format.

AWS DMS supports the ParquetTimestampInMillisecond * parameter in versions 3.1.4 and later.

When * ParquetTimestampInMillisecond is set to true or * y, AWS DMS writes all TIMESTAMP columns in a * .parquet-formatted file with millisecond precision. Otherwise, DMS writes * them with microsecond precision.

Currently, Amazon Athena and AWS Glue can handle * only millisecond precision for TIMESTAMP values. Set this parameter * to true for .parquet-formatted S3 endpoint object files only if you * plan to query or process the data with Athena or AWS Glue.

AWS DMS writes any TIMESTAMP column values to an S3 file * in .csv format with microsecond precision.

Setting * ParquetTimestampInMillisecond has no effect on the string format of * the timestamp column value that is inserted by setting the * TimestampColumnName parameter.
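
* A minimal usage sketch (an editorial illustration, not generated
* documentation; assumes the endpoint writes .parquet files via the
* DataFormat setting):
*
*   using namespace Aws::DatabaseMigrationService::Model;
*   S3Settings settings;
*   settings.WithDataFormat(DataFormatValue::parquet)
*           .WithParquetTimestampInMillisecond(true); // for Athena/AWS Glue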

*/ inline S3Settings& WithParquetTimestampInMillisecond(bool value) { SetParquetTimestampInMillisecond(value); return *this;} /** *

A value that enables a change data capture (CDC) load to write INSERT and * UPDATE operations to .csv or .parquet (columnar storage) output files. The * default setting is false, but when * CdcInsertsAndUpdates is set to true or y, * only INSERTs and UPDATEs from the source database are migrated to the .csv or * .parquet file.

For .csv file format only, how these INSERTs and UPDATEs * are recorded depends on the value of the IncludeOpForFullLoad * parameter. If IncludeOpForFullLoad is set to true, the * first field of every CDC record is set to either I or * U to indicate INSERT and UPDATE operations at the source. But if * IncludeOpForFullLoad is set to false, CDC records are * written without an indication of INSERT or UPDATE operations at the source. For * more information about how these settings work together, see Indicating * Source DB Operations in Migrated S3 Data in the AWS Database Migration * Service User Guide.

AWS DMS supports the use of the * CdcInsertsAndUpdates parameter in versions 3.3.1 and later.

* CdcInsertsOnly and CdcInsertsAndUpdates can't both be * set to true for the same endpoint. Set either * CdcInsertsOnly or CdcInsertsAndUpdates to * true, but not both.

*/ inline bool GetCdcInsertsAndUpdates() const{ return m_cdcInsertsAndUpdates; } /** *

A value that enables a change data capture (CDC) load to write INSERT and * UPDATE operations to .csv or .parquet (columnar storage) output files. The * default setting is false, but when * CdcInsertsAndUpdates is set to true or y, * only INSERTs and UPDATEs from the source database are migrated to the .csv or * .parquet file.

For .csv file format only, how these INSERTs and UPDATEs * are recorded depends on the value of the IncludeOpForFullLoad * parameter. If IncludeOpForFullLoad is set to true, the * first field of every CDC record is set to either I or * U to indicate INSERT and UPDATE operations at the source. But if * IncludeOpForFullLoad is set to false, CDC records are * written without an indication of INSERT or UPDATE operations at the source. For * more information about how these settings work together, see Indicating * Source DB Operations in Migrated S3 Data in the AWS Database Migration * Service User Guide.

AWS DMS supports the use of the * CdcInsertsAndUpdates parameter in versions 3.3.1 and later.

* CdcInsertsOnly and CdcInsertsAndUpdates can't both be * set to true for the same endpoint. Set either * CdcInsertsOnly or CdcInsertsAndUpdates to * true, but not both.

*/ inline bool CdcInsertsAndUpdatesHasBeenSet() const { return m_cdcInsertsAndUpdatesHasBeenSet; } /** *

A value that enables a change data capture (CDC) load to write INSERT and * UPDATE operations to .csv or .parquet (columnar storage) output files. The * default setting is false, but when * CdcInsertsAndUpdates is set to true or y, * only INSERTs and UPDATEs from the source database are migrated to the .csv or * .parquet file.

For .csv file format only, how these INSERTs and UPDATEs * are recorded depends on the value of the IncludeOpForFullLoad * parameter. If IncludeOpForFullLoad is set to true, the * first field of every CDC record is set to either I or * U to indicate INSERT and UPDATE operations at the source. But if * IncludeOpForFullLoad is set to false, CDC records are * written without an indication of INSERT or UPDATE operations at the source. For * more information about how these settings work together, see Indicating * Source DB Operations in Migrated S3 Data in the AWS Database Migration * Service User Guide.

AWS DMS supports the use of the * CdcInsertsAndUpdates parameter in versions 3.3.1 and later.

* CdcInsertsOnly and CdcInsertsAndUpdates can't both be * set to true for the same endpoint. Set either * CdcInsertsOnly or CdcInsertsAndUpdates to * true, but not both.

*/ inline void SetCdcInsertsAndUpdates(bool value) { m_cdcInsertsAndUpdatesHasBeenSet = true; m_cdcInsertsAndUpdates = value; } /** *

A value that enables a change data capture (CDC) load to write INSERT and * UPDATE operations to .csv or .parquet (columnar storage) output files. The * default setting is false, but when * CdcInsertsAndUpdates is set to true or y, * only INSERTs and UPDATEs from the source database are migrated to the .csv or * .parquet file.

For .csv file format only, how these INSERTs and UPDATEs * are recorded depends on the value of the IncludeOpForFullLoad * parameter. If IncludeOpForFullLoad is set to true, the * first field of every CDC record is set to either I or * U to indicate INSERT and UPDATE operations at the source. But if * IncludeOpForFullLoad is set to false, CDC records are * written without an indication of INSERT or UPDATE operations at the source. For * more information about how these settings work together, see Indicating * Source DB Operations in Migrated S3 Data in the AWS Database Migration * Service User Guide.

AWS DMS supports the use of the * CdcInsertsAndUpdates parameter in versions 3.3.1 and later.

* CdcInsertsOnly and CdcInsertsAndUpdates can't both be * set to true for the same endpoint. Set either * CdcInsertsOnly or CdcInsertsAndUpdates to * true, but not both.
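
* A minimal usage sketch (an editorial illustration, not generated
* documentation), capturing INSERTs and UPDATEs but not DELETEs:
*
*   Aws::DatabaseMigrationService::Model::S3Settings settings;
*   settings.WithCdcInsertsAndUpdates(true);
*   // Leave CdcInsertsOnly unset or false; the two settings are
*   // mutually exclusive on a single endpoint.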

*/ inline S3Settings& WithCdcInsertsAndUpdates(bool value) { SetCdcInsertsAndUpdates(value); return *this;} private: Aws::String m_serviceAccessRoleArn; bool m_serviceAccessRoleArnHasBeenSet; Aws::String m_externalTableDefinition; bool m_externalTableDefinitionHasBeenSet; Aws::String m_csvRowDelimiter; bool m_csvRowDelimiterHasBeenSet; Aws::String m_csvDelimiter; bool m_csvDelimiterHasBeenSet; Aws::String m_bucketFolder; bool m_bucketFolderHasBeenSet; Aws::String m_bucketName; bool m_bucketNameHasBeenSet; CompressionTypeValue m_compressionType; bool m_compressionTypeHasBeenSet; EncryptionModeValue m_encryptionMode; bool m_encryptionModeHasBeenSet; Aws::String m_serverSideEncryptionKmsKeyId; bool m_serverSideEncryptionKmsKeyIdHasBeenSet; DataFormatValue m_dataFormat; bool m_dataFormatHasBeenSet; EncodingTypeValue m_encodingType; bool m_encodingTypeHasBeenSet; int m_dictPageSizeLimit; bool m_dictPageSizeLimitHasBeenSet; int m_rowGroupLength; bool m_rowGroupLengthHasBeenSet; int m_dataPageSize; bool m_dataPageSizeHasBeenSet; ParquetVersionValue m_parquetVersion; bool m_parquetVersionHasBeenSet; bool m_enableStatistics; bool m_enableStatisticsHasBeenSet; bool m_includeOpForFullLoad; bool m_includeOpForFullLoadHasBeenSet; bool m_cdcInsertsOnly; bool m_cdcInsertsOnlyHasBeenSet; Aws::String m_timestampColumnName; bool m_timestampColumnNameHasBeenSet; bool m_parquetTimestampInMillisecond; bool m_parquetTimestampInMillisecondHasBeenSet; bool m_cdcInsertsAndUpdates; bool m_cdcInsertsAndUpdatesHasBeenSet; }; } // namespace Model } // namespace DatabaseMigrationService } // namespace Aws