/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/sagemaker/SageMaker_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/sagemaker/model/ImageConfig.h>
#include <aws/sagemaker/model/ContainerMode.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
  class JsonValue;
  class JsonView;
} // namespace Json
} // namespace Utils
namespace SageMaker
{
namespace Model
{

  /**
   * <p>Describes the container, as part of model definition.</p><p><h3>See
   * Also:</h3> AWS API Reference</p>
   */
  class AWS_SAGEMAKER_API ContainerDefinition
  {
  public:
    ContainerDefinition();
    ContainerDefinition(Aws::Utils::Json::JsonView jsonValue);
    ContainerDefinition& operator=(Aws::Utils::Json::JsonView jsonValue);
    Aws::Utils::Json::JsonValue Jsonize() const;

    /**
     * <p>This parameter is ignored for models that contain only a
     * <code>PrimaryContainer</code>.</p> <p>When a <code>ContainerDefinition</code>
     * is part of an inference pipeline, the value of the parameter uniquely
     * identifies the container for the purposes of logging and metrics. For
     * information, see Use Logs and Metrics to Monitor an Inference Pipeline. If
     * you don't specify a value for this parameter for a
     * <code>ContainerDefinition</code> that is part of an inference pipeline, a
     * unique name is automatically assigned based on the position of the
     * <code>ContainerDefinition</code> in the pipeline. If you specify a value for
     * the <code>ContainerHostName</code> for any <code>ContainerDefinition</code>
     * that is part of an inference pipeline, you must specify a value for the
     * <code>ContainerHostName</code> parameter of every
     * <code>ContainerDefinition</code> in that pipeline.</p>
     */
    inline const Aws::String& GetContainerHostname() const{ return m_containerHostname; }
    inline bool ContainerHostnameHasBeenSet() const { return m_containerHostnameHasBeenSet; }
    inline void SetContainerHostname(const Aws::String& value) { m_containerHostnameHasBeenSet = true; m_containerHostname = value; }
    inline void SetContainerHostname(Aws::String&& value) { m_containerHostnameHasBeenSet = true; m_containerHostname = std::move(value); }
    inline void SetContainerHostname(const char* value) { m_containerHostnameHasBeenSet = true; m_containerHostname.assign(value); }
    inline ContainerDefinition& WithContainerHostname(const Aws::String& value) { SetContainerHostname(value); return *this;}
    inline ContainerDefinition& WithContainerHostname(Aws::String&& value) { SetContainerHostname(std::move(value)); return *this;}
    inline ContainerDefinition& WithContainerHostname(const char* value) { SetContainerHostname(value); return *this;}
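
    /*
     * Example (illustrative sketch, not part of the generated API): naming the
     * containers of an inference pipeline. Hostnames and image paths below are
     * made-up placeholders. Note that if any container in the pipeline gets an
     * explicit hostname, every container in that pipeline must get one.
     *
     *   ContainerDefinition preprocess, predict;
     *   preprocess.WithContainerHostname("preprocess")
     *             .WithImage("123456789012.dkr.ecr.us-east-1.amazonaws.com/preprocess:latest");
     *   predict.WithContainerHostname("predict")
     *          .WithImage("123456789012.dkr.ecr.us-east-1.amazonaws.com/predict:latest");
     */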

    /**
     * <p>The Amazon EC2 Container Registry (Amazon ECR) path where inference code
     * is stored. If you are using your own custom algorithm instead of an
     * algorithm provided by Amazon SageMaker, the inference code must meet Amazon
     * SageMaker requirements. Amazon SageMaker supports both
     * <code>registry/repository[:tag]</code> and
     * <code>registry/repository[@digest]</code> image path formats. For more
     * information, see Using Your Own Algorithms with Amazon SageMaker.</p>
     */
    inline const Aws::String& GetImage() const{ return m_image; }
    inline bool ImageHasBeenSet() const { return m_imageHasBeenSet; }
    inline void SetImage(const Aws::String& value) { m_imageHasBeenSet = true; m_image = value; }
    inline void SetImage(Aws::String&& value) { m_imageHasBeenSet = true; m_image = std::move(value); }
    inline void SetImage(const char* value) { m_imageHasBeenSet = true; m_image.assign(value); }
    inline ContainerDefinition& WithImage(const Aws::String& value) { SetImage(value); return *this;}
    inline ContainerDefinition& WithImage(Aws::String&& value) { SetImage(std::move(value)); return *this;}
    inline ContainerDefinition& WithImage(const char* value) { SetImage(value); return *this;}
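
    /*
     * Example (illustrative sketch): the two supported image path formats.
     * Registry, repository, tag, and digest values are placeholders.
     *
     *   ContainerDefinition byTag, byDigest;
     *   byTag.SetImage("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-algo:1.0");
     *   byDigest.SetImage("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-algo"
     *                     "@sha256:0123456789abcdef0123456789abcdef"
     *                     "0123456789abcdef0123456789abcdef");
     */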

    /**
     * <p>Specifies whether the model container is in Amazon ECR or a private
     * Docker registry in your Amazon Virtual Private Cloud (VPC). For information
     * about storing containers in a private Docker registry, see Use a Private
     * Docker Registry for Real-Time Inference Containers.</p>
     */
    inline const ImageConfig& GetImageConfig() const{ return m_imageConfig; }
    inline bool ImageConfigHasBeenSet() const { return m_imageConfigHasBeenSet; }
    inline void SetImageConfig(const ImageConfig& value) { m_imageConfigHasBeenSet = true; m_imageConfig = value; }
    inline void SetImageConfig(ImageConfig&& value) { m_imageConfigHasBeenSet = true; m_imageConfig = std::move(value); }
    inline ContainerDefinition& WithImageConfig(const ImageConfig& value) { SetImageConfig(value); return *this;}
    inline ContainerDefinition& WithImageConfig(ImageConfig&& value) { SetImageConfig(std::move(value)); return *this;}
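
    /*
     * Example (illustrative sketch): pointing the container at a private Docker
     * registry in your VPC. This assumes the ImageConfig model type exposes a
     * RepositoryAccessMode accessor with a Vpc value; check
     * aws/sagemaker/model/ImageConfig.h for the actual interface.
     *
     *   ImageConfig cfg;
     *   cfg.SetRepositoryAccessMode(RepositoryAccessMode::Vpc);
     *   ContainerDefinition def;
     *   def.WithImage("my-private-registry.example.com/my-algo:latest")
     *      .WithImageConfig(cfg);
     */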

    /**
     * <p>Whether the container hosts a single model or multiple models.</p>
     */
    inline const ContainerMode& GetMode() const{ return m_mode; }
    inline bool ModeHasBeenSet() const { return m_modeHasBeenSet; }
    inline void SetMode(const ContainerMode& value) { m_modeHasBeenSet = true; m_mode = value; }
    inline void SetMode(ContainerMode&& value) { m_modeHasBeenSet = true; m_mode = std::move(value); }
    inline ContainerDefinition& WithMode(const ContainerMode& value) { SetMode(value); return *this;}
    inline ContainerDefinition& WithMode(ContainerMode&& value) { SetMode(std::move(value)); return *this;}
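
    /*
     * Example (illustrative sketch): selecting multi-model hosting. This assumes
     * the ContainerMode enum declares SINGLE_MODEL and MULTI_MODEL values; check
     * aws/sagemaker/model/ContainerMode.h.
     *
     *   ContainerDefinition def;
     *   def.SetMode(ContainerMode::MULTI_MODEL);
     */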

    /**
     * <p>The S3 path where the model artifacts, which result from model training,
     * are stored. This path must point to a single gzip compressed tar archive
     * (.tar.gz suffix). The S3 path is required for Amazon SageMaker built-in
     * algorithms, but not if you use your own algorithms. For more information on
     * built-in algorithms, see Common Parameters.</p> <p>If you provide a value
     * for this parameter, Amazon SageMaker uses AWS Security Token Service to
     * download model artifacts from the S3 path you provide. AWS STS is activated
     * in your IAM user account by default. If you previously deactivated AWS STS
     * for a region, you need to reactivate AWS STS for that region. For more
     * information, see Activating and Deactivating AWS STS in an AWS Region in the
     * AWS Identity and Access Management User Guide.</p> <p>If you use a built-in
     * algorithm to create a model, Amazon SageMaker requires that you provide an
     * S3 path to the model artifacts in <code>ModelDataUrl</code>.</p>
     */
    inline const Aws::String& GetModelDataUrl() const{ return m_modelDataUrl; }
    inline bool ModelDataUrlHasBeenSet() const { return m_modelDataUrlHasBeenSet; }
    inline void SetModelDataUrl(const Aws::String& value) { m_modelDataUrlHasBeenSet = true; m_modelDataUrl = value; }
    inline void SetModelDataUrl(Aws::String&& value) { m_modelDataUrlHasBeenSet = true; m_modelDataUrl = std::move(value); }
    inline void SetModelDataUrl(const char* value) { m_modelDataUrlHasBeenSet = true; m_modelDataUrl.assign(value); }
    inline ContainerDefinition& WithModelDataUrl(const Aws::String& value) { SetModelDataUrl(value); return *this;}
    inline ContainerDefinition& WithModelDataUrl(Aws::String&& value) { SetModelDataUrl(std::move(value)); return *this;}
    inline ContainerDefinition& WithModelDataUrl(const char* value) { SetModelDataUrl(value); return *this;}
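
    /*
     * Example (illustrative sketch): pointing the container at trained model
     * artifacts. The bucket and key are placeholders; the object must be a single
     * gzip-compressed tar archive.
     *
     *   ContainerDefinition def;
     *   def.SetModelDataUrl("s3://my-bucket/output/model.tar.gz");
     */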

    /**
     * <p>The environment variables to set in the Docker container. Each key and
     * value in the <code>Environment</code> string-to-string map can have a length
     * of up to 1024 characters. We support up to 16 entries in the map.</p>
     */
    inline const Aws::Map<Aws::String, Aws::String>& GetEnvironment() const{ return m_environment; }
    inline bool EnvironmentHasBeenSet() const { return m_environmentHasBeenSet; }
    inline void SetEnvironment(const Aws::Map<Aws::String, Aws::String>& value) { m_environmentHasBeenSet = true; m_environment = value; }
    inline void SetEnvironment(Aws::Map<Aws::String, Aws::String>&& value) { m_environmentHasBeenSet = true; m_environment = std::move(value); }
    inline ContainerDefinition& WithEnvironment(const Aws::Map<Aws::String, Aws::String>& value) { SetEnvironment(value); return *this;}
    inline ContainerDefinition& WithEnvironment(Aws::Map<Aws::String, Aws::String>&& value) { SetEnvironment(std::move(value)); return *this;}
    inline ContainerDefinition& AddEnvironment(const Aws::String& key, const Aws::String& value) { m_environmentHasBeenSet = true; m_environment.emplace(key, value); return *this; }
    inline ContainerDefinition& AddEnvironment(Aws::String&& key, const Aws::String& value) { m_environmentHasBeenSet = true; m_environment.emplace(std::move(key), value); return *this; }
    inline ContainerDefinition& AddEnvironment(const Aws::String& key, Aws::String&& value) { m_environmentHasBeenSet = true; m_environment.emplace(key, std::move(value)); return *this; }
    inline ContainerDefinition& AddEnvironment(Aws::String&& key, Aws::String&& value) { m_environmentHasBeenSet = true; m_environment.emplace(std::move(key), std::move(value)); return *this; }
    inline ContainerDefinition& AddEnvironment(const char* key, Aws::String&& value) { m_environmentHasBeenSet = true; m_environment.emplace(key, std::move(value)); return *this; }
    inline ContainerDefinition& AddEnvironment(Aws::String&& key, const char* value) { m_environmentHasBeenSet = true; m_environment.emplace(std::move(key), value); return *this; }
    inline ContainerDefinition& AddEnvironment(const char* key, const char* value) { m_environmentHasBeenSet = true; m_environment.emplace(key, value); return *this; }
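
    /*
     * Example (illustrative sketch): setting container environment variables.
     * Keys and values are placeholders; the map accepts at most 16 entries, each
     * key and value up to 1024 characters.
     *
     *   ContainerDefinition def;
     *   def.AddEnvironment("SAGEMAKER_PROGRAM", "inference.py")
     *      .AddEnvironment("LOG_LEVEL", "info");
     */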

    /**
     * <p>The name or Amazon Resource Name (ARN) of the model package to use to
     * create the model.</p>
     */
    inline const Aws::String& GetModelPackageName() const{ return m_modelPackageName; }
    inline bool ModelPackageNameHasBeenSet() const { return m_modelPackageNameHasBeenSet; }
    inline void SetModelPackageName(const Aws::String& value) { m_modelPackageNameHasBeenSet = true; m_modelPackageName = value; }
    inline void SetModelPackageName(Aws::String&& value) { m_modelPackageNameHasBeenSet = true; m_modelPackageName = std::move(value); }
    inline void SetModelPackageName(const char* value) { m_modelPackageNameHasBeenSet = true; m_modelPackageName.assign(value); }
    inline ContainerDefinition& WithModelPackageName(const Aws::String& value) { SetModelPackageName(value); return *this;}
    inline ContainerDefinition& WithModelPackageName(Aws::String&& value) { SetModelPackageName(std::move(value)); return *this;}
    inline ContainerDefinition& WithModelPackageName(const char* value) { SetModelPackageName(value); return *this;}

  private:

    Aws::String m_containerHostname;
    bool m_containerHostnameHasBeenSet;

    Aws::String m_image;
    bool m_imageHasBeenSet;

    ImageConfig m_imageConfig;
    bool m_imageConfigHasBeenSet;

    ContainerMode m_mode;
    bool m_modeHasBeenSet;

    Aws::String m_modelDataUrl;
    bool m_modelDataUrlHasBeenSet;

    Aws::Map<Aws::String, Aws::String> m_environment;
    bool m_environmentHasBeenSet;

    Aws::String m_modelPackageName;
    bool m_modelPackageNameHasBeenSet;
  };

} // namespace Model
} // namespace SageMaker
} // namespace Aws
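
/*
 * End-to-end example (illustrative sketch): assembling a complete
 * ContainerDefinition using only the accessors declared above. All names,
 * paths, and the account ID are placeholders.
 *
 *   #include <aws/sagemaker/model/ContainerDefinition.h>
 *
 *   using namespace Aws::SageMaker::Model;
 *
 *   ContainerDefinition def;
 *   def.WithContainerHostname("predict")
 *      .WithImage("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-algo:1.0")
 *      .WithModelDataUrl("s3://my-bucket/output/model.tar.gz")
 *      .AddEnvironment("LOG_LEVEL", "info");
 */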