/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace Aws { namespace Http { class HttpClient; class HttpClientFactory; } // namespace Http namespace Utils { template< typename R, typename E> class Outcome; namespace Threading { class Executor; } // namespace Threading } // namespace Utils namespace Auth { class AWSCredentials; class AWSCredentialsProvider; } // namespace Auth namespace Client { class RetryStrategy; } // namespace Client namespace ECS { namespace Model { class CreateCapacityProviderRequest; class CreateClusterRequest; class CreateServiceRequest; class CreateTaskSetRequest; class DeleteAccountSettingRequest; class DeleteAttributesRequest; class DeleteCapacityProviderRequest; class DeleteClusterRequest; class DeleteServiceRequest; class DeleteTaskSetRequest; class DeregisterContainerInstanceRequest; class DeregisterTaskDefinitionRequest; class DescribeCapacityProvidersRequest; class DescribeClustersRequest; class DescribeContainerInstancesRequest; class DescribeServicesRequest; class DescribeTaskDefinitionRequest; class DescribeTaskSetsRequest; class DescribeTasksRequest; class DiscoverPollEndpointRequest; class ListAccountSettingsRequest; class ListAttributesRequest; class ListClustersRequest; class ListContainerInstancesRequest; class ListServicesRequest; class ListTagsForResourceRequest; class ListTaskDefinitionFamiliesRequest; class 
ListTaskDefinitionsRequest; class ListTasksRequest; class PutAccountSettingRequest; class PutAccountSettingDefaultRequest; class PutAttributesRequest; class PutClusterCapacityProvidersRequest; class RegisterContainerInstanceRequest; class RegisterTaskDefinitionRequest; class RunTaskRequest; class StartTaskRequest; class StopTaskRequest; class SubmitAttachmentStateChangesRequest; class SubmitContainerStateChangeRequest; class SubmitTaskStateChangeRequest; class TagResourceRequest; class UntagResourceRequest; class UpdateClusterSettingsRequest; class UpdateContainerAgentRequest; class UpdateContainerInstancesStateRequest; class UpdateServiceRequest; class UpdateServicePrimaryTaskSetRequest; class UpdateTaskSetRequest; typedef Aws::Utils::Outcome CreateCapacityProviderOutcome; typedef Aws::Utils::Outcome CreateClusterOutcome; typedef Aws::Utils::Outcome CreateServiceOutcome; typedef Aws::Utils::Outcome CreateTaskSetOutcome; typedef Aws::Utils::Outcome DeleteAccountSettingOutcome; typedef Aws::Utils::Outcome DeleteAttributesOutcome; typedef Aws::Utils::Outcome DeleteCapacityProviderOutcome; typedef Aws::Utils::Outcome DeleteClusterOutcome; typedef Aws::Utils::Outcome DeleteServiceOutcome; typedef Aws::Utils::Outcome DeleteTaskSetOutcome; typedef Aws::Utils::Outcome DeregisterContainerInstanceOutcome; typedef Aws::Utils::Outcome DeregisterTaskDefinitionOutcome; typedef Aws::Utils::Outcome DescribeCapacityProvidersOutcome; typedef Aws::Utils::Outcome DescribeClustersOutcome; typedef Aws::Utils::Outcome DescribeContainerInstancesOutcome; typedef Aws::Utils::Outcome DescribeServicesOutcome; typedef Aws::Utils::Outcome DescribeTaskDefinitionOutcome; typedef Aws::Utils::Outcome DescribeTaskSetsOutcome; typedef Aws::Utils::Outcome DescribeTasksOutcome; typedef Aws::Utils::Outcome DiscoverPollEndpointOutcome; typedef Aws::Utils::Outcome ListAccountSettingsOutcome; typedef Aws::Utils::Outcome ListAttributesOutcome; typedef Aws::Utils::Outcome ListClustersOutcome; typedef 
Aws::Utils::Outcome ListContainerInstancesOutcome; typedef Aws::Utils::Outcome ListServicesOutcome; typedef Aws::Utils::Outcome ListTagsForResourceOutcome; typedef Aws::Utils::Outcome ListTaskDefinitionFamiliesOutcome; typedef Aws::Utils::Outcome ListTaskDefinitionsOutcome; typedef Aws::Utils::Outcome ListTasksOutcome; typedef Aws::Utils::Outcome PutAccountSettingOutcome; typedef Aws::Utils::Outcome PutAccountSettingDefaultOutcome; typedef Aws::Utils::Outcome PutAttributesOutcome; typedef Aws::Utils::Outcome PutClusterCapacityProvidersOutcome; typedef Aws::Utils::Outcome RegisterContainerInstanceOutcome; typedef Aws::Utils::Outcome RegisterTaskDefinitionOutcome; typedef Aws::Utils::Outcome RunTaskOutcome; typedef Aws::Utils::Outcome StartTaskOutcome; typedef Aws::Utils::Outcome StopTaskOutcome; typedef Aws::Utils::Outcome SubmitAttachmentStateChangesOutcome; typedef Aws::Utils::Outcome SubmitContainerStateChangeOutcome; typedef Aws::Utils::Outcome SubmitTaskStateChangeOutcome; typedef Aws::Utils::Outcome TagResourceOutcome; typedef Aws::Utils::Outcome UntagResourceOutcome; typedef Aws::Utils::Outcome UpdateClusterSettingsOutcome; typedef Aws::Utils::Outcome UpdateContainerAgentOutcome; typedef Aws::Utils::Outcome UpdateContainerInstancesStateOutcome; typedef Aws::Utils::Outcome UpdateServiceOutcome; typedef Aws::Utils::Outcome UpdateServicePrimaryTaskSetOutcome; typedef Aws::Utils::Outcome UpdateTaskSetOutcome; typedef std::future CreateCapacityProviderOutcomeCallable; typedef std::future CreateClusterOutcomeCallable; typedef std::future CreateServiceOutcomeCallable; typedef std::future CreateTaskSetOutcomeCallable; typedef std::future DeleteAccountSettingOutcomeCallable; typedef std::future DeleteAttributesOutcomeCallable; typedef std::future DeleteCapacityProviderOutcomeCallable; typedef std::future DeleteClusterOutcomeCallable; typedef std::future DeleteServiceOutcomeCallable; typedef std::future DeleteTaskSetOutcomeCallable; typedef std::future 
DeregisterContainerInstanceOutcomeCallable; typedef std::future DeregisterTaskDefinitionOutcomeCallable; typedef std::future DescribeCapacityProvidersOutcomeCallable; typedef std::future DescribeClustersOutcomeCallable; typedef std::future DescribeContainerInstancesOutcomeCallable; typedef std::future DescribeServicesOutcomeCallable; typedef std::future DescribeTaskDefinitionOutcomeCallable; typedef std::future DescribeTaskSetsOutcomeCallable; typedef std::future DescribeTasksOutcomeCallable; typedef std::future DiscoverPollEndpointOutcomeCallable; typedef std::future ListAccountSettingsOutcomeCallable; typedef std::future ListAttributesOutcomeCallable; typedef std::future ListClustersOutcomeCallable; typedef std::future ListContainerInstancesOutcomeCallable; typedef std::future ListServicesOutcomeCallable; typedef std::future ListTagsForResourceOutcomeCallable; typedef std::future ListTaskDefinitionFamiliesOutcomeCallable; typedef std::future ListTaskDefinitionsOutcomeCallable; typedef std::future ListTasksOutcomeCallable; typedef std::future PutAccountSettingOutcomeCallable; typedef std::future PutAccountSettingDefaultOutcomeCallable; typedef std::future PutAttributesOutcomeCallable; typedef std::future PutClusterCapacityProvidersOutcomeCallable; typedef std::future RegisterContainerInstanceOutcomeCallable; typedef std::future RegisterTaskDefinitionOutcomeCallable; typedef std::future RunTaskOutcomeCallable; typedef std::future StartTaskOutcomeCallable; typedef std::future StopTaskOutcomeCallable; typedef std::future SubmitAttachmentStateChangesOutcomeCallable; typedef std::future SubmitContainerStateChangeOutcomeCallable; typedef std::future SubmitTaskStateChangeOutcomeCallable; typedef std::future TagResourceOutcomeCallable; typedef std::future UntagResourceOutcomeCallable; typedef std::future UpdateClusterSettingsOutcomeCallable; typedef std::future UpdateContainerAgentOutcomeCallable; typedef std::future UpdateContainerInstancesStateOutcomeCallable; typedef 
std::future UpdateServiceOutcomeCallable; typedef std::future UpdateServicePrimaryTaskSetOutcomeCallable; typedef std::future UpdateTaskSetOutcomeCallable; } // namespace Model class ECSClient; typedef std::function&) > CreateCapacityProviderResponseReceivedHandler; typedef std::function&) > CreateClusterResponseReceivedHandler; typedef std::function&) > CreateServiceResponseReceivedHandler; typedef std::function&) > CreateTaskSetResponseReceivedHandler; typedef std::function&) > DeleteAccountSettingResponseReceivedHandler; typedef std::function&) > DeleteAttributesResponseReceivedHandler; typedef std::function&) > DeleteCapacityProviderResponseReceivedHandler; typedef std::function&) > DeleteClusterResponseReceivedHandler; typedef std::function&) > DeleteServiceResponseReceivedHandler; typedef std::function&) > DeleteTaskSetResponseReceivedHandler; typedef std::function&) > DeregisterContainerInstanceResponseReceivedHandler; typedef std::function&) > DeregisterTaskDefinitionResponseReceivedHandler; typedef std::function&) > DescribeCapacityProvidersResponseReceivedHandler; typedef std::function&) > DescribeClustersResponseReceivedHandler; typedef std::function&) > DescribeContainerInstancesResponseReceivedHandler; typedef std::function&) > DescribeServicesResponseReceivedHandler; typedef std::function&) > DescribeTaskDefinitionResponseReceivedHandler; typedef std::function&) > DescribeTaskSetsResponseReceivedHandler; typedef std::function&) > DescribeTasksResponseReceivedHandler; typedef std::function&) > DiscoverPollEndpointResponseReceivedHandler; typedef std::function&) > ListAccountSettingsResponseReceivedHandler; typedef std::function&) > ListAttributesResponseReceivedHandler; typedef std::function&) > ListClustersResponseReceivedHandler; typedef std::function&) > ListContainerInstancesResponseReceivedHandler; typedef std::function&) > ListServicesResponseReceivedHandler; typedef std::function&) > ListTagsForResourceResponseReceivedHandler; typedef 
std::function&) > ListTaskDefinitionFamiliesResponseReceivedHandler; typedef std::function&) > ListTaskDefinitionsResponseReceivedHandler; typedef std::function&) > ListTasksResponseReceivedHandler; typedef std::function&) > PutAccountSettingResponseReceivedHandler; typedef std::function&) > PutAccountSettingDefaultResponseReceivedHandler; typedef std::function&) > PutAttributesResponseReceivedHandler; typedef std::function&) > PutClusterCapacityProvidersResponseReceivedHandler; typedef std::function&) > RegisterContainerInstanceResponseReceivedHandler; typedef std::function&) > RegisterTaskDefinitionResponseReceivedHandler; typedef std::function&) > RunTaskResponseReceivedHandler; typedef std::function&) > StartTaskResponseReceivedHandler; typedef std::function&) > StopTaskResponseReceivedHandler; typedef std::function&) > SubmitAttachmentStateChangesResponseReceivedHandler; typedef std::function&) > SubmitContainerStateChangeResponseReceivedHandler; typedef std::function&) > SubmitTaskStateChangeResponseReceivedHandler; typedef std::function&) > TagResourceResponseReceivedHandler; typedef std::function&) > UntagResourceResponseReceivedHandler; typedef std::function&) > UpdateClusterSettingsResponseReceivedHandler; typedef std::function&) > UpdateContainerAgentResponseReceivedHandler; typedef std::function&) > UpdateContainerInstancesStateResponseReceivedHandler; typedef std::function&) > UpdateServiceResponseReceivedHandler; typedef std::function&) > UpdateServicePrimaryTaskSetResponseReceivedHandler; typedef std::function&) > UpdateTaskSetResponseReceivedHandler; /** * Amazon Elastic Container Service

Amazon Elastic * Container Service (Amazon ECS) is a highly scalable, fast, container management * service that makes it easy to run, stop, and manage Docker containers on a * cluster. You can host your cluster on a serverless infrastructure that is * managed by Amazon ECS by launching your services or tasks using the Fargate * launch type. For more control, you can host your tasks on a cluster of Amazon * Elastic Compute Cloud (Amazon EC2) instances that you manage by using the EC2 * launch type. For more information about launch types, see Amazon * ECS Launch Types.

Amazon ECS lets you launch and stop container-based * applications with simple API calls, allows you to get the state of your cluster * from a centralized service, and gives you access to many familiar Amazon EC2 * features.

You can use Amazon ECS to schedule the placement of containers * across your cluster based on your resource needs, isolation policies, and * availability requirements. Amazon ECS eliminates the need for you to operate * your own cluster management and configuration management systems or worry about * scaling your management infrastructure.

*/ class AWS_ECS_API ECSClient : public Aws::Client::AWSJsonClient { public: typedef Aws::Client::AWSJsonClient BASECLASS; /** * Initializes client to use DefaultCredentialProviderChain, with default http client factory, and optional client config. If client config * is not specified, it will be initialized to default values. */ ECSClient(const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration()); /** * Initializes client to use SimpleAWSCredentialsProvider, with default http client factory, and optional client config. If client config * is not specified, it will be initialized to default values. */ ECSClient(const Aws::Auth::AWSCredentials& credentials, const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration()); /** * Initializes client to use specified credentials provider with specified client config. If http client factory is not supplied, * the default http client factory will be used */ ECSClient(const std::shared_ptr& credentialsProvider, const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration()); virtual ~ECSClient(); /** *

Creates a new capacity provider. Capacity providers are associated with an * Amazon ECS cluster and are used in capacity provider strategies to facilitate * cluster auto scaling.

Only capacity providers using an Auto Scaling group * can be created. Amazon ECS tasks on AWS Fargate use the FARGATE and * FARGATE_SPOT capacity providers which are already created and * available to all accounts in Regions supported by AWS Fargate.

See * Also:

AWS * API Reference

*/ virtual Model::CreateCapacityProviderOutcome CreateCapacityProvider(const Model::CreateCapacityProviderRequest& request) const; /** *

Creates a new capacity provider. Capacity providers are associated with an * Amazon ECS cluster and are used in capacity provider strategies to facilitate * cluster auto scaling.

Only capacity providers using an Auto Scaling group * can be created. Amazon ECS tasks on AWS Fargate use the FARGATE and * FARGATE_SPOT capacity providers which are already created and * available to all accounts in Regions supported by AWS Fargate.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateCapacityProviderOutcomeCallable CreateCapacityProviderCallable(const Model::CreateCapacityProviderRequest& request) const; /** *

Creates a new capacity provider. Capacity providers are associated with an * Amazon ECS cluster and are used in capacity provider strategies to facilitate * cluster auto scaling.

Only capacity providers using an Auto Scaling group * can be created. Amazon ECS tasks on AWS Fargate use the FARGATE and * FARGATE_SPOT capacity providers which are already created and * available to all accounts in Regions supported by AWS Fargate.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateCapacityProviderAsync(const Model::CreateCapacityProviderRequest& request, const CreateCapacityProviderResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Creates a new Amazon ECS cluster. By default, your account receives a * default cluster when you launch your first container instance. * However, you can create your own cluster with a unique name with the * CreateCluster action.

When you call the * CreateCluster API operation, Amazon ECS attempts to create the Amazon ECS * service-linked role for your account so that required resources in other AWS * services can be managed on your behalf. However, if the IAM user that makes the * call does not have permissions to create the service-linked role, it is not * created. For more information, see Using * Service-Linked Roles for Amazon ECS in the Amazon Elastic Container * Service Developer Guide.

See Also:

AWS * API Reference

*/ virtual Model::CreateClusterOutcome CreateCluster(const Model::CreateClusterRequest& request) const; /** *

Creates a new Amazon ECS cluster. By default, your account receives a * default cluster when you launch your first container instance. * However, you can create your own cluster with a unique name with the * CreateCluster action.

When you call the * CreateCluster API operation, Amazon ECS attempts to create the Amazon ECS * service-linked role for your account so that required resources in other AWS * services can be managed on your behalf. However, if the IAM user that makes the * call does not have permissions to create the service-linked role, it is not * created. For more information, see Using * Service-Linked Roles for Amazon ECS in the Amazon Elastic Container * Service Developer Guide.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateClusterOutcomeCallable CreateClusterCallable(const Model::CreateClusterRequest& request) const; /** *

Creates a new Amazon ECS cluster. By default, your account receives a * default cluster when you launch your first container instance. * However, you can create your own cluster with a unique name with the * CreateCluster action.

When you call the * CreateCluster API operation, Amazon ECS attempts to create the Amazon ECS * service-linked role for your account so that required resources in other AWS * services can be managed on your behalf. However, if the IAM user that makes the * call does not have permissions to create the service-linked role, it is not * created. For more information, see Using * Service-Linked Roles for Amazon ECS in the Amazon Elastic Container * Service Developer Guide.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateClusterAsync(const Model::CreateClusterRequest& request, const CreateClusterResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Runs and maintains a desired number of tasks from a specified task * definition. If the number of tasks running in a service drops below the * desiredCount, Amazon ECS runs another copy of the task in the * specified cluster. To update an existing service, see the UpdateService * action.

In addition to maintaining the desired count of tasks in your * service, you can optionally run your service behind one or more load balancers. * The load balancers distribute traffic across the tasks that are associated with * the service. For more information, see Service * Load Balancing in the Amazon Elastic Container Service Developer * Guide.

Tasks for services that do not use a load balancer are * considered healthy if they're in the RUNNING state. Tasks for * services that do use a load balancer are considered healthy if they're in * the RUNNING state and the container instance that they're hosted on * is reported as healthy by the load balancer.

There are two service * scheduler strategies available:

  • REPLICA - The * replica scheduling strategy places and maintains the desired number of tasks * across your cluster. By default, the service scheduler spreads tasks across * Availability Zones. You can use task placement strategies and constraints to * customize task placement decisions. For more information, see Service * Scheduler Concepts in the Amazon Elastic Container Service Developer * Guide.

  • DAEMON - The daemon scheduling * strategy deploys exactly one task on each active container instance that meets * all of the task placement constraints that you specify in your cluster. The * service scheduler also evaluates the task placement constraints for running * tasks and will stop tasks that do not meet the placement constraints. When using * this strategy, you don't need to specify a desired number of tasks, a task * placement strategy, or use Service Auto Scaling policies. For more information, * see Service * Scheduler Concepts in the Amazon Elastic Container Service Developer * Guide.

You can optionally specify a deployment * configuration for your service. The deployment is triggered by changing * properties, such as the task definition or the desired count of a service, with * an UpdateService operation. The default value for a replica service for * minimumHealthyPercent is 100%. The default value for a daemon * service for minimumHealthyPercent is 0%.

If a service is * using the ECS deployment controller, the minimum healthy percent * represents a lower limit on the number of tasks in a service that must remain in * the RUNNING state during a deployment, as a percentage of the * desired number of tasks (rounded up to the nearest integer), and while any * container instances are in the DRAINING state if the service * contains tasks using the EC2 launch type. This parameter enables you to deploy * without using additional cluster capacity. For example, if your service has a * desired number of four tasks and a minimum healthy percent of 50%, the scheduler * might stop two existing tasks to free up cluster capacity before starting two * new tasks. Tasks for services that do not use a load balancer are * considered healthy if they're in the RUNNING state. Tasks for * services that do use a load balancer are considered healthy if they're in * the RUNNING state and they're reported as healthy by the load * balancer. The default value for minimum healthy percent is 100%.

If a * service is using the ECS deployment controller, the maximum * percent parameter represents an upper limit on the number of tasks in a * service that are allowed in the RUNNING or PENDING * state during a deployment, as a percentage of the desired number of tasks * (rounded down to the nearest integer), and while any container instances are in * the DRAINING state if the service contains tasks using the EC2 * launch type. This parameter enables you to define the deployment batch size. For * example, if your service has a desired number of four tasks and a maximum * percent value of 200%, the scheduler may start four new tasks before stopping * the four older tasks (provided that the cluster resources required to do this * are available). The default value for maximum percent is 200%.

If a * service is using either the CODE_DEPLOY or EXTERNAL * deployment controller types and tasks that use the EC2 launch type, the * minimum healthy percent and maximum percent values are used only * to define the lower and upper limit on the number of the tasks in the service * that remain in the RUNNING state while the container instances are * in the DRAINING state. If the tasks in the service use the Fargate * launch type, the minimum healthy percent and maximum percent values aren't used, * although they're currently visible when describing your service.

When * creating a service that uses the EXTERNAL deployment controller, * you can specify only parameters that aren't controlled at the task set level. * The only required parameter is the service name. You control your services using * the CreateTaskSet operation. For more information, see Amazon * ECS Deployment Types in the Amazon Elastic Container Service Developer * Guide.

When the service scheduler launches new tasks, it determines * task placement in your cluster using the following logic:

  • *

    Determine which of the container instances in your cluster can support your * service's task definition (for example, they have the required CPU, memory, * ports, and container instance attributes).

  • By default, the * service scheduler attempts to balance tasks across Availability Zones in this * manner (although you can choose a different placement strategy with the * placementStrategy parameter):

    • Sort the valid * container instances, giving priority to instances that have the fewest number of * running tasks for this service in their respective Availability Zone. For * example, if zone A has one running service task and zones B and C each have * zero, valid container instances in either zone B or C are considered optimal for * placement.

    • Place the new service task on a valid container * instance in an optimal Availability Zone (based on the previous steps), favoring * container instances with the fewest number of running tasks for this * service.

See Also:

AWS * API Reference

*/ virtual Model::CreateServiceOutcome CreateService(const Model::CreateServiceRequest& request) const; /** *

Runs and maintains a desired number of tasks from a specified task * definition. If the number of tasks running in a service drops below the * desiredCount, Amazon ECS runs another copy of the task in the * specified cluster. To update an existing service, see the UpdateService * action.

In addition to maintaining the desired count of tasks in your * service, you can optionally run your service behind one or more load balancers. * The load balancers distribute traffic across the tasks that are associated with * the service. For more information, see Service * Load Balancing in the Amazon Elastic Container Service Developer * Guide.

Tasks for services that do not use a load balancer are * considered healthy if they're in the RUNNING state. Tasks for * services that do use a load balancer are considered healthy if they're in * the RUNNING state and the container instance that they're hosted on * is reported as healthy by the load balancer.

There are two service * scheduler strategies available:

  • REPLICA - The * replica scheduling strategy places and maintains the desired number of tasks * across your cluster. By default, the service scheduler spreads tasks across * Availability Zones. You can use task placement strategies and constraints to * customize task placement decisions. For more information, see Service * Scheduler Concepts in the Amazon Elastic Container Service Developer * Guide.

  • DAEMON - The daemon scheduling * strategy deploys exactly one task on each active container instance that meets * all of the task placement constraints that you specify in your cluster. The * service scheduler also evaluates the task placement constraints for running * tasks and will stop tasks that do not meet the placement constraints. When using * this strategy, you don't need to specify a desired number of tasks, a task * placement strategy, or use Service Auto Scaling policies. For more information, * see Service * Scheduler Concepts in the Amazon Elastic Container Service Developer * Guide.

You can optionally specify a deployment * configuration for your service. The deployment is triggered by changing * properties, such as the task definition or the desired count of a service, with * an UpdateService operation. The default value for a replica service for * minimumHealthyPercent is 100%. The default value for a daemon * service for minimumHealthyPercent is 0%.

If a service is * using the ECS deployment controller, the minimum healthy percent * represents a lower limit on the number of tasks in a service that must remain in * the RUNNING state during a deployment, as a percentage of the * desired number of tasks (rounded up to the nearest integer), and while any * container instances are in the DRAINING state if the service * contains tasks using the EC2 launch type. This parameter enables you to deploy * without using additional cluster capacity. For example, if your service has a * desired number of four tasks and a minimum healthy percent of 50%, the scheduler * might stop two existing tasks to free up cluster capacity before starting two * new tasks. Tasks for services that do not use a load balancer are * considered healthy if they're in the RUNNING state. Tasks for * services that do use a load balancer are considered healthy if they're in * the RUNNING state and they're reported as healthy by the load * balancer. The default value for minimum healthy percent is 100%.

If a * service is using the ECS deployment controller, the maximum * percent parameter represents an upper limit on the number of tasks in a * service that are allowed in the RUNNING or PENDING * state during a deployment, as a percentage of the desired number of tasks * (rounded down to the nearest integer), and while any container instances are in * the DRAINING state if the service contains tasks using the EC2 * launch type. This parameter enables you to define the deployment batch size. For * example, if your service has a desired number of four tasks and a maximum * percent value of 200%, the scheduler may start four new tasks before stopping * the four older tasks (provided that the cluster resources required to do this * are available). The default value for maximum percent is 200%.

If a * service is using either the CODE_DEPLOY or EXTERNAL * deployment controller types and tasks that use the EC2 launch type, the * minimum healthy percent and maximum percent values are used only * to define the lower and upper limit on the number of the tasks in the service * that remain in the RUNNING state while the container instances are * in the DRAINING state. If the tasks in the service use the Fargate * launch type, the minimum healthy percent and maximum percent values aren't used, * although they're currently visible when describing your service.

When * creating a service that uses the EXTERNAL deployment controller, * you can specify only parameters that aren't controlled at the task set level. * The only required parameter is the service name. You control your services using * the CreateTaskSet operation. For more information, see Amazon * ECS Deployment Types in the Amazon Elastic Container Service Developer * Guide.

When the service scheduler launches new tasks, it determines * task placement in your cluster using the following logic:

  • *

    Determine which of the container instances in your cluster can support your * service's task definition (for example, they have the required CPU, memory, * ports, and container instance attributes).

  • By default, the * service scheduler attempts to balance tasks across Availability Zones in this * manner (although you can choose a different placement strategy with the * placementStrategy parameter):

    • Sort the valid * container instances, giving priority to instances that have the fewest number of * running tasks for this service in their respective Availability Zone. For * example, if zone A has one running service task and zones B and C each have * zero, valid container instances in either zone B or C are considered optimal for * placement.

    • Place the new service task on a valid container * instance in an optimal Availability Zone (based on the previous steps), favoring * container instances with the fewest number of running tasks for this * service.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateServiceOutcomeCallable CreateServiceCallable(const Model::CreateServiceRequest& request) const; /** *

Runs and maintains a desired number of tasks from a specified task * definition. If the number of tasks running in a service drops below the * desiredCount, Amazon ECS runs another copy of the task in the * specified cluster. To update an existing service, see the UpdateService * action.

In addition to maintaining the desired count of tasks in your * service, you can optionally run your service behind one or more load balancers. * The load balancers distribute traffic across the tasks that are associated with * the service. For more information, see Service * Load Balancing in the Amazon Elastic Container Service Developer * Guide.

Tasks for services that do not use a load balancer are * considered healthy if they're in the RUNNING state. Tasks for * services that do use a load balancer are considered healthy if they're in * the RUNNING state and the container instance that they're hosted on * is reported as healthy by the load balancer.

There are two service * scheduler strategies available:

  • REPLICA - The * replica scheduling strategy places and maintains the desired number of tasks * across your cluster. By default, the service scheduler spreads tasks across * Availability Zones. You can use task placement strategies and constraints to * customize task placement decisions. For more information, see Service * Scheduler Concepts in the Amazon Elastic Container Service Developer * Guide.

  • DAEMON - The daemon scheduling * strategy deploys exactly one task on each active container instance that meets * all of the task placement constraints that you specify in your cluster. The * service scheduler also evaluates the task placement constraints for running * tasks and will stop tasks that do not meet the placement constraints. When using * this strategy, you don't need to specify a desired number of tasks, a task * placement strategy, or use Service Auto Scaling policies. For more information, * see Service * Scheduler Concepts in the Amazon Elastic Container Service Developer * Guide.

You can optionally specify a deployment * configuration for your service. The deployment is triggered by changing * properties, such as the task definition or the desired count of a service, with * an UpdateService operation. The default value for a replica service for * minimumHealthyPercent is 100%. The default value for a daemon * service for minimumHealthyPercent is 0%.

If a service is * using the ECS deployment controller, the minimum healthy percent * represents a lower limit on the number of tasks in a service that must remain in * the RUNNING state during a deployment, as a percentage of the * desired number of tasks (rounded up to the nearest integer), and while any * container instances are in the DRAINING state if the service * contains tasks using the EC2 launch type. This parameter enables you to deploy * without using additional cluster capacity. For example, if your service has a * desired number of four tasks and a minimum healthy percent of 50%, the scheduler * might stop two existing tasks to free up cluster capacity before starting two * new tasks. Tasks for services that do not use a load balancer are * considered healthy if they're in the RUNNING state. Tasks for * services that do use a load balancer are considered healthy if they're in * the RUNNING state and they're reported as healthy by the load * balancer. The default value for minimum healthy percent is 100%.

If a * service is using the ECS deployment controller, the maximum * percent parameter represents an upper limit on the number of tasks in a * service that are allowed in the RUNNING or PENDING * state during a deployment, as a percentage of the desired number of tasks * (rounded down to the nearest integer), and while any container instances are in * the DRAINING state if the service contains tasks using the EC2 * launch type. This parameter enables you to define the deployment batch size. For * example, if your service has a desired number of four tasks and a maximum * percent value of 200%, the scheduler may start four new tasks before stopping * the four older tasks (provided that the cluster resources required to do this * are available). The default value for maximum percent is 200%.

If a * service is using either the CODE_DEPLOY or EXTERNAL * deployment controller types and tasks that use the EC2 launch type, the * minimum healthy percent and maximum percent values are used only * to define the lower and upper limit on the number of the tasks in the service * that remain in the RUNNING state while the container instances are * in the DRAINING state. If the tasks in the service use the Fargate * launch type, the minimum healthy percent and maximum percent values aren't used, * although they're currently visible when describing your service.

When * creating a service that uses the EXTERNAL deployment controller, * you can specify only parameters that aren't controlled at the task set level. * The only required parameter is the service name. You control your services using * the CreateTaskSet operation. For more information, see Amazon * ECS Deployment Types in the Amazon Elastic Container Service Developer * Guide.

When the service scheduler launches new tasks, it determines * task placement in your cluster using the following logic:

  • Determine which of the container instances in your cluster can support your * service's task definition (for example, they have the required CPU, memory, * ports, and container instance attributes).

  • By default, the * service scheduler attempts to balance tasks across Availability Zones in this * manner (although you can choose a different placement strategy with the * placementStrategy parameter):

    • Sort the valid * container instances, giving priority to instances that have the fewest number of * running tasks for this service in their respective Availability Zone. For * example, if zone A has one running service task and zones B and C each have * zero, valid container instances in either zone B or C are considered optimal for * placement.

    • Place the new service task on a valid container * instance in an optimal Availability Zone (based on the previous steps), favoring * container instances with the fewest number of running tasks for this * service.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateServiceAsync(const Model::CreateServiceRequest& request, const CreateServiceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Create a task set in the specified cluster and service. This is used when a * service uses the EXTERNAL deployment controller type. For more * information, see Amazon * ECS Deployment Types in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS * API Reference

*/ virtual Model::CreateTaskSetOutcome CreateTaskSet(const Model::CreateTaskSetRequest& request) const; /** *

Create a task set in the specified cluster and service. This is used when a * service uses the EXTERNAL deployment controller type. For more * information, see Amazon * ECS Deployment Types in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateTaskSetOutcomeCallable CreateTaskSetCallable(const Model::CreateTaskSetRequest& request) const; /** *

Create a task set in the specified cluster and service. This is used when a * service uses the EXTERNAL deployment controller type. For more * information, see Amazon * ECS Deployment Types in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateTaskSetAsync(const Model::CreateTaskSetRequest& request, const CreateTaskSetResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Disables an account setting for a specified IAM user, IAM role, or the root * user for an account.

See Also:

AWS * API Reference

*/ virtual Model::DeleteAccountSettingOutcome DeleteAccountSetting(const Model::DeleteAccountSettingRequest& request) const; /** *

Disables an account setting for a specified IAM user, IAM role, or the root * user for an account.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteAccountSettingOutcomeCallable DeleteAccountSettingCallable(const Model::DeleteAccountSettingRequest& request) const; /** *

Disables an account setting for a specified IAM user, IAM role, or the root * user for an account.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteAccountSettingAsync(const Model::DeleteAccountSettingRequest& request, const DeleteAccountSettingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Deletes one or more custom attributes from an Amazon ECS * resource.

See Also:

AWS * API Reference

*/ virtual Model::DeleteAttributesOutcome DeleteAttributes(const Model::DeleteAttributesRequest& request) const; /** *

Deletes one or more custom attributes from an Amazon ECS * resource.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteAttributesOutcomeCallable DeleteAttributesCallable(const Model::DeleteAttributesRequest& request) const; /** *

Deletes one or more custom attributes from an Amazon ECS * resource.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteAttributesAsync(const Model::DeleteAttributesRequest& request, const DeleteAttributesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Deletes the specified capacity provider.

The * FARGATE and FARGATE_SPOT capacity providers are * reserved and cannot be deleted. You can disassociate them from a cluster using * either the PutClusterCapacityProviders API or by deleting the * cluster.

Prior to a capacity provider being deleted, the capacity * provider must be removed from the capacity provider strategy from all services. * The UpdateService API can be used to remove a capacity provider from a * service's capacity provider strategy. When updating a service, the * forceNewDeployment option can be used to ensure that any tasks * using the Amazon EC2 instance capacity provided by the capacity provider are * transitioned to use the capacity from the remaining capacity providers. Only * capacity providers that are not associated with a cluster can be deleted. To * remove a capacity provider from a cluster, you can either use * PutClusterCapacityProviders or delete the cluster.

See * Also:

AWS * API Reference

*/ virtual Model::DeleteCapacityProviderOutcome DeleteCapacityProvider(const Model::DeleteCapacityProviderRequest& request) const; /** *

Deletes the specified capacity provider.

The * FARGATE and FARGATE_SPOT capacity providers are * reserved and cannot be deleted. You can disassociate them from a cluster using * either the PutClusterCapacityProviders API or by deleting the * cluster.

Prior to a capacity provider being deleted, the capacity * provider must be removed from the capacity provider strategy from all services. * The UpdateService API can be used to remove a capacity provider from a * service's capacity provider strategy. When updating a service, the * forceNewDeployment option can be used to ensure that any tasks * using the Amazon EC2 instance capacity provided by the capacity provider are * transitioned to use the capacity from the remaining capacity providers. Only * capacity providers that are not associated with a cluster can be deleted. To * remove a capacity provider from a cluster, you can either use * PutClusterCapacityProviders or delete the cluster.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteCapacityProviderOutcomeCallable DeleteCapacityProviderCallable(const Model::DeleteCapacityProviderRequest& request) const; /** *

Deletes the specified capacity provider.

The * FARGATE and FARGATE_SPOT capacity providers are * reserved and cannot be deleted. You can disassociate them from a cluster using * either the PutClusterCapacityProviders API or by deleting the * cluster.

Prior to a capacity provider being deleted, the capacity * provider must be removed from the capacity provider strategy from all services. * The UpdateService API can be used to remove a capacity provider from a * service's capacity provider strategy. When updating a service, the * forceNewDeployment option can be used to ensure that any tasks * using the Amazon EC2 instance capacity provided by the capacity provider are * transitioned to use the capacity from the remaining capacity providers. Only * capacity providers that are not associated with a cluster can be deleted. To * remove a capacity provider from a cluster, you can either use * PutClusterCapacityProviders or delete the cluster.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteCapacityProviderAsync(const Model::DeleteCapacityProviderRequest& request, const DeleteCapacityProviderResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Deletes the specified cluster. The cluster will transition to the * INACTIVE state. Clusters with an INACTIVE status may * remain discoverable in your account for a period of time. However, this behavior * is subject to change in the future, so you should not rely on * INACTIVE clusters persisting.

You must deregister all * container instances from this cluster before you may delete it. You can list the * container instances in a cluster with ListContainerInstances and * deregister them with DeregisterContainerInstance.

See * Also:

AWS * API Reference

*/ virtual Model::DeleteClusterOutcome DeleteCluster(const Model::DeleteClusterRequest& request) const; /** *

Deletes the specified cluster. The cluster will transition to the * INACTIVE state. Clusters with an INACTIVE status may * remain discoverable in your account for a period of time. However, this behavior * is subject to change in the future, so you should not rely on * INACTIVE clusters persisting.

You must deregister all * container instances from this cluster before you may delete it. You can list the * container instances in a cluster with ListContainerInstances and * deregister them with DeregisterContainerInstance.

See * Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteClusterOutcomeCallable DeleteClusterCallable(const Model::DeleteClusterRequest& request) const; /** *

Deletes the specified cluster. The cluster will transition to the * INACTIVE state. Clusters with an INACTIVE status may * remain discoverable in your account for a period of time. However, this behavior * is subject to change in the future, so you should not rely on * INACTIVE clusters persisting.

You must deregister all * container instances from this cluster before you may delete it. You can list the * container instances in a cluster with ListContainerInstances and * deregister them with DeregisterContainerInstance.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteClusterAsync(const Model::DeleteClusterRequest& request, const DeleteClusterResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Deletes a specified service within a cluster. You can delete a service if you * have no running tasks in it and the desired task count is zero. If the service * is actively maintaining tasks, you cannot delete it, and you must update the * service to a desired task count of zero. For more information, see * UpdateService.

When you delete a service, if there are * still running tasks that require cleanup, the service status moves from * ACTIVE to DRAINING, and the service is no longer * visible in the console or in the ListServices API operation. After all * tasks have transitioned to either STOPPING or STOPPED * status, the service status moves from DRAINING to * INACTIVE. Services in the DRAINING or * INACTIVE status can still be viewed with the * DescribeServices API operation. However, in the future, * INACTIVE services may be cleaned up and purged from Amazon ECS * record keeping, and DescribeServices calls on those services return a * ServiceNotFoundException error.

If you * attempt to create a new service with the same name as an existing service in * either ACTIVE or DRAINING status, you receive an * error.

See Also:

AWS * API Reference

*/ virtual Model::DeleteServiceOutcome DeleteService(const Model::DeleteServiceRequest& request) const; /** *

Deletes a specified service within a cluster. You can delete a service if you * have no running tasks in it and the desired task count is zero. If the service * is actively maintaining tasks, you cannot delete it, and you must update the * service to a desired task count of zero. For more information, see * UpdateService.

When you delete a service, if there are * still running tasks that require cleanup, the service status moves from * ACTIVE to DRAINING, and the service is no longer * visible in the console or in the ListServices API operation. After all * tasks have transitioned to either STOPPING or STOPPED * status, the service status moves from DRAINING to * INACTIVE. Services in the DRAINING or * INACTIVE status can still be viewed with the * DescribeServices API operation. However, in the future, * INACTIVE services may be cleaned up and purged from Amazon ECS * record keeping, and DescribeServices calls on those services return a * ServiceNotFoundException error.

If you * attempt to create a new service with the same name as an existing service in * either ACTIVE or DRAINING status, you receive an * error.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteServiceOutcomeCallable DeleteServiceCallable(const Model::DeleteServiceRequest& request) const; /** *

Deletes a specified service within a cluster. You can delete a service if you * have no running tasks in it and the desired task count is zero. If the service * is actively maintaining tasks, you cannot delete it, and you must update the * service to a desired task count of zero. For more information, see * UpdateService.

When you delete a service, if there are * still running tasks that require cleanup, the service status moves from * ACTIVE to DRAINING, and the service is no longer * visible in the console or in the ListServices API operation. After all * tasks have transitioned to either STOPPING or STOPPED * status, the service status moves from DRAINING to * INACTIVE. Services in the DRAINING or * INACTIVE status can still be viewed with the * DescribeServices API operation. However, in the future, * INACTIVE services may be cleaned up and purged from Amazon ECS * record keeping, and DescribeServices calls on those services return a * ServiceNotFoundException error.

If you * attempt to create a new service with the same name as an existing service in * either ACTIVE or DRAINING status, you receive an * error.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteServiceAsync(const Model::DeleteServiceRequest& request, const DeleteServiceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Deletes a specified task set within a service. This is used when a service * uses the EXTERNAL deployment controller type. For more information, * see Amazon * ECS Deployment Types in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS * API Reference

*/ virtual Model::DeleteTaskSetOutcome DeleteTaskSet(const Model::DeleteTaskSetRequest& request) const; /** *

Deletes a specified task set within a service. This is used when a service * uses the EXTERNAL deployment controller type. For more information, * see Amazon * ECS Deployment Types in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteTaskSetOutcomeCallable DeleteTaskSetCallable(const Model::DeleteTaskSetRequest& request) const; /** *

Deletes a specified task set within a service. This is used when a service * uses the EXTERNAL deployment controller type. For more information, * see Amazon * ECS Deployment Types in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteTaskSetAsync(const Model::DeleteTaskSetRequest& request, const DeleteTaskSetResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Deregisters an Amazon ECS container instance from the specified cluster. This * instance is no longer available to run tasks.

If you intend to use the * container instance for some other purpose after deregistration, you should stop * all of the tasks running on the container instance before deregistration. That * prevents any orphaned tasks from consuming resources.

Deregistering a * container instance removes the instance from a cluster, but it does not * terminate the EC2 instance. If you are finished using the instance, be sure to * terminate it in the Amazon EC2 console to stop billing.

If you * terminate a running container instance, Amazon ECS automatically deregisters the * instance from your cluster (stopped container instances or instances with * disconnected agents are not automatically deregistered when terminated).

*

See Also:

AWS * API Reference

*/ virtual Model::DeregisterContainerInstanceOutcome DeregisterContainerInstance(const Model::DeregisterContainerInstanceRequest& request) const; /** *

Deregisters an Amazon ECS container instance from the specified cluster. This * instance is no longer available to run tasks.

If you intend to use the * container instance for some other purpose after deregistration, you should stop * all of the tasks running on the container instance before deregistration. That * prevents any orphaned tasks from consuming resources.

Deregistering a * container instance removes the instance from a cluster, but it does not * terminate the EC2 instance. If you are finished using the instance, be sure to * terminate it in the Amazon EC2 console to stop billing.

If you * terminate a running container instance, Amazon ECS automatically deregisters the * instance from your cluster (stopped container instances or instances with * disconnected agents are not automatically deregistered when terminated).

*

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeregisterContainerInstanceOutcomeCallable DeregisterContainerInstanceCallable(const Model::DeregisterContainerInstanceRequest& request) const; /** *

Deregisters an Amazon ECS container instance from the specified cluster. This * instance is no longer available to run tasks.

If you intend to use the * container instance for some other purpose after deregistration, you should stop * all of the tasks running on the container instance before deregistration. That * prevents any orphaned tasks from consuming resources.

Deregistering a * container instance removes the instance from a cluster, but it does not * terminate the EC2 instance. If you are finished using the instance, be sure to * terminate it in the Amazon EC2 console to stop billing.

If you * terminate a running container instance, Amazon ECS automatically deregisters the * instance from your cluster (stopped container instances or instances with * disconnected agents are not automatically deregistered when terminated).

*

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeregisterContainerInstanceAsync(const Model::DeregisterContainerInstanceRequest& request, const DeregisterContainerInstanceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Deregisters the specified task definition by family and revision. Upon * deregistration, the task definition is marked as INACTIVE. Existing * tasks and services that reference an INACTIVE task definition * continue to run without disruption. Existing services that reference an * INACTIVE task definition can still scale up or down by modifying * the service's desired count.

You cannot use an INACTIVE task * definition to run new tasks or create new services, and you cannot update an * existing service to reference an INACTIVE task definition. However, * there may be up to a 10-minute window following deregistration where these * restrictions have not yet taken effect.

At this time, * INACTIVE task definitions remain discoverable in your account * indefinitely. However, this behavior is subject to change in the future, so you * should not rely on INACTIVE task definitions persisting beyond the * lifecycle of any associated tasks and services.

See Also:

* AWS * API Reference

*/ virtual Model::DeregisterTaskDefinitionOutcome DeregisterTaskDefinition(const Model::DeregisterTaskDefinitionRequest& request) const; /** *

Deregisters the specified task definition by family and revision. Upon * deregistration, the task definition is marked as INACTIVE. Existing * tasks and services that reference an INACTIVE task definition * continue to run without disruption. Existing services that reference an * INACTIVE task definition can still scale up or down by modifying * the service's desired count.

You cannot use an INACTIVE task * definition to run new tasks or create new services, and you cannot update an * existing service to reference an INACTIVE task definition. However, * there may be up to a 10-minute window following deregistration where these * restrictions have not yet taken effect.

At this time, * INACTIVE task definitions remain discoverable in your account * indefinitely. However, this behavior is subject to change in the future, so you * should not rely on INACTIVE task definitions persisting beyond the * lifecycle of any associated tasks and services.

See Also:

* AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeregisterTaskDefinitionOutcomeCallable DeregisterTaskDefinitionCallable(const Model::DeregisterTaskDefinitionRequest& request) const; /** *

Deregisters the specified task definition by family and revision. Upon * deregistration, the task definition is marked as INACTIVE. Existing * tasks and services that reference an INACTIVE task definition * continue to run without disruption. Existing services that reference an * INACTIVE task definition can still scale up or down by modifying * the service's desired count.

You cannot use an INACTIVE task * definition to run new tasks or create new services, and you cannot update an * existing service to reference an INACTIVE task definition. However, * there may be up to a 10-minute window following deregistration where these * restrictions have not yet taken effect.

At this time, * INACTIVE task definitions remain discoverable in your account * indefinitely. However, this behavior is subject to change in the future, so you * should not rely on INACTIVE task definitions persisting beyond the * lifecycle of any associated tasks and services.

See Also:

* AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeregisterTaskDefinitionAsync(const Model::DeregisterTaskDefinitionRequest& request, const DeregisterTaskDefinitionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Describes one or more of your capacity providers.

See Also:

* AWS * API Reference

*/ virtual Model::DescribeCapacityProvidersOutcome DescribeCapacityProviders(const Model::DescribeCapacityProvidersRequest& request) const; /** *

Describes one or more of your capacity providers.

See Also:

* AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeCapacityProvidersOutcomeCallable DescribeCapacityProvidersCallable(const Model::DescribeCapacityProvidersRequest& request) const; /** *

Describes one or more of your capacity providers.

See Also:

* AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeCapacityProvidersAsync(const Model::DescribeCapacityProvidersRequest& request, const DescribeCapacityProvidersResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Describes one or more of your clusters.

See Also:

AWS * API Reference

*/ virtual Model::DescribeClustersOutcome DescribeClusters(const Model::DescribeClustersRequest& request) const; /** *

Describes one or more of your clusters.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeClustersOutcomeCallable DescribeClustersCallable(const Model::DescribeClustersRequest& request) const; /** *

Describes one or more of your clusters.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeClustersAsync(const Model::DescribeClustersRequest& request, const DescribeClustersResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Describes Amazon Elastic Container Service container instances. Returns * metadata about registered and remaining resources on each container instance * requested.

See Also:

AWS * API Reference

*/ virtual Model::DescribeContainerInstancesOutcome DescribeContainerInstances(const Model::DescribeContainerInstancesRequest& request) const; /** *

Describes Amazon Elastic Container Service container instances. Returns * metadata about registered and remaining resources on each container instance * requested.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DescribeContainerInstancesOutcomeCallable DescribeContainerInstancesCallable(const Model::DescribeContainerInstancesRequest& request) const; /** *

Describes Amazon Elastic Container Service container instances. Returns * metadata about registered and remaining resources on each container instance * requested.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DescribeContainerInstancesAsync(const Model::DescribeContainerInstancesRequest& request, const DescribeContainerInstancesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

/**
 * <p>Describes the specified services running in your cluster.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/DescribeServices">AWS API Reference</a></p>
 */
virtual Model::DescribeServicesOutcome DescribeServices(const Model::DescribeServicesRequest& request) const;

/**
 * <p>Describes the specified services running in your cluster.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DescribeServicesOutcomeCallable DescribeServicesCallable(const Model::DescribeServicesRequest& request) const;

/**
 * <p>Describes the specified services running in your cluster.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DescribeServicesAsync(const Model::DescribeServicesRequest& request, const DescribeServicesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>Describes a task definition. You can specify a <code>family</code> and
 * <code>revision</code> to find information about a specific task definition, or
 * you can simply specify the family to find the latest <code>ACTIVE</code>
 * revision in that family.</p> <note> <p>You can only describe
 * <code>INACTIVE</code> task definitions while an active task or service
 * references them.</p> </note> <p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/DescribeTaskDefinition">AWS API Reference</a></p>
 */
virtual Model::DescribeTaskDefinitionOutcome DescribeTaskDefinition(const Model::DescribeTaskDefinitionRequest& request) const;

/**
 * <p>Describes a task definition (latest <code>ACTIVE</code> revision of a family,
 * or a specific family/revision pair).</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DescribeTaskDefinitionOutcomeCallable DescribeTaskDefinitionCallable(const Model::DescribeTaskDefinitionRequest& request) const;

/**
 * <p>Describes a task definition (latest <code>ACTIVE</code> revision of a family,
 * or a specific family/revision pair).</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DescribeTaskDefinitionAsync(const Model::DescribeTaskDefinitionRequest& request, const DescribeTaskDefinitionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>Describes the task sets in the specified cluster and service. This is used
 * when a service uses the <code>EXTERNAL</code> deployment controller type. For
 * more information, see Amazon ECS Deployment Types in the <i>Amazon Elastic
 * Container Service Developer Guide</i>.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/DescribeTaskSets">AWS API Reference</a></p>
 */
virtual Model::DescribeTaskSetsOutcome DescribeTaskSets(const Model::DescribeTaskSetsRequest& request) const;

/**
 * <p>Describes the task sets in the specified cluster and service
 * (<code>EXTERNAL</code> deployment controller).</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DescribeTaskSetsOutcomeCallable DescribeTaskSetsCallable(const Model::DescribeTaskSetsRequest& request) const;

/**
 * <p>Describes the task sets in the specified cluster and service
 * (<code>EXTERNAL</code> deployment controller).</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DescribeTaskSetsAsync(const Model::DescribeTaskSetsRequest& request, const DescribeTaskSetsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>Describes a specified task or tasks.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/DescribeTasks">AWS API Reference</a></p>
 */
virtual Model::DescribeTasksOutcome DescribeTasks(const Model::DescribeTasksRequest& request) const;

/**
 * <p>Describes a specified task or tasks.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DescribeTasksOutcomeCallable DescribeTasksCallable(const Model::DescribeTasksRequest& request) const;

/**
 * <p>Describes a specified task or tasks.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DescribeTasksAsync(const Model::DescribeTasksRequest& request, const DescribeTasksResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <note> <p>This action is only used by the Amazon ECS agent, and it is not
 * intended for use outside of the agent.</p> </note> <p>Returns an endpoint for
 * the Amazon ECS agent to poll for updates.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/DiscoverPollEndpoint">AWS API Reference</a></p>
 */
virtual Model::DiscoverPollEndpointOutcome DiscoverPollEndpoint(const Model::DiscoverPollEndpointRequest& request) const;

/**
 * <p>Agent-only API: returns an endpoint for the Amazon ECS agent to poll for
 * updates.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::DiscoverPollEndpointOutcomeCallable DiscoverPollEndpointCallable(const Model::DiscoverPollEndpointRequest& request) const;

/**
 * <p>Agent-only API: returns an endpoint for the Amazon ECS agent to poll for
 * updates.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void DiscoverPollEndpointAsync(const Model::DiscoverPollEndpointRequest& request, const DiscoverPollEndpointResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>Lists the account settings for a specified principal.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/ListAccountSettings">AWS API Reference</a></p>
 */
virtual Model::ListAccountSettingsOutcome ListAccountSettings(const Model::ListAccountSettingsRequest& request) const;

/**
 * <p>Lists the account settings for a specified principal.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::ListAccountSettingsOutcomeCallable ListAccountSettingsCallable(const Model::ListAccountSettingsRequest& request) const;

/**
 * <p>Lists the account settings for a specified principal.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void ListAccountSettingsAsync(const Model::ListAccountSettingsRequest& request, const ListAccountSettingsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>Lists the attributes for Amazon ECS resources within a specified target type
 * and cluster. When you specify a target type and cluster,
 * <code>ListAttributes</code> returns a list of attribute objects, one for each
 * attribute on each resource. You can filter the list of results to a single
 * attribute name to only return results that have that name. You can also filter
 * the results by attribute name and value, for example, to see which container
 * instances in a cluster are running a Linux AMI
 * (<code>ecs.os-type=linux</code>).</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/ListAttributes">AWS API Reference</a></p>
 */
virtual Model::ListAttributesOutcome ListAttributes(const Model::ListAttributesRequest& request) const;

/**
 * <p>Lists the attributes for Amazon ECS resources within a specified target type
 * and cluster, optionally filtered by attribute name and value.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::ListAttributesOutcomeCallable ListAttributesCallable(const Model::ListAttributesRequest& request) const;

/**
 * <p>Lists the attributes for Amazon ECS resources within a specified target type
 * and cluster, optionally filtered by attribute name and value.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void ListAttributesAsync(const Model::ListAttributesRequest& request, const ListAttributesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>Returns a list of existing clusters.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/ListClusters">AWS API Reference</a></p>
 */
virtual Model::ListClustersOutcome ListClusters(const Model::ListClustersRequest& request) const;

/**
 * <p>Returns a list of existing clusters.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::ListClustersOutcomeCallable ListClustersCallable(const Model::ListClustersRequest& request) const;

/**
 * <p>Returns a list of existing clusters.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void ListClustersAsync(const Model::ListClustersRequest& request, const ListClustersResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>Returns a list of container instances in a specified cluster. You can filter
 * the results of a <code>ListContainerInstances</code> operation with cluster
 * query language statements inside the <code>filter</code> parameter. For more
 * information, see Cluster Query Language in the <i>Amazon Elastic Container
 * Service Developer Guide</i>.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/ListContainerInstances">AWS API Reference</a></p>
 */
virtual Model::ListContainerInstancesOutcome ListContainerInstances(const Model::ListContainerInstancesRequest& request) const;

/**
 * <p>Returns a list of container instances in a specified cluster, optionally
 * filtered with cluster query language statements.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::ListContainerInstancesOutcomeCallable ListContainerInstancesCallable(const Model::ListContainerInstancesRequest& request) const;

/**
 * <p>Returns a list of container instances in a specified cluster, optionally
 * filtered with cluster query language statements.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void ListContainerInstancesAsync(const Model::ListContainerInstancesRequest& request, const ListContainerInstancesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>Lists the services that are running in a specified cluster.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/ListServices">AWS API Reference</a></p>
 */
virtual Model::ListServicesOutcome ListServices(const Model::ListServicesRequest& request) const;

/**
 * <p>Lists the services that are running in a specified cluster.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::ListServicesOutcomeCallable ListServicesCallable(const Model::ListServicesRequest& request) const;

/**
 * <p>Lists the services that are running in a specified cluster.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void ListServicesAsync(const Model::ListServicesRequest& request, const ListServicesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>List the tags for an Amazon ECS resource.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/ListTagsForResource">AWS API Reference</a></p>
 */
virtual Model::ListTagsForResourceOutcome ListTagsForResource(const Model::ListTagsForResourceRequest& request) const;

/**
 * <p>List the tags for an Amazon ECS resource.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::ListTagsForResourceOutcomeCallable ListTagsForResourceCallable(const Model::ListTagsForResourceRequest& request) const;

/**
 * <p>List the tags for an Amazon ECS resource.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void ListTagsForResourceAsync(const Model::ListTagsForResourceRequest& request, const ListTagsForResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>Returns a list of task definition families that are registered to your
 * account (which may include task definition families that no longer have any
 * <code>ACTIVE</code> task definition revisions).</p> <p>You can filter out task
 * definition families that do not contain any <code>ACTIVE</code> task definition
 * revisions by setting the <code>status</code> parameter to <code>ACTIVE</code>.
 * You can also filter the results with the <code>familyPrefix</code>
 * parameter.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/ListTaskDefinitionFamilies">AWS API Reference</a></p>
 */
virtual Model::ListTaskDefinitionFamiliesOutcome ListTaskDefinitionFamilies(const Model::ListTaskDefinitionFamiliesRequest& request) const;

/**
 * <p>Returns a list of task definition families that are registered to your
 * account, optionally filtered by <code>status</code> or <code>familyPrefix</code>.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::ListTaskDefinitionFamiliesOutcomeCallable ListTaskDefinitionFamiliesCallable(const Model::ListTaskDefinitionFamiliesRequest& request) const;

/**
 * <p>Returns a list of task definition families that are registered to your
 * account, optionally filtered by <code>status</code> or <code>familyPrefix</code>.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void ListTaskDefinitionFamiliesAsync(const Model::ListTaskDefinitionFamiliesRequest& request, const ListTaskDefinitionFamiliesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>Returns a list of task definitions that are registered to your account. You
 * can filter the results by family name with the <code>familyPrefix</code>
 * parameter or by status with the <code>status</code> parameter.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/ListTaskDefinitions">AWS API Reference</a></p>
 */
virtual Model::ListTaskDefinitionsOutcome ListTaskDefinitions(const Model::ListTaskDefinitionsRequest& request) const;

/**
 * <p>Returns a list of task definitions that are registered to your account,
 * optionally filtered by <code>familyPrefix</code> or <code>status</code>.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::ListTaskDefinitionsOutcomeCallable ListTaskDefinitionsCallable(const Model::ListTaskDefinitionsRequest& request) const;

/**
 * <p>Returns a list of task definitions that are registered to your account,
 * optionally filtered by <code>familyPrefix</code> or <code>status</code>.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void ListTaskDefinitionsAsync(const Model::ListTaskDefinitionsRequest& request, const ListTaskDefinitionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>Returns a list of tasks for a specified cluster. You can filter the results
 * by family name, by a particular container instance, or by the desired status of
 * the task with the <code>family</code>, <code>containerInstance</code>, and
 * <code>desiredStatus</code> parameters.</p> <p>Recently stopped tasks might
 * appear in the returned results. Currently, stopped tasks appear in the returned
 * results for at least one hour.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/ListTasks">AWS API Reference</a></p>
 */
virtual Model::ListTasksOutcome ListTasks(const Model::ListTasksRequest& request) const;

/**
 * <p>Returns a list of tasks for a specified cluster, optionally filtered by
 * family, container instance, or desired status.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::ListTasksOutcomeCallable ListTasksCallable(const Model::ListTasksRequest& request) const;

/**
 * <p>Returns a list of tasks for a specified cluster, optionally filtered by
 * family, container instance, or desired status.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void ListTasksAsync(const Model::ListTasksRequest& request, const ListTasksResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>Modifies an account setting. Account settings are set on a per-Region
 * basis.</p> <p>If you change the account setting for the root user, the default
 * settings for all of the IAM users and roles for which no individual account
 * setting has been specified are reset. For more information, see Account
 * Settings in the <i>Amazon Elastic Container Service Developer Guide</i>.</p>
 * <p>When <code>serviceLongArnFormat</code>, <code>taskLongArnFormat</code>, or
 * <code>containerInstanceLongArnFormat</code> are specified, the Amazon Resource
 * Name (ARN) and resource ID format of the resource type for a specified IAM
 * user, IAM role, or the root user for an account is affected. When
 * <code>awsvpcTrunking</code> is specified, the elastic network interface (ENI)
 * limit for any new container instances that support the feature is changed. When
 * <code>containerInsights</code> is specified, the default setting indicating
 * whether CloudWatch Container Insights is enabled for your clusters is
 * changed.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/PutAccountSetting">AWS API Reference</a></p>
 */
virtual Model::PutAccountSettingOutcome PutAccountSetting(const Model::PutAccountSettingRequest& request) const;

/**
 * <p>Modifies an account setting. Account settings are set on a per-Region
 * basis.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::PutAccountSettingOutcomeCallable PutAccountSettingCallable(const Model::PutAccountSettingRequest& request) const;

/**
 * <p>Modifies an account setting. Account settings are set on a per-Region
 * basis.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void PutAccountSettingAsync(const Model::PutAccountSettingRequest& request, const PutAccountSettingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>Modifies an account setting for all IAM users on an account for whom no
 * individual account setting has been specified. Account settings are set on a
 * per-Region basis.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/PutAccountSettingDefault">AWS API Reference</a></p>
 */
virtual Model::PutAccountSettingDefaultOutcome PutAccountSettingDefault(const Model::PutAccountSettingDefaultRequest& request) const;

/**
 * <p>Modifies an account setting for all IAM users on an account for whom no
 * individual account setting has been specified.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::PutAccountSettingDefaultOutcomeCallable PutAccountSettingDefaultCallable(const Model::PutAccountSettingDefaultRequest& request) const;

/**
 * <p>Modifies an account setting for all IAM users on an account for whom no
 * individual account setting has been specified.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void PutAccountSettingDefaultAsync(const Model::PutAccountSettingDefaultRequest& request, const PutAccountSettingDefaultResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>Create or update an attribute on an Amazon ECS resource. If the attribute
 * does not exist, it is created. If the attribute exists, its value is replaced
 * with the specified value. To delete an attribute, use
 * <a>DeleteAttributes</a>. For more information, see Attributes in the <i>Amazon
 * Elastic Container Service Developer Guide</i>.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/PutAttributes">AWS API Reference</a></p>
 */
virtual Model::PutAttributesOutcome PutAttributes(const Model::PutAttributesRequest& request) const;

/**
 * <p>Create or update an attribute on an Amazon ECS resource.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::PutAttributesOutcomeCallable PutAttributesCallable(const Model::PutAttributesRequest& request) const;

/**
 * <p>Create or update an attribute on an Amazon ECS resource.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void PutAttributesAsync(const Model::PutAttributesRequest& request, const PutAttributesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

/**
 * <p>Modifies the available capacity providers and the default capacity provider
 * strategy for a cluster.</p> <p>You must specify both the available capacity
 * providers and a default capacity provider strategy for the cluster. If the
 * specified cluster has existing capacity providers associated with it, you must
 * specify all existing capacity providers in addition to any new ones you want to
 * add. Any existing capacity providers associated with a cluster that are omitted
 * from a <a>PutClusterCapacityProviders</a> API call will be disassociated with
 * the cluster. You can only disassociate an existing capacity provider from a
 * cluster if it's not being used by any existing tasks.</p> <p>When creating a
 * service or running a task on a cluster, if no capacity provider or launch type
 * is specified, then the cluster's default capacity provider strategy is used. You
 * may specify an empty array (<code>[]</code>) to bypass defining a default
 * strategy.</p><p><h3>See Also:</h3> <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/PutClusterCapacityProviders">AWS API Reference</a></p>
 */
virtual Model::PutClusterCapacityProvidersOutcome PutClusterCapacityProviders(const Model::PutClusterCapacityProvidersRequest& request) const;

/**
 * <p>Modifies the available capacity providers and the default capacity provider
 * strategy for a cluster.</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::PutClusterCapacityProvidersOutcomeCallable PutClusterCapacityProvidersCallable(const Model::PutClusterCapacityProvidersRequest& request) const;

/**
 * <p>Modifies the available capacity providers and the default capacity provider
 * strategy for a cluster.</p>
 *
 * Queues the request into a thread executor and triggers associated callback when operation has finished.
 */
virtual void PutClusterCapacityProvidersAsync(const Model::PutClusterCapacityProvidersRequest& request, const PutClusterCapacityProvidersResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

This action is only used by the Amazon ECS agent, and it is not * intended for use outside of the agent.

Registers an EC2 instance * into the specified cluster. This instance becomes available to place containers * on.

See Also:

AWS * API Reference

*/ virtual Model::RegisterContainerInstanceOutcome RegisterContainerInstance(const Model::RegisterContainerInstanceRequest& request) const; /** *

This action is only used by the Amazon ECS agent, and it is not * intended for use outside of the agent.

Registers an EC2 instance * into the specified cluster. This instance becomes available to place containers * on.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::RegisterContainerInstanceOutcomeCallable RegisterContainerInstanceCallable(const Model::RegisterContainerInstanceRequest& request) const; /** *

This action is only used by the Amazon ECS agent, and it is not * intended for use outside of the agent.

Registers an EC2 instance * into the specified cluster. This instance becomes available to place containers * on.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void RegisterContainerInstanceAsync(const Model::RegisterContainerInstanceRequest& request, const RegisterContainerInstanceResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Registers a new task definition from the supplied family and * containerDefinitions. Optionally, you can add data volumes to your * containers with the volumes parameter. For more information about * task definition parameters and defaults, see Amazon * ECS Task Definitions in the Amazon Elastic Container Service Developer * Guide.

You can specify an IAM role for your task with the * taskRoleArn parameter. When you specify an IAM role for a task, its * containers can then use the latest versions of the AWS CLI or SDKs to make API * requests to the AWS services that are specified in the IAM policy associated * with the role. For more information, see IAM * Roles for Tasks in the Amazon Elastic Container Service Developer * Guide.

You can specify a Docker networking mode for the containers in * your task definition with the networkMode parameter. The available * network modes correspond to those described in Network * settings in the Docker run reference. If you specify the awsvpc * network mode, the task is allocated an elastic network interface, and you must * specify a NetworkConfiguration when you create a service or run a task * with the task definition. For more information, see Task * Networking in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS * API Reference

*/ virtual Model::RegisterTaskDefinitionOutcome RegisterTaskDefinition(const Model::RegisterTaskDefinitionRequest& request) const; /** *

Registers a new task definition from the supplied family and * containerDefinitions. Optionally, you can add data volumes to your * containers with the volumes parameter. For more information about * task definition parameters and defaults, see Amazon * ECS Task Definitions in the Amazon Elastic Container Service Developer * Guide.

You can specify an IAM role for your task with the * taskRoleArn parameter. When you specify an IAM role for a task, its * containers can then use the latest versions of the AWS CLI or SDKs to make API * requests to the AWS services that are specified in the IAM policy associated * with the role. For more information, see IAM * Roles for Tasks in the Amazon Elastic Container Service Developer * Guide.

You can specify a Docker networking mode for the containers in * your task definition with the networkMode parameter. The available * network modes correspond to those described in Network * settings in the Docker run reference. If you specify the awsvpc * network mode, the task is allocated an elastic network interface, and you must * specify a NetworkConfiguration when you create a service or run a task * with the task definition. For more information, see Task * Networking in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::RegisterTaskDefinitionOutcomeCallable RegisterTaskDefinitionCallable(const Model::RegisterTaskDefinitionRequest& request) const; /** *

Registers a new task definition from the supplied family and * containerDefinitions. Optionally, you can add data volumes to your * containers with the volumes parameter. For more information about * task definition parameters and defaults, see Amazon * ECS Task Definitions in the Amazon Elastic Container Service Developer * Guide.

You can specify an IAM role for your task with the * taskRoleArn parameter. When you specify an IAM role for a task, its * containers can then use the latest versions of the AWS CLI or SDKs to make API * requests to the AWS services that are specified in the IAM policy associated * with the role. For more information, see IAM * Roles for Tasks in the Amazon Elastic Container Service Developer * Guide.

You can specify a Docker networking mode for the containers in * your task definition with the networkMode parameter. The available * network modes correspond to those described in Network * settings in the Docker run reference. If you specify the awsvpc * network mode, the task is allocated an elastic network interface, and you must * specify a NetworkConfiguration when you create a service or run a task * with the task definition. For more information, see Task * Networking in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void RegisterTaskDefinitionAsync(const Model::RegisterTaskDefinitionRequest& request, const RegisterTaskDefinitionResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Starts a new task using the specified task definition.

You can allow * Amazon ECS to place tasks for you, or you can customize how Amazon ECS places * tasks using placement constraints and placement strategies. For more * information, see Scheduling * Tasks in the Amazon Elastic Container Service Developer Guide.

*

Alternatively, you can use StartTask to use your own scheduler or * place tasks manually on specific container instances.

The Amazon ECS API * follows an eventual consistency model, due to the distributed nature of the * system supporting the API. This means that the result of an API command you run * that affects your Amazon ECS resources might not be immediately visible to all * subsequent commands you run. Keep this in mind when you carry out an API command * that immediately follows a previous API command.

To manage eventual * consistency, you can do the following:

  • Confirm the state of the * resource before you run a command to modify it. Run the DescribeTasks command * using an exponential backoff algorithm to ensure that you allow enough time for * the previous command to propagate through the system. To do this, run the * DescribeTasks command repeatedly, starting with a couple of seconds of wait time * and increasing gradually up to five minutes of wait time.

  • Add * wait time between subsequent commands, even if the DescribeTasks command returns * an accurate response. Apply an exponential backoff algorithm starting with a * couple of seconds of wait time, and increase gradually up to about five minutes * of wait time.

See Also:

AWS API * Reference

*/ virtual Model::RunTaskOutcome RunTask(const Model::RunTaskRequest& request) const; /** *

Starts a new task using the specified task definition.

You can allow * Amazon ECS to place tasks for you, or you can customize how Amazon ECS places * tasks using placement constraints and placement strategies. For more * information, see Scheduling * Tasks in the Amazon Elastic Container Service Developer Guide.

*

Alternatively, you can use StartTask to use your own scheduler or * place tasks manually on specific container instances.

The Amazon ECS API * follows an eventual consistency model, due to the distributed nature of the * system supporting the API. This means that the result of an API command you run * that affects your Amazon ECS resources might not be immediately visible to all * subsequent commands you run. Keep this in mind when you carry out an API command * that immediately follows a previous API command.

To manage eventual * consistency, you can do the following:

  • Confirm the state of the * resource before you run a command to modify it. Run the DescribeTasks command * using an exponential backoff algorithm to ensure that you allow enough time for * the previous command to propagate through the system. To do this, run the * DescribeTasks command repeatedly, starting with a couple of seconds of wait time * and increasing gradually up to five minutes of wait time.

  • Add * wait time between subsequent commands, even if the DescribeTasks command returns * an accurate response. Apply an exponential backoff algorithm starting with a * couple of seconds of wait time, and increase gradually up to about five minutes * of wait time.

See Also:

AWS API * Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::RunTaskOutcomeCallable RunTaskCallable(const Model::RunTaskRequest& request) const; /** *

Starts a new task using the specified task definition.

You can allow * Amazon ECS to place tasks for you, or you can customize how Amazon ECS places * tasks using placement constraints and placement strategies. For more * information, see Scheduling * Tasks in the Amazon Elastic Container Service Developer Guide.

*

Alternatively, you can use StartTask to use your own scheduler or * place tasks manually on specific container instances.

The Amazon ECS API * follows an eventual consistency model, due to the distributed nature of the * system supporting the API. This means that the result of an API command you run * that affects your Amazon ECS resources might not be immediately visible to all * subsequent commands you run. Keep this in mind when you carry out an API command * that immediately follows a previous API command.

To manage eventual * consistency, you can do the following:

  • Confirm the state of the * resource before you run a command to modify it. Run the DescribeTasks command * using an exponential backoff algorithm to ensure that you allow enough time for * the previous command to propagate through the system. To do this, run the * DescribeTasks command repeatedly, starting with a couple of seconds of wait time * and increasing gradually up to five minutes of wait time.

  • Add * wait time between subsequent commands, even if the DescribeTasks command returns * an accurate response. Apply an exponential backoff algorithm starting with a * couple of seconds of wait time, and increase gradually up to about five minutes * of wait time.

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void RunTaskAsync(const Model::RunTaskRequest& request, const RunTaskResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Starts a new task from the specified task definition on the specified * container instance or instances.

Alternatively, you can use * RunTask to place tasks for you. For more information, see Scheduling * Tasks in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS API * Reference

*/ virtual Model::StartTaskOutcome StartTask(const Model::StartTaskRequest& request) const; /** *

Starts a new task from the specified task definition on the specified * container instance or instances.

Alternatively, you can use * RunTask to place tasks for you. For more information, see Scheduling * Tasks in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS API * Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::StartTaskOutcomeCallable StartTaskCallable(const Model::StartTaskRequest& request) const; /** *

Starts a new task from the specified task definition on the specified * container instance or instances.

Alternatively, you can use * RunTask to place tasks for you. For more information, see Scheduling * Tasks in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void StartTaskAsync(const Model::StartTaskRequest& request, const StartTaskResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Stops a running task. Any tags associated with the task will be deleted.

*

When StopTask is called on a task, the equivalent of docker * stop is issued to the containers running in the task. This results in a * SIGTERM value and a default 30-second timeout, after which the * SIGKILL value is sent and the containers are forcibly stopped. If * the container handles the SIGTERM value gracefully and exits within * 30 seconds from receiving it, no SIGKILL value is sent.

*

The default 30-second timeout can be configured on the Amazon ECS container * agent with the ECS_CONTAINER_STOP_TIMEOUT variable. For more * information, see Amazon * ECS Container Agent Configuration in the Amazon Elastic Container Service * Developer Guide.

See Also:

AWS API * Reference

*/ virtual Model::StopTaskOutcome StopTask(const Model::StopTaskRequest& request) const; /** *

Stops a running task. Any tags associated with the task will be deleted.

*

When StopTask is called on a task, the equivalent of docker * stop is issued to the containers running in the task. This results in a * SIGTERM value and a default 30-second timeout, after which the * SIGKILL value is sent and the containers are forcibly stopped. If * the container handles the SIGTERM value gracefully and exits within * 30 seconds from receiving it, no SIGKILL value is sent.

*

The default 30-second timeout can be configured on the Amazon ECS container * agent with the ECS_CONTAINER_STOP_TIMEOUT variable. For more * information, see Amazon * ECS Container Agent Configuration in the Amazon Elastic Container Service * Developer Guide.

See Also:

AWS API * Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::StopTaskOutcomeCallable StopTaskCallable(const Model::StopTaskRequest& request) const; /** *

Stops a running task. Any tags associated with the task will be deleted.

*

When StopTask is called on a task, the equivalent of docker * stop is issued to the containers running in the task. This results in a * SIGTERM value and a default 30-second timeout, after which the * SIGKILL value is sent and the containers are forcibly stopped. If * the container handles the SIGTERM value gracefully and exits within * 30 seconds from receiving it, no SIGKILL value is sent.

*

The default 30-second timeout can be configured on the Amazon ECS container * agent with the ECS_CONTAINER_STOP_TIMEOUT variable. For more * information, see Amazon * ECS Container Agent Configuration in the Amazon Elastic Container Service * Developer Guide.

See Also:

AWS API * Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void StopTaskAsync(const Model::StopTaskRequest& request, const StopTaskResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

This action is only used by the Amazon ECS agent, and it is not * intended for use outside of the agent.

Sent to acknowledge that * an attachment changed states.

See Also:

AWS * API Reference

*/ virtual Model::SubmitAttachmentStateChangesOutcome SubmitAttachmentStateChanges(const Model::SubmitAttachmentStateChangesRequest& request) const; /** *

This action is only used by the Amazon ECS agent, and it is not * intended for use outside of the agent.

Sent to acknowledge that * an attachment changed states.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::SubmitAttachmentStateChangesOutcomeCallable SubmitAttachmentStateChangesCallable(const Model::SubmitAttachmentStateChangesRequest& request) const; /** *

This action is only used by the Amazon ECS agent, and it is not * intended for use outside of the agent.

Sent to acknowledge that * an attachment changed states.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void SubmitAttachmentStateChangesAsync(const Model::SubmitAttachmentStateChangesRequest& request, const SubmitAttachmentStateChangesResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

This action is only used by the Amazon ECS agent, and it is not * intended for use outside of the agent.

Sent to acknowledge that a * container changed states.

See Also:

AWS * API Reference

*/ virtual Model::SubmitContainerStateChangeOutcome SubmitContainerStateChange(const Model::SubmitContainerStateChangeRequest& request) const; /** *

This action is only used by the Amazon ECS agent, and it is not * intended for use outside of the agent.

Sent to acknowledge that a * container changed states.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::SubmitContainerStateChangeOutcomeCallable SubmitContainerStateChangeCallable(const Model::SubmitContainerStateChangeRequest& request) const; /** *

This action is only used by the Amazon ECS agent, and it is not * intended for use outside of the agent.

Sent to acknowledge that a * container changed states.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void SubmitContainerStateChangeAsync(const Model::SubmitContainerStateChangeRequest& request, const SubmitContainerStateChangeResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

This action is only used by the Amazon ECS agent, and it is not * intended for use outside of the agent.

Sent to acknowledge that a * task changed states.

See Also:

AWS * API Reference

*/ virtual Model::SubmitTaskStateChangeOutcome SubmitTaskStateChange(const Model::SubmitTaskStateChangeRequest& request) const; /** *

This action is only used by the Amazon ECS agent, and it is not * intended for use outside of the agent.

Sent to acknowledge that a * task changed states.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::SubmitTaskStateChangeOutcomeCallable SubmitTaskStateChangeCallable(const Model::SubmitTaskStateChangeRequest& request) const; /** *

This action is only used by the Amazon ECS agent, and it is not * intended for use outside of the agent.

Sent to acknowledge that a * task changed states.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void SubmitTaskStateChangeAsync(const Model::SubmitTaskStateChangeRequest& request, const SubmitTaskStateChangeResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Associates the specified tags to a resource with the specified * resourceArn. If existing tags on a resource are not specified in * the request parameters, they are not changed. When a resource is deleted, the * tags associated with that resource are deleted as well.

See Also:

* AWS * API Reference

*/ virtual Model::TagResourceOutcome TagResource(const Model::TagResourceRequest& request) const; /** *

Associates the specified tags to a resource with the specified * resourceArn. If existing tags on a resource are not specified in * the request parameters, they are not changed. When a resource is deleted, the * tags associated with that resource are deleted as well.

See Also:

* AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::TagResourceOutcomeCallable TagResourceCallable(const Model::TagResourceRequest& request) const; /** *

Associates the specified tags to a resource with the specified * resourceArn. If existing tags on a resource are not specified in * the request parameters, they are not changed. When a resource is deleted, the * tags associated with that resource are deleted as well.

See Also:

* AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void TagResourceAsync(const Model::TagResourceRequest& request, const TagResourceResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Deletes specified tags from a resource.

See Also:

AWS * API Reference

*/ virtual Model::UntagResourceOutcome UntagResource(const Model::UntagResourceRequest& request) const; /** *

Deletes specified tags from a resource.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::UntagResourceOutcomeCallable UntagResourceCallable(const Model::UntagResourceRequest& request) const; /** *

Deletes specified tags from a resource.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void UntagResourceAsync(const Model::UntagResourceRequest& request, const UntagResourceResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Modifies the settings to use for a cluster.

See Also:

AWS * API Reference

*/ virtual Model::UpdateClusterSettingsOutcome UpdateClusterSettings(const Model::UpdateClusterSettingsRequest& request) const; /** *

Modifies the settings to use for a cluster.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::UpdateClusterSettingsOutcomeCallable UpdateClusterSettingsCallable(const Model::UpdateClusterSettingsRequest& request) const; /** *

Modifies the settings to use for a cluster.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void UpdateClusterSettingsAsync(const Model::UpdateClusterSettingsRequest& request, const UpdateClusterSettingsResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

Updates the Amazon ECS container agent on a specified container instance. * Updating the Amazon ECS container agent does not interrupt running tasks or * services on the container instance. The process for updating the agent differs * depending on whether your container instance was launched with the Amazon * ECS-optimized AMI or another operating system.

* UpdateContainerAgent requires the Amazon ECS-optimized AMI or * Amazon Linux with the ecs-init service installed and running. For * help updating the Amazon ECS container agent on other operating systems, see Manually * Updating the Amazon ECS Container Agent in the Amazon Elastic Container * Service Developer Guide.

See Also:

AWS * API Reference

*/ virtual Model::UpdateContainerAgentOutcome UpdateContainerAgent(const Model::UpdateContainerAgentRequest& request) const; /** *

Updates the Amazon ECS container agent on a specified container instance. * Updating the Amazon ECS container agent does not interrupt running tasks or * services on the container instance. The process for updating the agent differs * depending on whether your container instance was launched with the Amazon * ECS-optimized AMI or another operating system.

* UpdateContainerAgent requires the Amazon ECS-optimized AMI or * Amazon Linux with the ecs-init service installed and running. For * help updating the Amazon ECS container agent on other operating systems, see Manually * Updating the Amazon ECS Container Agent in the Amazon Elastic Container * Service Developer Guide.

See Also:

AWS * API Reference

* * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::UpdateContainerAgentOutcomeCallable UpdateContainerAgentCallable(const Model::UpdateContainerAgentRequest& request) const; /** *

Updates the Amazon ECS container agent on a specified container instance. * Updating the Amazon ECS container agent does not interrupt running tasks or * services on the container instance. The process for updating the agent differs * depending on whether your container instance was launched with the Amazon * ECS-optimized AMI or another operating system.

* UpdateContainerAgent requires the Amazon ECS-optimized AMI or * Amazon Linux with the ecs-init service installed and running. For * help updating the Amazon ECS container agent on other operating systems, see Manually * Updating the Amazon ECS Container Agent in the Amazon Elastic Container * Service Developer Guide.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void UpdateContainerAgentAsync(const Model::UpdateContainerAgentRequest& request, const UpdateContainerAgentResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; /** *

* <p>Modifies the status of an Amazon ECS container instance.</p> <p>Once a
 * container instance has reached an <code>ACTIVE</code> state, you can change
 * the status of a container instance to <code>DRAINING</code> to manually
 * remove an instance from a cluster, for example to perform system updates,
 * update the Docker daemon, or scale down the cluster size.</p> <p>A container
 * instance cannot be changed to <code>DRAINING</code> until it has reached an
 * <code>ACTIVE</code> status. If the instance is in any other status, an error
 * will be received.</p> <p>When you set a container instance to
 * <code>DRAINING</code>, Amazon ECS prevents new tasks from being scheduled
 * for placement on the container instance and replacement service tasks are
 * started on other container instances in the cluster if the resources are
 * available. Service tasks on the container instance that are in the
 * <code>PENDING</code> state are stopped immediately.</p> <p>Service tasks on
 * the container instance that are in the <code>RUNNING</code> state are
 * stopped and replaced according to the service's deployment configuration
 * parameters, <code>minimumHealthyPercent</code> and
 * <code>maximumPercent</code>. You can change the deployment configuration of
 * your service using UpdateService.</p> <ul> <li><p>If
 * <code>minimumHealthyPercent</code> is below 100%, the scheduler can ignore
 * <code>desiredCount</code> temporarily during task replacement. For example,
 * if <code>desiredCount</code> is four tasks, a minimum of 50% allows the
 * scheduler to stop two existing tasks before starting two new tasks. If the
 * minimum is 100%, the service scheduler can't remove existing tasks until the
 * replacement tasks are considered healthy.</p></li> <li><p>The
 * <code>maximumPercent</code> parameter represents an upper limit on the
 * number of running tasks during task replacement, which enables you to define
 * the replacement batch size. For example, if <code>desiredCount</code> is
 * four tasks, a maximum of 200% starts four new tasks before stopping the four
 * tasks to be drained, provided that the cluster resources required to do this
 * are available. If the maximum is 100%, then replacement tasks can't start
 * until the draining tasks have stopped.</p></li> </ul> <p>Any
 * <code>PENDING</code> or <code>RUNNING</code> tasks that do not belong to a
 * service are not affected. You must wait for them to finish or stop them
 * manually.</p> <p>A container instance has completed draining when it has no
 * more <code>RUNNING</code> tasks. You can verify this using ListTasks. When a
 * container instance has been drained, you can set it back to
 * <code>ACTIVE</code> status, after which the Amazon ECS scheduler can begin
 * scheduling tasks on the instance again.</p><p><h3>See Also:</h3>   <a
 * href="http://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/UpdateContainerInstancesState">AWS
 * API Reference</a></p>
 */
virtual Model::UpdateContainerInstancesStateOutcome UpdateContainerInstancesState(const Model::UpdateContainerInstancesStateRequest& request) const;

/**
 * <p>Modifies the status of an Amazon ECS container instance (see
 * UpdateContainerInstancesState for details).</p>
 *
 * returns a future to the operation so that it can be executed in parallel to other requests.
 */
virtual Model::UpdateContainerInstancesStateOutcomeCallable UpdateContainerInstancesStateCallable(const Model::UpdateContainerInstancesStateRequest& request) const;

/**

Modifies the status of an Amazon ECS container instance.

Once a * container instance has reached an ACTIVE state, you can change the * status of a container instance to DRAINING to manually remove an * instance from a cluster, for example to perform system updates, update the * Docker daemon, or scale down the cluster size.

A container * instance cannot be changed to DRAINING until it has reached an * ACTIVE status. If the instance is in any other status, an error * will be received.

When you set a container instance to * DRAINING, Amazon ECS prevents new tasks from being scheduled for * placement on the container instance and replacement service tasks are started on * other container instances in the cluster if the resources are available. Service * tasks on the container instance that are in the PENDING state are * stopped immediately.

Service tasks on the container instance that are in * the RUNNING state are stopped and replaced according to the * service's deployment configuration parameters, * minimumHealthyPercent and maximumPercent. You can * change the deployment configuration of your service using * UpdateService.

  • If minimumHealthyPercent is * below 100%, the scheduler can ignore desiredCount temporarily * during task replacement. For example, desiredCount is four tasks, a * minimum of 50% allows the scheduler to stop two existing tasks before starting * two new tasks. If the minimum is 100%, the service scheduler can't remove * existing tasks until the replacement tasks are considered healthy. Tasks for * services that do not use a load balancer are considered healthy if they are in * the RUNNING state. Tasks for services that use a load balancer are * considered healthy if they are in the RUNNING state and the * container instance they are hosted on is reported as healthy by the load * balancer.

  • The maximumPercent parameter represents * an upper limit on the number of running tasks during task replacement, which * enables you to define the replacement batch size. For example, if * desiredCount is four tasks, a maximum of 200% starts four new tasks * before stopping the four tasks to be drained, provided that the cluster * resources required to do this are available. If the maximum is 100%, then * replacement tasks can't start until the draining tasks have stopped.

  • *

Any PENDING or RUNNING tasks that do not * belong to a service are not affected. You must wait for them to finish or stop * them manually.

A container instance has completed draining when it has no * more RUNNING tasks. You can verify this using ListTasks.

*

When a container instance has been drained, you can set a container instance * to ACTIVE status and once it has reached that status the Amazon ECS * scheduler can begin scheduling tasks on the instance again.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers the associated callback when the operation has finished. The optional context is passed through to the handler unchanged. */ virtual void UpdateContainerInstancesStateAsync(const Model::UpdateContainerInstancesStateRequest& request, const UpdateContainerInstancesStateResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Updating the task placement strategies and constraints on an * Amazon ECS service remains in preview and is a Beta Service as defined by and * subject to the Beta Service Participation Service Terms located at https://aws.amazon.com/service-terms * ("Beta Terms"). These Beta Terms apply to your participation in this * preview.

Modifies the parameters of a service.

For * services using the rolling update (ECS) deployment controller, the * desired count, deployment configuration, network configuration, task placement * constraints and strategies, or task definition used can be updated.

For * services using the blue/green (CODE_DEPLOY) deployment controller, * only the desired count, deployment configuration, task placement constraints and * strategies, and health check grace period can be updated using this API. If the * network configuration, platform version, or task definition need to be updated, * a new AWS CodeDeploy deployment should be created. For more information, see CreateDeployment * in the AWS CodeDeploy API Reference.

For services using an * external deployment controller, you can update only the desired count, task * placement constraints and strategies, and health check grace period using this * API. If the launch type, load balancer, network configuration, platform version, * or task definition need to be updated, you should create a new task set. For * more information, see CreateTaskSet.

You can add to or subtract * from the number of instantiations of a task definition in a service by * specifying the cluster that the service is running in and a new * desiredCount parameter.

If you have updated the Docker image * of your application, you can create a new task definition with that image and * deploy it to your service. The service scheduler uses the minimum healthy * percent and maximum percent parameters (in the service's deployment * configuration) to determine the deployment strategy.

If your * updated Docker image uses the same tag as what is in the existing task * definition for your service (for example, my_image:latest), you do * not need to create a new revision of your task definition. You can update the * service using the forceNewDeployment option. The new tasks launched * by the deployment pull the current image/tag combination from your repository * when they start.

You can also update the deployment configuration * of a service. When a deployment is triggered by updating the task definition of * a service, the service scheduler uses the deployment configuration parameters, * minimumHealthyPercent and maximumPercent, to determine * the deployment strategy.

  • If minimumHealthyPercent * is below 100%, the scheduler can ignore desiredCount temporarily * during a deployment. For example, if desiredCount is four tasks, a * minimum of 50% allows the scheduler to stop two existing tasks before starting * two new tasks. Tasks for services that do not use a load balancer are considered * healthy if they are in the RUNNING state. Tasks for services that * use a load balancer are considered healthy if they are in the * RUNNING state and the container instance they are hosted on is * reported as healthy by the load balancer.

  • The * maximumPercent parameter represents an upper limit on the number of * running tasks during a deployment, which enables you to define the deployment * batch size. For example, if desiredCount is four tasks, a maximum * of 200% starts four new tasks before stopping the four older tasks (provided * that the cluster resources required to do this are available).

*

When UpdateService stops a task during a deployment, the equivalent of * docker stop is issued to the containers running in the task. This * results in a SIGTERM and a 30-second timeout, after which * SIGKILL is sent and the containers are forcibly stopped. If the * container handles the SIGTERM gracefully and exits within 30 * seconds from receiving it, no SIGKILL is sent.

When the * service scheduler launches new tasks, it determines task placement in your * cluster with the following logic:

  • Determine which of the * container instances in your cluster can support your service's task definition * (for example, they have the required CPU, memory, ports, and container instance * attributes).

  • By default, the service scheduler attempts to * balance tasks across Availability Zones in this manner (although you can choose * a different placement strategy):

    • Sort the valid container * instances by the fewest number of running tasks for this service in the same * Availability Zone as the instance. For example, if zone A has one running * service task and zones B and C each have zero, valid container instances in * either zone B or C are considered optimal for placement.

    • Place * the new service task on a valid container instance in an optimal Availability * Zone (based on the previous steps), favoring container instances with the fewest * number of running tasks for this service.

When * the service scheduler stops running tasks, it attempts to maintain balance * across the Availability Zones in your cluster using the following logic:

*
  • Sort the container instances by the largest number of running tasks * for this service in the same Availability Zone as the instance. For example, if * zone A has one running service task and zones B and C each have two, container * instances in either zone B or C are considered optimal for termination.

    *
  • Stop the task on a container instance in an optimal Availability * Zone (based on the previous steps), favoring container instances with the * largest number of running tasks for this service.

See * Also:

AWS * API Reference

* Synchronous variant: returns the operation's outcome directly rather than a future or callback. */ virtual Model::UpdateServiceOutcome UpdateService(const Model::UpdateServiceRequest& request) const; /** *

Updating the task placement strategies and constraints on an * Amazon ECS service remains in preview and is a Beta Service as defined by and * subject to the Beta Service Participation Service Terms located at https://aws.amazon.com/service-terms * ("Beta Terms"). These Beta Terms apply to your participation in this * preview.

Modifies the parameters of a service.

For * services using the rolling update (ECS) deployment controller, the * desired count, deployment configuration, network configuration, task placement * constraints and strategies, or task definition used can be updated.

For * services using the blue/green (CODE_DEPLOY) deployment controller, * only the desired count, deployment configuration, task placement constraints and * strategies, and health check grace period can be updated using this API. If the * network configuration, platform version, or task definition need to be updated, * a new AWS CodeDeploy deployment should be created. For more information, see CreateDeployment * in the AWS CodeDeploy API Reference.

For services using an * external deployment controller, you can update only the desired count, task * placement constraints and strategies, and health check grace period using this * API. If the launch type, load balancer, network configuration, platform version, * or task definition need to be updated, you should create a new task set. For * more information, see CreateTaskSet.

You can add to or subtract * from the number of instantiations of a task definition in a service by * specifying the cluster that the service is running in and a new * desiredCount parameter.

If you have updated the Docker image * of your application, you can create a new task definition with that image and * deploy it to your service. The service scheduler uses the minimum healthy * percent and maximum percent parameters (in the service's deployment * configuration) to determine the deployment strategy.

If your * updated Docker image uses the same tag as what is in the existing task * definition for your service (for example, my_image:latest), you do * not need to create a new revision of your task definition. You can update the * service using the forceNewDeployment option. The new tasks launched * by the deployment pull the current image/tag combination from your repository * when they start.

You can also update the deployment configuration * of a service. When a deployment is triggered by updating the task definition of * a service, the service scheduler uses the deployment configuration parameters, * minimumHealthyPercent and maximumPercent, to determine * the deployment strategy.

  • If minimumHealthyPercent * is below 100%, the scheduler can ignore desiredCount temporarily * during a deployment. For example, if desiredCount is four tasks, a * minimum of 50% allows the scheduler to stop two existing tasks before starting * two new tasks. Tasks for services that do not use a load balancer are considered * healthy if they are in the RUNNING state. Tasks for services that * use a load balancer are considered healthy if they are in the * RUNNING state and the container instance they are hosted on is * reported as healthy by the load balancer.

  • The * maximumPercent parameter represents an upper limit on the number of * running tasks during a deployment, which enables you to define the deployment * batch size. For example, if desiredCount is four tasks, a maximum * of 200% starts four new tasks before stopping the four older tasks (provided * that the cluster resources required to do this are available).

*

When UpdateService stops a task during a deployment, the equivalent of * docker stop is issued to the containers running in the task. This * results in a SIGTERM and a 30-second timeout, after which * SIGKILL is sent and the containers are forcibly stopped. If the * container handles the SIGTERM gracefully and exits within 30 * seconds from receiving it, no SIGKILL is sent.

When the * service scheduler launches new tasks, it determines task placement in your * cluster with the following logic:

  • Determine which of the * container instances in your cluster can support your service's task definition * (for example, they have the required CPU, memory, ports, and container instance * attributes).

  • By default, the service scheduler attempts to * balance tasks across Availability Zones in this manner (although you can choose * a different placement strategy):

    • Sort the valid container * instances by the fewest number of running tasks for this service in the same * Availability Zone as the instance. For example, if zone A has one running * service task and zones B and C each have zero, valid container instances in * either zone B or C are considered optimal for placement.

    • Place * the new service task on a valid container instance in an optimal Availability * Zone (based on the previous steps), favoring container instances with the fewest * number of running tasks for this service.

When * the service scheduler stops running tasks, it attempts to maintain balance * across the Availability Zones in your cluster using the following logic:

*
  • Sort the container instances by the largest number of running tasks * for this service in the same Availability Zone as the instance. For example, if * zone A has one running service task and zones B and C each have two, container * instances in either zone B or C are considered optimal for termination.

    *
  • Stop the task on a container instance in an optimal Availability * Zone (based on the previous steps), favoring container instances with the * largest number of running tasks for this service.

See * Also:

AWS * API Reference

* * Returns a future to the operation's outcome so that it can be executed in parallel to other requests; see the Async overload for a callback-based alternative. */ virtual Model::UpdateServiceOutcomeCallable UpdateServiceCallable(const Model::UpdateServiceRequest& request) const; /** *

Updating the task placement strategies and constraints on an * Amazon ECS service remains in preview and is a Beta Service as defined by and * subject to the Beta Service Participation Service Terms located at https://aws.amazon.com/service-terms * ("Beta Terms"). These Beta Terms apply to your participation in this * preview.

Modifies the parameters of a service.

For * services using the rolling update (ECS) deployment controller, the * desired count, deployment configuration, network configuration, task placement * constraints and strategies, or task definition used can be updated.

For * services using the blue/green (CODE_DEPLOY) deployment controller, * only the desired count, deployment configuration, task placement constraints and * strategies, and health check grace period can be updated using this API. If the * network configuration, platform version, or task definition need to be updated, * a new AWS CodeDeploy deployment should be created. For more information, see CreateDeployment * in the AWS CodeDeploy API Reference.

For services using an * external deployment controller, you can update only the desired count, task * placement constraints and strategies, and health check grace period using this * API. If the launch type, load balancer, network configuration, platform version, * or task definition need to be updated, you should create a new task set. For * more information, see CreateTaskSet.

You can add to or subtract * from the number of instantiations of a task definition in a service by * specifying the cluster that the service is running in and a new * desiredCount parameter.

If you have updated the Docker image * of your application, you can create a new task definition with that image and * deploy it to your service. The service scheduler uses the minimum healthy * percent and maximum percent parameters (in the service's deployment * configuration) to determine the deployment strategy.

If your * updated Docker image uses the same tag as what is in the existing task * definition for your service (for example, my_image:latest), you do * not need to create a new revision of your task definition. You can update the * service using the forceNewDeployment option. The new tasks launched * by the deployment pull the current image/tag combination from your repository * when they start.

You can also update the deployment configuration * of a service. When a deployment is triggered by updating the task definition of * a service, the service scheduler uses the deployment configuration parameters, * minimumHealthyPercent and maximumPercent, to determine * the deployment strategy.

  • If minimumHealthyPercent * is below 100%, the scheduler can ignore desiredCount temporarily * during a deployment. For example, if desiredCount is four tasks, a * minimum of 50% allows the scheduler to stop two existing tasks before starting * two new tasks. Tasks for services that do not use a load balancer are considered * healthy if they are in the RUNNING state. Tasks for services that * use a load balancer are considered healthy if they are in the * RUNNING state and the container instance they are hosted on is * reported as healthy by the load balancer.

  • The * maximumPercent parameter represents an upper limit on the number of * running tasks during a deployment, which enables you to define the deployment * batch size. For example, if desiredCount is four tasks, a maximum * of 200% starts four new tasks before stopping the four older tasks (provided * that the cluster resources required to do this are available).

*

When UpdateService stops a task during a deployment, the equivalent of * docker stop is issued to the containers running in the task. This * results in a SIGTERM and a 30-second timeout, after which * SIGKILL is sent and the containers are forcibly stopped. If the * container handles the SIGTERM gracefully and exits within 30 * seconds from receiving it, no SIGKILL is sent.

When the * service scheduler launches new tasks, it determines task placement in your * cluster with the following logic:

  • Determine which of the * container instances in your cluster can support your service's task definition * (for example, they have the required CPU, memory, ports, and container instance * attributes).

  • By default, the service scheduler attempts to * balance tasks across Availability Zones in this manner (although you can choose * a different placement strategy):

    • Sort the valid container * instances by the fewest number of running tasks for this service in the same * Availability Zone as the instance. For example, if zone A has one running * service task and zones B and C each have zero, valid container instances in * either zone B or C are considered optimal for placement.

    • Place * the new service task on a valid container instance in an optimal Availability * Zone (based on the previous steps), favoring container instances with the fewest * number of running tasks for this service.

When * the service scheduler stops running tasks, it attempts to maintain balance * across the Availability Zones in your cluster using the following logic:

*
  • Sort the container instances by the largest number of running tasks * for this service in the same Availability Zone as the instance. For example, if * zone A has one running service task and zones B and C each have two, container * instances in either zone B or C are considered optimal for termination.

    *
  • Stop the task on a container instance in an optimal Availability * Zone (based on the previous steps), favoring container instances with the * largest number of running tasks for this service.

See * Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers the associated callback when the operation has finished. The optional context is passed through to the handler unchanged. */ virtual void UpdateServiceAsync(const Model::UpdateServiceRequest& request, const UpdateServiceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Modifies which task set in a service is the primary task set. Any parameters * that are updated on the primary task set in a service will transition to the * service. This is used when a service uses the EXTERNAL deployment * controller type. For more information, see Amazon * ECS Deployment Types in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS * API Reference

* Synchronous variant: returns the operation's outcome directly rather than a future or callback. */ virtual Model::UpdateServicePrimaryTaskSetOutcome UpdateServicePrimaryTaskSet(const Model::UpdateServicePrimaryTaskSetRequest& request) const; /** *

Modifies which task set in a service is the primary task set. Any parameters * that are updated on the primary task set in a service will transition to the * service. This is used when a service uses the EXTERNAL deployment * controller type. For more information, see Amazon * ECS Deployment Types in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS * API Reference

* * Returns a future to the operation's outcome so that it can be executed in parallel to other requests; see the Async overload for a callback-based alternative. */ virtual Model::UpdateServicePrimaryTaskSetOutcomeCallable UpdateServicePrimaryTaskSetCallable(const Model::UpdateServicePrimaryTaskSetRequest& request) const; /** *

Modifies which task set in a service is the primary task set. Any parameters * that are updated on the primary task set in a service will transition to the * service. This is used when a service uses the EXTERNAL deployment * controller type. For more information, see Amazon * ECS Deployment Types in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers the associated callback when the operation has finished. The optional context is passed through to the handler unchanged. */ virtual void UpdateServicePrimaryTaskSetAsync(const Model::UpdateServicePrimaryTaskSetRequest& request, const UpdateServicePrimaryTaskSetResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** *

Modifies a task set. This is used when a service uses the * EXTERNAL deployment controller type. For more information, see Amazon * ECS Deployment Types in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS * API Reference

* Synchronous variant: returns the operation's outcome directly rather than a future or callback. */ virtual Model::UpdateTaskSetOutcome UpdateTaskSet(const Model::UpdateTaskSetRequest& request) const; /** *

Modifies a task set. This is used when a service uses the * EXTERNAL deployment controller type. For more information, see Amazon * ECS Deployment Types in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS * API Reference

* * Returns a future to the operation's outcome so that it can be executed in parallel to other requests; see the Async overload for a callback-based alternative. */ virtual Model::UpdateTaskSetOutcomeCallable UpdateTaskSetCallable(const Model::UpdateTaskSetRequest& request) const; /** *

Modifies a task set. This is used when a service uses the * EXTERNAL deployment controller type. For more information, see Amazon * ECS Deployment Types in the Amazon Elastic Container Service Developer * Guide.

See Also:

AWS * API Reference

* * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void UpdateTaskSetAsync(const Model::UpdateTaskSetRequest& request, const UpdateTaskSetResponseReceivedHandler& handler, const std::shared_ptr& context = nullptr) const; void OverrideEndpoint(const Aws::String& endpoint); private: void init(const Aws::Client::ClientConfiguration& clientConfiguration); void CreateCapacityProviderAsyncHelper(const Model::CreateCapacityProviderRequest& request, const CreateCapacityProviderResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateClusterAsyncHelper(const Model::CreateClusterRequest& request, const CreateClusterResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateServiceAsyncHelper(const Model::CreateServiceRequest& request, const CreateServiceResponseReceivedHandler& handler, const std::shared_ptr& context) const; void CreateTaskSetAsyncHelper(const Model::CreateTaskSetRequest& request, const CreateTaskSetResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteAccountSettingAsyncHelper(const Model::DeleteAccountSettingRequest& request, const DeleteAccountSettingResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteAttributesAsyncHelper(const Model::DeleteAttributesRequest& request, const DeleteAttributesResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteCapacityProviderAsyncHelper(const Model::DeleteCapacityProviderRequest& request, const DeleteCapacityProviderResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteClusterAsyncHelper(const Model::DeleteClusterRequest& request, const DeleteClusterResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeleteServiceAsyncHelper(const Model::DeleteServiceRequest& request, const DeleteServiceResponseReceivedHandler& handler, const std::shared_ptr& context) const; 
void DeleteTaskSetAsyncHelper(const Model::DeleteTaskSetRequest& request, const DeleteTaskSetResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeregisterContainerInstanceAsyncHelper(const Model::DeregisterContainerInstanceRequest& request, const DeregisterContainerInstanceResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DeregisterTaskDefinitionAsyncHelper(const Model::DeregisterTaskDefinitionRequest& request, const DeregisterTaskDefinitionResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DescribeCapacityProvidersAsyncHelper(const Model::DescribeCapacityProvidersRequest& request, const DescribeCapacityProvidersResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DescribeClustersAsyncHelper(const Model::DescribeClustersRequest& request, const DescribeClustersResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DescribeContainerInstancesAsyncHelper(const Model::DescribeContainerInstancesRequest& request, const DescribeContainerInstancesResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DescribeServicesAsyncHelper(const Model::DescribeServicesRequest& request, const DescribeServicesResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DescribeTaskDefinitionAsyncHelper(const Model::DescribeTaskDefinitionRequest& request, const DescribeTaskDefinitionResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DescribeTaskSetsAsyncHelper(const Model::DescribeTaskSetsRequest& request, const DescribeTaskSetsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DescribeTasksAsyncHelper(const Model::DescribeTasksRequest& request, const DescribeTasksResponseReceivedHandler& handler, const std::shared_ptr& context) const; void DiscoverPollEndpointAsyncHelper(const Model::DiscoverPollEndpointRequest& request, const 
DiscoverPollEndpointResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListAccountSettingsAsyncHelper(const Model::ListAccountSettingsRequest& request, const ListAccountSettingsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListAttributesAsyncHelper(const Model::ListAttributesRequest& request, const ListAttributesResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListClustersAsyncHelper(const Model::ListClustersRequest& request, const ListClustersResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListContainerInstancesAsyncHelper(const Model::ListContainerInstancesRequest& request, const ListContainerInstancesResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListServicesAsyncHelper(const Model::ListServicesRequest& request, const ListServicesResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListTagsForResourceAsyncHelper(const Model::ListTagsForResourceRequest& request, const ListTagsForResourceResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListTaskDefinitionFamiliesAsyncHelper(const Model::ListTaskDefinitionFamiliesRequest& request, const ListTaskDefinitionFamiliesResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListTaskDefinitionsAsyncHelper(const Model::ListTaskDefinitionsRequest& request, const ListTaskDefinitionsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void ListTasksAsyncHelper(const Model::ListTasksRequest& request, const ListTasksResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutAccountSettingAsyncHelper(const Model::PutAccountSettingRequest& request, const PutAccountSettingResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutAccountSettingDefaultAsyncHelper(const Model::PutAccountSettingDefaultRequest& request, const 
PutAccountSettingDefaultResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutAttributesAsyncHelper(const Model::PutAttributesRequest& request, const PutAttributesResponseReceivedHandler& handler, const std::shared_ptr& context) const; void PutClusterCapacityProvidersAsyncHelper(const Model::PutClusterCapacityProvidersRequest& request, const PutClusterCapacityProvidersResponseReceivedHandler& handler, const std::shared_ptr& context) const; void RegisterContainerInstanceAsyncHelper(const Model::RegisterContainerInstanceRequest& request, const RegisterContainerInstanceResponseReceivedHandler& handler, const std::shared_ptr& context) const; void RegisterTaskDefinitionAsyncHelper(const Model::RegisterTaskDefinitionRequest& request, const RegisterTaskDefinitionResponseReceivedHandler& handler, const std::shared_ptr& context) const; void RunTaskAsyncHelper(const Model::RunTaskRequest& request, const RunTaskResponseReceivedHandler& handler, const std::shared_ptr& context) const; void StartTaskAsyncHelper(const Model::StartTaskRequest& request, const StartTaskResponseReceivedHandler& handler, const std::shared_ptr& context) const; void StopTaskAsyncHelper(const Model::StopTaskRequest& request, const StopTaskResponseReceivedHandler& handler, const std::shared_ptr& context) const; void SubmitAttachmentStateChangesAsyncHelper(const Model::SubmitAttachmentStateChangesRequest& request, const SubmitAttachmentStateChangesResponseReceivedHandler& handler, const std::shared_ptr& context) const; void SubmitContainerStateChangeAsyncHelper(const Model::SubmitContainerStateChangeRequest& request, const SubmitContainerStateChangeResponseReceivedHandler& handler, const std::shared_ptr& context) const; void SubmitTaskStateChangeAsyncHelper(const Model::SubmitTaskStateChangeRequest& request, const SubmitTaskStateChangeResponseReceivedHandler& handler, const std::shared_ptr& context) const; void TagResourceAsyncHelper(const Model::TagResourceRequest& request, 
const TagResourceResponseReceivedHandler& handler, const std::shared_ptr& context) const; void UntagResourceAsyncHelper(const Model::UntagResourceRequest& request, const UntagResourceResponseReceivedHandler& handler, const std::shared_ptr& context) const; void UpdateClusterSettingsAsyncHelper(const Model::UpdateClusterSettingsRequest& request, const UpdateClusterSettingsResponseReceivedHandler& handler, const std::shared_ptr& context) const; void UpdateContainerAgentAsyncHelper(const Model::UpdateContainerAgentRequest& request, const UpdateContainerAgentResponseReceivedHandler& handler, const std::shared_ptr& context) const; void UpdateContainerInstancesStateAsyncHelper(const Model::UpdateContainerInstancesStateRequest& request, const UpdateContainerInstancesStateResponseReceivedHandler& handler, const std::shared_ptr& context) const; void UpdateServiceAsyncHelper(const Model::UpdateServiceRequest& request, const UpdateServiceResponseReceivedHandler& handler, const std::shared_ptr& context) const; void UpdateServicePrimaryTaskSetAsyncHelper(const Model::UpdateServicePrimaryTaskSetRequest& request, const UpdateServicePrimaryTaskSetResponseReceivedHandler& handler, const std::shared_ptr& context) const; void UpdateTaskSetAsyncHelper(const Model::UpdateTaskSetRequest& request, const UpdateTaskSetResponseReceivedHandler& handler, const std::shared_ptr& context) const; Aws::String m_uri; Aws::String m_configScheme; std::shared_ptr m_executor; }; } // namespace ECS } // namespace Aws