/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace Aws { namespace ECS { namespace Model { /** */ class AWS_ECS_API CreateServiceRequest : public ECSRequest { public: CreateServiceRequest(); // Service request name is the Operation name which will send this request out, // each operation should has unique request name, so that we can get operation's name from this request. // Note: this is not true for response, multiple operations may have the same response name, // so we can not get operation's name from response. inline virtual const char* GetServiceRequestName() const override { return "CreateService"; } Aws::String SerializePayload() const override; Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override; /** *

The short name or full Amazon Resource Name (ARN) of the cluster on which to * run your service. If you do not specify a cluster, the default cluster is * assumed.

*/ inline const Aws::String& GetCluster() const{ return m_cluster; } /** *

The short name or full Amazon Resource Name (ARN) of the cluster on which to * run your service. If you do not specify a cluster, the default cluster is * assumed.

*/ inline bool ClusterHasBeenSet() const { return m_clusterHasBeenSet; } /** *

The short name or full Amazon Resource Name (ARN) of the cluster on which to * run your service. If you do not specify a cluster, the default cluster is * assumed.

*/ inline void SetCluster(const Aws::String& value) { m_clusterHasBeenSet = true; m_cluster = value; } /** *

The short name or full Amazon Resource Name (ARN) of the cluster on which to * run your service. If you do not specify a cluster, the default cluster is * assumed.

*/ inline void SetCluster(Aws::String&& value) { m_clusterHasBeenSet = true; m_cluster = std::move(value); } /** *

The short name or full Amazon Resource Name (ARN) of the cluster on which to * run your service. If you do not specify a cluster, the default cluster is * assumed.

*/ inline void SetCluster(const char* value) { m_clusterHasBeenSet = true; m_cluster.assign(value); } /** *

The short name or full Amazon Resource Name (ARN) of the cluster on which to * run your service. If you do not specify a cluster, the default cluster is * assumed.

*/ inline CreateServiceRequest& WithCluster(const Aws::String& value) { SetCluster(value); return *this;} /** *

The short name or full Amazon Resource Name (ARN) of the cluster on which to * run your service. If you do not specify a cluster, the default cluster is * assumed.

*/ inline CreateServiceRequest& WithCluster(Aws::String&& value) { SetCluster(std::move(value)); return *this;} /** *

The short name or full Amazon Resource Name (ARN) of the cluster on which to * run your service. If you do not specify a cluster, the default cluster is * assumed.

*/ inline CreateServiceRequest& WithCluster(const char* value) { SetCluster(value); return *this;} /** *

The name of your service. Up to 255 letters (uppercase and lowercase), * numbers, and hyphens are allowed. Service names must be unique within a cluster, * but you can have similarly named services in multiple clusters within a Region * or across multiple Regions.

*/ inline const Aws::String& GetServiceName() const{ return m_serviceName; } /** *

The name of your service. Up to 255 letters (uppercase and lowercase), * numbers, and hyphens are allowed. Service names must be unique within a cluster, * but you can have similarly named services in multiple clusters within a Region * or across multiple Regions.

*/ inline bool ServiceNameHasBeenSet() const { return m_serviceNameHasBeenSet; } /** *

The name of your service. Up to 255 letters (uppercase and lowercase), * numbers, and hyphens are allowed. Service names must be unique within a cluster, * but you can have similarly named services in multiple clusters within a Region * or across multiple Regions.

*/ inline void SetServiceName(const Aws::String& value) { m_serviceNameHasBeenSet = true; m_serviceName = value; } /** *

The name of your service. Up to 255 letters (uppercase and lowercase), * numbers, and hyphens are allowed. Service names must be unique within a cluster, * but you can have similarly named services in multiple clusters within a Region * or across multiple Regions.

*/ inline void SetServiceName(Aws::String&& value) { m_serviceNameHasBeenSet = true; m_serviceName = std::move(value); } /** *

The name of your service. Up to 255 letters (uppercase and lowercase), * numbers, and hyphens are allowed. Service names must be unique within a cluster, * but you can have similarly named services in multiple clusters within a Region * or across multiple Regions.

*/ inline void SetServiceName(const char* value) { m_serviceNameHasBeenSet = true; m_serviceName.assign(value); } /** *

The name of your service. Up to 255 letters (uppercase and lowercase), * numbers, and hyphens are allowed. Service names must be unique within a cluster, * but you can have similarly named services in multiple clusters within a Region * or across multiple Regions.

*/ inline CreateServiceRequest& WithServiceName(const Aws::String& value) { SetServiceName(value); return *this;} /** *

The name of your service. Up to 255 letters (uppercase and lowercase), * numbers, and hyphens are allowed. Service names must be unique within a cluster, * but you can have similarly named services in multiple clusters within a Region * or across multiple Regions.

*/ inline CreateServiceRequest& WithServiceName(Aws::String&& value) { SetServiceName(std::move(value)); return *this;} /** *

The name of your service. Up to 255 letters (uppercase and lowercase), * numbers, and hyphens are allowed. Service names must be unique within a cluster, * but you can have similarly named services in multiple clusters within a Region * or across multiple Regions.

*/ inline CreateServiceRequest& WithServiceName(const char* value) { SetServiceName(value); return *this;} /** *

The family and revision * (family:revision) or full ARN of the task definition to run in your * service. If a revision is not specified, the latest * ACTIVE revision is used.

A task definition must be specified * if the service is using the ECS deployment controller.

*/ inline const Aws::String& GetTaskDefinition() const{ return m_taskDefinition; } /** *

The family and revision * (family:revision) or full ARN of the task definition to run in your * service. If a revision is not specified, the latest * ACTIVE revision is used.

A task definition must be specified * if the service is using the ECS deployment controller.

*/ inline bool TaskDefinitionHasBeenSet() const { return m_taskDefinitionHasBeenSet; } /** *

The family and revision * (family:revision) or full ARN of the task definition to run in your * service. If a revision is not specified, the latest * ACTIVE revision is used.

A task definition must be specified * if the service is using the ECS deployment controller.

*/ inline void SetTaskDefinition(const Aws::String& value) { m_taskDefinitionHasBeenSet = true; m_taskDefinition = value; } /** *

The family and revision * (family:revision) or full ARN of the task definition to run in your * service. If a revision is not specified, the latest * ACTIVE revision is used.

A task definition must be specified * if the service is using the ECS deployment controller.

*/ inline void SetTaskDefinition(Aws::String&& value) { m_taskDefinitionHasBeenSet = true; m_taskDefinition = std::move(value); } /** *

The family and revision * (family:revision) or full ARN of the task definition to run in your * service. If a revision is not specified, the latest * ACTIVE revision is used.

A task definition must be specified * if the service is using the ECS deployment controller.

*/ inline void SetTaskDefinition(const char* value) { m_taskDefinitionHasBeenSet = true; m_taskDefinition.assign(value); } /** *

The family and revision * (family:revision) or full ARN of the task definition to run in your * service. If a revision is not specified, the latest * ACTIVE revision is used.

A task definition must be specified * if the service is using the ECS deployment controller.

*/ inline CreateServiceRequest& WithTaskDefinition(const Aws::String& value) { SetTaskDefinition(value); return *this;} /** *

The family and revision * (family:revision) or full ARN of the task definition to run in your * service. If a revision is not specified, the latest * ACTIVE revision is used.

A task definition must be specified * if the service is using the ECS deployment controller.

*/ inline CreateServiceRequest& WithTaskDefinition(Aws::String&& value) { SetTaskDefinition(std::move(value)); return *this;} /** *

The family and revision * (family:revision) or full ARN of the task definition to run in your * service. If a revision is not specified, the latest * ACTIVE revision is used.

A task definition must be specified * if the service is using the ECS deployment controller.

*/ inline CreateServiceRequest& WithTaskDefinition(const char* value) { SetTaskDefinition(value); return *this;} /** *

A load balancer object representing the load balancers to use with your * service. For more information, see Service * Load Balancing in the Amazon Elastic Container Service Developer * Guide.

If the service is using the rolling update (ECS) * deployment controller and using either an Application Load Balancer or Network * Load Balancer, you can specify multiple target groups to attach to the service. * The service-linked role is required for services that make use of multiple * target groups. For more information, see Using * Service-Linked Roles for Amazon ECS in the Amazon Elastic Container * Service Developer Guide.

If the service is using the * CODE_DEPLOY deployment controller, the service is required to use * either an Application Load Balancer or Network Load Balancer. When creating an * AWS CodeDeploy deployment group, you specify two target groups (referred to as a * targetGroupPair). During a deployment, AWS CodeDeploy determines * which task set in your service has the status PRIMARY and * associates one target group with it, and then associates the other target group * with the replacement task set. The load balancer can also have up to two * listeners: a required listener for production traffic and an optional listener * that allows you perform validation tests with Lambda functions before routing * production traffic to it.

After you create a service using the * ECS deployment controller, the load balancer name or target group * ARN, container name, and container port specified in the service definition are * immutable. If you are using the CODE_DEPLOY deployment controller, * these values can be changed when updating the service.

For Application * Load Balancers and Network Load Balancers, this object must contain the load * balancer target group ARN, the container name (as it appears in a container * definition), and the container port to access from the load balancer. When a * task from this service is placed on a container instance, the container instance * and port combination is registered as a target in the target group specified * here.

For Classic Load Balancers, this object must contain the load * balancer name, the container name (as it appears in a container definition), and * the container port to access from the load balancer. When a task from this * service is placed on a container instance, the container instance is registered * with the load balancer specified here.

Services with tasks that use the * awsvpc network mode (for example, those with the Fargate launch * type) only support Application Load Balancers and Network Load Balancers. * Classic Load Balancers are not supported. Also, when you create any target * groups for these services, you must choose ip as the target type, * not instance, because tasks that use the awsvpc * network mode are associated with an elastic network interface, not an Amazon EC2 * instance.

*/ inline const Aws::Vector& GetLoadBalancers() const{ return m_loadBalancers; } /** *

A load balancer object representing the load balancers to use with your * service. For more information, see Service * Load Balancing in the Amazon Elastic Container Service Developer * Guide.

If the service is using the rolling update (ECS) * deployment controller and using either an Application Load Balancer or Network * Load Balancer, you can specify multiple target groups to attach to the service. * The service-linked role is required for services that make use of multiple * target groups. For more information, see Using * Service-Linked Roles for Amazon ECS in the Amazon Elastic Container * Service Developer Guide.

If the service is using the * CODE_DEPLOY deployment controller, the service is required to use * either an Application Load Balancer or Network Load Balancer. When creating an * AWS CodeDeploy deployment group, you specify two target groups (referred to as a * targetGroupPair). During a deployment, AWS CodeDeploy determines * which task set in your service has the status PRIMARY and * associates one target group with it, and then associates the other target group * with the replacement task set. The load balancer can also have up to two * listeners: a required listener for production traffic and an optional listener * that allows you perform validation tests with Lambda functions before routing * production traffic to it.

After you create a service using the * ECS deployment controller, the load balancer name or target group * ARN, container name, and container port specified in the service definition are * immutable. If you are using the CODE_DEPLOY deployment controller, * these values can be changed when updating the service.

For Application * Load Balancers and Network Load Balancers, this object must contain the load * balancer target group ARN, the container name (as it appears in a container * definition), and the container port to access from the load balancer. When a * task from this service is placed on a container instance, the container instance * and port combination is registered as a target in the target group specified * here.

For Classic Load Balancers, this object must contain the load * balancer name, the container name (as it appears in a container definition), and * the container port to access from the load balancer. When a task from this * service is placed on a container instance, the container instance is registered * with the load balancer specified here.

Services with tasks that use the * awsvpc network mode (for example, those with the Fargate launch * type) only support Application Load Balancers and Network Load Balancers. * Classic Load Balancers are not supported. Also, when you create any target * groups for these services, you must choose ip as the target type, * not instance, because tasks that use the awsvpc * network mode are associated with an elastic network interface, not an Amazon EC2 * instance.

*/ inline bool LoadBalancersHasBeenSet() const { return m_loadBalancersHasBeenSet; } /** *

A load balancer object representing the load balancers to use with your * service. For more information, see Service * Load Balancing in the Amazon Elastic Container Service Developer * Guide.

If the service is using the rolling update (ECS) * deployment controller and using either an Application Load Balancer or Network * Load Balancer, you can specify multiple target groups to attach to the service. * The service-linked role is required for services that make use of multiple * target groups. For more information, see Using * Service-Linked Roles for Amazon ECS in the Amazon Elastic Container * Service Developer Guide.

If the service is using the * CODE_DEPLOY deployment controller, the service is required to use * either an Application Load Balancer or Network Load Balancer. When creating an * AWS CodeDeploy deployment group, you specify two target groups (referred to as a * targetGroupPair). During a deployment, AWS CodeDeploy determines * which task set in your service has the status PRIMARY and * associates one target group with it, and then associates the other target group * with the replacement task set. The load balancer can also have up to two * listeners: a required listener for production traffic and an optional listener * that allows you perform validation tests with Lambda functions before routing * production traffic to it.

After you create a service using the * ECS deployment controller, the load balancer name or target group * ARN, container name, and container port specified in the service definition are * immutable. If you are using the CODE_DEPLOY deployment controller, * these values can be changed when updating the service.

For Application * Load Balancers and Network Load Balancers, this object must contain the load * balancer target group ARN, the container name (as it appears in a container * definition), and the container port to access from the load balancer. When a * task from this service is placed on a container instance, the container instance * and port combination is registered as a target in the target group specified * here.

For Classic Load Balancers, this object must contain the load * balancer name, the container name (as it appears in a container definition), and * the container port to access from the load balancer. When a task from this * service is placed on a container instance, the container instance is registered * with the load balancer specified here.

Services with tasks that use the * awsvpc network mode (for example, those with the Fargate launch * type) only support Application Load Balancers and Network Load Balancers. * Classic Load Balancers are not supported. Also, when you create any target * groups for these services, you must choose ip as the target type, * not instance, because tasks that use the awsvpc * network mode are associated with an elastic network interface, not an Amazon EC2 * instance.

*/ inline void SetLoadBalancers(const Aws::Vector& value) { m_loadBalancersHasBeenSet = true; m_loadBalancers = value; } /** *

A load balancer object representing the load balancers to use with your * service. For more information, see Service * Load Balancing in the Amazon Elastic Container Service Developer * Guide.

If the service is using the rolling update (ECS) * deployment controller and using either an Application Load Balancer or Network * Load Balancer, you can specify multiple target groups to attach to the service. * The service-linked role is required for services that make use of multiple * target groups. For more information, see Using * Service-Linked Roles for Amazon ECS in the Amazon Elastic Container * Service Developer Guide.

If the service is using the * CODE_DEPLOY deployment controller, the service is required to use * either an Application Load Balancer or Network Load Balancer. When creating an * AWS CodeDeploy deployment group, you specify two target groups (referred to as a * targetGroupPair). During a deployment, AWS CodeDeploy determines * which task set in your service has the status PRIMARY and * associates one target group with it, and then associates the other target group * with the replacement task set. The load balancer can also have up to two * listeners: a required listener for production traffic and an optional listener * that allows you perform validation tests with Lambda functions before routing * production traffic to it.

After you create a service using the * ECS deployment controller, the load balancer name or target group * ARN, container name, and container port specified in the service definition are * immutable. If you are using the CODE_DEPLOY deployment controller, * these values can be changed when updating the service.

For Application * Load Balancers and Network Load Balancers, this object must contain the load * balancer target group ARN, the container name (as it appears in a container * definition), and the container port to access from the load balancer. When a * task from this service is placed on a container instance, the container instance * and port combination is registered as a target in the target group specified * here.

For Classic Load Balancers, this object must contain the load * balancer name, the container name (as it appears in a container definition), and * the container port to access from the load balancer. When a task from this * service is placed on a container instance, the container instance is registered * with the load balancer specified here.

Services with tasks that use the * awsvpc network mode (for example, those with the Fargate launch * type) only support Application Load Balancers and Network Load Balancers. * Classic Load Balancers are not supported. Also, when you create any target * groups for these services, you must choose ip as the target type, * not instance, because tasks that use the awsvpc * network mode are associated with an elastic network interface, not an Amazon EC2 * instance.

*/ inline void SetLoadBalancers(Aws::Vector&& value) { m_loadBalancersHasBeenSet = true; m_loadBalancers = std::move(value); } /** *

A load balancer object representing the load balancers to use with your * service. For more information, see Service * Load Balancing in the Amazon Elastic Container Service Developer * Guide.

If the service is using the rolling update (ECS) * deployment controller and using either an Application Load Balancer or Network * Load Balancer, you can specify multiple target groups to attach to the service. * The service-linked role is required for services that make use of multiple * target groups. For more information, see Using * Service-Linked Roles for Amazon ECS in the Amazon Elastic Container * Service Developer Guide.

If the service is using the * CODE_DEPLOY deployment controller, the service is required to use * either an Application Load Balancer or Network Load Balancer. When creating an * AWS CodeDeploy deployment group, you specify two target groups (referred to as a * targetGroupPair). During a deployment, AWS CodeDeploy determines * which task set in your service has the status PRIMARY and * associates one target group with it, and then associates the other target group * with the replacement task set. The load balancer can also have up to two * listeners: a required listener for production traffic and an optional listener * that allows you perform validation tests with Lambda functions before routing * production traffic to it.

After you create a service using the * ECS deployment controller, the load balancer name or target group * ARN, container name, and container port specified in the service definition are * immutable. If you are using the CODE_DEPLOY deployment controller, * these values can be changed when updating the service.

For Application * Load Balancers and Network Load Balancers, this object must contain the load * balancer target group ARN, the container name (as it appears in a container * definition), and the container port to access from the load balancer. When a * task from this service is placed on a container instance, the container instance * and port combination is registered as a target in the target group specified * here.

For Classic Load Balancers, this object must contain the load * balancer name, the container name (as it appears in a container definition), and * the container port to access from the load balancer. When a task from this * service is placed on a container instance, the container instance is registered * with the load balancer specified here.

Services with tasks that use the * awsvpc network mode (for example, those with the Fargate launch * type) only support Application Load Balancers and Network Load Balancers. * Classic Load Balancers are not supported. Also, when you create any target * groups for these services, you must choose ip as the target type, * not instance, because tasks that use the awsvpc * network mode are associated with an elastic network interface, not an Amazon EC2 * instance.

*/ inline CreateServiceRequest& WithLoadBalancers(const Aws::Vector& value) { SetLoadBalancers(value); return *this;} /** *

A load balancer object representing the load balancers to use with your * service. For more information, see Service * Load Balancing in the Amazon Elastic Container Service Developer * Guide.

If the service is using the rolling update (ECS) * deployment controller and using either an Application Load Balancer or Network * Load Balancer, you can specify multiple target groups to attach to the service. * The service-linked role is required for services that make use of multiple * target groups. For more information, see Using * Service-Linked Roles for Amazon ECS in the Amazon Elastic Container * Service Developer Guide.

If the service is using the * CODE_DEPLOY deployment controller, the service is required to use * either an Application Load Balancer or Network Load Balancer. When creating an * AWS CodeDeploy deployment group, you specify two target groups (referred to as a * targetGroupPair). During a deployment, AWS CodeDeploy determines * which task set in your service has the status PRIMARY and * associates one target group with it, and then associates the other target group * with the replacement task set. The load balancer can also have up to two * listeners: a required listener for production traffic and an optional listener * that allows you perform validation tests with Lambda functions before routing * production traffic to it.

After you create a service using the * ECS deployment controller, the load balancer name or target group * ARN, container name, and container port specified in the service definition are * immutable. If you are using the CODE_DEPLOY deployment controller, * these values can be changed when updating the service.

For Application * Load Balancers and Network Load Balancers, this object must contain the load * balancer target group ARN, the container name (as it appears in a container * definition), and the container port to access from the load balancer. When a * task from this service is placed on a container instance, the container instance * and port combination is registered as a target in the target group specified * here.

For Classic Load Balancers, this object must contain the load * balancer name, the container name (as it appears in a container definition), and * the container port to access from the load balancer. When a task from this * service is placed on a container instance, the container instance is registered * with the load balancer specified here.

Services with tasks that use the * awsvpc network mode (for example, those with the Fargate launch * type) only support Application Load Balancers and Network Load Balancers. * Classic Load Balancers are not supported. Also, when you create any target * groups for these services, you must choose ip as the target type, * not instance, because tasks that use the awsvpc * network mode are associated with an elastic network interface, not an Amazon EC2 * instance.

*/ inline CreateServiceRequest& WithLoadBalancers(Aws::Vector&& value) { SetLoadBalancers(std::move(value)); return *this;} /** *

A load balancer object representing the load balancers to use with your * service. For more information, see Service * Load Balancing in the Amazon Elastic Container Service Developer * Guide.

If the service is using the rolling update (ECS) * deployment controller and using either an Application Load Balancer or Network * Load Balancer, you can specify multiple target groups to attach to the service. * The service-linked role is required for services that make use of multiple * target groups. For more information, see Using * Service-Linked Roles for Amazon ECS in the Amazon Elastic Container * Service Developer Guide.

If the service is using the * CODE_DEPLOY deployment controller, the service is required to use * either an Application Load Balancer or Network Load Balancer. When creating an * AWS CodeDeploy deployment group, you specify two target groups (referred to as a * targetGroupPair). During a deployment, AWS CodeDeploy determines * which task set in your service has the status PRIMARY and * associates one target group with it, and then associates the other target group * with the replacement task set. The load balancer can also have up to two * listeners: a required listener for production traffic and an optional listener * that allows you perform validation tests with Lambda functions before routing * production traffic to it.

After you create a service using the * ECS deployment controller, the load balancer name or target group * ARN, container name, and container port specified in the service definition are * immutable. If you are using the CODE_DEPLOY deployment controller, * these values can be changed when updating the service.

For Application * Load Balancers and Network Load Balancers, this object must contain the load * balancer target group ARN, the container name (as it appears in a container * definition), and the container port to access from the load balancer. When a * task from this service is placed on a container instance, the container instance * and port combination is registered as a target in the target group specified * here.

For Classic Load Balancers, this object must contain the load * balancer name, the container name (as it appears in a container definition), and * the container port to access from the load balancer. When a task from this * service is placed on a container instance, the container instance is registered * with the load balancer specified here.

Services with tasks that use the * awsvpc network mode (for example, those with the Fargate launch * type) only support Application Load Balancers and Network Load Balancers. * Classic Load Balancers are not supported. Also, when you create any target * groups for these services, you must choose ip as the target type, * not instance, because tasks that use the awsvpc * network mode are associated with an elastic network interface, not an Amazon EC2 * instance.

*/
    inline CreateServiceRequest& AddLoadBalancers(const LoadBalancer& value) { m_loadBalancers.push_back(value); m_loadBalancersHasBeenSet = true; return *this; }

    /**
     * <p>Appends one load balancer to the set of load balancers used with this
     * service (see Service Load Balancing in the Amazon ECS Developer Guide).</p>
     * <p>Services whose tasks use the <code>awsvpc</code> network mode (for
     * example, the Fargate launch type) support only Application Load Balancers
     * and Network Load Balancers — Classic Load Balancers are not supported — and
     * any target groups created for them must use the <code>ip</code> target
     * type, not <code>instance</code>, because such tasks are associated with an
     * elastic network interface rather than an Amazon EC2 instance.</p>
     */
    inline CreateServiceRequest& AddLoadBalancers(LoadBalancer&& value) { m_loadBalancers.push_back(std::move(value)); m_loadBalancersHasBeenSet = true; return *this; }

    /**
     *

The details of the service discovery registries to assign to this service. * For more information, see Service * Discovery.

Service discovery is supported for Fargate tasks if * you are using platform version v1.1.0 or later. For more information, see AWS * Fargate Platform Versions.

*/ inline const Aws::Vector& GetServiceRegistries() const{ return m_serviceRegistries; } /** *

The details of the service discovery registries to assign to this service. * For more information, see Service * Discovery.

Service discovery is supported for Fargate tasks if * you are using platform version v1.1.0 or later. For more information, see AWS * Fargate Platform Versions.

*/ inline bool ServiceRegistriesHasBeenSet() const { return m_serviceRegistriesHasBeenSet; } /** *

The details of the service discovery registries to assign to this service. * For more information, see Service * Discovery.

Service discovery is supported for Fargate tasks if * you are using platform version v1.1.0 or later. For more information, see AWS * Fargate Platform Versions.

*/ inline void SetServiceRegistries(const Aws::Vector& value) { m_serviceRegistriesHasBeenSet = true; m_serviceRegistries = value; } /** *

The details of the service discovery registries to assign to this service. * For more information, see Service * Discovery.

Service discovery is supported for Fargate tasks if * you are using platform version v1.1.0 or later. For more information, see AWS * Fargate Platform Versions.

*/ inline void SetServiceRegistries(Aws::Vector&& value) { m_serviceRegistriesHasBeenSet = true; m_serviceRegistries = std::move(value); } /** *

The details of the service discovery registries to assign to this service. * For more information, see Service * Discovery.

Service discovery is supported for Fargate tasks if * you are using platform version v1.1.0 or later. For more information, see AWS * Fargate Platform Versions.

*/ inline CreateServiceRequest& WithServiceRegistries(const Aws::Vector& value) { SetServiceRegistries(value); return *this;} /** *

The details of the service discovery registries to assign to this service. * For more information, see Service * Discovery.

Service discovery is supported for Fargate tasks if * you are using platform version v1.1.0 or later. For more information, see AWS * Fargate Platform Versions.

*/ inline CreateServiceRequest& WithServiceRegistries(Aws::Vector&& value) { SetServiceRegistries(std::move(value)); return *this;} /** *

The details of the service discovery registries to assign to this service. * For more information, see Service * Discovery.

Service discovery is supported for Fargate tasks if * you are using platform version v1.1.0 or later. For more information, see AWS * Fargate Platform Versions.

*/ inline CreateServiceRequest& AddServiceRegistries(const ServiceRegistry& value) { m_serviceRegistriesHasBeenSet = true; m_serviceRegistries.push_back(value); return *this; } /** *

The details of the service discovery registries to assign to this service. * For more information, see Service * Discovery.

Service discovery is supported for Fargate tasks if * you are using platform version v1.1.0 or later. For more information, see AWS * Fargate Platform Versions.

*/ inline CreateServiceRequest& AddServiceRegistries(ServiceRegistry&& value) { m_serviceRegistriesHasBeenSet = true; m_serviceRegistries.push_back(std::move(value)); return *this; } /** *

The number of instantiations of the specified task definition to place and * keep running on your cluster.

This is required if * schedulingStrategy is REPLICA or is not specified. If * schedulingStrategy is DAEMON then this is not * required.

*/
    inline int GetDesiredCount() const{ return m_desiredCount; }

    /**
     * <p>The number of instantiations of the specified task definition to place
     * and keep running on the cluster.</p> <p>Required when
     * <code>schedulingStrategy</code> is <code>REPLICA</code> or unspecified; not
     * required when it is <code>DAEMON</code>.</p>
     */
    inline bool DesiredCountHasBeenSet() const { return m_desiredCountHasBeenSet; }

    /**
     * <p>Sets the desired number of running tasks for this service.</p>
     */
    inline void SetDesiredCount(int value) { m_desiredCount = value; m_desiredCountHasBeenSet = true; }

    /**
     * <p>Fluent setter: sets the desired task count and returns this request for
     * chaining.</p>
     */
    inline CreateServiceRequest& WithDesiredCount(int value) { SetDesiredCount(value); return *this;}

    /**
     *

Unique, case-sensitive identifier that you provide to ensure the idempotency * of the request. Up to 32 ASCII characters are allowed.

*/
    inline const Aws::String& GetClientToken() const{ return m_clientToken; }

    /**
     * <p>Unique, case-sensitive identifier (up to 32 ASCII characters) supplied
     * by the caller to make the request idempotent.</p>
     */
    inline bool ClientTokenHasBeenSet() const { return m_clientTokenHasBeenSet; }

    /**
     * <p>Sets the idempotency client token (copy).</p>
     */
    inline void SetClientToken(const Aws::String& value) { m_clientToken = value; m_clientTokenHasBeenSet = true; }

    /**
     * <p>Sets the idempotency client token (move).</p>
     */
    inline void SetClientToken(Aws::String&& value) { m_clientToken = std::move(value); m_clientTokenHasBeenSet = true; }

    /**
     * <p>Sets the idempotency client token from a NUL-terminated C string.</p>
     */
    inline void SetClientToken(const char* value) { m_clientToken.assign(value); m_clientTokenHasBeenSet = true; }

    /**
     * <p>Fluent setter: sets the idempotency client token (copy) and returns this
     * request for chaining.</p>
     */
    inline CreateServiceRequest& WithClientToken(const Aws::String& value) { SetClientToken(value); return *this;}

    /**
     * <p>Fluent setter: sets the idempotency client token (move) and returns this
     * request for chaining.</p>
     */
    inline CreateServiceRequest& WithClientToken(Aws::String&& value) { SetClientToken(std::move(value)); return *this;}

    /**
     * <p>Fluent setter: sets the idempotency client token from a C string and
     * returns this request for chaining.</p>
     */
    inline CreateServiceRequest& WithClientToken(const char* value) { SetClientToken(value); return *this;}

    /**
     *

The launch type on which to run your service. For more information, see Amazon * ECS Launch Types in the Amazon Elastic Container Service Developer * Guide.

If a launchType is specified, the * capacityProviderStrategy parameter must be omitted.

*/
    inline const LaunchType& GetLaunchType() const{ return m_launchType; }

    /**
     * <p>The launch type on which to run this service (see Amazon ECS Launch
     * Types in the Amazon ECS Developer Guide).</p> <p>If a
     * <code>launchType</code> is specified, the
     * <code>capacityProviderStrategy</code> parameter must be omitted.</p>
     */
    inline bool LaunchTypeHasBeenSet() const { return m_launchTypeHasBeenSet; }

    /**
     * <p>Sets the launch type (copy).</p>
     */
    inline void SetLaunchType(const LaunchType& value) { m_launchType = value; m_launchTypeHasBeenSet = true; }

    /**
     * <p>Sets the launch type (move).</p>
     */
    inline void SetLaunchType(LaunchType&& value) { m_launchType = std::move(value); m_launchTypeHasBeenSet = true; }

    /**
     * <p>Fluent setter: sets the launch type (copy) and returns this request for
     * chaining.</p>
     */
    inline CreateServiceRequest& WithLaunchType(const LaunchType& value) { SetLaunchType(value); return *this;}

    /**
     * <p>Fluent setter: sets the launch type (move) and returns this request for
     * chaining.</p>
     */
    inline CreateServiceRequest& WithLaunchType(LaunchType&& value) { SetLaunchType(std::move(value)); return *this;}

    /**
     *

The capacity provider strategy to use for the service.

A capacity * provider strategy consists of one or more capacity providers along with the * base and weight to assign to them. A capacity provider * must be associated with the cluster to be used in a capacity provider strategy. * The PutClusterCapacityProviders API is used to associate a capacity * provider with a cluster. Only capacity providers with an ACTIVE or * UPDATING status can be used.

If a * capacityProviderStrategy is specified, the launchType * parameter must be omitted. If no capacityProviderStrategy or * launchType is specified, the * defaultCapacityProviderStrategy for the cluster is used.

If * specifying a capacity provider that uses an Auto Scaling group, the capacity * provider must already be created. New capacity providers can be created with the * CreateCapacityProvider API operation.

To use a AWS Fargate * capacity provider, specify either the FARGATE or * FARGATE_SPOT capacity providers. The AWS Fargate capacity providers * are available to all accounts and only need to be associated with a cluster to * be used.

The PutClusterCapacityProviders API operation is used to * update the list of available capacity providers for a cluster after the cluster * is created.

*/ inline const Aws::Vector& GetCapacityProviderStrategy() const{ return m_capacityProviderStrategy; } /** *

The capacity provider strategy to use for the service.

A capacity * provider strategy consists of one or more capacity providers along with the * base and weight to assign to them. A capacity provider * must be associated with the cluster to be used in a capacity provider strategy. * The PutClusterCapacityProviders API is used to associate a capacity * provider with a cluster. Only capacity providers with an ACTIVE or * UPDATING status can be used.

If a * capacityProviderStrategy is specified, the launchType * parameter must be omitted. If no capacityProviderStrategy or * launchType is specified, the * defaultCapacityProviderStrategy for the cluster is used.

If * specifying a capacity provider that uses an Auto Scaling group, the capacity * provider must already be created. New capacity providers can be created with the * CreateCapacityProvider API operation.

To use a AWS Fargate * capacity provider, specify either the FARGATE or * FARGATE_SPOT capacity providers. The AWS Fargate capacity providers * are available to all accounts and only need to be associated with a cluster to * be used.

The PutClusterCapacityProviders API operation is used to * update the list of available capacity providers for a cluster after the cluster * is created.

*/ inline bool CapacityProviderStrategyHasBeenSet() const { return m_capacityProviderStrategyHasBeenSet; } /** *

The capacity provider strategy to use for the service.

A capacity * provider strategy consists of one or more capacity providers along with the * base and weight to assign to them. A capacity provider * must be associated with the cluster to be used in a capacity provider strategy. * The PutClusterCapacityProviders API is used to associate a capacity * provider with a cluster. Only capacity providers with an ACTIVE or * UPDATING status can be used.

If a * capacityProviderStrategy is specified, the launchType * parameter must be omitted. If no capacityProviderStrategy or * launchType is specified, the * defaultCapacityProviderStrategy for the cluster is used.

If * specifying a capacity provider that uses an Auto Scaling group, the capacity * provider must already be created. New capacity providers can be created with the * CreateCapacityProvider API operation.

To use a AWS Fargate * capacity provider, specify either the FARGATE or * FARGATE_SPOT capacity providers. The AWS Fargate capacity providers * are available to all accounts and only need to be associated with a cluster to * be used.

The PutClusterCapacityProviders API operation is used to * update the list of available capacity providers for a cluster after the cluster * is created.

*/ inline void SetCapacityProviderStrategy(const Aws::Vector& value) { m_capacityProviderStrategyHasBeenSet = true; m_capacityProviderStrategy = value; } /** *

The capacity provider strategy to use for the service.

A capacity * provider strategy consists of one or more capacity providers along with the * base and weight to assign to them. A capacity provider * must be associated with the cluster to be used in a capacity provider strategy. * The PutClusterCapacityProviders API is used to associate a capacity * provider with a cluster. Only capacity providers with an ACTIVE or * UPDATING status can be used.

If a * capacityProviderStrategy is specified, the launchType * parameter must be omitted. If no capacityProviderStrategy or * launchType is specified, the * defaultCapacityProviderStrategy for the cluster is used.

If * specifying a capacity provider that uses an Auto Scaling group, the capacity * provider must already be created. New capacity providers can be created with the * CreateCapacityProvider API operation.

To use a AWS Fargate * capacity provider, specify either the FARGATE or * FARGATE_SPOT capacity providers. The AWS Fargate capacity providers * are available to all accounts and only need to be associated with a cluster to * be used.

The PutClusterCapacityProviders API operation is used to * update the list of available capacity providers for a cluster after the cluster * is created.

*/ inline void SetCapacityProviderStrategy(Aws::Vector&& value) { m_capacityProviderStrategyHasBeenSet = true; m_capacityProviderStrategy = std::move(value); } /** *

The capacity provider strategy to use for the service.

A capacity * provider strategy consists of one or more capacity providers along with the * base and weight to assign to them. A capacity provider * must be associated with the cluster to be used in a capacity provider strategy. * The PutClusterCapacityProviders API is used to associate a capacity * provider with a cluster. Only capacity providers with an ACTIVE or * UPDATING status can be used.

If a * capacityProviderStrategy is specified, the launchType * parameter must be omitted. If no capacityProviderStrategy or * launchType is specified, the * defaultCapacityProviderStrategy for the cluster is used.

If * specifying a capacity provider that uses an Auto Scaling group, the capacity * provider must already be created. New capacity providers can be created with the * CreateCapacityProvider API operation.

To use a AWS Fargate * capacity provider, specify either the FARGATE or * FARGATE_SPOT capacity providers. The AWS Fargate capacity providers * are available to all accounts and only need to be associated with a cluster to * be used.

The PutClusterCapacityProviders API operation is used to * update the list of available capacity providers for a cluster after the cluster * is created.

*/ inline CreateServiceRequest& WithCapacityProviderStrategy(const Aws::Vector& value) { SetCapacityProviderStrategy(value); return *this;} /** *

The capacity provider strategy to use for the service.

A capacity * provider strategy consists of one or more capacity providers along with the * base and weight to assign to them. A capacity provider * must be associated with the cluster to be used in a capacity provider strategy. * The PutClusterCapacityProviders API is used to associate a capacity * provider with a cluster. Only capacity providers with an ACTIVE or * UPDATING status can be used.

If a * capacityProviderStrategy is specified, the launchType * parameter must be omitted. If no capacityProviderStrategy or * launchType is specified, the * defaultCapacityProviderStrategy for the cluster is used.

If * specifying a capacity provider that uses an Auto Scaling group, the capacity * provider must already be created. New capacity providers can be created with the * CreateCapacityProvider API operation.

To use a AWS Fargate * capacity provider, specify either the FARGATE or * FARGATE_SPOT capacity providers. The AWS Fargate capacity providers * are available to all accounts and only need to be associated with a cluster to * be used.

The PutClusterCapacityProviders API operation is used to * update the list of available capacity providers for a cluster after the cluster * is created.

*/ inline CreateServiceRequest& WithCapacityProviderStrategy(Aws::Vector&& value) { SetCapacityProviderStrategy(std::move(value)); return *this;} /** *

The capacity provider strategy to use for the service.

A capacity * provider strategy consists of one or more capacity providers along with the * base and weight to assign to them. A capacity provider * must be associated with the cluster to be used in a capacity provider strategy. * The PutClusterCapacityProviders API is used to associate a capacity * provider with a cluster. Only capacity providers with an ACTIVE or * UPDATING status can be used.

If a * capacityProviderStrategy is specified, the launchType * parameter must be omitted. If no capacityProviderStrategy or * launchType is specified, the * defaultCapacityProviderStrategy for the cluster is used.

If * specifying a capacity provider that uses an Auto Scaling group, the capacity * provider must already be created. New capacity providers can be created with the * CreateCapacityProvider API operation.

To use a AWS Fargate * capacity provider, specify either the FARGATE or * FARGATE_SPOT capacity providers. The AWS Fargate capacity providers * are available to all accounts and only need to be associated with a cluster to * be used.

The PutClusterCapacityProviders API operation is used to * update the list of available capacity providers for a cluster after the cluster * is created.

*/ inline CreateServiceRequest& AddCapacityProviderStrategy(const CapacityProviderStrategyItem& value) { m_capacityProviderStrategyHasBeenSet = true; m_capacityProviderStrategy.push_back(value); return *this; } /** *

The capacity provider strategy to use for the service.

A capacity * provider strategy consists of one or more capacity providers along with the * base and weight to assign to them. A capacity provider * must be associated with the cluster to be used in a capacity provider strategy. * The PutClusterCapacityProviders API is used to associate a capacity * provider with a cluster. Only capacity providers with an ACTIVE or * UPDATING status can be used.

If a * capacityProviderStrategy is specified, the launchType * parameter must be omitted. If no capacityProviderStrategy or * launchType is specified, the * defaultCapacityProviderStrategy for the cluster is used.

If * specifying a capacity provider that uses an Auto Scaling group, the capacity * provider must already be created. New capacity providers can be created with the * CreateCapacityProvider API operation.

To use a AWS Fargate * capacity provider, specify either the FARGATE or * FARGATE_SPOT capacity providers. The AWS Fargate capacity providers * are available to all accounts and only need to be associated with a cluster to * be used.

The PutClusterCapacityProviders API operation is used to * update the list of available capacity providers for a cluster after the cluster * is created.

*/ inline CreateServiceRequest& AddCapacityProviderStrategy(CapacityProviderStrategyItem&& value) { m_capacityProviderStrategyHasBeenSet = true; m_capacityProviderStrategy.push_back(std::move(value)); return *this; } /** *

The platform version that your tasks in the service are running on. A * platform version is specified only for tasks using the Fargate launch type. If * one isn't specified, the LATEST platform version is used by * default. For more information, see AWS * Fargate Platform Versions in the Amazon Elastic Container Service * Developer Guide.

*/
    inline const Aws::String& GetPlatformVersion() const{ return m_platformVersion; }

    /**
     * <p>The platform version that the service's tasks run on. A platform version
     * is specified only for tasks using the Fargate launch type; when omitted,
     * the <code>LATEST</code> platform version is used by default. See AWS
     * Fargate Platform Versions in the Amazon ECS Developer Guide.</p>
     */
    inline bool PlatformVersionHasBeenSet() const { return m_platformVersionHasBeenSet; }

    /**
     * <p>Sets the Fargate platform version (copy).</p>
     */
    inline void SetPlatformVersion(const Aws::String& value) { m_platformVersion = value; m_platformVersionHasBeenSet = true; }

    /**
     * <p>Sets the Fargate platform version (move).</p>
     */
    inline void SetPlatformVersion(Aws::String&& value) { m_platformVersion = std::move(value); m_platformVersionHasBeenSet = true; }

    /**
     * <p>Sets the Fargate platform version from a NUL-terminated C string.</p>
     */
    inline void SetPlatformVersion(const char* value) { m_platformVersion.assign(value); m_platformVersionHasBeenSet = true; }

    /**
     * <p>Fluent setter: sets the platform version (copy) and returns this request
     * for chaining.</p>
     */
    inline CreateServiceRequest& WithPlatformVersion(const Aws::String& value) { SetPlatformVersion(value); return *this;}

    /**
     * <p>Fluent setter: sets the platform version (move) and returns this request
     * for chaining.</p>
     */
    inline CreateServiceRequest& WithPlatformVersion(Aws::String&& value) { SetPlatformVersion(std::move(value)); return *this;}

    /**
     * <p>Fluent setter: sets the platform version from a C string and returns
     * this request for chaining.</p>
     */
    inline CreateServiceRequest& WithPlatformVersion(const char* value) { SetPlatformVersion(value); return *this;}

    /**
     *

The name or full Amazon Resource Name (ARN) of the IAM role that allows * Amazon ECS to make calls to your load balancer on your behalf. This parameter is * only permitted if you are using a load balancer with your service and your task * definition does not use the awsvpc network mode. If you specify the * role parameter, you must also specify a load balancer object with * the loadBalancers parameter.

If your account has * already created the Amazon ECS service-linked role, that role is used by default * for your service unless you specify a role here. The service-linked role is * required if your task definition uses the awsvpc network mode or if * the service is configured to use service discovery, an external deployment * controller, multiple target groups, or Elastic Inference accelerators in which * case you should not specify a role here. For more information, see Using * Service-Linked Roles for Amazon ECS in the Amazon Elastic Container * Service Developer Guide.

If your specified role has a * path other than /, then you must either specify the full role ARN * (this is recommended) or prefix the role name with the path. For example, if a * role with the name bar has a path of /foo/ then you * would specify /foo/bar as the role name. For more information, see * Friendly * Names and Paths in the IAM User Guide.

*/
    inline const Aws::String& GetRole() const{ return m_role; }

    /**
     * <p>The name or full ARN of the IAM role that allows Amazon ECS to make
     * calls to your load balancer on your behalf. Only permitted when the service
     * uses a load balancer and the task definition does not use the
     * <code>awsvpc</code> network mode; if <code>role</code> is specified, a load
     * balancer object must also be supplied via <code>loadBalancers</code>.</p>
     * <p>If the Amazon ECS service-linked role already exists in the account, it
     * is used by default unless a role is specified here. The service-linked role
     * is required — and a role must NOT be specified here — when the task
     * definition uses <code>awsvpc</code> network mode, or the service uses
     * service discovery, an external deployment controller, multiple target
     * groups, or Elastic Inference accelerators. See Using Service-Linked Roles
     * for Amazon ECS in the Amazon ECS Developer Guide.</p> <p>If the role has a
     * path other than <code>/</code>, either specify the full ARN (recommended)
     * or prefix the role name with the path (e.g. role <code>bar</code> with path
     * <code>/foo/</code> is specified as <code>/foo/bar</code>). See Friendly
     * Names and Paths in the IAM User Guide.</p>
     */
    inline bool RoleHasBeenSet() const { return m_roleHasBeenSet; }

    /**
     * <p>Sets the load-balancer IAM role name or ARN (copy).</p>
     */
    inline void SetRole(const Aws::String& value) { m_role = value; m_roleHasBeenSet = true; }

    /**
     * <p>Sets the load-balancer IAM role name or ARN (move).</p>
     */
    inline void SetRole(Aws::String&& value) { m_role = std::move(value); m_roleHasBeenSet = true; }

    /**
     * <p>Sets the load-balancer IAM role name or ARN from a NUL-terminated C
     * string.</p>
     */
    inline void SetRole(const char* value) { m_role.assign(value); m_roleHasBeenSet = true; }

    /**
     * <p>Fluent setter: sets the load-balancer IAM role (copy) and returns this
     * request for chaining.</p>
     */
    inline CreateServiceRequest& WithRole(const Aws::String& value) { SetRole(value); return *this;}

    /**
     *

The name or full Amazon Resource Name (ARN) of the IAM role that allows * Amazon ECS to make calls to your load balancer on your behalf. This parameter is * only permitted if you are using a load balancer with your service and your task * definition does not use the awsvpc network mode. If you specify the * role parameter, you must also specify a load balancer object with * the loadBalancers parameter.

If your account has * already created the Amazon ECS service-linked role, that role is used by default * for your service unless you specify a role here. The service-linked role is * required if your task definition uses the awsvpc network mode or if * the service is configured to use service discovery, an external deployment * controller, multiple target groups, or Elastic Inference accelerators in which * case you should not specify a role here. For more information, see Using * Service-Linked Roles for Amazon ECS in the Amazon Elastic Container * Service Developer Guide.

If your specified role has a * path other than /, then you must either specify the full role ARN * (this is recommended) or prefix the role name with the path. For example, if a * role with the name bar has a path of /foo/ then you * would specify /foo/bar as the role name. For more information, see * Friendly * Names and Paths in the IAM User Guide.

*/ inline CreateServiceRequest& WithRole(Aws::String&& value) { SetRole(std::move(value)); return *this;} /** *

The name or full Amazon Resource Name (ARN) of the IAM role that allows * Amazon ECS to make calls to your load balancer on your behalf. This parameter is * only permitted if you are using a load balancer with your service and your task * definition does not use the awsvpc network mode. If you specify the * role parameter, you must also specify a load balancer object with * the loadBalancers parameter.

If your account has * already created the Amazon ECS service-linked role, that role is used by default * for your service unless you specify a role here. The service-linked role is * required if your task definition uses the awsvpc network mode or if * the service is configured to use service discovery, an external deployment * controller, multiple target groups, or Elastic Inference accelerators in which * case you should not specify a role here. For more information, see Using * Service-Linked Roles for Amazon ECS in the Amazon Elastic Container * Service Developer Guide.

If your specified role has a * path other than /, then you must either specify the full role ARN * (this is recommended) or prefix the role name with the path. For example, if a * role with the name bar has a path of /foo/ then you * would specify /foo/bar as the role name. For more information, see * Friendly * Names and Paths in the IAM User Guide.

*/ inline CreateServiceRequest& WithRole(const char* value) { SetRole(value); return *this;} /** *

Optional deployment parameters that control how many tasks run during the * deployment and the ordering of stopping and starting tasks.

*/ inline const DeploymentConfiguration& GetDeploymentConfiguration() const{ return m_deploymentConfiguration; } /** *

Optional deployment parameters that control how many tasks run during the * deployment and the ordering of stopping and starting tasks.

*/ inline bool DeploymentConfigurationHasBeenSet() const { return m_deploymentConfigurationHasBeenSet; } /** *

Optional deployment parameters that control how many tasks run during the * deployment and the ordering of stopping and starting tasks.

*/ inline void SetDeploymentConfiguration(const DeploymentConfiguration& value) { m_deploymentConfigurationHasBeenSet = true; m_deploymentConfiguration = value; } /** *

Optional deployment parameters that control how many tasks run during the * deployment and the ordering of stopping and starting tasks.

*/ inline void SetDeploymentConfiguration(DeploymentConfiguration&& value) { m_deploymentConfigurationHasBeenSet = true; m_deploymentConfiguration = std::move(value); } /** *

Optional deployment parameters that control how many tasks run during the * deployment and the ordering of stopping and starting tasks.

*/ inline CreateServiceRequest& WithDeploymentConfiguration(const DeploymentConfiguration& value) { SetDeploymentConfiguration(value); return *this;} /** *

Optional deployment parameters that control how many tasks run during the * deployment and the ordering of stopping and starting tasks.

*/ inline CreateServiceRequest& WithDeploymentConfiguration(DeploymentConfiguration&& value) { SetDeploymentConfiguration(std::move(value)); return *this;} /** *

An array of placement constraint objects to use for tasks in your service. * You can specify a maximum of 10 constraints per task (this limit includes * constraints in the task definition and those specified at runtime).

*/ inline const Aws::Vector& GetPlacementConstraints() const{ return m_placementConstraints; } /** *

An array of placement constraint objects to use for tasks in your service. * You can specify a maximum of 10 constraints per task (this limit includes * constraints in the task definition and those specified at runtime).

*/ inline bool PlacementConstraintsHasBeenSet() const { return m_placementConstraintsHasBeenSet; } /** *

An array of placement constraint objects to use for tasks in your service. * You can specify a maximum of 10 constraints per task (this limit includes * constraints in the task definition and those specified at runtime).

*/ inline void SetPlacementConstraints(const Aws::Vector& value) { m_placementConstraintsHasBeenSet = true; m_placementConstraints = value; } /** *

An array of placement constraint objects to use for tasks in your service. * You can specify a maximum of 10 constraints per task (this limit includes * constraints in the task definition and those specified at runtime).

*/ inline void SetPlacementConstraints(Aws::Vector&& value) { m_placementConstraintsHasBeenSet = true; m_placementConstraints = std::move(value); } /** *

An array of placement constraint objects to use for tasks in your service. * You can specify a maximum of 10 constraints per task (this limit includes * constraints in the task definition and those specified at runtime).

*/ inline CreateServiceRequest& WithPlacementConstraints(const Aws::Vector& value) { SetPlacementConstraints(value); return *this;} /** *

An array of placement constraint objects to use for tasks in your service. * You can specify a maximum of 10 constraints per task (this limit includes * constraints in the task definition and those specified at runtime).

*/ inline CreateServiceRequest& WithPlacementConstraints(Aws::Vector&& value) { SetPlacementConstraints(std::move(value)); return *this;} /** *

An array of placement constraint objects to use for tasks in your service. * You can specify a maximum of 10 constraints per task (this limit includes * constraints in the task definition and those specified at runtime).

*/ inline CreateServiceRequest& AddPlacementConstraints(const PlacementConstraint& value) { m_placementConstraintsHasBeenSet = true; m_placementConstraints.push_back(value); return *this; } /** *

An array of placement constraint objects to use for tasks in your service. * You can specify a maximum of 10 constraints per task (this limit includes * constraints in the task definition and those specified at runtime).

*/ inline CreateServiceRequest& AddPlacementConstraints(PlacementConstraint&& value) { m_placementConstraintsHasBeenSet = true; m_placementConstraints.push_back(std::move(value)); return *this; } /** *

The placement strategy objects to use for tasks in your service. You can * specify a maximum of five strategy rules per service.

*/ inline const Aws::Vector& GetPlacementStrategy() const{ return m_placementStrategy; } /** *

The placement strategy objects to use for tasks in your service. You can * specify a maximum of five strategy rules per service.

*/ inline bool PlacementStrategyHasBeenSet() const { return m_placementStrategyHasBeenSet; } /** *

The placement strategy objects to use for tasks in your service. You can * specify a maximum of five strategy rules per service.

*/ inline void SetPlacementStrategy(const Aws::Vector& value) { m_placementStrategyHasBeenSet = true; m_placementStrategy = value; } /** *

The placement strategy objects to use for tasks in your service. You can * specify a maximum of five strategy rules per service.

*/ inline void SetPlacementStrategy(Aws::Vector&& value) { m_placementStrategyHasBeenSet = true; m_placementStrategy = std::move(value); } /** *

The placement strategy objects to use for tasks in your service. You can * specify a maximum of five strategy rules per service.

*/ inline CreateServiceRequest& WithPlacementStrategy(const Aws::Vector& value) { SetPlacementStrategy(value); return *this;} /** *

The placement strategy objects to use for tasks in your service. You can * specify a maximum of five strategy rules per service.

*/ inline CreateServiceRequest& WithPlacementStrategy(Aws::Vector&& value) { SetPlacementStrategy(std::move(value)); return *this;} /** *

The placement strategy objects to use for tasks in your service. You can * specify a maximum of five strategy rules per service.

*/ inline CreateServiceRequest& AddPlacementStrategy(const PlacementStrategy& value) { m_placementStrategyHasBeenSet = true; m_placementStrategy.push_back(value); return *this; } /** *

The placement strategy objects to use for tasks in your service. You can * specify a maximum of five strategy rules per service.

*/ inline CreateServiceRequest& AddPlacementStrategy(PlacementStrategy&& value) { m_placementStrategyHasBeenSet = true; m_placementStrategy.push_back(std::move(value)); return *this; } /** *

The network configuration for the service. This parameter is required for * task definitions that use the awsvpc network mode to receive their * own elastic network interface, and it is not supported for other network modes. * For more information, see Task * Networking in the Amazon Elastic Container Service Developer * Guide.

*/ inline const NetworkConfiguration& GetNetworkConfiguration() const{ return m_networkConfiguration; } /** *

The network configuration for the service. This parameter is required for * task definitions that use the awsvpc network mode to receive their * own elastic network interface, and it is not supported for other network modes. * For more information, see Task * Networking in the Amazon Elastic Container Service Developer * Guide.

*/ inline bool NetworkConfigurationHasBeenSet() const { return m_networkConfigurationHasBeenSet; } /** *

The network configuration for the service. This parameter is required for * task definitions that use the awsvpc network mode to receive their * own elastic network interface, and it is not supported for other network modes. * For more information, see Task * Networking in the Amazon Elastic Container Service Developer * Guide.

*/ inline void SetNetworkConfiguration(const NetworkConfiguration& value) { m_networkConfigurationHasBeenSet = true; m_networkConfiguration = value; } /** *

The network configuration for the service. This parameter is required for * task definitions that use the awsvpc network mode to receive their * own elastic network interface, and it is not supported for other network modes. * For more information, see Task * Networking in the Amazon Elastic Container Service Developer * Guide.

*/ inline void SetNetworkConfiguration(NetworkConfiguration&& value) { m_networkConfigurationHasBeenSet = true; m_networkConfiguration = std::move(value); } /** *

The network configuration for the service. This parameter is required for * task definitions that use the awsvpc network mode to receive their * own elastic network interface, and it is not supported for other network modes. * For more information, see Task * Networking in the Amazon Elastic Container Service Developer * Guide.

*/ inline CreateServiceRequest& WithNetworkConfiguration(const NetworkConfiguration& value) { SetNetworkConfiguration(value); return *this;} /** *

The network configuration for the service. This parameter is required for * task definitions that use the awsvpc network mode to receive their * own elastic network interface, and it is not supported for other network modes. * For more information, see Task * Networking in the Amazon Elastic Container Service Developer * Guide.

*/ inline CreateServiceRequest& WithNetworkConfiguration(NetworkConfiguration&& value) { SetNetworkConfiguration(std::move(value)); return *this;} /** *

The period of time, in seconds, that the Amazon ECS service scheduler should * ignore unhealthy Elastic Load Balancing target health checks after a task has * first started. This is only used when your service is configured to use a load * balancer. If your service has a load balancer defined and you don't specify a * health check grace period value, the default value of 0 is * used.

If your service's tasks take a while to start and respond to * Elastic Load Balancing health checks, you can specify a health check grace * period of up to 2,147,483,647 seconds. During that time, the Amazon ECS service * scheduler ignores health check status. This grace period can prevent the service * scheduler from marking tasks as unhealthy and stopping them before they have * time to come up.

*/ inline int GetHealthCheckGracePeriodSeconds() const{ return m_healthCheckGracePeriodSeconds; } /** *

The period of time, in seconds, that the Amazon ECS service scheduler should * ignore unhealthy Elastic Load Balancing target health checks after a task has * first started. This is only used when your service is configured to use a load * balancer. If your service has a load balancer defined and you don't specify a * health check grace period value, the default value of 0 is * used.

If your service's tasks take a while to start and respond to * Elastic Load Balancing health checks, you can specify a health check grace * period of up to 2,147,483,647 seconds. During that time, the Amazon ECS service * scheduler ignores health check status. This grace period can prevent the service * scheduler from marking tasks as unhealthy and stopping them before they have * time to come up.

*/ inline bool HealthCheckGracePeriodSecondsHasBeenSet() const { return m_healthCheckGracePeriodSecondsHasBeenSet; } /** *

The period of time, in seconds, that the Amazon ECS service scheduler should * ignore unhealthy Elastic Load Balancing target health checks after a task has * first started. This is only used when your service is configured to use a load * balancer. If your service has a load balancer defined and you don't specify a * health check grace period value, the default value of 0 is * used.

If your service's tasks take a while to start and respond to * Elastic Load Balancing health checks, you can specify a health check grace * period of up to 2,147,483,647 seconds. During that time, the Amazon ECS service * scheduler ignores health check status. This grace period can prevent the service * scheduler from marking tasks as unhealthy and stopping them before they have * time to come up.

*/ inline void SetHealthCheckGracePeriodSeconds(int value) { m_healthCheckGracePeriodSecondsHasBeenSet = true; m_healthCheckGracePeriodSeconds = value; } /** *

The period of time, in seconds, that the Amazon ECS service scheduler should * ignore unhealthy Elastic Load Balancing target health checks after a task has * first started. This is only used when your service is configured to use a load * balancer. If your service has a load balancer defined and you don't specify a * health check grace period value, the default value of 0 is * used.

If your service's tasks take a while to start and respond to * Elastic Load Balancing health checks, you can specify a health check grace * period of up to 2,147,483,647 seconds. During that time, the Amazon ECS service * scheduler ignores health check status. This grace period can prevent the service * scheduler from marking tasks as unhealthy and stopping them before they have * time to come up.

*/ inline CreateServiceRequest& WithHealthCheckGracePeriodSeconds(int value) { SetHealthCheckGracePeriodSeconds(value); return *this;} /** *

The scheduling strategy to use for the service. For more information, see Services.

*

There are two service scheduler strategies available:

  • * REPLICA-The replica scheduling strategy places and maintains the * desired number of tasks across your cluster. By default, the service scheduler * spreads tasks across Availability Zones. You can use task placement strategies * and constraints to customize task placement decisions. This scheduler strategy * is required if the service is using the CODE_DEPLOY or * EXTERNAL deployment controller types.

  • * DAEMON-The daemon scheduling strategy deploys exactly one task on * each active container instance that meets all of the task placement constraints * that you specify in your cluster. The service scheduler also evaluates the task * placement constraints for running tasks and will stop tasks that do not meet the * placement constraints. When you're using this strategy, you don't need to * specify a desired number of tasks, a task placement strategy, or use Service * Auto Scaling policies.

    Tasks using the Fargate launch type or the * CODE_DEPLOY or EXTERNAL deployment controller types * don't support the DAEMON scheduling strategy.

  • *
*/ inline const SchedulingStrategy& GetSchedulingStrategy() const{ return m_schedulingStrategy; } /** *

The scheduling strategy to use for the service. For more information, see Services.

*

There are two service scheduler strategies available:

  • * REPLICA-The replica scheduling strategy places and maintains the * desired number of tasks across your cluster. By default, the service scheduler * spreads tasks across Availability Zones. You can use task placement strategies * and constraints to customize task placement decisions. This scheduler strategy * is required if the service is using the CODE_DEPLOY or * EXTERNAL deployment controller types.

  • * DAEMON-The daemon scheduling strategy deploys exactly one task on * each active container instance that meets all of the task placement constraints * that you specify in your cluster. The service scheduler also evaluates the task * placement constraints for running tasks and will stop tasks that do not meet the * placement constraints. When you're using this strategy, you don't need to * specify a desired number of tasks, a task placement strategy, or use Service * Auto Scaling policies.

    Tasks using the Fargate launch type or the * CODE_DEPLOY or EXTERNAL deployment controller types * don't support the DAEMON scheduling strategy.

  • *
*/ inline bool SchedulingStrategyHasBeenSet() const { return m_schedulingStrategyHasBeenSet; } /** *

The scheduling strategy to use for the service. For more information, see Services.

*

There are two service scheduler strategies available:

  • * REPLICA-The replica scheduling strategy places and maintains the * desired number of tasks across your cluster. By default, the service scheduler * spreads tasks across Availability Zones. You can use task placement strategies * and constraints to customize task placement decisions. This scheduler strategy * is required if the service is using the CODE_DEPLOY or * EXTERNAL deployment controller types.

  • * DAEMON-The daemon scheduling strategy deploys exactly one task on * each active container instance that meets all of the task placement constraints * that you specify in your cluster. The service scheduler also evaluates the task * placement constraints for running tasks and will stop tasks that do not meet the * placement constraints. When you're using this strategy, you don't need to * specify a desired number of tasks, a task placement strategy, or use Service * Auto Scaling policies.

    Tasks using the Fargate launch type or the * CODE_DEPLOY or EXTERNAL deployment controller types * don't support the DAEMON scheduling strategy.

  • *
*/ inline void SetSchedulingStrategy(const SchedulingStrategy& value) { m_schedulingStrategyHasBeenSet = true; m_schedulingStrategy = value; } /** *

The scheduling strategy to use for the service. For more information, see Services.

*

There are two service scheduler strategies available:

  • * REPLICA-The replica scheduling strategy places and maintains the * desired number of tasks across your cluster. By default, the service scheduler * spreads tasks across Availability Zones. You can use task placement strategies * and constraints to customize task placement decisions. This scheduler strategy * is required if the service is using the CODE_DEPLOY or * EXTERNAL deployment controller types.

  • * DAEMON-The daemon scheduling strategy deploys exactly one task on * each active container instance that meets all of the task placement constraints * that you specify in your cluster. The service scheduler also evaluates the task * placement constraints for running tasks and will stop tasks that do not meet the * placement constraints. When you're using this strategy, you don't need to * specify a desired number of tasks, a task placement strategy, or use Service * Auto Scaling policies.

    Tasks using the Fargate launch type or the * CODE_DEPLOY or EXTERNAL deployment controller types * don't support the DAEMON scheduling strategy.

  • *
*/ inline void SetSchedulingStrategy(SchedulingStrategy&& value) { m_schedulingStrategyHasBeenSet = true; m_schedulingStrategy = std::move(value); } /** *

The scheduling strategy to use for the service. For more information, see Services.

*

There are two service scheduler strategies available:

  • * REPLICA-The replica scheduling strategy places and maintains the * desired number of tasks across your cluster. By default, the service scheduler * spreads tasks across Availability Zones. You can use task placement strategies * and constraints to customize task placement decisions. This scheduler strategy * is required if the service is using the CODE_DEPLOY or * EXTERNAL deployment controller types.

  • * DAEMON-The daemon scheduling strategy deploys exactly one task on * each active container instance that meets all of the task placement constraints * that you specify in your cluster. The service scheduler also evaluates the task * placement constraints for running tasks and will stop tasks that do not meet the * placement constraints. When you're using this strategy, you don't need to * specify a desired number of tasks, a task placement strategy, or use Service * Auto Scaling policies.

    Tasks using the Fargate launch type or the * CODE_DEPLOY or EXTERNAL deployment controller types * don't support the DAEMON scheduling strategy.

  • *
*/ inline CreateServiceRequest& WithSchedulingStrategy(const SchedulingStrategy& value) { SetSchedulingStrategy(value); return *this;} /** *

The scheduling strategy to use for the service. For more information, see Services.

*

There are two service scheduler strategies available:

  • * REPLICA-The replica scheduling strategy places and maintains the * desired number of tasks across your cluster. By default, the service scheduler * spreads tasks across Availability Zones. You can use task placement strategies * and constraints to customize task placement decisions. This scheduler strategy * is required if the service is using the CODE_DEPLOY or * EXTERNAL deployment controller types.

  • * DAEMON-The daemon scheduling strategy deploys exactly one task on * each active container instance that meets all of the task placement constraints * that you specify in your cluster. The service scheduler also evaluates the task * placement constraints for running tasks and will stop tasks that do not meet the * placement constraints. When you're using this strategy, you don't need to * specify a desired number of tasks, a task placement strategy, or use Service * Auto Scaling policies.

    Tasks using the Fargate launch type or the * CODE_DEPLOY or EXTERNAL deployment controller types * don't support the DAEMON scheduling strategy.

  • *
*/ inline CreateServiceRequest& WithSchedulingStrategy(SchedulingStrategy&& value) { SetSchedulingStrategy(std::move(value)); return *this;} /** *

The deployment controller to use for the service.

*/ inline const DeploymentController& GetDeploymentController() const{ return m_deploymentController; } /** *

The deployment controller to use for the service.

*/ inline bool DeploymentControllerHasBeenSet() const { return m_deploymentControllerHasBeenSet; } /** *

The deployment controller to use for the service.

*/ inline void SetDeploymentController(const DeploymentController& value) { m_deploymentControllerHasBeenSet = true; m_deploymentController = value; } /** *

The deployment controller to use for the service.

*/ inline void SetDeploymentController(DeploymentController&& value) { m_deploymentControllerHasBeenSet = true; m_deploymentController = std::move(value); } /** *

The deployment controller to use for the service.

*/ inline CreateServiceRequest& WithDeploymentController(const DeploymentController& value) { SetDeploymentController(value); return *this;} /** *

The deployment controller to use for the service.

*/ inline CreateServiceRequest& WithDeploymentController(DeploymentController&& value) { SetDeploymentController(std::move(value)); return *this;} /** *

The metadata that you apply to the service to help you categorize and * organize them. Each tag consists of a key and an optional value, both of which * you define. When a service is deleted, the tags are deleted as well.

The * following basic restrictions apply to tags:

  • Maximum number of * tags per resource - 50

  • For each resource, each tag key must be * unique, and each tag key can have only one value.

  • Maximum key * length - 128 Unicode characters in UTF-8

  • Maximum value length * - 256 Unicode characters in UTF-8

  • If your tagging schema is * used across multiple services and resources, remember that other services may * have restrictions on allowed characters. Generally allowed characters are: * letters, numbers, and spaces representable in UTF-8, and the following * characters: + - = . _ : / @.

  • Tag keys and values are * case-sensitive.

  • Do not use aws:, * AWS:, or any upper or lowercase combination of such as a prefix for * either keys or values as it is reserved for AWS use. You cannot edit or delete * tag keys or values with this prefix. Tags with this prefix do not count against * your tags per resource limit.

*/ inline const Aws::Vector& GetTags() const{ return m_tags; } /** *

The metadata that you apply to the service to help you categorize and * organize them. Each tag consists of a key and an optional value, both of which * you define. When a service is deleted, the tags are deleted as well.

The * following basic restrictions apply to tags:

  • Maximum number of * tags per resource - 50

  • For each resource, each tag key must be * unique, and each tag key can have only one value.

  • Maximum key * length - 128 Unicode characters in UTF-8

  • Maximum value length * - 256 Unicode characters in UTF-8

  • If your tagging schema is * used across multiple services and resources, remember that other services may * have restrictions on allowed characters. Generally allowed characters are: * letters, numbers, and spaces representable in UTF-8, and the following * characters: + - = . _ : / @.

  • Tag keys and values are * case-sensitive.

  • Do not use aws:, * AWS:, or any upper or lowercase combination of such as a prefix for * either keys or values as it is reserved for AWS use. You cannot edit or delete * tag keys or values with this prefix. Tags with this prefix do not count against * your tags per resource limit.

*/ inline bool TagsHasBeenSet() const { return m_tagsHasBeenSet; } /** *

The metadata that you apply to the service to help you categorize and * organize them. Each tag consists of a key and an optional value, both of which * you define. When a service is deleted, the tags are deleted as well.

The * following basic restrictions apply to tags:

  • Maximum number of * tags per resource - 50

  • For each resource, each tag key must be * unique, and each tag key can have only one value.

  • Maximum key * length - 128 Unicode characters in UTF-8

  • Maximum value length * - 256 Unicode characters in UTF-8

  • If your tagging schema is * used across multiple services and resources, remember that other services may * have restrictions on allowed characters. Generally allowed characters are: * letters, numbers, and spaces representable in UTF-8, and the following * characters: + - = . _ : / @.

  • Tag keys and values are * case-sensitive.

  • Do not use aws:, * AWS:, or any upper or lowercase combination of such as a prefix for * either keys or values as it is reserved for AWS use. You cannot edit or delete * tag keys or values with this prefix. Tags with this prefix do not count against * your tags per resource limit.

*/ inline void SetTags(const Aws::Vector& value) { m_tagsHasBeenSet = true; m_tags = value; } /** *

The metadata that you apply to the service to help you categorize and * organize them. Each tag consists of a key and an optional value, both of which * you define. When a service is deleted, the tags are deleted as well.

The * following basic restrictions apply to tags:

  • Maximum number of * tags per resource - 50

  • For each resource, each tag key must be * unique, and each tag key can have only one value.

  • Maximum key * length - 128 Unicode characters in UTF-8

  • Maximum value length * - 256 Unicode characters in UTF-8

  • If your tagging schema is * used across multiple services and resources, remember that other services may * have restrictions on allowed characters. Generally allowed characters are: * letters, numbers, and spaces representable in UTF-8, and the following * characters: + - = . _ : / @.

  • Tag keys and values are * case-sensitive.

  • Do not use aws:, * AWS:, or any upper or lowercase combination of such as a prefix for * either keys or values as it is reserved for AWS use. You cannot edit or delete * tag keys or values with this prefix. Tags with this prefix do not count against * your tags per resource limit.

*/ inline void SetTags(Aws::Vector&& value) { m_tagsHasBeenSet = true; m_tags = std::move(value); } /** *

The metadata that you apply to the service to help you categorize and * organize them. Each tag consists of a key and an optional value, both of which * you define. When a service is deleted, the tags are deleted as well.

The * following basic restrictions apply to tags:

  • Maximum number of * tags per resource - 50

  • For each resource, each tag key must be * unique, and each tag key can have only one value.

  • Maximum key * length - 128 Unicode characters in UTF-8

  • Maximum value length * - 256 Unicode characters in UTF-8

  • If your tagging schema is * used across multiple services and resources, remember that other services may * have restrictions on allowed characters. Generally allowed characters are: * letters, numbers, and spaces representable in UTF-8, and the following * characters: + - = . _ : / @.

  • Tag keys and values are * case-sensitive.

  • Do not use aws:, * AWS:, or any upper or lowercase combination of such as a prefix for * either keys or values as it is reserved for AWS use. You cannot edit or delete * tag keys or values with this prefix. Tags with this prefix do not count against * your tags per resource limit.

*/ inline CreateServiceRequest& WithTags(const Aws::Vector& value) { SetTags(value); return *this;} /** *

The metadata that you apply to the service to help you categorize and * organize them. Each tag consists of a key and an optional value, both of which * you define. When a service is deleted, the tags are deleted as well.

The * following basic restrictions apply to tags:

  • Maximum number of * tags per resource - 50

  • For each resource, each tag key must be * unique, and each tag key can have only one value.

  • Maximum key * length - 128 Unicode characters in UTF-8

  • Maximum value length * - 256 Unicode characters in UTF-8

  • If your tagging schema is * used across multiple services and resources, remember that other services may * have restrictions on allowed characters. Generally allowed characters are: * letters, numbers, and spaces representable in UTF-8, and the following * characters: + - = . _ : / @.

  • Tag keys and values are * case-sensitive.

  • Do not use aws:, * AWS:, or any upper or lowercase combination of such as a prefix for * either keys or values as it is reserved for AWS use. You cannot edit or delete * tag keys or values with this prefix. Tags with this prefix do not count against * your tags per resource limit.

*/ inline CreateServiceRequest& WithTags(Aws::Vector&& value) { SetTags(std::move(value)); return *this;} /** *

The metadata that you apply to the service to help you categorize and * organize them. Each tag consists of a key and an optional value, both of which * you define. When a service is deleted, the tags are deleted as well.

The * following basic restrictions apply to tags:

  • Maximum number of * tags per resource - 50

  • For each resource, each tag key must be * unique, and each tag key can have only one value.

  • Maximum key * length - 128 Unicode characters in UTF-8

  • Maximum value length * - 256 Unicode characters in UTF-8

  • If your tagging schema is * used across multiple services and resources, remember that other services may * have restrictions on allowed characters. Generally allowed characters are: * letters, numbers, and spaces representable in UTF-8, and the following * characters: + - = . _ : / @.

  • Tag keys and values are * case-sensitive.

  • Do not use aws:, * AWS:, or any upper or lowercase combination of such as a prefix for * either keys or values as it is reserved for AWS use. You cannot edit or delete * tag keys or values with this prefix. Tags with this prefix do not count against * your tags per resource limit.

*/ inline CreateServiceRequest& AddTags(const Tag& value) { m_tagsHasBeenSet = true; m_tags.push_back(value); return *this; } /** *

The metadata that you apply to the service to help you categorize and * organize them. Each tag consists of a key and an optional value, both of which * you define. When a service is deleted, the tags are deleted as well.

The * following basic restrictions apply to tags:

  • Maximum number of * tags per resource - 50

  • For each resource, each tag key must be * unique, and each tag key can have only one value.

  • Maximum key * length - 128 Unicode characters in UTF-8

  • Maximum value length * - 256 Unicode characters in UTF-8

  • If your tagging schema is * used across multiple services and resources, remember that other services may * have restrictions on allowed characters. Generally allowed characters are: * letters, numbers, and spaces representable in UTF-8, and the following * characters: + - = . _ : / @.

  • Tag keys and values are * case-sensitive.

  • Do not use aws:, * AWS:, or any upper or lowercase combination of such as a prefix for * either keys or values as it is reserved for AWS use. You cannot edit or delete * tag keys or values with this prefix. Tags with this prefix do not count against * your tags per resource limit.

*/ inline CreateServiceRequest& AddTags(Tag&& value) { m_tagsHasBeenSet = true; m_tags.push_back(std::move(value)); return *this; } /** *

Specifies whether to enable Amazon ECS managed tags for the tasks within the
 * service. For more information, see "Tagging Your Amazon ECS Resources" in
 * the Amazon Elastic Container Service Developer Guide.
 */
// Read accessor for the enableECSManagedTags flag.
inline bool GetEnableECSManagedTags() const { return m_enableECSManagedTags; }

/**

Specifies whether to enable Amazon ECS managed tags for the tasks within the
 * service. For more information, see "Tagging Your Amazon ECS Resources" in
 * the Amazon Elastic Container Service Developer Guide.
 */
// True when the flag was explicitly set and will be serialized.
inline bool EnableECSManagedTagsHasBeenSet() const
{
    return m_enableECSManagedTagsHasBeenSet;
}

/**

Specifies whether to enable Amazon ECS managed tags for the tasks within the
 * service. For more information, see "Tagging Your Amazon ECS Resources" in
 * the Amazon Elastic Container Service Developer Guide.
 */
// Stores the flag and marks the field as explicitly set.
inline void SetEnableECSManagedTags(bool value)
{
    m_enableECSManagedTags = value;
    m_enableECSManagedTagsHasBeenSet = true;
}

/**

Specifies whether to enable Amazon ECS managed tags for the tasks within the
 * service. For more information, see "Tagging Your Amazon ECS Resources" in
 * the Amazon Elastic Container Service Developer Guide.
 */
// Fluent setter: delegates to SetEnableECSManagedTags, returns *this.
inline CreateServiceRequest& WithEnableECSManagedTags(bool value)
{
    SetEnableECSManagedTags(value);
    return *this;
}

/**

Specifies whether to propagate the tags from the task definition or the
 * service to the tasks in the service. If no value is specified, the tags
 * are not propagated. Tags can only be propagated to the tasks within the
 * service during service creation. To add tags to a task after service
 * creation, use the <code>TagResource</code> API action.
 */
// Read accessor for the propagateTags setting.
inline const PropagateTags& GetPropagateTags() const { return m_propagateTags; }

/**

Specifies whether to propagate the tags from the task definition or the
 * service to the tasks in the service. If no value is specified, the tags
 * are not propagated. Tags can only be propagated to the tasks within the
 * service during service creation. To add tags to a task after service
 * creation, use the <code>TagResource</code> API action.
 */
// True when the setting was explicitly set and will be serialized.
inline bool PropagateTagsHasBeenSet() const
{
    return m_propagateTagsHasBeenSet;
}

/**

Specifies whether to propagate the tags from the task definition or the
 * service to the tasks in the service. If no value is specified, the tags
 * are not propagated. Tags can only be propagated to the tasks within the
 * service during service creation. To add tags to a task after service
 * creation, use the <code>TagResource</code> API action.
 */
// Copy-setter: stores the value and marks the field as explicitly set.
inline void SetPropagateTags(const PropagateTags& value)
{
    m_propagateTags = value;
    m_propagateTagsHasBeenSet = true;
}

/**

Specifies whether to propagate the tags from the task definition or the
 * service to the tasks in the service. If no value is specified, the tags
 * are not propagated. Tags can only be propagated to the tasks within the
 * service during service creation. To add tags to a task after service
 * creation, use the <code>TagResource</code> API action.
 */
// Move-setter: stores the value and marks the field as explicitly set.
inline void SetPropagateTags(PropagateTags&& value)
{
    m_propagateTags = std::move(value);
    m_propagateTagsHasBeenSet = true;
}

/**

Specifies whether to propagate the tags from the task definition or the
 * service to the tasks in the service. If no value is specified, the tags
 * are not propagated. Tags can only be propagated to the tasks within the
 * service during service creation. To add tags to a task after service
 * creation, use the <code>TagResource</code> API action.
 */
// Fluent copy-setter: delegates to SetPropagateTags, returns *this.
inline CreateServiceRequest& WithPropagateTags(const PropagateTags& value)
{
    SetPropagateTags(value);
    return *this;
}

/**

Specifies whether to propagate the tags from the task definition or the * service to the tasks in the service. If no value is specified, the tags are not * propagated. Tags can only be propagated to the tasks within the service during * service creation. To add tags to a task after service creation, use the * TagResource API action.

*/ inline CreateServiceRequest& WithPropagateTags(PropagateTags&& value) { SetPropagateTags(std::move(value)); return *this;} private: Aws::String m_cluster; bool m_clusterHasBeenSet; Aws::String m_serviceName; bool m_serviceNameHasBeenSet; Aws::String m_taskDefinition; bool m_taskDefinitionHasBeenSet; Aws::Vector m_loadBalancers; bool m_loadBalancersHasBeenSet; Aws::Vector m_serviceRegistries; bool m_serviceRegistriesHasBeenSet; int m_desiredCount; bool m_desiredCountHasBeenSet; Aws::String m_clientToken; bool m_clientTokenHasBeenSet; LaunchType m_launchType; bool m_launchTypeHasBeenSet; Aws::Vector m_capacityProviderStrategy; bool m_capacityProviderStrategyHasBeenSet; Aws::String m_platformVersion; bool m_platformVersionHasBeenSet; Aws::String m_role; bool m_roleHasBeenSet; DeploymentConfiguration m_deploymentConfiguration; bool m_deploymentConfigurationHasBeenSet; Aws::Vector m_placementConstraints; bool m_placementConstraintsHasBeenSet; Aws::Vector m_placementStrategy; bool m_placementStrategyHasBeenSet; NetworkConfiguration m_networkConfiguration; bool m_networkConfigurationHasBeenSet; int m_healthCheckGracePeriodSeconds; bool m_healthCheckGracePeriodSecondsHasBeenSet; SchedulingStrategy m_schedulingStrategy; bool m_schedulingStrategyHasBeenSet; DeploymentController m_deploymentController; bool m_deploymentControllerHasBeenSet; Aws::Vector m_tags; bool m_tagsHasBeenSet; bool m_enableECSManagedTags; bool m_enableECSManagedTagsHasBeenSet; PropagateTags m_propagateTags; bool m_propagateTagsHasBeenSet; }; } // namespace Model } // namespace ECS } // namespace Aws