1// Code generated by smithy-go-codegen DO NOT EDIT.
2
3package ecs
4
5import (
6	"context"
7	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
8	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
9	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
10	"github.com/aws/smithy-go/middleware"
11	smithyhttp "github.com/aws/smithy-go/transport/http"
12)
13
// CreateService runs and maintains a desired number of tasks from a specified task definition.
15// If the number of tasks running in a service drops below the desiredCount, Amazon
16// ECS runs another copy of the task in the specified cluster. To update an
17// existing service, see the UpdateService action. In addition to maintaining the
18// desired count of tasks in your service, you can optionally run your service
19// behind one or more load balancers. The load balancers distribute traffic across
20// the tasks that are associated with the service. For more information, see
21// Service Load Balancing
22// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html)
23// in the Amazon Elastic Container Service Developer Guide. Tasks for services that
24// do not use a load balancer are considered healthy if they're in the RUNNING
25// state. Tasks for services that do use a load balancer are considered healthy if
26// they're in the RUNNING state and the container instance that they're hosted on
27// is reported as healthy by the load balancer. There are two service scheduler
28// strategies available:
29//
30// * REPLICA - The replica scheduling strategy places and
31// maintains the desired number of tasks across your cluster. By default, the
32// service scheduler spreads tasks across Availability Zones. You can use task
33// placement strategies and constraints to customize task placement decisions. For
34// more information, see Service Scheduler Concepts
35// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html)
36// in the Amazon Elastic Container Service Developer Guide.
37//
38// * DAEMON - The daemon
39// scheduling strategy deploys exactly one task on each active container instance
40// that meets all of the task placement constraints that you specify in your
41// cluster. The service scheduler also evaluates the task placement constraints for
42// running tasks and will stop tasks that do not meet the placement constraints.
43// When using this strategy, you don't need to specify a desired number of tasks, a
44// task placement strategy, or use Service Auto Scaling policies. For more
45// information, see Service Scheduler Concepts
46// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html)
47// in the Amazon Elastic Container Service Developer Guide.
48//
49// You can optionally
50// specify a deployment configuration for your service. The deployment is triggered
51// by changing properties, such as the task definition or the desired count of a
52// service, with an UpdateService operation. The default value for a replica
53// service for minimumHealthyPercent is 100%. The default value for a daemon
54// service for minimumHealthyPercent is 0%. If a service is using the ECS
55// deployment controller, the minimum healthy percent represents a lower limit on
56// the number of tasks in a service that must remain in the RUNNING state during a
57// deployment, as a percentage of the desired number of tasks (rounded up to the
58// nearest integer), and while any container instances are in the DRAINING state if
59// the service contains tasks using the EC2 launch type. This parameter enables you
60// to deploy without using additional cluster capacity. For example, if your
61// service has a desired number of four tasks and a minimum healthy percent of 50%,
62// the scheduler might stop two existing tasks to free up cluster capacity before
63// starting two new tasks. Tasks for services that do not use a load balancer are
64// considered healthy if they're in the RUNNING state. Tasks for services that do
65// use a load balancer are considered healthy if they're in the RUNNING state and
66// they're reported as healthy by the load balancer. The default value for minimum
67// healthy percent is 100%. If a service is using the ECS deployment controller,
68// the maximum percent parameter represents an upper limit on the number of tasks
69// in a service that are allowed in the RUNNING or PENDING state during a
70// deployment, as a percentage of the desired number of tasks (rounded down to the
71// nearest integer), and while any container instances are in the DRAINING state if
72// the service contains tasks using the EC2 launch type. This parameter enables you
73// to define the deployment batch size. For example, if your service has a desired
74// number of four tasks and a maximum percent value of 200%, the scheduler may
75// start four new tasks before stopping the four older tasks (provided that the
76// cluster resources required to do this are available). The default value for
77// maximum percent is 200%. If a service is using either the CODE_DEPLOY or
78// EXTERNAL deployment controller types and tasks that use the EC2 launch type, the
79// minimum healthy percent and maximum percent values are used only to define the
80// lower and upper limit on the number of the tasks in the service that remain in
81// the RUNNING state while the container instances are in the DRAINING state. If
82// the tasks in the service use the Fargate launch type, the minimum healthy
83// percent and maximum percent values aren't used, although they're currently
84// visible when describing your service. When creating a service that uses the
85// EXTERNAL deployment controller, you can specify only parameters that aren't
86// controlled at the task set level. The only required parameter is the service
87// name. You control your services using the CreateTaskSet operation. For more
88// information, see Amazon ECS Deployment Types
89// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html)
90// in the Amazon Elastic Container Service Developer Guide. When the service
91// scheduler launches new tasks, it determines task placement in your cluster using
92// the following logic:
93//
94// * Determine which of the container instances in your
95// cluster can support your service's task definition (for example, they have the
96// required CPU, memory, ports, and container instance attributes).
97//
98// * By default,
99// the service scheduler attempts to balance tasks across Availability Zones in
// this manner (although you can choose a different placement strategy with the
// placementStrategy parameter):
102//
103// * Sort the valid container instances, giving
104// priority to instances that have the fewest number of running tasks for this
105// service in their respective Availability Zone. For example, if zone A has one
106// running service task and zones B and C each have zero, valid container instances
107// in either zone B or C are considered optimal for placement.
108//
109// * Place the new
110// service task on a valid container instance in an optimal Availability Zone
111// (based on the previous steps), favoring container instances with the fewest
112// number of running tasks for this service.
113func (c *Client) CreateService(ctx context.Context, params *CreateServiceInput, optFns ...func(*Options)) (*CreateServiceOutput, error) {
114	if params == nil {
115		params = &CreateServiceInput{}
116	}
117
118	result, metadata, err := c.invokeOperation(ctx, "CreateService", params, optFns, addOperationCreateServiceMiddlewares)
119	if err != nil {
120		return nil, err
121	}
122
123	out := result.(*CreateServiceOutput)
124	out.ResultMetadata = metadata
125	return out, nil
126}
127
// CreateServiceInput contains the parameters for the CreateService operation.
type CreateServiceInput struct {

	// The name of your service. Up to 255 letters (uppercase and lowercase), numbers,
	// and hyphens are allowed. Service names must be unique within a cluster, but you
	// can have similarly named services in multiple clusters within a Region or across
	// multiple Regions.
	//
	// This member is required.
	ServiceName *string

	// The capacity provider strategy to use for the service. A capacity provider
	// strategy consists of one or more capacity providers along with the base and
	// weight to assign to them. A capacity provider must be associated with the
	// cluster to be used in a capacity provider strategy. The
	// PutClusterCapacityProviders API is used to associate a capacity provider with a
	// cluster. Only capacity providers with an ACTIVE or UPDATING status can be used.
	// If a capacityProviderStrategy is specified, the launchType parameter must be
	// omitted. If no capacityProviderStrategy or launchType is specified, the
	// defaultCapacityProviderStrategy for the cluster is used. If specifying a
	// capacity provider that uses an Auto Scaling group, the capacity provider must
	// already be created. New capacity providers can be created with the
	// CreateCapacityProvider API operation. To use an AWS Fargate capacity provider,
	// specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate
	// capacity providers are available to all accounts and only need to be associated
	// with a cluster to be used. The PutClusterCapacityProviders API operation is used
	// to update the list of available capacity providers for a cluster after the
	// cluster is created.
	CapacityProviderStrategy []types.CapacityProviderStrategyItem

	// Unique, case-sensitive identifier that you provide to ensure the idempotency of
	// the request. Up to 32 ASCII characters are allowed.
	ClientToken *string

	// The short name or full Amazon Resource Name (ARN) of the cluster on which to run
	// your service. If you do not specify a cluster, the default cluster is assumed.
	Cluster *string

	// Optional deployment parameters that control how many tasks run during the
	// deployment and the ordering of stopping and starting tasks.
	DeploymentConfiguration *types.DeploymentConfiguration

	// The deployment controller to use for the service.
	DeploymentController *types.DeploymentController

	// The number of instantiations of the specified task definition to place and keep
	// running on your cluster. This is required if schedulingStrategy is REPLICA or is
	// not specified. If schedulingStrategy is DAEMON then this is not required.
	DesiredCount *int32

	// Specifies whether to enable Amazon ECS managed tags for the tasks within the
	// service. For more information, see Tagging Your Amazon ECS Resources
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html)
	// in the Amazon Elastic Container Service Developer Guide.
	EnableECSManagedTags bool

	// The period of time, in seconds, that the Amazon ECS service scheduler should
	// ignore unhealthy Elastic Load Balancing target health checks after a task has
	// first started. This is only used when your service is configured to use a load
	// balancer. If your service has a load balancer defined and you don't specify a
	// health check grace period value, the default value of 0 is used. If your
	// service's tasks take a while to start and respond to Elastic Load Balancing
	// health checks, you can specify a health check grace period of up to
	// 2,147,483,647 seconds. During that time, the Amazon ECS service scheduler
	// ignores health check status. This grace period can prevent the service scheduler
	// from marking tasks as unhealthy and stopping them before they have time to come
	// up.
	HealthCheckGracePeriodSeconds *int32

	// The launch type on which to run your service. For more information, see Amazon
	// ECS Launch Types
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
	// in the Amazon Elastic Container Service Developer Guide. If a launchType is
	// specified, the capacityProviderStrategy parameter must be omitted.
	LaunchType types.LaunchType

	// A load balancer object representing the load balancers to use with your service.
	// For more information, see Service Load Balancing
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html)
	// in the Amazon Elastic Container Service Developer Guide. If the service is using
	// the rolling update (ECS) deployment controller and using either an Application
	// Load Balancer or Network Load Balancer, you must specify one or more target
	// group ARNs to attach to the service. The service-linked role is required for
	// services that make use of multiple target groups. For more information, see
	// Using Service-Linked Roles for Amazon ECS
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html)
	// in the Amazon Elastic Container Service Developer Guide. If the service is using
	// the CODE_DEPLOY deployment controller, the service is required to use either an
	// Application Load Balancer or Network Load Balancer. When creating an AWS
	// CodeDeploy deployment group, you specify two target groups (referred to as a
	// targetGroupPair). During a deployment, AWS CodeDeploy determines which task set
	// in your service has the status PRIMARY and associates one target group with it,
	// and then associates the other target group with the replacement task set. The
	// load balancer can also have up to two listeners: a required listener for
	// production traffic and an optional listener that allows you to perform validation
	// tests with Lambda functions before routing production traffic to it. After you
	// create a service using the ECS deployment controller, the load balancer name or
	// target group ARN, container name, and container port specified in the service
	// definition are immutable. If you are using the CODE_DEPLOY deployment
	// controller, these values can be changed when updating the service. For
	// Application Load Balancers and Network Load Balancers, this object must contain
	// the load balancer target group ARN, the container name (as it appears in a
	// container definition), and the container port to access from the load balancer.
	// The load balancer name parameter must be omitted. When a task from this service
	// is placed on a container instance, the container instance and port combination
	// is registered as a target in the target group specified here. For Classic Load
	// Balancers, this object must contain the load balancer name, the container name
	// (as it appears in a container definition), and the container port to access from
	// the load balancer. The target group ARN parameter must be omitted. When a task
	// from this service is placed on a container instance, the container instance is
	// registered with the load balancer specified here. Services with tasks that use
	// the awsvpc network mode (for example, those with the Fargate launch type) only
	// support Application Load Balancers and Network Load Balancers. Classic Load
	// Balancers are not supported. Also, when you create any target groups for these
	// services, you must choose ip as the target type, not instance, because tasks
	// that use the awsvpc network mode are associated with an elastic network
	// interface, not an Amazon EC2 instance.
	LoadBalancers []types.LoadBalancer

	// The network configuration for the service. This parameter is required for task
	// definitions that use the awsvpc network mode to receive their own elastic
	// network interface, and it is not supported for other network modes. For more
	// information, see Task Networking
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html)
	// in the Amazon Elastic Container Service Developer Guide.
	NetworkConfiguration *types.NetworkConfiguration

	// An array of placement constraint objects to use for tasks in your service. You
	// can specify a maximum of 10 constraints per task (this limit includes
	// constraints in the task definition and those specified at runtime).
	PlacementConstraints []types.PlacementConstraint

	// The placement strategy objects to use for tasks in your service. You can specify
	// a maximum of five strategy rules per service.
	PlacementStrategy []types.PlacementStrategy

	// The platform version that your tasks in the service are running on. A platform
	// version is specified only for tasks using the Fargate launch type. If one isn't
	// specified, the LATEST platform version is used by default. For more information,
	// see AWS Fargate Platform Versions
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html)
	// in the Amazon Elastic Container Service Developer Guide.
	PlatformVersion *string

	// Specifies whether to propagate the tags from the task definition or the service
	// to the tasks in the service. If no value is specified, the tags are not
	// propagated. Tags can only be propagated to the tasks within the service during
	// service creation. To add tags to a task after service creation, use the
	// TagResource API action.
	PropagateTags types.PropagateTags

	// The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon
	// ECS to make calls to your load balancer on your behalf. This parameter is only
	// permitted if you are using a load balancer with your service and your task
	// definition does not use the awsvpc network mode. If you specify the role
	// parameter, you must also specify a load balancer object with the loadBalancers
	// parameter. If your account has already created the Amazon ECS service-linked
	// role, that role is used by default for your service unless you specify a role
	// here. The service-linked role is required if your task definition uses the
	// awsvpc network mode or if the service is configured to use service discovery, an
	// external deployment controller, multiple target groups, or Elastic Inference
	// accelerators in which case you should not specify a role here. For more
	// information, see Using Service-Linked Roles for Amazon ECS
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html)
	// in the Amazon Elastic Container Service Developer Guide. If your specified role
	// has a path other than /, then you must either specify the full role ARN (this is
	// recommended) or prefix the role name with the path. For example, if a role with
	// the name bar has a path of /foo/ then you would specify /foo/bar as the role
	// name. For more information, see Friendly Names and Paths
	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names)
	// in the IAM User Guide.
	Role *string

	// The scheduling strategy to use for the service. For more information, see
	// Services
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html).
	// There are two service scheduler strategies available:
	//
	// * REPLICA-The replica
	// scheduling strategy places and maintains the desired number of tasks across your
	// cluster. By default, the service scheduler spreads tasks across Availability
	// Zones. You can use task placement strategies and constraints to customize task
	// placement decisions. This scheduler strategy is required if the service is using
	// the CODE_DEPLOY or EXTERNAL deployment controller types.
	//
	// * DAEMON-The daemon
	// scheduling strategy deploys exactly one task on each active container instance
	// that meets all of the task placement constraints that you specify in your
	// cluster. The service scheduler also evaluates the task placement constraints for
	// running tasks and will stop tasks that do not meet the placement constraints.
	// When you're using this strategy, you don't need to specify a desired number of
	// tasks, a task placement strategy, or use Service Auto Scaling policies. Tasks
	// using the Fargate launch type or the CODE_DEPLOY or EXTERNAL deployment
	// controller types don't support the DAEMON scheduling strategy.
	SchedulingStrategy types.SchedulingStrategy

	// The details of the service discovery registries to assign to this service. For
	// more information, see Service Discovery
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html).
	// Service discovery is supported for Fargate tasks if you are using platform
	// version v1.1.0 or later. For more information, see AWS Fargate Platform Versions
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html).
	ServiceRegistries []types.ServiceRegistry

	// The metadata that you apply to the service to help you categorize and organize
	// them. Each tag consists of a key and an optional value, both of which you
	// define. When a service is deleted, the tags are deleted as well. The following
	// basic restrictions apply to tags:
	//
	// * Maximum number of tags per resource - 50
	//
	// *
	// For each resource, each tag key must be unique, and each tag key can have only
	// one value.
	//
	// * Maximum key length - 128 Unicode characters in UTF-8
	//
	// * Maximum
	// value length - 256 Unicode characters in UTF-8
	//
	// * If your tagging schema is used
	// across multiple services and resources, remember that other services may have
	// restrictions on allowed characters. Generally allowed characters are: letters,
	// numbers, and spaces representable in UTF-8, and the following characters: + - =
	// . _ : / @.
	//
	// * Tag keys and values are case-sensitive.
	//
	// * Do not use aws:, AWS:,
	// or any upper or lowercase combination of such as a prefix for either keys or
	// values as it is reserved for AWS use. You cannot edit or delete tag keys or
	// values with this prefix. Tags with this prefix do not count against your tags
	// per resource limit.
	Tags []types.Tag

	// The family and revision (family:revision) or full ARN of the task definition to
	// run in your service. If a revision is not specified, the latest ACTIVE revision
	// is used. A task definition must be specified if the service is using either the
	// ECS or CODE_DEPLOY deployment controllers.
	TaskDefinition *string
}
368
// CreateServiceOutput is the response from the CreateService operation.
type CreateServiceOutput struct {

	// The full description of your service following the create call. If a service is
	// using the ECS deployment controller, the deploymentController and taskSets
	// parameters will not be returned. If the service is using the CODE_DEPLOY
	// deployment controller, the deploymentController, taskSets and deployments
	// parameters will be returned; however, the deployments parameter will be an empty
	// list.
	Service *types.Service

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}
382
// addOperationCreateServiceMiddlewares registers the full middleware stack for
// the CreateService operation on stack. The registration order below is
// significant: serialization/deserialization handlers are added first, then
// transport concerns (endpoint resolution, content length, payload hashing,
// retries, SigV4 signing), then response-metadata capture, and finally input
// validation and service metadata. It returns the first registration error
// encountered, or nil on success.
func addOperationCreateServiceMiddlewares(stack *middleware.Stack, options Options) (err error) {
	// AWS JSON 1.1 protocol serializer/deserializer for this operation.
	err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateService{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateService{}, middleware.After)
	if err != nil {
		return err
	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	// SigV4 signing requires the payload SHA-256 to be computed first.
	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
		return err
	}
	if err = addRetryMiddlewares(stack, options); err != nil {
		return err
	}
	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
		return err
	}
	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
		return err
	}
	if err = addClientUserAgent(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	// Validate required input members before the request is serialized.
	if err = addOpCreateServiceValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateService(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	return nil
}
445
446func newServiceMetadataMiddleware_opCreateService(region string) *awsmiddleware.RegisterServiceMetadata {
447	return &awsmiddleware.RegisterServiceMetadata{
448		Region:        region,
449		ServiceID:     ServiceID,
450		SigningName:   "ecs",
451		OperationName: "CreateService",
452	}
453}
454