1// Code generated by smithy-go-codegen DO NOT EDIT.
2
3package types
4
5// An object representing an AWS Batch array job.
6type ArrayProperties struct {
7
8	// The size of the array job.
9	Size int32
10}
11
12// An object representing the array properties of a job.
13type ArrayPropertiesDetail struct {
14
15	// The job index within the array that's associated with this job. This parameter
16	// is returned for array job children.
17	Index int32
18
19	// The size of the array job. This parameter is returned for parent array jobs.
20	Size int32
21
22	// A summary of the number of array job children in each available job status. This
23	// parameter is returned for parent array jobs.
24	StatusSummary map[string]int32
25}
26
27// An object representing the array properties of a job.
28type ArrayPropertiesSummary struct {
29
30	// The job index within the array that's associated with this job. This parameter
31	// is returned for children of array jobs.
32	Index int32
33
34	// The size of the array job. This parameter is returned for parent array jobs.
35	Size int32
36}
37
38// An object representing the details of a container that's part of a job attempt.
39type AttemptContainerDetail struct {
40
41	// The Amazon Resource Name (ARN) of the Amazon ECS container instance that hosts
42	// the job attempt.
43	ContainerInstanceArn *string
44
45	// The exit code for the job attempt. A non-zero exit code is considered a failure.
46	ExitCode int32
47
48	// The name of the CloudWatch Logs log stream associated with the container. The
49	// log group for AWS Batch jobs is /aws/batch/job. Each container attempt receives
50	// a log stream name when they reach the RUNNING status.
51	LogStreamName *string
52
53	// The network interfaces associated with the job attempt.
54	NetworkInterfaces []NetworkInterface
55
56	// A short (255 max characters) human-readable string to provide additional details
57	// about a running or stopped container.
58	Reason *string
59
60	// The Amazon Resource Name (ARN) of the Amazon ECS task that's associated with the
61	// job attempt. Each container attempt receives a task ARN when they reach the
62	// STARTING status.
63	TaskArn *string
64}
65
66// An object representing a job attempt.
67type AttemptDetail struct {
68
69	// Details about the container in this job attempt.
70	Container *AttemptContainerDetail
71
72	// The Unix timestamp (in milliseconds) for when the attempt was started (when the
73	// attempt transitioned from the STARTING state to the RUNNING state).
74	StartedAt int64
75
76	// A short, human-readable string to provide additional details about the current
77	// status of the job attempt.
78	StatusReason *string
79
80	// The Unix timestamp (in milliseconds) for when the attempt was stopped (when the
81	// attempt transitioned from the RUNNING state to a terminal state, such as
82	// SUCCEEDED or FAILED).
83	StoppedAt int64
84}
85
86// An object representing an AWS Batch compute environment.
87type ComputeEnvironmentDetail struct {
88
89	// The Amazon Resource Name (ARN) of the compute environment.
90	//
91	// This member is required.
92	ComputeEnvironmentArn *string
93
94	// The name of the compute environment. Up to 128 letters (uppercase and
95	// lowercase), numbers, hyphens, and underscores are allowed.
96	//
97	// This member is required.
98	ComputeEnvironmentName *string
99
100	// The Amazon Resource Name (ARN) of the underlying Amazon ECS cluster used by the
101	// compute environment.
102	//
103	// This member is required.
104	EcsClusterArn *string
105
106	// The compute resources defined for the compute environment. For more information,
107	// see Compute Environments
108	// (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)
109	// in the AWS Batch User Guide.
110	ComputeResources *ComputeResource
111
112	// The service role associated with the compute environment that allows AWS Batch
113	// to make calls to AWS API operations on your behalf. For more information, see
114	// AWS Batch service IAM role
115	// (https://docs.aws.amazon.com/batch/latest/userguide/service_IAM_role.html) in
116	// the AWS Batch User Guide.
117	ServiceRole *string
118
119	// The state of the compute environment. The valid values are ENABLED or DISABLED.
120	// If the state is ENABLED, then the AWS Batch scheduler can attempt to place jobs
121	// from an associated job queue on the compute resources within the environment. If
122	// the compute environment is managed, then it can scale its instances out or in
123	// automatically, based on the job queue demand. If the state is DISABLED, then the
124	// AWS Batch scheduler doesn't attempt to place jobs within the environment. Jobs
125	// in a STARTING or RUNNING state continue to progress normally. Managed compute
126	// environments in the DISABLED state don't scale out. However, they scale in to
127	// minvCpus value after instances become idle.
128	State CEState
129
130	// The current status of the compute environment (for example, CREATING or VALID).
131	Status CEStatus
132
133	// A short, human-readable string to provide additional details about the current
134	// status of the compute environment.
135	StatusReason *string
136
137	// The tags applied to the compute environment.
138	Tags map[string]string
139
140	// The type of the compute environment: MANAGED or UNMANAGED. For more information,
141	// see Compute Environments
142	// (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)
143	// in the AWS Batch User Guide.
144	Type CEType
145}
146
147// The order in which compute environments are tried for job placement within a
148// queue. Compute environments are tried in ascending order. For example, if two
149// compute environments are associated with a job queue, the compute environment
150// with a lower order integer value is tried for job placement first. Compute
151// environments must be in the VALID state before you can associate them with a job
152// queue. All of the compute environments must be either EC2 (EC2 or SPOT) or
153// Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be
154// mixed. All compute environments that are associated with a job queue must share
155// the same architecture. AWS Batch doesn't support mixing compute environment
156// architecture types in a single job queue.
157type ComputeEnvironmentOrder struct {
158
159	// The Amazon Resource Name (ARN) of the compute environment.
160	//
161	// This member is required.
162	ComputeEnvironment *string
163
164	// The order of the compute environment. Compute environments are tried in
165	// ascending order. For example, if two compute environments are associated with a
166	// job queue, the compute environment with a lower order integer value is tried for
167	// job placement first.
168	//
169	// This member is required.
170	Order int32
171}
172
173// An object representing an AWS Batch compute resource. For more information, see
174// Compute Environments
175// (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)
176// in the AWS Batch User Guide.
177type ComputeResource struct {
178
179	// The maximum number of Amazon EC2 vCPUs that a compute environment can reach.
180	// With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation
181	// strategies, AWS Batch might need to go above maxvCpus to meet your capacity
182	// requirements. In this event, AWS Batch will never go above maxvCpus by more than
183	// a single instance (e.g., no more than a single instance from among those
184	// specified in your compute environment).
185	//
186	// This member is required.
187	MaxvCpus int32
188
189	// The VPC subnets into which the compute resources are launched. These subnets
190	// must be within the same VPC. This parameter is required for jobs running on
191	// Fargate resources, where it can contain up to 16 subnets. For more information,
192	// see VPCs and Subnets
193	// (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) in the
194	// Amazon VPC User Guide.
195	//
196	// This member is required.
197	Subnets []string
198
199	// The type of compute environment: EC2, SPOT, FARGATE, or FARGATE_SPOT. For more
200	// information, see Compute Environments
201	// (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)
202	// in the AWS Batch User Guide. If you choose SPOT, you must also specify an Amazon
203	// EC2 Spot Fleet role with the spotIamFleetRole parameter. For more information,
204	// see Amazon EC2 Spot Fleet role
205	// (https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html) in
206	// the AWS Batch User Guide.
207	//
208	// This member is required.
209	Type CRType
210
211	// The allocation strategy to use for the compute resource if not enough instances
212	// of the best fitting instance type can be allocated. This might be because of
213	// availability of the instance type in the Region or Amazon EC2 service limits
214	// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html).
215	// For more information, see Allocation Strategies
216	// (https://docs.aws.amazon.com/batch/latest/userguide/allocation-strategies.html)
217	// in the AWS Batch User Guide. This parameter isn't applicable to jobs running on
218	// Fargate resources, and shouldn't be specified. BEST_FIT (default) AWS Batch
219	// selects an instance type that best fits the needs of the jobs with a preference
220	// for the lowest-cost instance type. If additional instances of the selected
221	// instance type aren't available, AWS Batch will wait for the additional instances
222	// to be available. If there are not enough instances available, or if the user is
223	// hitting Amazon EC2 service limits
224	// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html)
225	// then additional jobs aren't run until currently running jobs have completed.
226	// This allocation strategy keeps costs lower but can limit scaling. If you are
227	// using Spot Fleets with BEST_FIT then the Spot Fleet IAM Role must be specified.
228	// BEST_FIT_PROGRESSIVE AWS Batch will select additional instance types that are
229	// large enough to meet the requirements of the jobs in the queue, with a
230	// preference for instance types with a lower cost per unit vCPU. If additional
231	// instances of the previously selected instance types aren't available, AWS Batch
232	// will select new instance types. SPOT_CAPACITY_OPTIMIZED AWS Batch will select
233	// one or more instance types that are large enough to meet the requirements of the
234	// jobs in the queue, with a preference for instance types that are less likely to
235	// be interrupted. This allocation strategy is only available for Spot Instance
236	// compute resources. With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED
237	// strategies, AWS Batch might need to go above maxvCpus to meet your capacity
238	// requirements. In this event, AWS Batch never exceeds maxvCpus by more than a
239	// single instance.
240	AllocationStrategy CRAllocationStrategy
241
242	// The maximum percentage that a Spot Instance price can be when compared with the
243	// On-Demand price for that instance type before instances are launched. For
244	// example, if your maximum percentage is 20%, then the Spot price must be less
245	// than 20% of the current On-Demand price for that Amazon EC2 instance. You always
246	// pay the lowest (market) price and never more than your maximum percentage. If
247	// you leave this field empty, the default value is 100% of the On-Demand price.
248	// This parameter isn't applicable to jobs running on Fargate resources, and
249	// shouldn't be specified.
250	BidPercentage int32
251
252	// The desired number of Amazon EC2 vCPUS in the compute environment. AWS Batch
253	// modifies this value between the minimum and maximum values, based on job queue
254	// demand. This parameter isn't applicable to jobs running on Fargate resources,
255	// and shouldn't be specified.
256	DesiredvCpus int32
257
258	// Provides information used to select Amazon Machine Images (AMIs) for EC2
259	// instances in the compute environment. If Ec2Configuration isn't specified, the
260	// default is ECS_AL1. This parameter isn't applicable to jobs running on Fargate
261	// resources, and shouldn't be specified.
262	Ec2Configuration []Ec2Configuration
263
264	// The Amazon EC2 key pair that's used for instances launched in the compute
265	// environment. You can use this key pair to log in to your instances with SSH.
266	// This parameter isn't applicable to jobs running on Fargate resources, and
267	// shouldn't be specified.
268	Ec2KeyPair *string
269
270	// The Amazon Machine Image (AMI) ID used for instances launched in the compute
271	// environment. This parameter is overridden by the imageIdOverride member of the
272	// Ec2Configuration structure. This parameter isn't applicable to jobs running on
273	// Fargate resources, and shouldn't be specified. The AMI that you choose for a
274	// compute environment must match the architecture of the instance types that you
275	// intend to use for that compute environment. For example, if your compute
276	// environment uses A1 instance types, the compute resource AMI that you choose
277	// must support ARM instances. Amazon ECS vends both x86 and ARM versions of the
278	// Amazon ECS-optimized Amazon Linux 2 AMI. For more information, see Amazon
279	// ECS-optimized Amazon Linux 2 AMI
280	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#ecs-optimized-ami-linux-variants.html)
281	// in the Amazon Elastic Container Service Developer Guide.
282	//
283	// Deprecated: This field is deprecated, use ec2Configuration[].imageIdOverride
284	// instead.
285	ImageId *string
286
287	// The Amazon ECS instance profile applied to Amazon EC2 instances in a compute
288	// environment. You can specify the short name or full Amazon Resource Name (ARN)
289	// of an instance profile. For example,  ecsInstanceRole  or
290	// arn:aws:iam:::instance-profile/ecsInstanceRole . For more information, see
291	// Amazon ECS Instance Role
292	// (https://docs.aws.amazon.com/batch/latest/userguide/instance_IAM_role.html) in
293	// the AWS Batch User Guide. This parameter isn't applicable to jobs running on
294	// Fargate resources, and shouldn't be specified.
295	InstanceRole *string
296
297	// The instances types that can be launched. You can specify instance families to
298	// launch any instance type within those families (for example, c5 or p3), or you
299	// can specify specific sizes within a family (such as c5.8xlarge). You can also
300	// choose optimal to select instance types (from the C4, M4, and R4 instance
301	// families) on the fly that match the demand of your job queues. This parameter
302	// isn't applicable to jobs running on Fargate resources, and shouldn't be
303	// specified. When you create a compute environment, the instance types that you
304	// select for the compute environment must share the same architecture. For
305	// example, you can't mix x86 and ARM instances in the same compute environment.
306	// Currently, optimal uses instance types from the C4, M4, and R4 instance
307	// families. In Regions that don't have instance types from those instance
308	// families, instance types from the C5, M5. and R5 instance families are used.
309	InstanceTypes []string
310
311	// The launch template to use for your compute resources. Any other compute
312	// resource parameters that you specify in a CreateComputeEnvironment API operation
313	// override the same parameters in the launch template. You must specify either the
314	// launch template ID or launch template name in the request, but not both. For
315	// more information, see Launch Template Support
316	// (https://docs.aws.amazon.com/batch/latest/userguide/launch-templates.html) in
317	// the AWS Batch User Guide. This parameter isn't applicable to jobs running on
318	// Fargate resources, and shouldn't be specified.
319	LaunchTemplate *LaunchTemplateSpecification
320
321	// The minimum number of Amazon EC2 vCPUs that an environment should maintain (even
322	// if the compute environment is DISABLED). This parameter isn't applicable to jobs
323	// running on Fargate resources, and shouldn't be specified.
324	MinvCpus int32
325
326	// The Amazon EC2 placement group to associate with your compute resources. If you
327	// intend to submit multi-node parallel jobs to your compute environment, you
328	// should consider creating a cluster placement group and associate it with your
329	// compute resources. This keeps your multi-node parallel job on a logical grouping
330	// of instances within a single Availability Zone with high network flow potential.
331	// For more information, see Placement Groups
332	// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) in
333	// the Amazon EC2 User Guide for Linux Instances. This parameter isn't applicable
334	// to jobs running on Fargate resources, and shouldn't be specified.
335	PlacementGroup *string
336
337	// The Amazon EC2 security groups associated with instances launched in the compute
338	// environment. One or more security groups must be specified, either in
339	// securityGroupIds or using a launch template referenced in launchTemplate. This
340	// parameter is required for jobs running on Fargate resources and must contain at
341	// least one security group. (Fargate does not support launch templates.) If
342	// security groups are specified using both securityGroupIds and launchTemplate,
343	// the values in securityGroupIds will be used.
344	SecurityGroupIds []string
345
346	// The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to
347	// a SPOT compute environment. This role is required if the allocation strategy set
348	// to BEST_FIT or if the allocation strategy isn't specified. For more information,
349	// see Amazon EC2 Spot Fleet Role
350	// (https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html) in
351	// the AWS Batch User Guide. This parameter isn't applicable to jobs running on
352	// Fargate resources, and shouldn't be specified. To tag your Spot Instances on
353	// creation, the Spot Fleet IAM role specified here must use the newer
354	// AmazonEC2SpotFleetTaggingRole managed policy. The previously recommended
355	// AmazonEC2SpotFleetRole managed policy doesn't have the required permissions to
356	// tag Spot Instances. For more information, see Spot Instances not tagged on
357	// creation
358	// (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#spot-instance-no-tag)
359	// in the AWS Batch User Guide.
360	SpotIamFleetRole *string
361
362	// Key-value pair tags to be applied to EC2 resources that are launched in the
363	// compute environment. For AWS Batch, these take the form of "String1": "String2",
364	// where String1 is the tag key and String2 is the tag value−for example, { "Name":
365	// "AWS Batch Instance - C4OnDemand" }. This is helpful for recognizing your AWS
366	// Batch instances in the Amazon EC2 console. These tags can't be updated or
367	// removed after the compute environment has been created; any changes require
368	// creating a new compute environment and removing the old compute environment.
369	// These tags are not seen when using the AWS Batch ListTagsForResource API
370	// operation. This parameter isn't applicable to jobs running on Fargate resources,
371	// and shouldn't be specified.
372	Tags map[string]string
373}
374
375// An object representing the attributes of a compute environment that can be
376// updated. For more information, see Compute Environments
377// (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html)
378// in the AWS Batch User Guide.
379type ComputeResourceUpdate struct {
380
381	// The desired number of Amazon EC2 vCPUS in the compute environment. This
382	// parameter isn't applicable to jobs running on Fargate resources, and shouldn't
383	// be specified.
384	DesiredvCpus int32
385
386	// The maximum number of Amazon EC2 vCPUs that an environment can reach. With both
387	// BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, AWS
388	// Batch might need to go above maxvCpus to meet your capacity requirements. In
389	// this event, AWS Batch will never go above maxvCpus by more than a single
390	// instance (e.g., no more than a single instance from among those specified in
391	// your compute environment).
392	MaxvCpus int32
393
394	// The minimum number of Amazon EC2 vCPUs that an environment should maintain. This
395	// parameter isn't applicable to jobs running on Fargate resources, and shouldn't
396	// be specified.
397	MinvCpus int32
398
399	// The Amazon EC2 security groups associated with instances launched in the compute
400	// environment. This parameter is required for Fargate compute resources, where it
401	// can contain up to 5 security groups. This can't be specified for EC2 compute
402	// resources. Providing an empty list is handled as if this parameter wasn't
403	// specified and no change is made.
404	SecurityGroupIds []string
405
406	// The VPC subnets that the compute resources are launched into. This parameter is
407	// required for jobs running on Fargate compute resources, where it can contain up
408	// to 16 subnets. For more information, see VPCs and Subnets
409	// (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) in the
410	// Amazon VPC User Guide. This can't be specified for EC2 compute resources.
411	// Providing an empty list will be handled as if this parameter wasn't specified
412	// and no change is made.
413	Subnets []string
414}
415
416// An object representing the details of a container that's part of a job.
417type ContainerDetail struct {
418
419	// The command that's passed to the container.
420	Command []string
421
422	// The Amazon Resource Name (ARN) of the container instance that the container is
423	// running on.
424	ContainerInstanceArn *string
425
426	// The environment variables to pass to a container. Environment variables must not
427	// start with AWS_BATCH; this naming convention is reserved for variables that are
428	// set by the AWS Batch service.
429	Environment []KeyValuePair
430
431	// The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume.
432	// For more information, see AWS Batch execution IAM role
433	// (https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) in
434	// the AWS Batch User Guide.
435	ExecutionRoleArn *string
436
437	// The exit code to return upon completion.
438	ExitCode int32
439
440	// The platform configuration for jobs running on Fargate resources. Jobs running
441	// on EC2 resources must not specify this parameter.
442	FargatePlatformConfiguration *FargatePlatformConfiguration
443
444	// The image used to start the container.
445	Image *string
446
447	// The instance type of the underlying host infrastructure of a multi-node parallel
448	// job. This parameter isn't applicable to jobs running on Fargate resources.
449	InstanceType *string
450
451	// The Amazon Resource Name (ARN) associated with the job upon execution.
452	JobRoleArn *string
453
454	// Linux-specific modifications that are applied to the container, such as details
455	// for device mappings.
456	LinuxParameters *LinuxParameters
457
458	// The log configuration specification for the container. This parameter maps to
459	// LogConfig in the Create a container
460	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
461	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the
462	// --log-driver option to docker run
463	// (https://docs.docker.com/engine/reference/run/). By default, containers use the
464	// same logging driver that the Docker daemon uses. However the container might use
465	// a different logging driver than the Docker daemon by specifying a log driver
466	// with this parameter in the container definition. To use a different logging
467	// driver for a container, the log system must be configured properly on the
468	// container instance. Or, alternatively, it must be configured on a different log
469	// server for remote logging options. For more information on the options for
470	// different supported log drivers, see Configure logging drivers
471	// (https://docs.docker.com/engine/admin/logging/overview/) in the Docker
472	// documentation. AWS Batch currently supports a subset of the logging drivers
473	// available to the Docker daemon (shown in the LogConfiguration data type).
474	// Additional log drivers might be available in future releases of the Amazon ECS
475	// container agent. This parameter requires version 1.18 of the Docker Remote API
476	// or greater on your container instance. To check the Docker Remote API version on
477	// your container instance, log into your container instance and run the following
478	// command: sudo docker version | grep "Server API version" The Amazon ECS
479	// container agent running on a container instance must register the logging
480	// drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS
481	// environment variable before containers placed on that instance can use these log
482	// configuration options. For more information, see Amazon ECS Container Agent
483	// Configuration
484	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html)
485	// in the Amazon Elastic Container Service Developer Guide.
486	LogConfiguration *LogConfiguration
487
488	// The name of the CloudWatch Logs log stream associated with the container. The
489	// log group for AWS Batch jobs is /aws/batch/job. Each container attempt receives
490	// a log stream name when they reach the RUNNING status.
491	LogStreamName *string
492
493	// For jobs run on EC2 resources that didn't specify memory requirements using
494	// ResourceRequirement, the number of MiB of memory reserved for the job. For other
495	// jobs, including all run on Fargate resources, see resourceRequirements.
496	Memory int32
497
498	// The mount points for data volumes in your container.
499	MountPoints []MountPoint
500
501	// The network configuration for jobs running on Fargate resources. Jobs running on
502	// EC2 resources must not specify this parameter.
503	NetworkConfiguration *NetworkConfiguration
504
505	// The network interfaces associated with the job.
506	NetworkInterfaces []NetworkInterface
507
508	// When this parameter is true, the container is given elevated permissions on the
509	// host container instance (similar to the root user). The default value is false.
510	// This parameter isn't applicable to jobs running on Fargate resources and
511	// shouldn't be provided, or specified as false.
512	Privileged bool
513
514	// When this parameter is true, the container is given read-only access to its root
515	// file system. This parameter maps to ReadonlyRootfs in the Create a container
516	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
517	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the
518	// --read-only option to docker run
519	// (https://docs.docker.com/engine/reference/commandline/run/).
520	ReadonlyRootFilesystem bool
521
522	// A short (255 max characters) human-readable string to provide additional details
523	// about a running or stopped container.
524	Reason *string
525
526	// The type and amount of resources to assign to a container. The supported
527	// resources include GPU, MEMORY, and VCPU.
528	ResourceRequirements []ResourceRequirement
529
530	// The secrets to pass to the container. For more information, see Specifying
531	// sensitive data
532	// (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
533	// in the AWS Batch User Guide.
534	Secrets []Secret
535
536	// The Amazon Resource Name (ARN) of the Amazon ECS task that's associated with the
537	// container job. Each container attempt receives a task ARN when they reach the
538	// STARTING status.
539	TaskArn *string
540
541	// A list of ulimit values to set in the container. This parameter maps to Ulimits
542	// in the Create a container
543	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
544	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the --ulimit
545	// option to docker run (https://docs.docker.com/engine/reference/run/). This
546	// parameter isn't applicable to jobs running on Fargate resources.
547	Ulimits []Ulimit
548
549	// The user name to use inside the container. This parameter maps to User in the
550	// Create a container
551	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
552	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the --user
553	// option to docker run (https://docs.docker.com/engine/reference/run/).
554	User *string
555
556	// The number of vCPUs reserved for the container. Jobs running on EC2 resources
557	// can specify the vCPU requirement for the job using resourceRequirements but the
558	// vCPU requirements can't be specified both here and in the resourceRequirement
559	// object. This parameter maps to CpuShares in the Create a container
560	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
561	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the
562	// --cpu-shares option to docker run
563	// (https://docs.docker.com/engine/reference/run/). Each vCPU is equivalent to
564	// 1,024 CPU shares. You must specify at least one vCPU. This is required but can
565	// be specified in several places. It must be specified for each node at least
566	// once. This parameter isn't applicable to jobs running on Fargate resources. Jobs
567	// running on Fargate resources must specify the vCPU requirement for the job using
568	// resourceRequirements.
569	Vcpus int32
570
571	// A list of volumes associated with the job.
572	Volumes []Volume
573}
574
575// The overrides that should be sent to a container.
576type ContainerOverrides struct {
577
578	// The command to send to the container that overrides the default command from the
579	// Docker image or the job definition.
580	Command []string
581
582	// The environment variables to send to the container. You can add new environment
583	// variables, which are added to the container at launch, or you can override the
584	// existing environment variables from the Docker image or the job definition.
585	// Environment variables must not start with AWS_BATCH; this naming convention is
586	// reserved for variables that are set by the AWS Batch service.
587	Environment []KeyValuePair
588
589	// The instance type to use for a multi-node parallel job. This parameter isn't
590	// applicable to single-node container jobs or for jobs running on Fargate
591	// resources and shouldn't be provided.
592	InstanceType *string
593
594	// This parameter is deprecated and not supported for jobs run on Fargate
595	// resources, use ResourceRequirement. For jobs run on EC2 resource, the number of
596	// MiB of memory reserved for the job. This value overrides the value set in the
597	// job definition.
598	//
599	// Deprecated: This field is deprecated, use resourceRequirements instead.
600	Memory int32
601
602	// The type and amount of resources to assign to a container. This overrides the
603	// settings in the job definition. The supported resources include GPU, MEMORY, and
604	// VCPU.
605	ResourceRequirements []ResourceRequirement
606
607	// This parameter is deprecated and not supported for jobs run on Fargate
608	// resources, see resourceRequirement. For jobs run on EC2 resources, the number of
609	// vCPUs to reserve for the container. This value overrides the value set in the
610	// job definition. Jobs run on EC2 resources can specify the vCPU requirement using
611	// resourceRequirement but the vCPU requirements can't be specified both here and
612	// in resourceRequirement. This parameter maps to CpuShares in the Create a
613	// container (https://docs.docker.com/engine/api/v1.23/#create-a-container) section
614	// of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the
615	// --cpu-shares option to docker run
616	// (https://docs.docker.com/engine/reference/run/). Each vCPU is equivalent to
617	// 1,024 CPU shares. You must specify at least one vCPU. This parameter isn't
618	// applicable to jobs running on Fargate resources and shouldn't be provided. Jobs
619	// running on Fargate resources must specify the vCPU requirement for the job using
620	// resourceRequirements.
621	//
622	// Deprecated: This field is deprecated, use resourceRequirements instead.
623	Vcpus int32
624}
625
626// Container properties are used in job definitions to describe the container
627// that's launched as part of a job.
628type ContainerProperties struct {
629
630	// The command that's passed to the container. This parameter maps to Cmd in the
631	// Create a container
632	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
633	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the COMMAND
634	// parameter to docker run (https://docs.docker.com/engine/reference/run/). For
635	// more information, see https://docs.docker.com/engine/reference/builder/#cmd
636	// (https://docs.docker.com/engine/reference/builder/#cmd).
637	Command []string
638
639	// The environment variables to pass to a container. This parameter maps to Env in
640	// the Create a container
641	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
642	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the --env
643	// option to docker run (https://docs.docker.com/engine/reference/run/). We don't
644	// recommend using plaintext environment variables for sensitive information, such
645	// as credential data. Environment variables must not start with AWS_BATCH; this
646	// naming convention is reserved for variables that are set by the AWS Batch
647	// service.
648	Environment []KeyValuePair
649
650	// The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume.
651	// Jobs running on Fargate resources must provide an execution role. For more
652	// information, see AWS Batch execution IAM role
653	// (https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) in
654	// the AWS Batch User Guide.
655	ExecutionRoleArn *string
656
657	// The platform configuration for jobs running on Fargate resources. Jobs running
658	// on EC2 resources must not specify this parameter.
659	FargatePlatformConfiguration *FargatePlatformConfiguration
660
661	// The image used to start a container. This string is passed directly to the
662	// Docker daemon. Images in the Docker Hub registry are available by default. Other
663	// repositories are specified with  repository-url/image:tag . Up to 255 letters
664	// (uppercase and lowercase), numbers, hyphens, underscores, colons, periods,
665	// forward slashes, and number signs are allowed. This parameter maps to Image in
666	// the Create a container
667	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
668	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the IMAGE
669	// parameter of docker run (https://docs.docker.com/engine/reference/run/). Docker
670	// image architecture must match the processor architecture of the compute
671	// resources that they're scheduled on. For example, ARM-based Docker images can
672	// only run on ARM-based compute resources.
673	//
674	// * Images in Amazon ECR repositories
675	// use the full registry and repository URI (for example,
676	// 012345678910.dkr.ecr..amazonaws.com/).
677	//
678	// * Images in official repositories on
679	// Docker Hub use a single name (for example, ubuntu or mongo).
680	//
681	// * Images in other
682	// repositories on Docker Hub are qualified with an organization name (for example,
683	// amazon/amazon-ecs-agent).
684	//
685	// * Images in other online repositories are qualified
686	// further by a domain name (for example, quay.io/assemblyline/ubuntu).
687	Image *string
688
689	// The instance type to use for a multi-node parallel job. All node groups in a
690	// multi-node parallel job must use the same instance type. This parameter isn't
691	// applicable to single-node container jobs or for jobs running on Fargate
692	// resources and shouldn't be provided.
693	InstanceType *string
694
695	// The Amazon Resource Name (ARN) of the IAM role that the container can assume for
696	// AWS permissions. For more information, see IAM Roles for Tasks
697	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html)
698	// in the Amazon Elastic Container Service Developer Guide.
699	JobRoleArn *string
700
701	// Linux-specific modifications that are applied to the container, such as details
702	// for device mappings.
703	LinuxParameters *LinuxParameters
704
705	// The log configuration specification for the container. This parameter maps to
706	// LogConfig in the Create a container
707	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
708	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the
709	// --log-driver option to docker run
710	// (https://docs.docker.com/engine/reference/run/). By default, containers use the
711	// same logging driver that the Docker daemon uses. However the container might use
712	// a different logging driver than the Docker daemon by specifying a log driver
713	// with this parameter in the container definition. To use a different logging
714	// driver for a container, the log system must be configured properly on the
715	// container instance (or on a different log server for remote logging options).
716	// For more information on the options for different supported log drivers, see
717	// Configure logging drivers
718	// (https://docs.docker.com/engine/admin/logging/overview/) in the Docker
719	// documentation. AWS Batch currently supports a subset of the logging drivers
720	// available to the Docker daemon (shown in the LogConfiguration data type). This
721	// parameter requires version 1.18 of the Docker Remote API or greater on your
722	// container instance. To check the Docker Remote API version on your container
723	// instance, log into your container instance and run the following command: sudo
724	// docker version | grep "Server API version" The Amazon ECS container agent
725	// running on a container instance must register the logging drivers available on
726	// that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before
727	// containers placed on that instance can use these log configuration options. For
728	// more information, see Amazon ECS Container Agent Configuration
729	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html)
730	// in the Amazon Elastic Container Service Developer Guide.
731	LogConfiguration *LogConfiguration
732
733	// This parameter is deprecated and not supported for jobs run on Fargate
734	// resources, use ResourceRequirement. For jobs run on EC2 resources can specify
735	// the memory requirement using the ResourceRequirement structure. The hard limit
736	// (in MiB) of memory to present to the container. If your container attempts to
737	// exceed the memory specified here, the container is killed. This parameter maps
738	// to Memory in the Create a container
739	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
740	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the --memory
741	// option to docker run (https://docs.docker.com/engine/reference/run/). You must
742	// specify at least 4 MiB of memory for a job. This is required but can be
743	// specified in several places; it must be specified for each node at least once.
744	// If you're trying to maximize your resource utilization by providing your jobs as
745	// much memory as possible for a particular instance type, see Memory Management
746	// (https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in
747	// the AWS Batch User Guide.
748	//
749	// Deprecated: This field is deprecated, use resourceRequirements instead.
750	Memory int32
751
752	// The mount points for data volumes in your container. This parameter maps to
753	// Volumes in the Create a container
754	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
755	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the --volume
756	// option to docker run (https://docs.docker.com/engine/reference/run/).
757	MountPoints []MountPoint
758
759	// The network configuration for jobs running on Fargate resources. Jobs running on
760	// EC2 resources must not specify this parameter.
761	NetworkConfiguration *NetworkConfiguration
762
763	// When this parameter is true, the container is given elevated permissions on the
764	// host container instance (similar to the root user). This parameter maps to
765	// Privileged in the Create a container
766	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
767	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the
768	// --privileged option to docker run
769	// (https://docs.docker.com/engine/reference/run/). The default value is false.
770	// This parameter isn't applicable to jobs running on Fargate resources and
771	// shouldn't be provided, or specified as false.
772	Privileged bool
773
774	// When this parameter is true, the container is given read-only access to its root
775	// file system. This parameter maps to ReadonlyRootfs in the Create a container
776	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
777	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the
778	// --read-only option to docker run.
779	ReadonlyRootFilesystem bool
780
781	// The type and amount of resources to assign to a container. The supported
782	// resources include GPU, MEMORY, and VCPU.
783	ResourceRequirements []ResourceRequirement
784
785	// The secrets for the container. For more information, see Specifying sensitive
786	// data
787	// (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
788	// in the AWS Batch User Guide.
789	Secrets []Secret
790
791	// A list of ulimits to set in the container. This parameter maps to Ulimits in the
792	// Create a container
793	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
794	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the --ulimit
795	// option to docker run (https://docs.docker.com/engine/reference/run/). This
796	// parameter isn't applicable to jobs running on Fargate resources and shouldn't be
797	// provided.
798	Ulimits []Ulimit
799
800	// The user name to use inside the container. This parameter maps to User in the
801	// Create a container
802	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
803	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the --user
804	// option to docker run (https://docs.docker.com/engine/reference/run/).
805	User *string
806
807	// This parameter is deprecated and not supported for jobs run on Fargate
808	// resources, see resourceRequirement. The number of vCPUs reserved for the
809	// container. Jobs running on EC2 resources can specify the vCPU requirement for
810	// the job using resourceRequirements but the vCPU requirements can't be specified
811	// both here and in the resourceRequirement structure. This parameter maps to
812	// CpuShares in the Create a container
813	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
814	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the
815	// --cpu-shares option to docker run
816	// (https://docs.docker.com/engine/reference/run/). Each vCPU is equivalent to
817	// 1,024 CPU shares. You must specify at least one vCPU. This is required but can
818	// be specified in several places. It must be specified for each node at least
819	// once. This parameter isn't applicable to jobs running on Fargate resources and
820	// shouldn't be provided. Jobs running on Fargate resources must specify the vCPU
821	// requirement for the job using resourceRequirements.
822	//
823	// Deprecated: This field is deprecated, use resourceRequirements instead.
824	Vcpus int32
825
826	// A list of data volumes used in a job.
827	Volumes []Volume
828}
829
830// An object representing summary details of a container within a job.
831type ContainerSummary struct {
832
833	// The exit code to return upon completion.
834	ExitCode int32
835
836	// A short (255 max characters) human-readable string to provide additional details
837	// about a running or stopped container.
838	Reason *string
839}
840
841// An object representing a container instance host device. This object isn't
842// applicable to jobs running on Fargate resources and shouldn't be provided.
843type Device struct {
844
845	// The path for the device on the host container instance.
846	//
847	// This member is required.
848	HostPath *string
849
850	// The path inside the container used to expose the host device. By default the
851	// hostPath value is used.
852	ContainerPath *string
853
854	// The explicit permissions to provide to the container for the device. By default,
855	// the container has permissions for read, write, and mknod for the device.
856	Permissions []DeviceCgroupPermission
857}
858
859// Provides information used to select Amazon Machine Images (AMIs) for instances
860// in the compute environment. If the Ec2Configuration isn't specified, the default
861// is ECS_AL1. This object isn't applicable to jobs running on Fargate resources.
862type Ec2Configuration struct {
863
864	// The image type to match with the instance type to select an AMI. If the
865	// imageIdOverride parameter isn't specified, then a recent Amazon ECS-optimized
866	// AMI
867	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html)
868	// is used. ECS_AL2 Amazon Linux 2
869	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami)
870	// Default for all AWS Graviton-based instance families (for example, C6g, M6g,
871	// R6g, and T4g) and can be used for all non-GPU instance types. ECS_AL2_NVIDIA
872	// Amazon Linux 2 (GPU)
873	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#gpuami)−Default
874	// for all GPU instance families (for example P4 and G4) and can be used for all
875	// non-AWS Graviton-based instance types. ECS_AL1 Amazon Linux
876	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#alami)−Default
877	// for all non-GPU, non-AWS Graviton instance families. Amazon Linux is reaching
878	// the end-of-life of standard support. For more information, see Amazon Linux AMI
879	// (http://aws.amazon.com/amazon-linux-ami/).
880	//
881	// This member is required.
882	ImageType *string
883
884	// The AMI ID used for instances launched in the compute environment that match the
885	// image type. This setting overrides the imageId set in the computeResource
886	// object.
887	ImageIdOverride *string
888}
889
890// Specifies a set of conditions to be met, and an action to take (RETRY or EXIT)
891// if all conditions are met.
892type EvaluateOnExit struct {
893
894	// Specifies the action to take if all of the specified conditions (onStatusReason,
895	// onReason, and onExitCode) are met. The values are not case sensitive.
896	//
897	// This member is required.
898	Action RetryAction
899
900	// Contains a glob pattern to match against the decimal representation of the
901	// ExitCode returned for a job. The patten can be up to 512 characters long, can
902	// contain only numbers, and can optionally end with an asterisk (*) so that only
903	// the start of the string needs to be an exact match.
904	OnExitCode *string
905
906	// Contains a glob pattern to match against the Reason returned for a job. The
907	// patten can be up to 512 characters long, can contain letters, numbers, periods
908	// (.), colons (:), and white space (spaces, tabs), and can optionally end with an
909	// asterisk (*) so that only the start of the string needs to be an exact match.
910	OnReason *string
911
912	// Contains a glob pattern to match against the StatusReason returned for a job.
913	// The patten can be up to 512 characters long, can contain letters, numbers,
914	// periods (.), colons (:), and white space (spaces, tabs). and can optionally end
915	// with an asterisk (*) so that only the start of the string needs to be an exact
916	// match.
917	OnStatusReason *string
918}
919
920// The platform configuration for jobs running on Fargate resources. Jobs running
921// on EC2 resources must not specify this parameter.
922type FargatePlatformConfiguration struct {
923
924	// The AWS Fargate platform version on which the jobs are running. A platform
925	// version is specified only for jobs running on Fargate resources. If one isn't
926	// specified, the LATEST platform version is used by default. This will use a
927	// recent, approved version of the AWS Fargate platform for compute resources. For
928	// more information, see AWS Fargate platform versions
929	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html)
930	// in the Amazon Elastic Container Service Developer Guide.
931	PlatformVersion *string
932}
933
934// Determine whether your data volume persists on the host container instance and
935// where it is stored. If this parameter is empty, then the Docker daemon assigns a
936// host path for your data volume, but the data isn't guaranteed to persist after
937// the containers associated with it stop running.
938type Host struct {
939
940	// The path on the host container instance that's presented to the container. If
941	// this parameter is empty, then the Docker daemon has assigned a host path for
942	// you. If this parameter contains a file location, then the data volume persists
943	// at the specified location on the host container instance until you delete it
944	// manually. If the source path location does not exist on the host container
945	// instance, the Docker daemon creates it. If the location does exist, the contents
946	// of the source path folder are exported. This parameter isn't applicable to jobs
947	// running on Fargate resources and shouldn't be provided.
948	SourcePath *string
949}
950
951// An object representing an AWS Batch job definition.
952type JobDefinition struct {
953
954	// The Amazon Resource Name (ARN) for the job definition.
955	//
956	// This member is required.
957	JobDefinitionArn *string
958
959	// The name of the job definition.
960	//
961	// This member is required.
962	JobDefinitionName *string
963
964	// The revision of the job definition.
965	//
966	// This member is required.
967	Revision int32
968
969	// The type of job definition. If the job is run on Fargate resources, then
970	// multinode isn't supported. For more information about multi-node parallel jobs,
971	// see Creating a multi-node parallel job definition
972	// (https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html) in
973	// the AWS Batch User Guide.
974	//
975	// This member is required.
976	Type *string
977
978	// An object with various properties specific to container-based jobs.
979	ContainerProperties *ContainerProperties
980
981	// An object with various properties specific to multi-node parallel jobs. If the
982	// job runs on Fargate resources, then you must not specify nodeProperties; use
983	// containerProperties instead.
984	NodeProperties *NodeProperties
985
986	// Default parameters or parameter substitution placeholders that are set in the
987	// job definition. Parameters are specified as a key-value pair mapping. Parameters
988	// in a SubmitJob request override any corresponding parameter defaults from the
989	// job definition. For more information about specifying parameters, see Job
990	// Definition Parameters
991	// (https://docs.aws.amazon.com/batch/latest/userguide/job_definition_parameters.html)
992	// in the AWS Batch User Guide.
993	Parameters map[string]string
994
995	// The platform capabilities required by the job definition. If no value is
996	// specified, it defaults to EC2. Jobs run on Fargate resources specify FARGATE.
997	PlatformCapabilities []PlatformCapability
998
999	// Specifies whether to propagate the tags from the job or job definition to the
1000	// corresponding Amazon ECS task. If no value is specified, the tags aren't
1001	// propagated. Tags can only be propagated to the tasks during task creation. For
1002	// tags with the same name, job tags are given priority over job definitions tags.
1003	// If the total number of combined tags from the job and job definition is over 50,
1004	// the job is moved to the FAILED state.
1005	PropagateTags bool
1006
1007	// The retry strategy to use for failed jobs that are submitted with this job
1008	// definition.
1009	RetryStrategy *RetryStrategy
1010
1011	// The status of the job definition.
1012	Status *string
1013
1014	// The tags applied to the job definition.
1015	Tags map[string]string
1016
1017	// The timeout configuration for jobs that are submitted with this job definition.
1018	// You can specify a timeout duration after which AWS Batch terminates your jobs if
1019	// they haven't finished.
1020	Timeout *JobTimeout
1021}
1022
1023// An object representing an AWS Batch job dependency.
1024type JobDependency struct {
1025
1026	// The job ID of the AWS Batch job associated with this dependency.
1027	JobId *string
1028
1029	// The type of the job dependency.
1030	Type ArrayJobDependency
1031}
1032
1033// An object representing an AWS Batch job.
1034type JobDetail struct {
1035
1036	// The job definition that's used by this job.
1037	//
1038	// This member is required.
1039	JobDefinition *string
1040
1041	// The ID for the job.
1042	//
1043	// This member is required.
1044	JobId *string
1045
1046	// The name of the job.
1047	//
1048	// This member is required.
1049	JobName *string
1050
1051	// The Amazon Resource Name (ARN) of the job queue that the job is associated with.
1052	//
1053	// This member is required.
1054	JobQueue *string
1055
1056	// The Unix timestamp (in milliseconds) for when the job was started (when the job
1057	// transitioned from the STARTING state to the RUNNING state). This parameter isn't
1058	// provided for child jobs of array jobs or multi-node parallel jobs.
1059	//
1060	// This member is required.
1061	StartedAt int64
1062
1063	// The current status for the job. If your jobs don't progress to STARTING, see
1064	// Jobs Stuck in RUNNABLE Status
1065	// (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#job_stuck_in_runnable)
1066	// in the troubleshooting section of the AWS Batch User Guide.
1067	//
1068	// This member is required.
1069	Status JobStatus
1070
1071	// The array properties of the job, if it is an array job.
1072	ArrayProperties *ArrayPropertiesDetail
1073
1074	// A list of job attempts associated with this job.
1075	Attempts []AttemptDetail
1076
1077	// An object representing the details of the container that's associated with the
1078	// job.
1079	Container *ContainerDetail
1080
1081	// The Unix timestamp (in milliseconds) for when the job was created. For non-array
1082	// jobs and parent array jobs, this is when the job entered the SUBMITTED state (at
1083	// the time SubmitJob was called). For array child jobs, this is when the child job
1084	// was spawned by its parent and entered the PENDING state.
1085	CreatedAt int64
1086
1087	// A list of job IDs that this job depends on.
1088	DependsOn []JobDependency
1089
1090	// The Amazon Resource Name (ARN) of the job.
1091	JobArn *string
1092
1093	// An object representing the details of a node that's associated with a multi-node
1094	// parallel job.
1095	NodeDetails *NodeDetails
1096
1097	// An object representing the node properties of a multi-node parallel job. This
1098	// isn't applicable to jobs running on Fargate resources.
1099	NodeProperties *NodeProperties
1100
1101	// Additional parameters passed to the job that replace parameter substitution
1102	// placeholders or override any corresponding parameter defaults from the job
1103	// definition.
1104	Parameters map[string]string
1105
1106	// The platform capabilities required by the job definition. If no value is
1107	// specified, it defaults to EC2. Jobs run on Fargate resources specify FARGATE.
1108	PlatformCapabilities []PlatformCapability
1109
1110	// Specifies whether to propagate the tags from the job or job definition to the
1111	// corresponding Amazon ECS task. If no value is specified, the tags are not
1112	// propagated. Tags can only be propagated to the tasks during task creation. For
1113	// tags with the same name, job tags are given priority over job definitions tags.
1114	// If the total number of combined tags from the job and job definition is over 50,
1115	// the job is moved to the FAILED state.
1116	PropagateTags bool
1117
1118	// The retry strategy to use for this job if an attempt fails.
1119	RetryStrategy *RetryStrategy
1120
1121	// A short, human-readable string to provide additional details about the current
1122	// status of the job.
1123	StatusReason *string
1124
1125	// The Unix timestamp (in milliseconds) for when the job was stopped (when the job
1126	// transitioned from the RUNNING state to a terminal state, such as SUCCEEDED or
1127	// FAILED).
1128	StoppedAt int64
1129
1130	// The tags applied to the job.
1131	Tags map[string]string
1132
1133	// The timeout configuration for the job.
1134	Timeout *JobTimeout
1135}
1136
1137// An object representing the details of an AWS Batch job queue.
1138type JobQueueDetail struct {
1139
1140	// The compute environments that are attached to the job queue and the order that
1141	// job placement is preferred. Compute environments are selected for job placement
1142	// in ascending order.
1143	//
1144	// This member is required.
1145	ComputeEnvironmentOrder []ComputeEnvironmentOrder
1146
1147	// The Amazon Resource Name (ARN) of the job queue.
1148	//
1149	// This member is required.
1150	JobQueueArn *string
1151
1152	// The name of the job queue.
1153	//
1154	// This member is required.
1155	JobQueueName *string
1156
1157	// The priority of the job queue. Job queues with a higher priority (or a higher
1158	// integer value for the priority parameter) are evaluated first when associated
1159	// with the same compute environment. Priority is determined in descending order,
1160	// for example, a job queue with a priority value of 10 is given scheduling
1161	// preference over a job queue with a priority value of 1. All of the compute
1162	// environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or
1163	// FARGATE_SPOT); EC2 and Fargate compute environments cannot be mixed.
1164	//
1165	// This member is required.
1166	Priority int32
1167
1168	// Describes the ability of the queue to accept new jobs. If the job queue state is
1169	// ENABLED, it's able to accept jobs. If the job queue state is DISABLED, new jobs
1170	// can't be added to the queue, but jobs already in the queue can finish.
1171	//
1172	// This member is required.
1173	State JQState
1174
1175	// The status of the job queue (for example, CREATING or VALID).
1176	Status JQStatus
1177
1178	// A short, human-readable string to provide additional details about the current
1179	// status of the job queue.
1180	StatusReason *string
1181
1182	// The tags applied to the job queue. For more information, see Tagging your AWS
1183	// Batch resources
1184	// (https://docs.aws.amazon.com/batch/latest/userguide/using-tags.html) in AWS
1185	// Batch User Guide.
1186	Tags map[string]string
1187}
1188
1189// An object representing summary details of a job.
1190type JobSummary struct {
1191
1192	// The ID of the job.
1193	//
1194	// This member is required.
1195	JobId *string
1196
1197	// The name of the job.
1198	//
1199	// This member is required.
1200	JobName *string
1201
1202	// The array properties of the job, if it is an array job.
1203	ArrayProperties *ArrayPropertiesSummary
1204
1205	// An object representing the details of the container that's associated with the
1206	// job.
1207	Container *ContainerSummary
1208
1209	// The Unix timestamp for when the job was created. For non-array jobs and parent
1210	// array jobs, this is when the job entered the SUBMITTED state (at the time
1211	// SubmitJob was called). For array child jobs, this is when the child job was
1212	// spawned by its parent and entered the PENDING state.
1213	CreatedAt int64
1214
1215	// The Amazon Resource Name (ARN) of the job.
1216	JobArn *string
1217
1218	// The node properties for a single node in a job summary list. This isn't
1219	// applicable to jobs running on Fargate resources.
1220	NodeProperties *NodePropertiesSummary
1221
1222	// The Unix timestamp for when the job was started (when the job transitioned from
1223	// the STARTING state to the RUNNING state).
1224	StartedAt int64
1225
1226	// The current status for the job.
1227	Status JobStatus
1228
1229	// A short, human-readable string to provide additional details about the current
1230	// status of the job.
1231	StatusReason *string
1232
1233	// The Unix timestamp for when the job was stopped (when the job transitioned from
1234	// the RUNNING state to a terminal state, such as SUCCEEDED or FAILED).
1235	StoppedAt int64
1236}
1237
1238// An object representing a job timeout configuration.
1239type JobTimeout struct {
1240
1241	// The time duration in seconds (measured from the job attempt's startedAt
1242	// timestamp) after which AWS Batch terminates your jobs if they have not finished.
1243	// The minimum value for the timeout is 60 seconds.
1244	AttemptDurationSeconds int32
1245}
1246
1247// A key-value pair object.
1248type KeyValuePair struct {
1249
1250	// The name of the key-value pair. For environment variables, this is the name of
1251	// the environment variable.
1252	Name *string
1253
1254	// The value of the key-value pair. For environment variables, this is the value of
1255	// the environment variable.
1256	Value *string
1257}
1258
1259// An object representing a launch template associated with a compute resource. You
1260// must specify either the launch template ID or launch template name in the
1261// request, but not both. If security groups are specified using both the
1262// securityGroupIds parameter of CreateComputeEnvironment and the launch template,
1263// the values in the securityGroupIds parameter of CreateComputeEnvironment will be
1264// used. This object isn't applicable to jobs running on Fargate resources.
1265type LaunchTemplateSpecification struct {
1266
1267	// The ID of the launch template.
1268	LaunchTemplateId *string
1269
1270	// The name of the launch template.
1271	LaunchTemplateName *string
1272
1273	// The version number of the launch template, $Latest, or $Default. If the value is
1274	// $Latest, the latest version of the launch template is used. If the value is
1275	// $Default, the default version of the launch template is used. Default: $Default.
1276	Version *string
1277}
1278
1279// Linux-specific modifications that are applied to the container, such as details
1280// for device mappings.
1281type LinuxParameters struct {
1282
1283	// Any host devices to expose to the container. This parameter maps to Devices in
1284	// the Create a container
1285	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
1286	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the --device
1287	// option to docker run (https://docs.docker.com/engine/reference/run/). This
1288	// parameter isn't applicable to jobs running on Fargate resources and shouldn't be
1289	// provided.
1290	Devices []Device
1291
1292	// If true, run an init process inside the container that forwards signals and
1293	// reaps processes. This parameter maps to the --init option to docker run
1294	// (https://docs.docker.com/engine/reference/run/). This parameter requires version
1295	// 1.25 of the Docker Remote API or greater on your container instance. To check
1296	// the Docker Remote API version on your container instance, log into your
1297	// container instance and run the following command: sudo docker version | grep
1298	// "Server API version"
1299	InitProcessEnabled bool
1300
1301	// The total amount of swap memory (in MiB) a container can use. This parameter is
1302	// translated to the --memory-swap option to docker run
1303	// (https://docs.docker.com/engine/reference/run/) where the value is the sum of
1304	// the container memory plus the maxSwap value. For more information, see
1305	// --memory-swap details
1306	// (https://docs.docker.com/config/containers/resource_constraints/#--memory-swap-details)
1307	// in the Docker documentation. If a maxSwap value of 0 is specified, the container
1308	// doesn't use swap. Accepted values are 0 or any positive integer. If the maxSwap
1309	// parameter is omitted, the container doesn't use the swap configuration for the
1310	// container instance it is running on. A maxSwap value must be set for the
1311	// swappiness parameter to be used. This parameter isn't applicable to jobs running
1312	// on Fargate resources and shouldn't be provided.
1313	MaxSwap int32
1314
1315	// The value for the size (in MiB) of the /dev/shm volume. This parameter maps to
1316	// the --shm-size option to docker run
1317	// (https://docs.docker.com/engine/reference/run/). This parameter isn't applicable
1318	// to jobs running on Fargate resources and shouldn't be provided.
1319	SharedMemorySize int32
1320
1321	// This allows you to tune a container's memory swappiness behavior. A swappiness
1322	// value of 0 causes swapping not to happen unless absolutely necessary. A
1323	// swappiness value of 100 causes pages to be swapped very aggressively. Accepted
1324	// values are whole numbers between 0 and 100. If the swappiness parameter isn't
1325	// specified, a default value of 60 is used. If a value isn't specified for maxSwap
1326	// then this parameter is ignored. If maxSwap is set to 0, the container doesn't
1327	// use swap. This parameter maps to the --memory-swappiness option to docker run
1328	// (https://docs.docker.com/engine/reference/run/). Consider the following when you
1329	// use a per-container swap configuration.
1330	//
1331	// * Swap space must be enabled and
1332	// allocated on the container instance for the containers to use. The Amazon ECS
1333	// optimized AMIs don't have swap enabled by default. You must enable swap on the
1334	// instance to use this feature. For more information, see Instance Store Swap
1335	// Volumes
1336	// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-store-swap-volumes.html)
1337	// in the Amazon EC2 User Guide for Linux Instances or How do I allocate memory to
1338	// work as swap space in an Amazon EC2 instance by using a swap file?
1339	// (http://aws.amazon.com/premiumsupport/knowledge-center/ec2-memory-swap-file/)
1340	//
1341	// *
1342	// The swap space parameters are only supported for job definitions using EC2
1343	// resources.
1344	//
1345	// * If the maxSwap and swappiness parameters are omitted from a job
1346	// definition, each container will have a default swappiness value of 60 and the
1347	// total swap usage will be limited to two times the memory reservation of the
1348	// container.
1349	//
1350	// This parameter isn't applicable to jobs running on Fargate resources
1351	// and shouldn't be provided.
1352	Swappiness int32
1353
1354	// The container path, mount options, and size (in MiB) of the tmpfs mount. This
1355	// parameter maps to the --tmpfs option to docker run
1356	// (https://docs.docker.com/engine/reference/run/). This parameter isn't applicable
1357	// to jobs running on Fargate resources and shouldn't be provided.
1358	Tmpfs []Tmpfs
1359}
1360
1361// Log configuration options to send to a custom log driver for the container.
1362type LogConfiguration struct {
1363
1364	// The log driver to use for the container. The valid values listed for this
1365	// parameter are log drivers that the Amazon ECS container agent can communicate
1366	// with by default. The supported log drivers are awslogs, fluentd, gelf,
1367	// json-file, journald, logentries, syslog, and splunk. Jobs running on Fargate
1368	// resources are restricted to the awslogs and splunk log drivers. awslogs
1369	// Specifies the Amazon CloudWatch Logs logging driver. For more information, see
1370	// Using the awslogs Log Driver
1371	// (https://docs.aws.amazon.com/batch/latest/userguide/using_awslogs.html) in the
1372	// AWS Batch User Guide and Amazon CloudWatch Logs logging driver
1373	// (https://docs.docker.com/config/containers/logging/awslogs/) in the Docker
1374	// documentation. fluentd Specifies the Fluentd logging driver. For more
1375	// information, including usage and options, see Fluentd logging driver
1376	// (https://docs.docker.com/config/containers/logging/fluentd/) in the Docker
1377	// documentation. gelf Specifies the Graylog Extended Format (GELF) logging driver.
1378	// For more information, including usage and options, see Graylog Extended Format
1379	// logging driver (https://docs.docker.com/config/containers/logging/gelf/) in the
1380	// Docker documentation. journald Specifies the journald logging driver. For more
1381	// information, including usage and options, see Journald logging driver
1382	// (https://docs.docker.com/config/containers/logging/journald/) in the Docker
1383	// documentation. json-file Specifies the JSON file logging driver. For more
1384	// information, including usage and options, see JSON File logging driver
1385	// (https://docs.docker.com/config/containers/logging/json-file/) in the Docker
1386	// documentation. splunk Specifies the Splunk logging driver. For more information,
1387	// including usage and options, see Splunk logging driver
1388	// (https://docs.docker.com/config/containers/logging/splunk/) in the Docker
1389	// documentation. syslog Specifies the syslog logging driver. For more information,
1390	// including usage and options, see Syslog logging driver
1391	// (https://docs.docker.com/config/containers/logging/syslog/) in the Docker
1392	// documentation. If you have a custom driver that'sn't listed earlier that you
1393	// want to work with the Amazon ECS container agent, you can fork the Amazon ECS
1394	// container agent project that's available on GitHub
1395	// (https://github.com/aws/amazon-ecs-agent) and customize it to work with that
1396	// driver. We encourage you to submit pull requests for changes that you want to
1397	// have included. However, Amazon Web Services doesn't currently support running
1398	// modified copies of this software. This parameter requires version 1.18 of the
1399	// Docker Remote API or greater on your container instance. To check the Docker
1400	// Remote API version on your container instance, log into your container instance
1401	// and run the following command: sudo docker version | grep "Server API version"
1402	//
1403	// This member is required.
1404	LogDriver LogDriver
1405
1406	// The configuration options to send to the log driver. This parameter requires
1407	// version 1.19 of the Docker Remote API or greater on your container instance. To
1408	// check the Docker Remote API version on your container instance, log into your
1409	// container instance and run the following command: sudo docker version | grep
1410	// "Server API version"
1411	Options map[string]string
1412
1413	// The secrets to pass to the log configuration. For more information, see
1414	// Specifying Sensitive Data
1415	// (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
1416	// in the AWS Batch User Guide.
1417	SecretOptions []Secret
1418}
1419
1420// Details on a Docker volume mount point that's used in a job's container
1421// properties. This parameter maps to Volumes in the Create a container
1422// (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.19/#create-a-container)
1423// section of the Docker Remote API and the --volume option to docker run.
1424type MountPoint struct {
1425
1426	// The path on the container where the host volume is mounted.
1427	ContainerPath *string
1428
1429	// If this value is true, the container has read-only access to the volume.
1430	// Otherwise, the container can write to the volume. The default value is false.
1431	ReadOnly bool
1432
1433	// The name of the volume to mount.
1434	SourceVolume *string
1435}
1436
1437// The network configuration for jobs running on Fargate resources. Jobs running on
1438// EC2 resources must not specify this parameter.
1439type NetworkConfiguration struct {
1440
1441	// Indicates whether the job should have a public IP address. For a job running on
1442	// Fargate resources in a private subnet to send outbound traffic to the internet
1443	// (for example, in order to pull container images), the private subnet requires a
1444	// NAT gateway be attached to route requests to the internet. For more information,
1445	// see Amazon ECS task networking
1446	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html).
1447	// The default value is "DISABLED".
1448	AssignPublicIp AssignPublicIp
1449}
1450
1451// An object representing the elastic network interface for a multi-node parallel
1452// job node.
1453type NetworkInterface struct {
1454
1455	// The attachment ID for the network interface.
1456	AttachmentId *string
1457
1458	// The private IPv6 address for the network interface.
1459	Ipv6Address *string
1460
1461	// The private IPv4 address for the network interface.
1462	PrivateIpv4Address *string
1463}
1464
1465// An object representing the details of a multi-node parallel job node.
1466type NodeDetails struct {
1467
1468	// Specifies whether the current node is the main node for a multi-node parallel
1469	// job.
1470	IsMainNode bool
1471
1472	// The node index for the node. Node index numbering begins at zero. This index is
1473	// also available on the node with the AWS_BATCH_JOB_NODE_INDEX environment
1474	// variable.
1475	NodeIndex int32
1476}
1477
1478// Object representing any node overrides to a job definition that's used in a
1479// SubmitJob API operation. This isn't applicable to jobs running on Fargate
1480// resources and shouldn't be provided; use containerOverrides instead.
1481type NodeOverrides struct {
1482
1483	// The node property overrides for the job.
1484	NodePropertyOverrides []NodePropertyOverride
1485
1486	// The number of nodes to use with a multi-node parallel job. This value overrides
1487	// the number of nodes that are specified in the job definition. To use this
1488	// override:
1489	//
1490	// * There must be at least one node range in your job definition that
1491	// has an open upper boundary (such as : or n:).
1492	//
1493	// * The lower boundary of the node
1494	// range specified in the job definition must be fewer than the number of nodes
1495	// specified in the override.
1496	//
1497	// * The main node index specified in the job
1498	// definition must be fewer than the number of nodes specified in the override.
1499	NumNodes int32
1500}
1501
1502// An object representing the node properties of a multi-node parallel job.
1503type NodeProperties struct {
1504
1505	// Specifies the node index for the main node of a multi-node parallel job. This
1506	// node index value must be fewer than the number of nodes.
1507	//
1508	// This member is required.
1509	MainNode int32
1510
1511	// A list of node ranges and their properties associated with a multi-node parallel
1512	// job.
1513	//
1514	// This member is required.
1515	NodeRangeProperties []NodeRangeProperty
1516
1517	// The number of nodes associated with a multi-node parallel job.
1518	//
1519	// This member is required.
1520	NumNodes int32
1521}
1522
1523// An object representing the properties of a node that's associated with a
1524// multi-node parallel job.
1525type NodePropertiesSummary struct {
1526
1527	// Specifies whether the current node is the main node for a multi-node parallel
1528	// job.
1529	IsMainNode bool
1530
1531	// The node index for the node. Node index numbering begins at zero. This index is
1532	// also available on the node with the AWS_BATCH_JOB_NODE_INDEX environment
1533	// variable.
1534	NodeIndex int32
1535
1536	// The number of nodes associated with a multi-node parallel job.
1537	NumNodes int32
1538}
1539
1540// Object representing any node overrides to a job definition that's used in a
1541// SubmitJob API operation.
1542type NodePropertyOverride struct {
1543
1544	// The range of nodes, using node index values, that's used to override. A range of
1545	// 0:3 indicates nodes with index values of 0 through 3. If the starting range
1546	// value is omitted (:n), then 0 is used to start the range. If the ending range
1547	// value is omitted (n:), then the highest possible node index is used to end the
1548	// range.
1549	//
1550	// This member is required.
1551	TargetNodes *string
1552
1553	// The overrides that should be sent to a node range.
1554	ContainerOverrides *ContainerOverrides
1555}
1556
1557// An object representing the properties of the node range for a multi-node
1558// parallel job.
1559type NodeRangeProperty struct {
1560
1561	// The range of nodes, using node index values. A range of 0:3 indicates nodes with
1562	// index values of 0 through 3. If the starting range value is omitted (:n), then 0
1563	// is used to start the range. If the ending range value is omitted (n:), then the
1564	// highest possible node index is used to end the range. Your accumulative node
1565	// ranges must account for all nodes (0:n). You can nest node ranges, for example
1566	// 0:10 and 4:5, in which case the 4:5 range properties override the 0:10
1567	// properties.
1568	//
1569	// This member is required.
1570	TargetNodes *string
1571
1572	// The container details for the node range.
1573	Container *ContainerProperties
1574}
1575
1576// The type and amount of a resource to assign to a container. The supported
1577// resources include GPU, MEMORY, and VCPU.
1578type ResourceRequirement struct {
1579
1580	// The type of resource to assign to a container. The supported resources include
1581	// GPU, MEMORY, and VCPU.
1582	//
1583	// This member is required.
1584	Type ResourceType
1585
1586	// The quantity of the specified resource to reserve for the container. The values
1587	// vary based on the type specified. type="GPU" The number of physical GPUs to
1588	// reserve for the container. The number of GPUs reserved for all containers in a
1589	// job shouldn't exceed the number of available GPUs on the compute resource that
1590	// the job is launched on. GPUs are not available for jobs running on Fargate
1591	// resources. type="MEMORY" For jobs running on EC2 resources, the hard limit (in
1592	// MiB) of memory to present to the container. If your container attempts to exceed
1593	// the memory specified here, the container is killed. This parameter maps to
1594	// Memory in the Create a container
1595	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
1596	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the --memory
1597	// option to docker run (https://docs.docker.com/engine/reference/run/). You must
1598	// specify at least 4 MiB of memory for a job. This is required but can be
1599	// specified in several places for multi-node parallel (MNP) jobs. It must be
1600	// specified for each node at least once. This parameter maps to Memory in the
1601	// Create a container
1602	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
1603	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the --memory
1604	// option to docker run (https://docs.docker.com/engine/reference/run/). If you're
1605	// trying to maximize your resource utilization by providing your jobs as much
1606	// memory as possible for a particular instance type, see Memory Management
1607	// (https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in
1608	// the AWS Batch User Guide. For jobs running on Fargate resources, then value is
1609	// the hard limit (in MiB), and must match one of the supported values and the VCPU
1610	// values must be one of the values supported for that memory value. value = 512
1611	// VCPU = 0.25 value = 1024 VCPU = 0.25 or 0.5 value = 2048 VCPU = 0.25, 0.5, or 1
1612	// value = 3072 VCPU = 0.5, or 1 value = 4096 VCPU = 0.5, 1, or 2 value = 5120,
1613	// 6144, or 7168 VCPU = 1 or 2 value = 8192 VCPU = 1, 2, or 4 value = 9216, 10240,
1614	// 11264, 12288, 13312, 14336, 15360, or 16384 VCPU = 2 or 4 value = 17408, 18432,
1615	// 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or
1616	// 30720 VCPU = 4 type="VCPU" The number of vCPUs reserved for the container. This
1617	// parameter maps to CpuShares in the Create a container
1618	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the
1619	// Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the
1620	// --cpu-shares option to docker run
1621	// (https://docs.docker.com/engine/reference/run/). Each vCPU is equivalent to
1622	// 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is
1623	// required but can be specified in several places; it must be specified for each
1624	// node at least once. For jobs running on Fargate resources, then value must match
1625	// one of the supported values and the MEMORY values must be one of the values
1626	// supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, and 4
1627	// value = 0.25 MEMORY = 512, 1024, or 2048 value = 0.5 MEMORY = 1024, 2048, 3072,
1628	// or 4096 value = 1 MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192 value = 2
1629	// MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336,
1630	// 15360, or 16384 value = 4 MEMORY = 8192, 9216, 10240, 11264, 12288, 13312,
1631	// 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576,
1632	// 25600, 26624, 27648, 28672, 29696, or 30720
1633	//
1634	// This member is required.
1635	Value *string
1636}
1637
1638// The retry strategy associated with a job. For more information, see Automated
1639// job retries
1640// (https://docs.aws.amazon.com/batch/latest/userguide/job_retries.html) in the AWS
1641// Batch User Guide.
1642type RetryStrategy struct {
1643
1644	// The number of times to move a job to the RUNNABLE status. You can specify
1645	// between 1 and 10 attempts. If the value of attempts is greater than one, the job
1646	// is retried on failure the same number of attempts as the value.
1647	Attempts int32
1648
1649	// Array of up to 5 objects that specify conditions under which the job should be
1650	// retried or failed. If this parameter is specified, then the attempts parameter
1651	// must also be specified.
1652	EvaluateOnExit []EvaluateOnExit
1653}
1654
1655// An object representing the secret to expose to your container. Secrets can be
1656// exposed to a container in the following ways:
1657//
1658// * To inject sensitive data into
1659// your containers as environment variables, use the secrets container definition
1660// parameter.
1661//
1662// * To reference sensitive information in the log configuration of a
1663// container, use the secretOptions container definition parameter.
1664//
1665// For more
1666// information, see Specifying sensitive data
1667// (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
1668// in the AWS Batch User Guide.
1669type Secret struct {
1670
1671	// The name of the secret.
1672	//
1673	// This member is required.
1674	Name *string
1675
1676	// The secret to expose to the container. The supported values are either the full
1677	// ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the
1678	// AWS Systems Manager Parameter Store. If the AWS Systems Manager Parameter Store
1679	// parameter exists in the same Region as the job you are launching, then you can
1680	// use either the full ARN or name of the parameter. If the parameter exists in a
1681	// different Region, then the full ARN must be specified.
1682	//
1683	// This member is required.
1684	ValueFrom *string
1685}
1686
1687// The container path, mount options, and size of the tmpfs mount. This object
1688// isn't applicable to jobs running on Fargate resources.
1689type Tmpfs struct {
1690
1691	// The absolute file path in the container where the tmpfs volume is mounted.
1692	//
1693	// This member is required.
1694	ContainerPath *string
1695
1696	// The size (in MiB) of the tmpfs volume.
1697	//
1698	// This member is required.
1699	Size int32
1700
1701	// The list of tmpfs volume mount options. Valid values: "defaults" | "ro" | "rw" |
1702	// "suid" | "nosuid" | "dev" | "nodev" | "exec" | "noexec" | "sync" | "async" |
1703	// "dirsync" | "remount" | "mand" | "nomand" | "atime" | "noatime" | "diratime" |
1704	// "nodiratime" | "bind" | "rbind" | "unbindable" | "runbindable" | "private" |
1705	// "rprivate" | "shared" | "rshared" | "slave" | "rslave" | "relatime" |
1706	// "norelatime" | "strictatime" | "nostrictatime" | "mode" | "uid" | "gid" |
1707	// "nr_inodes" | "nr_blocks" | "mpol"
1708	MountOptions []string
1709}
1710
1711// The ulimit settings to pass to the container. This object isn't applicable to
1712// jobs running on Fargate resources.
1713type Ulimit struct {
1714
1715	// The hard limit for the ulimit type.
1716	//
1717	// This member is required.
1718	HardLimit int32
1719
1720	// The type of the ulimit.
1721	//
1722	// This member is required.
1723	Name *string
1724
1725	// The soft limit for the ulimit type.
1726	//
1727	// This member is required.
1728	SoftLimit int32
1729}
1730
1731// A data volume used in a job's container properties.
1732type Volume struct {
1733
1734	// The contents of the host parameter determine whether your data volume persists
1735	// on the host container instance and where it is stored. If the host parameter is
1736	// empty, then the Docker daemon assigns a host path for your data volume. However,
1737	// the data isn't guaranteed to persist after the containers associated with it
1738	// stop running. This parameter isn't applicable to jobs running on Fargate
1739	// resources and shouldn't be provided.
1740	Host *Host
1741
1742	// The name of the volume. Up to 255 letters (uppercase and lowercase), numbers,
1743	// hyphens, and underscores are allowed. This name is referenced in the
1744	// sourceVolume parameter of container definition mountPoints.
1745	Name *string
1746}
1747