1// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
2
3package mediaconvert
4
5import (
6	"fmt"
7	"time"
8
9	"github.com/aws/aws-sdk-go/aws"
10	"github.com/aws/aws-sdk-go/aws/awsutil"
11	"github.com/aws/aws-sdk-go/aws/request"
12	"github.com/aws/aws-sdk-go/private/protocol"
13	"github.com/aws/aws-sdk-go/private/protocol/restjson"
14)
15
16const opAssociateCertificate = "AssociateCertificate"
17
18// AssociateCertificateRequest generates a "aws/request.Request" representing the
19// client's request for the AssociateCertificate operation. The "output" return
20// value will be populated with the request's response once the request completes
21// successfully.
22//
23// Use "Send" method on the returned Request to send the API call to the service.
24// the "output" return value is not valid until after Send returns without error.
25//
26// See AssociateCertificate for more information on using the AssociateCertificate
27// API call, and error handling.
28//
29// This method is useful when you want to inject custom logic or configuration
30// into the SDK's request lifecycle. Such as custom headers, or retry logic.
31//
32//
33//    // Example sending a request using the AssociateCertificateRequest method.
34//    req, resp := client.AssociateCertificateRequest(params)
35//
36//    err := req.Send()
37//    if err == nil { // resp is now filled
38//        fmt.Println(resp)
39//    }
40//
41// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/AssociateCertificate
42func (c *MediaConvert) AssociateCertificateRequest(input *AssociateCertificateInput) (req *request.Request, output *AssociateCertificateOutput) {
43	op := &request.Operation{
44		Name:       opAssociateCertificate,
45		HTTPMethod: "POST",
46		HTTPPath:   "/2017-08-29/certificates",
47	}
48
49	if input == nil {
50		input = &AssociateCertificateInput{}
51	}
52
53	output = &AssociateCertificateOutput{}
54	req = c.newRequest(op, input, output)
55	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
56	return
57}
58
59// AssociateCertificate API operation for AWS Elemental MediaConvert.
60//
61// Associates an AWS Certificate Manager (ACM) Amazon Resource Name (ARN) with
62// AWS Elemental MediaConvert.
63//
64// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
65// with awserr.Error's Code and Message methods to get detailed information about
66// the error.
67//
68// See the AWS API reference guide for AWS Elemental MediaConvert's
69// API operation AssociateCertificate for usage and error information.
70//
71// Returned Error Types:
72//   * BadRequestException
73//
74//   * InternalServerErrorException
75//
76//   * ForbiddenException
77//
78//   * NotFoundException
79//
80//   * TooManyRequestsException
81//
82//   * ConflictException
83//
84// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/AssociateCertificate
85func (c *MediaConvert) AssociateCertificate(input *AssociateCertificateInput) (*AssociateCertificateOutput, error) {
86	req, out := c.AssociateCertificateRequest(input)
87	return out, req.Send()
88}
89
90// AssociateCertificateWithContext is the same as AssociateCertificate with the addition of
91// the ability to pass a context and additional request options.
92//
93// See AssociateCertificate for details on how to use this API operation.
94//
95// The context must be non-nil and will be used for request cancellation. If
96// the context is nil a panic will occur. In the future the SDK may create
97// sub-contexts for http.Requests. See https://golang.org/pkg/context/
98// for more information on using Contexts.
99func (c *MediaConvert) AssociateCertificateWithContext(ctx aws.Context, input *AssociateCertificateInput, opts ...request.Option) (*AssociateCertificateOutput, error) {
100	req, out := c.AssociateCertificateRequest(input)
101	req.SetContext(ctx)
102	req.ApplyOptions(opts...)
103	return out, req.Send()
104}
105
106const opCancelJob = "CancelJob"
107
108// CancelJobRequest generates a "aws/request.Request" representing the
109// client's request for the CancelJob operation. The "output" return
110// value will be populated with the request's response once the request completes
111// successfully.
112//
113// Use "Send" method on the returned Request to send the API call to the service.
114// the "output" return value is not valid until after Send returns without error.
115//
116// See CancelJob for more information on using the CancelJob
117// API call, and error handling.
118//
119// This method is useful when you want to inject custom logic or configuration
120// into the SDK's request lifecycle. Such as custom headers, or retry logic.
121//
122//
123//    // Example sending a request using the CancelJobRequest method.
124//    req, resp := client.CancelJobRequest(params)
125//
126//    err := req.Send()
127//    if err == nil { // resp is now filled
128//        fmt.Println(resp)
129//    }
130//
131// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CancelJob
132func (c *MediaConvert) CancelJobRequest(input *CancelJobInput) (req *request.Request, output *CancelJobOutput) {
133	op := &request.Operation{
134		Name:       opCancelJob,
135		HTTPMethod: "DELETE",
136		HTTPPath:   "/2017-08-29/jobs/{id}",
137	}
138
139	if input == nil {
140		input = &CancelJobInput{}
141	}
142
143	output = &CancelJobOutput{}
144	req = c.newRequest(op, input, output)
145	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
146	return
147}
148
149// CancelJob API operation for AWS Elemental MediaConvert.
150//
151// Permanently cancel a job. Once you have canceled a job, you can't start it
152// again.
153//
154// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
155// with awserr.Error's Code and Message methods to get detailed information about
156// the error.
157//
158// See the AWS API reference guide for AWS Elemental MediaConvert's
159// API operation CancelJob for usage and error information.
160//
161// Returned Error Types:
162//   * BadRequestException
163//
164//   * InternalServerErrorException
165//
166//   * ForbiddenException
167//
168//   * NotFoundException
169//
170//   * TooManyRequestsException
171//
172//   * ConflictException
173//
174// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CancelJob
175func (c *MediaConvert) CancelJob(input *CancelJobInput) (*CancelJobOutput, error) {
176	req, out := c.CancelJobRequest(input)
177	return out, req.Send()
178}
179
180// CancelJobWithContext is the same as CancelJob with the addition of
181// the ability to pass a context and additional request options.
182//
183// See CancelJob for details on how to use this API operation.
184//
185// The context must be non-nil and will be used for request cancellation. If
186// the context is nil a panic will occur. In the future the SDK may create
187// sub-contexts for http.Requests. See https://golang.org/pkg/context/
188// for more information on using Contexts.
189func (c *MediaConvert) CancelJobWithContext(ctx aws.Context, input *CancelJobInput, opts ...request.Option) (*CancelJobOutput, error) {
190	req, out := c.CancelJobRequest(input)
191	req.SetContext(ctx)
192	req.ApplyOptions(opts...)
193	return out, req.Send()
194}
195
196const opCreateJob = "CreateJob"
197
198// CreateJobRequest generates a "aws/request.Request" representing the
199// client's request for the CreateJob operation. The "output" return
200// value will be populated with the request's response once the request completes
201// successfully.
202//
203// Use "Send" method on the returned Request to send the API call to the service.
204// the "output" return value is not valid until after Send returns without error.
205//
206// See CreateJob for more information on using the CreateJob
207// API call, and error handling.
208//
209// This method is useful when you want to inject custom logic or configuration
210// into the SDK's request lifecycle. Such as custom headers, or retry logic.
211//
212//
213//    // Example sending a request using the CreateJobRequest method.
214//    req, resp := client.CreateJobRequest(params)
215//
216//    err := req.Send()
217//    if err == nil { // resp is now filled
218//        fmt.Println(resp)
219//    }
220//
221// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJob
222func (c *MediaConvert) CreateJobRequest(input *CreateJobInput) (req *request.Request, output *CreateJobOutput) {
223	op := &request.Operation{
224		Name:       opCreateJob,
225		HTTPMethod: "POST",
226		HTTPPath:   "/2017-08-29/jobs",
227	}
228
229	if input == nil {
230		input = &CreateJobInput{}
231	}
232
233	output = &CreateJobOutput{}
234	req = c.newRequest(op, input, output)
235	return
236}
237
238// CreateJob API operation for AWS Elemental MediaConvert.
239//
240// Create a new transcoding job. For information about jobs and job settings,
241// see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
242//
243// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
244// with awserr.Error's Code and Message methods to get detailed information about
245// the error.
246//
247// See the AWS API reference guide for AWS Elemental MediaConvert's
248// API operation CreateJob for usage and error information.
249//
250// Returned Error Types:
251//   * BadRequestException
252//
253//   * InternalServerErrorException
254//
255//   * ForbiddenException
256//
257//   * NotFoundException
258//
259//   * TooManyRequestsException
260//
261//   * ConflictException
262//
263// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJob
264func (c *MediaConvert) CreateJob(input *CreateJobInput) (*CreateJobOutput, error) {
265	req, out := c.CreateJobRequest(input)
266	return out, req.Send()
267}
268
269// CreateJobWithContext is the same as CreateJob with the addition of
270// the ability to pass a context and additional request options.
271//
272// See CreateJob for details on how to use this API operation.
273//
274// The context must be non-nil and will be used for request cancellation. If
275// the context is nil a panic will occur. In the future the SDK may create
276// sub-contexts for http.Requests. See https://golang.org/pkg/context/
277// for more information on using Contexts.
278func (c *MediaConvert) CreateJobWithContext(ctx aws.Context, input *CreateJobInput, opts ...request.Option) (*CreateJobOutput, error) {
279	req, out := c.CreateJobRequest(input)
280	req.SetContext(ctx)
281	req.ApplyOptions(opts...)
282	return out, req.Send()
283}
284
285const opCreateJobTemplate = "CreateJobTemplate"
286
287// CreateJobTemplateRequest generates a "aws/request.Request" representing the
288// client's request for the CreateJobTemplate operation. The "output" return
289// value will be populated with the request's response once the request completes
290// successfully.
291//
292// Use "Send" method on the returned Request to send the API call to the service.
293// the "output" return value is not valid until after Send returns without error.
294//
295// See CreateJobTemplate for more information on using the CreateJobTemplate
296// API call, and error handling.
297//
298// This method is useful when you want to inject custom logic or configuration
299// into the SDK's request lifecycle. Such as custom headers, or retry logic.
300//
301//
302//    // Example sending a request using the CreateJobTemplateRequest method.
303//    req, resp := client.CreateJobTemplateRequest(params)
304//
305//    err := req.Send()
306//    if err == nil { // resp is now filled
307//        fmt.Println(resp)
308//    }
309//
310// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJobTemplate
311func (c *MediaConvert) CreateJobTemplateRequest(input *CreateJobTemplateInput) (req *request.Request, output *CreateJobTemplateOutput) {
312	op := &request.Operation{
313		Name:       opCreateJobTemplate,
314		HTTPMethod: "POST",
315		HTTPPath:   "/2017-08-29/jobTemplates",
316	}
317
318	if input == nil {
319		input = &CreateJobTemplateInput{}
320	}
321
322	output = &CreateJobTemplateOutput{}
323	req = c.newRequest(op, input, output)
324	return
325}
326
327// CreateJobTemplate API operation for AWS Elemental MediaConvert.
328//
329// Create a new job template. For information about job templates see the User
330// Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
331//
332// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
333// with awserr.Error's Code and Message methods to get detailed information about
334// the error.
335//
336// See the AWS API reference guide for AWS Elemental MediaConvert's
337// API operation CreateJobTemplate for usage and error information.
338//
339// Returned Error Types:
340//   * BadRequestException
341//
342//   * InternalServerErrorException
343//
344//   * ForbiddenException
345//
346//   * NotFoundException
347//
348//   * TooManyRequestsException
349//
350//   * ConflictException
351//
352// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJobTemplate
353func (c *MediaConvert) CreateJobTemplate(input *CreateJobTemplateInput) (*CreateJobTemplateOutput, error) {
354	req, out := c.CreateJobTemplateRequest(input)
355	return out, req.Send()
356}
357
358// CreateJobTemplateWithContext is the same as CreateJobTemplate with the addition of
359// the ability to pass a context and additional request options.
360//
361// See CreateJobTemplate for details on how to use this API operation.
362//
363// The context must be non-nil and will be used for request cancellation. If
364// the context is nil a panic will occur. In the future the SDK may create
365// sub-contexts for http.Requests. See https://golang.org/pkg/context/
366// for more information on using Contexts.
367func (c *MediaConvert) CreateJobTemplateWithContext(ctx aws.Context, input *CreateJobTemplateInput, opts ...request.Option) (*CreateJobTemplateOutput, error) {
368	req, out := c.CreateJobTemplateRequest(input)
369	req.SetContext(ctx)
370	req.ApplyOptions(opts...)
371	return out, req.Send()
372}
373
374const opCreatePreset = "CreatePreset"
375
376// CreatePresetRequest generates a "aws/request.Request" representing the
377// client's request for the CreatePreset operation. The "output" return
378// value will be populated with the request's response once the request completes
379// successfully.
380//
381// Use "Send" method on the returned Request to send the API call to the service.
382// the "output" return value is not valid until after Send returns without error.
383//
384// See CreatePreset for more information on using the CreatePreset
385// API call, and error handling.
386//
387// This method is useful when you want to inject custom logic or configuration
388// into the SDK's request lifecycle. Such as custom headers, or retry logic.
389//
390//
391//    // Example sending a request using the CreatePresetRequest method.
392//    req, resp := client.CreatePresetRequest(params)
393//
394//    err := req.Send()
395//    if err == nil { // resp is now filled
396//        fmt.Println(resp)
397//    }
398//
399// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreatePreset
400func (c *MediaConvert) CreatePresetRequest(input *CreatePresetInput) (req *request.Request, output *CreatePresetOutput) {
401	op := &request.Operation{
402		Name:       opCreatePreset,
403		HTTPMethod: "POST",
404		HTTPPath:   "/2017-08-29/presets",
405	}
406
407	if input == nil {
408		input = &CreatePresetInput{}
409	}
410
411	output = &CreatePresetOutput{}
412	req = c.newRequest(op, input, output)
413	return
414}
415
416// CreatePreset API operation for AWS Elemental MediaConvert.
417//
418// Create a new preset. For information about job templates see the User Guide
419// at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
420//
421// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
422// with awserr.Error's Code and Message methods to get detailed information about
423// the error.
424//
425// See the AWS API reference guide for AWS Elemental MediaConvert's
426// API operation CreatePreset for usage and error information.
427//
428// Returned Error Types:
429//   * BadRequestException
430//
431//   * InternalServerErrorException
432//
433//   * ForbiddenException
434//
435//   * NotFoundException
436//
437//   * TooManyRequestsException
438//
439//   * ConflictException
440//
441// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreatePreset
442func (c *MediaConvert) CreatePreset(input *CreatePresetInput) (*CreatePresetOutput, error) {
443	req, out := c.CreatePresetRequest(input)
444	return out, req.Send()
445}
446
447// CreatePresetWithContext is the same as CreatePreset with the addition of
448// the ability to pass a context and additional request options.
449//
450// See CreatePreset for details on how to use this API operation.
451//
452// The context must be non-nil and will be used for request cancellation. If
453// the context is nil a panic will occur. In the future the SDK may create
454// sub-contexts for http.Requests. See https://golang.org/pkg/context/
455// for more information on using Contexts.
456func (c *MediaConvert) CreatePresetWithContext(ctx aws.Context, input *CreatePresetInput, opts ...request.Option) (*CreatePresetOutput, error) {
457	req, out := c.CreatePresetRequest(input)
458	req.SetContext(ctx)
459	req.ApplyOptions(opts...)
460	return out, req.Send()
461}
462
463const opCreateQueue = "CreateQueue"
464
465// CreateQueueRequest generates a "aws/request.Request" representing the
466// client's request for the CreateQueue operation. The "output" return
467// value will be populated with the request's response once the request completes
468// successfully.
469//
470// Use "Send" method on the returned Request to send the API call to the service.
471// the "output" return value is not valid until after Send returns without error.
472//
473// See CreateQueue for more information on using the CreateQueue
474// API call, and error handling.
475//
476// This method is useful when you want to inject custom logic or configuration
477// into the SDK's request lifecycle. Such as custom headers, or retry logic.
478//
479//
480//    // Example sending a request using the CreateQueueRequest method.
481//    req, resp := client.CreateQueueRequest(params)
482//
483//    err := req.Send()
484//    if err == nil { // resp is now filled
485//        fmt.Println(resp)
486//    }
487//
488// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateQueue
489func (c *MediaConvert) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, output *CreateQueueOutput) {
490	op := &request.Operation{
491		Name:       opCreateQueue,
492		HTTPMethod: "POST",
493		HTTPPath:   "/2017-08-29/queues",
494	}
495
496	if input == nil {
497		input = &CreateQueueInput{}
498	}
499
500	output = &CreateQueueOutput{}
501	req = c.newRequest(op, input, output)
502	return
503}
504
505// CreateQueue API operation for AWS Elemental MediaConvert.
506//
507// Create a new transcoding queue. For information about queues, see Working
508// With Queues in the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html
509//
510// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
511// with awserr.Error's Code and Message methods to get detailed information about
512// the error.
513//
514// See the AWS API reference guide for AWS Elemental MediaConvert's
515// API operation CreateQueue for usage and error information.
516//
517// Returned Error Types:
518//   * BadRequestException
519//
520//   * InternalServerErrorException
521//
522//   * ForbiddenException
523//
524//   * NotFoundException
525//
526//   * TooManyRequestsException
527//
528//   * ConflictException
529//
530// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateQueue
531func (c *MediaConvert) CreateQueue(input *CreateQueueInput) (*CreateQueueOutput, error) {
532	req, out := c.CreateQueueRequest(input)
533	return out, req.Send()
534}
535
536// CreateQueueWithContext is the same as CreateQueue with the addition of
537// the ability to pass a context and additional request options.
538//
539// See CreateQueue for details on how to use this API operation.
540//
541// The context must be non-nil and will be used for request cancellation. If
542// the context is nil a panic will occur. In the future the SDK may create
543// sub-contexts for http.Requests. See https://golang.org/pkg/context/
544// for more information on using Contexts.
545func (c *MediaConvert) CreateQueueWithContext(ctx aws.Context, input *CreateQueueInput, opts ...request.Option) (*CreateQueueOutput, error) {
546	req, out := c.CreateQueueRequest(input)
547	req.SetContext(ctx)
548	req.ApplyOptions(opts...)
549	return out, req.Send()
550}
551
552const opDeleteJobTemplate = "DeleteJobTemplate"
553
554// DeleteJobTemplateRequest generates a "aws/request.Request" representing the
555// client's request for the DeleteJobTemplate operation. The "output" return
556// value will be populated with the request's response once the request completes
557// successfully.
558//
559// Use "Send" method on the returned Request to send the API call to the service.
560// the "output" return value is not valid until after Send returns without error.
561//
562// See DeleteJobTemplate for more information on using the DeleteJobTemplate
563// API call, and error handling.
564//
565// This method is useful when you want to inject custom logic or configuration
566// into the SDK's request lifecycle. Such as custom headers, or retry logic.
567//
568//
569//    // Example sending a request using the DeleteJobTemplateRequest method.
570//    req, resp := client.DeleteJobTemplateRequest(params)
571//
572//    err := req.Send()
573//    if err == nil { // resp is now filled
574//        fmt.Println(resp)
575//    }
576//
577// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteJobTemplate
578func (c *MediaConvert) DeleteJobTemplateRequest(input *DeleteJobTemplateInput) (req *request.Request, output *DeleteJobTemplateOutput) {
579	op := &request.Operation{
580		Name:       opDeleteJobTemplate,
581		HTTPMethod: "DELETE",
582		HTTPPath:   "/2017-08-29/jobTemplates/{name}",
583	}
584
585	if input == nil {
586		input = &DeleteJobTemplateInput{}
587	}
588
589	output = &DeleteJobTemplateOutput{}
590	req = c.newRequest(op, input, output)
591	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
592	return
593}
594
595// DeleteJobTemplate API operation for AWS Elemental MediaConvert.
596//
597// Permanently delete a job template you have created.
598//
599// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
600// with awserr.Error's Code and Message methods to get detailed information about
601// the error.
602//
603// See the AWS API reference guide for AWS Elemental MediaConvert's
604// API operation DeleteJobTemplate for usage and error information.
605//
606// Returned Error Types:
607//   * BadRequestException
608//
609//   * InternalServerErrorException
610//
611//   * ForbiddenException
612//
613//   * NotFoundException
614//
615//   * TooManyRequestsException
616//
617//   * ConflictException
618//
619// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteJobTemplate
620func (c *MediaConvert) DeleteJobTemplate(input *DeleteJobTemplateInput) (*DeleteJobTemplateOutput, error) {
621	req, out := c.DeleteJobTemplateRequest(input)
622	return out, req.Send()
623}
624
625// DeleteJobTemplateWithContext is the same as DeleteJobTemplate with the addition of
626// the ability to pass a context and additional request options.
627//
628// See DeleteJobTemplate for details on how to use this API operation.
629//
630// The context must be non-nil and will be used for request cancellation. If
631// the context is nil a panic will occur. In the future the SDK may create
632// sub-contexts for http.Requests. See https://golang.org/pkg/context/
633// for more information on using Contexts.
634func (c *MediaConvert) DeleteJobTemplateWithContext(ctx aws.Context, input *DeleteJobTemplateInput, opts ...request.Option) (*DeleteJobTemplateOutput, error) {
635	req, out := c.DeleteJobTemplateRequest(input)
636	req.SetContext(ctx)
637	req.ApplyOptions(opts...)
638	return out, req.Send()
639}
640
641const opDeletePreset = "DeletePreset"
642
643// DeletePresetRequest generates a "aws/request.Request" representing the
644// client's request for the DeletePreset operation. The "output" return
645// value will be populated with the request's response once the request completes
646// successfully.
647//
648// Use "Send" method on the returned Request to send the API call to the service.
649// the "output" return value is not valid until after Send returns without error.
650//
651// See DeletePreset for more information on using the DeletePreset
652// API call, and error handling.
653//
654// This method is useful when you want to inject custom logic or configuration
655// into the SDK's request lifecycle. Such as custom headers, or retry logic.
656//
657//
658//    // Example sending a request using the DeletePresetRequest method.
659//    req, resp := client.DeletePresetRequest(params)
660//
661//    err := req.Send()
662//    if err == nil { // resp is now filled
663//        fmt.Println(resp)
664//    }
665//
666// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeletePreset
667func (c *MediaConvert) DeletePresetRequest(input *DeletePresetInput) (req *request.Request, output *DeletePresetOutput) {
668	op := &request.Operation{
669		Name:       opDeletePreset,
670		HTTPMethod: "DELETE",
671		HTTPPath:   "/2017-08-29/presets/{name}",
672	}
673
674	if input == nil {
675		input = &DeletePresetInput{}
676	}
677
678	output = &DeletePresetOutput{}
679	req = c.newRequest(op, input, output)
680	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
681	return
682}
683
684// DeletePreset API operation for AWS Elemental MediaConvert.
685//
686// Permanently delete a preset you have created.
687//
688// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
689// with awserr.Error's Code and Message methods to get detailed information about
690// the error.
691//
692// See the AWS API reference guide for AWS Elemental MediaConvert's
693// API operation DeletePreset for usage and error information.
694//
695// Returned Error Types:
696//   * BadRequestException
697//
698//   * InternalServerErrorException
699//
700//   * ForbiddenException
701//
702//   * NotFoundException
703//
704//   * TooManyRequestsException
705//
706//   * ConflictException
707//
708// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeletePreset
709func (c *MediaConvert) DeletePreset(input *DeletePresetInput) (*DeletePresetOutput, error) {
710	req, out := c.DeletePresetRequest(input)
711	return out, req.Send()
712}
713
714// DeletePresetWithContext is the same as DeletePreset with the addition of
715// the ability to pass a context and additional request options.
716//
717// See DeletePreset for details on how to use this API operation.
718//
719// The context must be non-nil and will be used for request cancellation. If
720// the context is nil a panic will occur. In the future the SDK may create
721// sub-contexts for http.Requests. See https://golang.org/pkg/context/
722// for more information on using Contexts.
723func (c *MediaConvert) DeletePresetWithContext(ctx aws.Context, input *DeletePresetInput, opts ...request.Option) (*DeletePresetOutput, error) {
724	req, out := c.DeletePresetRequest(input)
725	req.SetContext(ctx)
726	req.ApplyOptions(opts...)
727	return out, req.Send()
728}
729
730const opDeleteQueue = "DeleteQueue"
731
732// DeleteQueueRequest generates a "aws/request.Request" representing the
733// client's request for the DeleteQueue operation. The "output" return
734// value will be populated with the request's response once the request completes
735// successfully.
736//
737// Use "Send" method on the returned Request to send the API call to the service.
738// the "output" return value is not valid until after Send returns without error.
739//
740// See DeleteQueue for more information on using the DeleteQueue
741// API call, and error handling.
742//
743// This method is useful when you want to inject custom logic or configuration
744// into the SDK's request lifecycle. Such as custom headers, or retry logic.
745//
746//
747//    // Example sending a request using the DeleteQueueRequest method.
748//    req, resp := client.DeleteQueueRequest(params)
749//
750//    err := req.Send()
751//    if err == nil { // resp is now filled
752//        fmt.Println(resp)
753//    }
754//
755// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteQueue
756func (c *MediaConvert) DeleteQueueRequest(input *DeleteQueueInput) (req *request.Request, output *DeleteQueueOutput) {
757	op := &request.Operation{
758		Name:       opDeleteQueue,
759		HTTPMethod: "DELETE",
760		HTTPPath:   "/2017-08-29/queues/{name}",
761	}
762
763	if input == nil {
764		input = &DeleteQueueInput{}
765	}
766
767	output = &DeleteQueueOutput{}
768	req = c.newRequest(op, input, output)
769	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
770	return
771}
772
773// DeleteQueue API operation for AWS Elemental MediaConvert.
774//
775// Permanently delete a queue you have created.
776//
777// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
778// with awserr.Error's Code and Message methods to get detailed information about
779// the error.
780//
781// See the AWS API reference guide for AWS Elemental MediaConvert's
782// API operation DeleteQueue for usage and error information.
783//
784// Returned Error Types:
785//   * BadRequestException
786//
787//   * InternalServerErrorException
788//
789//   * ForbiddenException
790//
791//   * NotFoundException
792//
793//   * TooManyRequestsException
794//
795//   * ConflictException
796//
797// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteQueue
798func (c *MediaConvert) DeleteQueue(input *DeleteQueueInput) (*DeleteQueueOutput, error) {
799	req, out := c.DeleteQueueRequest(input)
800	return out, req.Send()
801}
802
803// DeleteQueueWithContext is the same as DeleteQueue with the addition of
804// the ability to pass a context and additional request options.
805//
806// See DeleteQueue for details on how to use this API operation.
807//
808// The context must be non-nil and will be used for request cancellation. If
809// the context is nil a panic will occur. In the future the SDK may create
810// sub-contexts for http.Requests. See https://golang.org/pkg/context/
811// for more information on using Contexts.
812func (c *MediaConvert) DeleteQueueWithContext(ctx aws.Context, input *DeleteQueueInput, opts ...request.Option) (*DeleteQueueOutput, error) {
813	req, out := c.DeleteQueueRequest(input)
814	req.SetContext(ctx)
815	req.ApplyOptions(opts...)
816	return out, req.Send()
817}
818
819const opDescribeEndpoints = "DescribeEndpoints"
820
821// DescribeEndpointsRequest generates a "aws/request.Request" representing the
822// client's request for the DescribeEndpoints operation. The "output" return
823// value will be populated with the request's response once the request completes
824// successfully.
825//
826// Use "Send" method on the returned Request to send the API call to the service.
827// the "output" return value is not valid until after Send returns without error.
828//
829// See DescribeEndpoints for more information on using the DescribeEndpoints
830// API call, and error handling.
831//
832// This method is useful when you want to inject custom logic or configuration
833// into the SDK's request lifecycle. Such as custom headers, or retry logic.
834//
835//
836//    // Example sending a request using the DescribeEndpointsRequest method.
837//    req, resp := client.DescribeEndpointsRequest(params)
838//
839//    err := req.Send()
840//    if err == nil { // resp is now filled
841//        fmt.Println(resp)
842//    }
843//
844// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DescribeEndpoints
845func (c *MediaConvert) DescribeEndpointsRequest(input *DescribeEndpointsInput) (req *request.Request, output *DescribeEndpointsOutput) {
846	op := &request.Operation{
847		Name:       opDescribeEndpoints,
848		HTTPMethod: "POST",
849		HTTPPath:   "/2017-08-29/endpoints",
850		Paginator: &request.Paginator{
851			InputTokens:     []string{"NextToken"},
852			OutputTokens:    []string{"NextToken"},
853			LimitToken:      "MaxResults",
854			TruncationToken: "",
855		},
856	}
857
858	if input == nil {
859		input = &DescribeEndpointsInput{}
860	}
861
862	output = &DescribeEndpointsOutput{}
863	req = c.newRequest(op, input, output)
864	return
865}
866
867// DescribeEndpoints API operation for AWS Elemental MediaConvert.
868//
// Send a request with an empty body to the regional API endpoint to get your
870// account API endpoint.
871//
872// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
873// with awserr.Error's Code and Message methods to get detailed information about
874// the error.
875//
876// See the AWS API reference guide for AWS Elemental MediaConvert's
877// API operation DescribeEndpoints for usage and error information.
878//
879// Returned Error Types:
880//   * BadRequestException
881//
882//   * InternalServerErrorException
883//
884//   * ForbiddenException
885//
886//   * NotFoundException
887//
888//   * TooManyRequestsException
889//
890//   * ConflictException
891//
892// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DescribeEndpoints
893func (c *MediaConvert) DescribeEndpoints(input *DescribeEndpointsInput) (*DescribeEndpointsOutput, error) {
894	req, out := c.DescribeEndpointsRequest(input)
895	return out, req.Send()
896}
897
898// DescribeEndpointsWithContext is the same as DescribeEndpoints with the addition of
899// the ability to pass a context and additional request options.
900//
901// See DescribeEndpoints for details on how to use this API operation.
902//
903// The context must be non-nil and will be used for request cancellation. If
904// the context is nil a panic will occur. In the future the SDK may create
905// sub-contexts for http.Requests. See https://golang.org/pkg/context/
906// for more information on using Contexts.
907func (c *MediaConvert) DescribeEndpointsWithContext(ctx aws.Context, input *DescribeEndpointsInput, opts ...request.Option) (*DescribeEndpointsOutput, error) {
908	req, out := c.DescribeEndpointsRequest(input)
909	req.SetContext(ctx)
910	req.ApplyOptions(opts...)
911	return out, req.Send()
912}
913
914// DescribeEndpointsPages iterates over the pages of a DescribeEndpoints operation,
915// calling the "fn" function with the response data for each page. To stop
916// iterating, return false from the fn function.
917//
918// See DescribeEndpoints method for more information on how to use this operation.
919//
920// Note: This operation can generate multiple requests to a service.
921//
922//    // Example iterating over at most 3 pages of a DescribeEndpoints operation.
923//    pageNum := 0
924//    err := client.DescribeEndpointsPages(params,
925//        func(page *mediaconvert.DescribeEndpointsOutput, lastPage bool) bool {
926//            pageNum++
927//            fmt.Println(page)
928//            return pageNum <= 3
929//        })
930//
931func (c *MediaConvert) DescribeEndpointsPages(input *DescribeEndpointsInput, fn func(*DescribeEndpointsOutput, bool) bool) error {
932	return c.DescribeEndpointsPagesWithContext(aws.BackgroundContext(), input, fn)
933}
934
935// DescribeEndpointsPagesWithContext same as DescribeEndpointsPages except
936// it takes a Context and allows setting request options on the pages.
937//
938// The context must be non-nil and will be used for request cancellation. If
939// the context is nil a panic will occur. In the future the SDK may create
940// sub-contexts for http.Requests. See https://golang.org/pkg/context/
941// for more information on using Contexts.
942func (c *MediaConvert) DescribeEndpointsPagesWithContext(ctx aws.Context, input *DescribeEndpointsInput, fn func(*DescribeEndpointsOutput, bool) bool, opts ...request.Option) error {
943	p := request.Pagination{
944		NewRequest: func() (*request.Request, error) {
945			var inCpy *DescribeEndpointsInput
946			if input != nil {
947				tmp := *input
948				inCpy = &tmp
949			}
950			req, _ := c.DescribeEndpointsRequest(inCpy)
951			req.SetContext(ctx)
952			req.ApplyOptions(opts...)
953			return req, nil
954		},
955	}
956
957	for p.Next() {
958		if !fn(p.Page().(*DescribeEndpointsOutput), !p.HasNextPage()) {
959			break
960		}
961	}
962
963	return p.Err()
964}
965
// opDisassociateCertificate is the service operation name for DisassociateCertificate.
const opDisassociateCertificate = "DisassociateCertificate"
967
968// DisassociateCertificateRequest generates a "aws/request.Request" representing the
969// client's request for the DisassociateCertificate operation. The "output" return
970// value will be populated with the request's response once the request completes
971// successfully.
972//
973// Use "Send" method on the returned Request to send the API call to the service.
974// the "output" return value is not valid until after Send returns without error.
975//
976// See DisassociateCertificate for more information on using the DisassociateCertificate
977// API call, and error handling.
978//
979// This method is useful when you want to inject custom logic or configuration
980// into the SDK's request lifecycle. Such as custom headers, or retry logic.
981//
982//
983//    // Example sending a request using the DisassociateCertificateRequest method.
984//    req, resp := client.DisassociateCertificateRequest(params)
985//
986//    err := req.Send()
987//    if err == nil { // resp is now filled
988//        fmt.Println(resp)
989//    }
990//
991// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DisassociateCertificate
992func (c *MediaConvert) DisassociateCertificateRequest(input *DisassociateCertificateInput) (req *request.Request, output *DisassociateCertificateOutput) {
993	op := &request.Operation{
994		Name:       opDisassociateCertificate,
995		HTTPMethod: "DELETE",
996		HTTPPath:   "/2017-08-29/certificates/{arn}",
997	}
998
999	if input == nil {
1000		input = &DisassociateCertificateInput{}
1001	}
1002
1003	output = &DisassociateCertificateOutput{}
1004	req = c.newRequest(op, input, output)
1005	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
1006	return
1007}
1008
1009// DisassociateCertificate API operation for AWS Elemental MediaConvert.
1010//
1011// Removes an association between the Amazon Resource Name (ARN) of an AWS Certificate
1012// Manager (ACM) certificate and an AWS Elemental MediaConvert resource.
1013//
1014// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1015// with awserr.Error's Code and Message methods to get detailed information about
1016// the error.
1017//
1018// See the AWS API reference guide for AWS Elemental MediaConvert's
1019// API operation DisassociateCertificate for usage and error information.
1020//
1021// Returned Error Types:
1022//   * BadRequestException
1023//
1024//   * InternalServerErrorException
1025//
1026//   * ForbiddenException
1027//
1028//   * NotFoundException
1029//
1030//   * TooManyRequestsException
1031//
1032//   * ConflictException
1033//
1034// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DisassociateCertificate
1035func (c *MediaConvert) DisassociateCertificate(input *DisassociateCertificateInput) (*DisassociateCertificateOutput, error) {
1036	req, out := c.DisassociateCertificateRequest(input)
1037	return out, req.Send()
1038}
1039
1040// DisassociateCertificateWithContext is the same as DisassociateCertificate with the addition of
1041// the ability to pass a context and additional request options.
1042//
1043// See DisassociateCertificate for details on how to use this API operation.
1044//
1045// The context must be non-nil and will be used for request cancellation. If
1046// the context is nil a panic will occur. In the future the SDK may create
1047// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1048// for more information on using Contexts.
1049func (c *MediaConvert) DisassociateCertificateWithContext(ctx aws.Context, input *DisassociateCertificateInput, opts ...request.Option) (*DisassociateCertificateOutput, error) {
1050	req, out := c.DisassociateCertificateRequest(input)
1051	req.SetContext(ctx)
1052	req.ApplyOptions(opts...)
1053	return out, req.Send()
1054}
1055
// opGetJob is the service operation name for GetJob.
const opGetJob = "GetJob"
1057
1058// GetJobRequest generates a "aws/request.Request" representing the
1059// client's request for the GetJob operation. The "output" return
1060// value will be populated with the request's response once the request completes
1061// successfully.
1062//
1063// Use "Send" method on the returned Request to send the API call to the service.
1064// the "output" return value is not valid until after Send returns without error.
1065//
1066// See GetJob for more information on using the GetJob
1067// API call, and error handling.
1068//
1069// This method is useful when you want to inject custom logic or configuration
1070// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1071//
1072//
1073//    // Example sending a request using the GetJobRequest method.
1074//    req, resp := client.GetJobRequest(params)
1075//
1076//    err := req.Send()
1077//    if err == nil { // resp is now filled
1078//        fmt.Println(resp)
1079//    }
1080//
1081// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJob
1082func (c *MediaConvert) GetJobRequest(input *GetJobInput) (req *request.Request, output *GetJobOutput) {
1083	op := &request.Operation{
1084		Name:       opGetJob,
1085		HTTPMethod: "GET",
1086		HTTPPath:   "/2017-08-29/jobs/{id}",
1087	}
1088
1089	if input == nil {
1090		input = &GetJobInput{}
1091	}
1092
1093	output = &GetJobOutput{}
1094	req = c.newRequest(op, input, output)
1095	return
1096}
1097
1098// GetJob API operation for AWS Elemental MediaConvert.
1099//
1100// Retrieve the JSON for a specific completed transcoding job.
1101//
1102// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1103// with awserr.Error's Code and Message methods to get detailed information about
1104// the error.
1105//
1106// See the AWS API reference guide for AWS Elemental MediaConvert's
1107// API operation GetJob for usage and error information.
1108//
1109// Returned Error Types:
1110//   * BadRequestException
1111//
1112//   * InternalServerErrorException
1113//
1114//   * ForbiddenException
1115//
1116//   * NotFoundException
1117//
1118//   * TooManyRequestsException
1119//
1120//   * ConflictException
1121//
1122// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJob
1123func (c *MediaConvert) GetJob(input *GetJobInput) (*GetJobOutput, error) {
1124	req, out := c.GetJobRequest(input)
1125	return out, req.Send()
1126}
1127
1128// GetJobWithContext is the same as GetJob with the addition of
1129// the ability to pass a context and additional request options.
1130//
1131// See GetJob for details on how to use this API operation.
1132//
1133// The context must be non-nil and will be used for request cancellation. If
1134// the context is nil a panic will occur. In the future the SDK may create
1135// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1136// for more information on using Contexts.
1137func (c *MediaConvert) GetJobWithContext(ctx aws.Context, input *GetJobInput, opts ...request.Option) (*GetJobOutput, error) {
1138	req, out := c.GetJobRequest(input)
1139	req.SetContext(ctx)
1140	req.ApplyOptions(opts...)
1141	return out, req.Send()
1142}
1143
// opGetJobTemplate is the service operation name for GetJobTemplate.
const opGetJobTemplate = "GetJobTemplate"
1145
1146// GetJobTemplateRequest generates a "aws/request.Request" representing the
1147// client's request for the GetJobTemplate operation. The "output" return
1148// value will be populated with the request's response once the request completes
1149// successfully.
1150//
1151// Use "Send" method on the returned Request to send the API call to the service.
1152// the "output" return value is not valid until after Send returns without error.
1153//
1154// See GetJobTemplate for more information on using the GetJobTemplate
1155// API call, and error handling.
1156//
1157// This method is useful when you want to inject custom logic or configuration
1158// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1159//
1160//
1161//    // Example sending a request using the GetJobTemplateRequest method.
1162//    req, resp := client.GetJobTemplateRequest(params)
1163//
1164//    err := req.Send()
1165//    if err == nil { // resp is now filled
1166//        fmt.Println(resp)
1167//    }
1168//
1169// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJobTemplate
1170func (c *MediaConvert) GetJobTemplateRequest(input *GetJobTemplateInput) (req *request.Request, output *GetJobTemplateOutput) {
1171	op := &request.Operation{
1172		Name:       opGetJobTemplate,
1173		HTTPMethod: "GET",
1174		HTTPPath:   "/2017-08-29/jobTemplates/{name}",
1175	}
1176
1177	if input == nil {
1178		input = &GetJobTemplateInput{}
1179	}
1180
1181	output = &GetJobTemplateOutput{}
1182	req = c.newRequest(op, input, output)
1183	return
1184}
1185
1186// GetJobTemplate API operation for AWS Elemental MediaConvert.
1187//
1188// Retrieve the JSON for a specific job template.
1189//
1190// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1191// with awserr.Error's Code and Message methods to get detailed information about
1192// the error.
1193//
1194// See the AWS API reference guide for AWS Elemental MediaConvert's
1195// API operation GetJobTemplate for usage and error information.
1196//
1197// Returned Error Types:
1198//   * BadRequestException
1199//
1200//   * InternalServerErrorException
1201//
1202//   * ForbiddenException
1203//
1204//   * NotFoundException
1205//
1206//   * TooManyRequestsException
1207//
1208//   * ConflictException
1209//
1210// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJobTemplate
1211func (c *MediaConvert) GetJobTemplate(input *GetJobTemplateInput) (*GetJobTemplateOutput, error) {
1212	req, out := c.GetJobTemplateRequest(input)
1213	return out, req.Send()
1214}
1215
1216// GetJobTemplateWithContext is the same as GetJobTemplate with the addition of
1217// the ability to pass a context and additional request options.
1218//
1219// See GetJobTemplate for details on how to use this API operation.
1220//
1221// The context must be non-nil and will be used for request cancellation. If
1222// the context is nil a panic will occur. In the future the SDK may create
1223// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1224// for more information on using Contexts.
1225func (c *MediaConvert) GetJobTemplateWithContext(ctx aws.Context, input *GetJobTemplateInput, opts ...request.Option) (*GetJobTemplateOutput, error) {
1226	req, out := c.GetJobTemplateRequest(input)
1227	req.SetContext(ctx)
1228	req.ApplyOptions(opts...)
1229	return out, req.Send()
1230}
1231
// opGetPreset is the service operation name for GetPreset.
const opGetPreset = "GetPreset"
1233
1234// GetPresetRequest generates a "aws/request.Request" representing the
1235// client's request for the GetPreset operation. The "output" return
1236// value will be populated with the request's response once the request completes
1237// successfully.
1238//
1239// Use "Send" method on the returned Request to send the API call to the service.
1240// the "output" return value is not valid until after Send returns without error.
1241//
1242// See GetPreset for more information on using the GetPreset
1243// API call, and error handling.
1244//
1245// This method is useful when you want to inject custom logic or configuration
1246// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1247//
1248//
1249//    // Example sending a request using the GetPresetRequest method.
1250//    req, resp := client.GetPresetRequest(params)
1251//
1252//    err := req.Send()
1253//    if err == nil { // resp is now filled
1254//        fmt.Println(resp)
1255//    }
1256//
1257// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetPreset
1258func (c *MediaConvert) GetPresetRequest(input *GetPresetInput) (req *request.Request, output *GetPresetOutput) {
1259	op := &request.Operation{
1260		Name:       opGetPreset,
1261		HTTPMethod: "GET",
1262		HTTPPath:   "/2017-08-29/presets/{name}",
1263	}
1264
1265	if input == nil {
1266		input = &GetPresetInput{}
1267	}
1268
1269	output = &GetPresetOutput{}
1270	req = c.newRequest(op, input, output)
1271	return
1272}
1273
1274// GetPreset API operation for AWS Elemental MediaConvert.
1275//
1276// Retrieve the JSON for a specific preset.
1277//
1278// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1279// with awserr.Error's Code and Message methods to get detailed information about
1280// the error.
1281//
1282// See the AWS API reference guide for AWS Elemental MediaConvert's
1283// API operation GetPreset for usage and error information.
1284//
1285// Returned Error Types:
1286//   * BadRequestException
1287//
1288//   * InternalServerErrorException
1289//
1290//   * ForbiddenException
1291//
1292//   * NotFoundException
1293//
1294//   * TooManyRequestsException
1295//
1296//   * ConflictException
1297//
1298// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetPreset
1299func (c *MediaConvert) GetPreset(input *GetPresetInput) (*GetPresetOutput, error) {
1300	req, out := c.GetPresetRequest(input)
1301	return out, req.Send()
1302}
1303
1304// GetPresetWithContext is the same as GetPreset with the addition of
1305// the ability to pass a context and additional request options.
1306//
1307// See GetPreset for details on how to use this API operation.
1308//
1309// The context must be non-nil and will be used for request cancellation. If
1310// the context is nil a panic will occur. In the future the SDK may create
1311// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1312// for more information on using Contexts.
1313func (c *MediaConvert) GetPresetWithContext(ctx aws.Context, input *GetPresetInput, opts ...request.Option) (*GetPresetOutput, error) {
1314	req, out := c.GetPresetRequest(input)
1315	req.SetContext(ctx)
1316	req.ApplyOptions(opts...)
1317	return out, req.Send()
1318}
1319
// opGetQueue is the service operation name for GetQueue.
const opGetQueue = "GetQueue"
1321
1322// GetQueueRequest generates a "aws/request.Request" representing the
1323// client's request for the GetQueue operation. The "output" return
1324// value will be populated with the request's response once the request completes
1325// successfully.
1326//
1327// Use "Send" method on the returned Request to send the API call to the service.
1328// the "output" return value is not valid until after Send returns without error.
1329//
1330// See GetQueue for more information on using the GetQueue
1331// API call, and error handling.
1332//
1333// This method is useful when you want to inject custom logic or configuration
1334// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1335//
1336//
1337//    // Example sending a request using the GetQueueRequest method.
1338//    req, resp := client.GetQueueRequest(params)
1339//
1340//    err := req.Send()
1341//    if err == nil { // resp is now filled
1342//        fmt.Println(resp)
1343//    }
1344//
1345// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetQueue
1346func (c *MediaConvert) GetQueueRequest(input *GetQueueInput) (req *request.Request, output *GetQueueOutput) {
1347	op := &request.Operation{
1348		Name:       opGetQueue,
1349		HTTPMethod: "GET",
1350		HTTPPath:   "/2017-08-29/queues/{name}",
1351	}
1352
1353	if input == nil {
1354		input = &GetQueueInput{}
1355	}
1356
1357	output = &GetQueueOutput{}
1358	req = c.newRequest(op, input, output)
1359	return
1360}
1361
1362// GetQueue API operation for AWS Elemental MediaConvert.
1363//
1364// Retrieve the JSON for a specific queue.
1365//
1366// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1367// with awserr.Error's Code and Message methods to get detailed information about
1368// the error.
1369//
1370// See the AWS API reference guide for AWS Elemental MediaConvert's
1371// API operation GetQueue for usage and error information.
1372//
1373// Returned Error Types:
1374//   * BadRequestException
1375//
1376//   * InternalServerErrorException
1377//
1378//   * ForbiddenException
1379//
1380//   * NotFoundException
1381//
1382//   * TooManyRequestsException
1383//
1384//   * ConflictException
1385//
1386// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetQueue
1387func (c *MediaConvert) GetQueue(input *GetQueueInput) (*GetQueueOutput, error) {
1388	req, out := c.GetQueueRequest(input)
1389	return out, req.Send()
1390}
1391
1392// GetQueueWithContext is the same as GetQueue with the addition of
1393// the ability to pass a context and additional request options.
1394//
1395// See GetQueue for details on how to use this API operation.
1396//
1397// The context must be non-nil and will be used for request cancellation. If
1398// the context is nil a panic will occur. In the future the SDK may create
1399// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1400// for more information on using Contexts.
1401func (c *MediaConvert) GetQueueWithContext(ctx aws.Context, input *GetQueueInput, opts ...request.Option) (*GetQueueOutput, error) {
1402	req, out := c.GetQueueRequest(input)
1403	req.SetContext(ctx)
1404	req.ApplyOptions(opts...)
1405	return out, req.Send()
1406}
1407
// opListJobTemplates is the service operation name for ListJobTemplates.
const opListJobTemplates = "ListJobTemplates"
1409
1410// ListJobTemplatesRequest generates a "aws/request.Request" representing the
1411// client's request for the ListJobTemplates operation. The "output" return
1412// value will be populated with the request's response once the request completes
1413// successfully.
1414//
1415// Use "Send" method on the returned Request to send the API call to the service.
1416// the "output" return value is not valid until after Send returns without error.
1417//
1418// See ListJobTemplates for more information on using the ListJobTemplates
1419// API call, and error handling.
1420//
1421// This method is useful when you want to inject custom logic or configuration
1422// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1423//
1424//
1425//    // Example sending a request using the ListJobTemplatesRequest method.
1426//    req, resp := client.ListJobTemplatesRequest(params)
1427//
1428//    err := req.Send()
1429//    if err == nil { // resp is now filled
1430//        fmt.Println(resp)
1431//    }
1432//
1433// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobTemplates
1434func (c *MediaConvert) ListJobTemplatesRequest(input *ListJobTemplatesInput) (req *request.Request, output *ListJobTemplatesOutput) {
1435	op := &request.Operation{
1436		Name:       opListJobTemplates,
1437		HTTPMethod: "GET",
1438		HTTPPath:   "/2017-08-29/jobTemplates",
1439		Paginator: &request.Paginator{
1440			InputTokens:     []string{"NextToken"},
1441			OutputTokens:    []string{"NextToken"},
1442			LimitToken:      "MaxResults",
1443			TruncationToken: "",
1444		},
1445	}
1446
1447	if input == nil {
1448		input = &ListJobTemplatesInput{}
1449	}
1450
1451	output = &ListJobTemplatesOutput{}
1452	req = c.newRequest(op, input, output)
1453	return
1454}
1455
1456// ListJobTemplates API operation for AWS Elemental MediaConvert.
1457//
1458// Retrieve a JSON array of up to twenty of your job templates. This will return
1459// the templates themselves, not just a list of them. To retrieve the next twenty
// templates, use the nextToken string returned with the array.
1461//
1462// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1463// with awserr.Error's Code and Message methods to get detailed information about
1464// the error.
1465//
1466// See the AWS API reference guide for AWS Elemental MediaConvert's
1467// API operation ListJobTemplates for usage and error information.
1468//
1469// Returned Error Types:
1470//   * BadRequestException
1471//
1472//   * InternalServerErrorException
1473//
1474//   * ForbiddenException
1475//
1476//   * NotFoundException
1477//
1478//   * TooManyRequestsException
1479//
1480//   * ConflictException
1481//
1482// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobTemplates
1483func (c *MediaConvert) ListJobTemplates(input *ListJobTemplatesInput) (*ListJobTemplatesOutput, error) {
1484	req, out := c.ListJobTemplatesRequest(input)
1485	return out, req.Send()
1486}
1487
1488// ListJobTemplatesWithContext is the same as ListJobTemplates with the addition of
1489// the ability to pass a context and additional request options.
1490//
1491// See ListJobTemplates for details on how to use this API operation.
1492//
1493// The context must be non-nil and will be used for request cancellation. If
1494// the context is nil a panic will occur. In the future the SDK may create
1495// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1496// for more information on using Contexts.
1497func (c *MediaConvert) ListJobTemplatesWithContext(ctx aws.Context, input *ListJobTemplatesInput, opts ...request.Option) (*ListJobTemplatesOutput, error) {
1498	req, out := c.ListJobTemplatesRequest(input)
1499	req.SetContext(ctx)
1500	req.ApplyOptions(opts...)
1501	return out, req.Send()
1502}
1503
1504// ListJobTemplatesPages iterates over the pages of a ListJobTemplates operation,
1505// calling the "fn" function with the response data for each page. To stop
1506// iterating, return false from the fn function.
1507//
1508// See ListJobTemplates method for more information on how to use this operation.
1509//
1510// Note: This operation can generate multiple requests to a service.
1511//
1512//    // Example iterating over at most 3 pages of a ListJobTemplates operation.
1513//    pageNum := 0
1514//    err := client.ListJobTemplatesPages(params,
1515//        func(page *mediaconvert.ListJobTemplatesOutput, lastPage bool) bool {
1516//            pageNum++
1517//            fmt.Println(page)
1518//            return pageNum <= 3
1519//        })
1520//
1521func (c *MediaConvert) ListJobTemplatesPages(input *ListJobTemplatesInput, fn func(*ListJobTemplatesOutput, bool) bool) error {
1522	return c.ListJobTemplatesPagesWithContext(aws.BackgroundContext(), input, fn)
1523}
1524
1525// ListJobTemplatesPagesWithContext same as ListJobTemplatesPages except
1526// it takes a Context and allows setting request options on the pages.
1527//
1528// The context must be non-nil and will be used for request cancellation. If
1529// the context is nil a panic will occur. In the future the SDK may create
1530// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1531// for more information on using Contexts.
1532func (c *MediaConvert) ListJobTemplatesPagesWithContext(ctx aws.Context, input *ListJobTemplatesInput, fn func(*ListJobTemplatesOutput, bool) bool, opts ...request.Option) error {
1533	p := request.Pagination{
1534		NewRequest: func() (*request.Request, error) {
1535			var inCpy *ListJobTemplatesInput
1536			if input != nil {
1537				tmp := *input
1538				inCpy = &tmp
1539			}
1540			req, _ := c.ListJobTemplatesRequest(inCpy)
1541			req.SetContext(ctx)
1542			req.ApplyOptions(opts...)
1543			return req, nil
1544		},
1545	}
1546
1547	for p.Next() {
1548		if !fn(p.Page().(*ListJobTemplatesOutput), !p.HasNextPage()) {
1549			break
1550		}
1551	}
1552
1553	return p.Err()
1554}
1555
// opListJobs is the service operation name for ListJobs.
const opListJobs = "ListJobs"
1557
1558// ListJobsRequest generates a "aws/request.Request" representing the
1559// client's request for the ListJobs operation. The "output" return
1560// value will be populated with the request's response once the request completes
1561// successfully.
1562//
1563// Use "Send" method on the returned Request to send the API call to the service.
1564// the "output" return value is not valid until after Send returns without error.
1565//
1566// See ListJobs for more information on using the ListJobs
1567// API call, and error handling.
1568//
1569// This method is useful when you want to inject custom logic or configuration
1570// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1571//
1572//
1573//    // Example sending a request using the ListJobsRequest method.
1574//    req, resp := client.ListJobsRequest(params)
1575//
1576//    err := req.Send()
1577//    if err == nil { // resp is now filled
1578//        fmt.Println(resp)
1579//    }
1580//
1581// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobs
1582func (c *MediaConvert) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) {
1583	op := &request.Operation{
1584		Name:       opListJobs,
1585		HTTPMethod: "GET",
1586		HTTPPath:   "/2017-08-29/jobs",
1587		Paginator: &request.Paginator{
1588			InputTokens:     []string{"NextToken"},
1589			OutputTokens:    []string{"NextToken"},
1590			LimitToken:      "MaxResults",
1591			TruncationToken: "",
1592		},
1593	}
1594
1595	if input == nil {
1596		input = &ListJobsInput{}
1597	}
1598
1599	output = &ListJobsOutput{}
1600	req = c.newRequest(op, input, output)
1601	return
1602}
1603
1604// ListJobs API operation for AWS Elemental MediaConvert.
1605//
1606// Retrieve a JSON array of up to twenty of your most recently created jobs.
1607// This array includes in-process, completed, and errored jobs. This will return
1608// the jobs themselves, not just a list of the jobs. To retrieve the twenty
1609// next most recent jobs, use the nextToken string returned with the array.
1610//
1611// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1612// with awserr.Error's Code and Message methods to get detailed information about
1613// the error.
1614//
1615// See the AWS API reference guide for AWS Elemental MediaConvert's
1616// API operation ListJobs for usage and error information.
1617//
1618// Returned Error Types:
1619//   * BadRequestException
1620//
1621//   * InternalServerErrorException
1622//
1623//   * ForbiddenException
1624//
1625//   * NotFoundException
1626//
1627//   * TooManyRequestsException
1628//
1629//   * ConflictException
1630//
1631// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobs
1632func (c *MediaConvert) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) {
1633	req, out := c.ListJobsRequest(input)
1634	return out, req.Send()
1635}
1636
1637// ListJobsWithContext is the same as ListJobs with the addition of
1638// the ability to pass a context and additional request options.
1639//
1640// See ListJobs for details on how to use this API operation.
1641//
1642// The context must be non-nil and will be used for request cancellation. If
1643// the context is nil a panic will occur. In the future the SDK may create
1644// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1645// for more information on using Contexts.
func (c *MediaConvert) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, opts ...request.Option) (*ListJobsOutput, error) {
	req, out := c.ListJobsRequest(input)
	// Attach the caller's context (must be non-nil) and any per-request
	// options before sending.
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
1652
1653// ListJobsPages iterates over the pages of a ListJobs operation,
1654// calling the "fn" function with the response data for each page. To stop
1655// iterating, return false from the fn function.
1656//
1657// See ListJobs method for more information on how to use this operation.
1658//
1659// Note: This operation can generate multiple requests to a service.
1660//
1661//    // Example iterating over at most 3 pages of a ListJobs operation.
1662//    pageNum := 0
1663//    err := client.ListJobsPages(params,
1664//        func(page *mediaconvert.ListJobsOutput, lastPage bool) bool {
1665//            pageNum++
1666//            fmt.Println(page)
1667//            return pageNum <= 3
1668//        })
1669//
func (c *MediaConvert) ListJobsPages(input *ListJobsInput, fn func(*ListJobsOutput, bool) bool) error {
	// Delegates to the context-aware variant with a background context.
	return c.ListJobsPagesWithContext(aws.BackgroundContext(), input, fn)
}
1673
1674// ListJobsPagesWithContext same as ListJobsPages except
1675// it takes a Context and allows setting request options on the pages.
1676//
1677// The context must be non-nil and will be used for request cancellation. If
1678// the context is nil a panic will occur. In the future the SDK may create
1679// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1680// for more information on using Contexts.
func (c *MediaConvert) ListJobsPagesWithContext(ctx aws.Context, input *ListJobsInput, fn func(*ListJobsOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		// NewRequest builds a fresh request for each page. The input is
		// shallow-copied so the paginator's token updates don't mutate
		// the caller's struct.
		NewRequest: func() (*request.Request, error) {
			var inCpy *ListJobsInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.ListJobsRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	// Invoke fn once per page; a false return stops iteration early. The
	// second argument reports whether this is the last page.
	for p.Next() {
		if !fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) {
			break
		}
	}

	return p.Err()
}
1704
1705const opListPresets = "ListPresets"
1706
1707// ListPresetsRequest generates a "aws/request.Request" representing the
1708// client's request for the ListPresets operation. The "output" return
1709// value will be populated with the request's response once the request completes
1710// successfully.
1711//
1712// Use "Send" method on the returned Request to send the API call to the service.
1713// the "output" return value is not valid until after Send returns without error.
1714//
1715// See ListPresets for more information on using the ListPresets
1716// API call, and error handling.
1717//
1718// This method is useful when you want to inject custom logic or configuration
1719// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1720//
1721//
1722//    // Example sending a request using the ListPresetsRequest method.
1723//    req, resp := client.ListPresetsRequest(params)
1724//
1725//    err := req.Send()
1726//    if err == nil { // resp is now filled
1727//        fmt.Println(resp)
1728//    }
1729//
1730// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListPresets
func (c *MediaConvert) ListPresetsRequest(input *ListPresetsInput) (req *request.Request, output *ListPresetsOutput) {
	op := &request.Operation{
		Name:       opListPresets,
		HTTPMethod: "GET",
		HTTPPath:   "/2017-08-29/presets",
		// Pagination metadata consumed by request.Pagination (see
		// ListPresetsPages*): NextToken continues iteration and
		// MaxResults bounds the page size.
		Paginator: &request.Paginator{
			InputTokens:     []string{"NextToken"},
			OutputTokens:    []string{"NextToken"},
			LimitToken:      "MaxResults",
			TruncationToken: "",
		},
	}

	// Substitute an empty input so newRequest never receives nil params.
	if input == nil {
		input = &ListPresetsInput{}
	}

	output = &ListPresetsOutput{}
	req = c.newRequest(op, input, output)
	return
}
1752
1753// ListPresets API operation for AWS Elemental MediaConvert.
1754//
1755// Retrieve a JSON array of up to twenty of your presets. This will return the
1756// presets themselves, not just a list of them. To retrieve the next twenty
1757// presets, use the nextToken string returned with the array.
1758//
1759// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1760// with awserr.Error's Code and Message methods to get detailed information about
1761// the error.
1762//
1763// See the AWS API reference guide for AWS Elemental MediaConvert's
1764// API operation ListPresets for usage and error information.
1765//
1766// Returned Error Types:
1767//   * BadRequestException
1768//
1769//   * InternalServerErrorException
1770//
1771//   * ForbiddenException
1772//
1773//   * NotFoundException
1774//
1775//   * TooManyRequestsException
1776//
1777//   * ConflictException
1778//
1779// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListPresets
func (c *MediaConvert) ListPresets(input *ListPresetsInput) (*ListPresetsOutput, error) {
	// Build the request and send it synchronously; out is populated only
	// when Send returns a nil error.
	req, out := c.ListPresetsRequest(input)
	return out, req.Send()
}
1784
1785// ListPresetsWithContext is the same as ListPresets with the addition of
1786// the ability to pass a context and additional request options.
1787//
1788// See ListPresets for details on how to use this API operation.
1789//
1790// The context must be non-nil and will be used for request cancellation. If
1791// the context is nil a panic will occur. In the future the SDK may create
1792// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1793// for more information on using Contexts.
func (c *MediaConvert) ListPresetsWithContext(ctx aws.Context, input *ListPresetsInput, opts ...request.Option) (*ListPresetsOutput, error) {
	req, out := c.ListPresetsRequest(input)
	// Attach the caller's context (must be non-nil) and any per-request
	// options before sending.
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
1800
1801// ListPresetsPages iterates over the pages of a ListPresets operation,
1802// calling the "fn" function with the response data for each page. To stop
1803// iterating, return false from the fn function.
1804//
1805// See ListPresets method for more information on how to use this operation.
1806//
1807// Note: This operation can generate multiple requests to a service.
1808//
1809//    // Example iterating over at most 3 pages of a ListPresets operation.
1810//    pageNum := 0
1811//    err := client.ListPresetsPages(params,
1812//        func(page *mediaconvert.ListPresetsOutput, lastPage bool) bool {
1813//            pageNum++
1814//            fmt.Println(page)
1815//            return pageNum <= 3
1816//        })
1817//
func (c *MediaConvert) ListPresetsPages(input *ListPresetsInput, fn func(*ListPresetsOutput, bool) bool) error {
	// Delegates to the context-aware variant with a background context.
	return c.ListPresetsPagesWithContext(aws.BackgroundContext(), input, fn)
}
1821
1822// ListPresetsPagesWithContext same as ListPresetsPages except
1823// it takes a Context and allows setting request options on the pages.
1824//
1825// The context must be non-nil and will be used for request cancellation. If
1826// the context is nil a panic will occur. In the future the SDK may create
1827// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1828// for more information on using Contexts.
func (c *MediaConvert) ListPresetsPagesWithContext(ctx aws.Context, input *ListPresetsInput, fn func(*ListPresetsOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		// NewRequest builds a fresh request for each page. The input is
		// shallow-copied so the paginator's token updates don't mutate
		// the caller's struct.
		NewRequest: func() (*request.Request, error) {
			var inCpy *ListPresetsInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.ListPresetsRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	// Invoke fn once per page; a false return stops iteration early. The
	// second argument reports whether this is the last page.
	for p.Next() {
		if !fn(p.Page().(*ListPresetsOutput), !p.HasNextPage()) {
			break
		}
	}

	return p.Err()
}
1852
1853const opListQueues = "ListQueues"
1854
1855// ListQueuesRequest generates a "aws/request.Request" representing the
1856// client's request for the ListQueues operation. The "output" return
1857// value will be populated with the request's response once the request completes
1858// successfully.
1859//
1860// Use "Send" method on the returned Request to send the API call to the service.
1861// the "output" return value is not valid until after Send returns without error.
1862//
1863// See ListQueues for more information on using the ListQueues
1864// API call, and error handling.
1865//
1866// This method is useful when you want to inject custom logic or configuration
1867// into the SDK's request lifecycle. Such as custom headers, or retry logic.
1868//
1869//
1870//    // Example sending a request using the ListQueuesRequest method.
1871//    req, resp := client.ListQueuesRequest(params)
1872//
1873//    err := req.Send()
1874//    if err == nil { // resp is now filled
1875//        fmt.Println(resp)
1876//    }
1877//
1878// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListQueues
func (c *MediaConvert) ListQueuesRequest(input *ListQueuesInput) (req *request.Request, output *ListQueuesOutput) {
	op := &request.Operation{
		Name:       opListQueues,
		HTTPMethod: "GET",
		HTTPPath:   "/2017-08-29/queues",
		// Pagination metadata consumed by request.Pagination (see
		// ListQueuesPages*): NextToken continues iteration and
		// MaxResults bounds the page size.
		Paginator: &request.Paginator{
			InputTokens:     []string{"NextToken"},
			OutputTokens:    []string{"NextToken"},
			LimitToken:      "MaxResults",
			TruncationToken: "",
		},
	}

	// Substitute an empty input so newRequest never receives nil params.
	if input == nil {
		input = &ListQueuesInput{}
	}

	output = &ListQueuesOutput{}
	req = c.newRequest(op, input, output)
	return
}
1900
1901// ListQueues API operation for AWS Elemental MediaConvert.
1902//
1903// Retrieve a JSON array of up to twenty of your queues. This will return the
1904// queues themselves, not just a list of them. To retrieve the next twenty queues,
1905// use the nextToken string returned with the array.
1906//
1907// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
1908// with awserr.Error's Code and Message methods to get detailed information about
1909// the error.
1910//
1911// See the AWS API reference guide for AWS Elemental MediaConvert's
1912// API operation ListQueues for usage and error information.
1913//
1914// Returned Error Types:
1915//   * BadRequestException
1916//
1917//   * InternalServerErrorException
1918//
1919//   * ForbiddenException
1920//
1921//   * NotFoundException
1922//
1923//   * TooManyRequestsException
1924//
1925//   * ConflictException
1926//
1927// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListQueues
func (c *MediaConvert) ListQueues(input *ListQueuesInput) (*ListQueuesOutput, error) {
	// Build the request and send it synchronously; out is populated only
	// when Send returns a nil error.
	req, out := c.ListQueuesRequest(input)
	return out, req.Send()
}
1932
1933// ListQueuesWithContext is the same as ListQueues with the addition of
1934// the ability to pass a context and additional request options.
1935//
1936// See ListQueues for details on how to use this API operation.
1937//
1938// The context must be non-nil and will be used for request cancellation. If
1939// the context is nil a panic will occur. In the future the SDK may create
1940// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1941// for more information on using Contexts.
func (c *MediaConvert) ListQueuesWithContext(ctx aws.Context, input *ListQueuesInput, opts ...request.Option) (*ListQueuesOutput, error) {
	req, out := c.ListQueuesRequest(input)
	// Attach the caller's context (must be non-nil) and any per-request
	// options before sending.
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
1948
1949// ListQueuesPages iterates over the pages of a ListQueues operation,
1950// calling the "fn" function with the response data for each page. To stop
1951// iterating, return false from the fn function.
1952//
1953// See ListQueues method for more information on how to use this operation.
1954//
1955// Note: This operation can generate multiple requests to a service.
1956//
1957//    // Example iterating over at most 3 pages of a ListQueues operation.
1958//    pageNum := 0
1959//    err := client.ListQueuesPages(params,
1960//        func(page *mediaconvert.ListQueuesOutput, lastPage bool) bool {
1961//            pageNum++
1962//            fmt.Println(page)
1963//            return pageNum <= 3
1964//        })
1965//
func (c *MediaConvert) ListQueuesPages(input *ListQueuesInput, fn func(*ListQueuesOutput, bool) bool) error {
	// Delegates to the context-aware variant with a background context.
	return c.ListQueuesPagesWithContext(aws.BackgroundContext(), input, fn)
}
1969
1970// ListQueuesPagesWithContext same as ListQueuesPages except
1971// it takes a Context and allows setting request options on the pages.
1972//
1973// The context must be non-nil and will be used for request cancellation. If
1974// the context is nil a panic will occur. In the future the SDK may create
1975// sub-contexts for http.Requests. See https://golang.org/pkg/context/
1976// for more information on using Contexts.
func (c *MediaConvert) ListQueuesPagesWithContext(ctx aws.Context, input *ListQueuesInput, fn func(*ListQueuesOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		// NewRequest builds a fresh request for each page. The input is
		// shallow-copied so the paginator's token updates don't mutate
		// the caller's struct.
		NewRequest: func() (*request.Request, error) {
			var inCpy *ListQueuesInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.ListQueuesRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	// Invoke fn once per page; a false return stops iteration early. The
	// second argument reports whether this is the last page.
	for p.Next() {
		if !fn(p.Page().(*ListQueuesOutput), !p.HasNextPage()) {
			break
		}
	}

	return p.Err()
}
2000
2001const opListTagsForResource = "ListTagsForResource"
2002
2003// ListTagsForResourceRequest generates a "aws/request.Request" representing the
2004// client's request for the ListTagsForResource operation. The "output" return
2005// value will be populated with the request's response once the request completes
2006// successfully.
2007//
2008// Use "Send" method on the returned Request to send the API call to the service.
2009// the "output" return value is not valid until after Send returns without error.
2010//
2011// See ListTagsForResource for more information on using the ListTagsForResource
2012// API call, and error handling.
2013//
2014// This method is useful when you want to inject custom logic or configuration
2015// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2016//
2017//
2018//    // Example sending a request using the ListTagsForResourceRequest method.
2019//    req, resp := client.ListTagsForResourceRequest(params)
2020//
2021//    err := req.Send()
2022//    if err == nil { // resp is now filled
2023//        fmt.Println(resp)
2024//    }
2025//
2026// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListTagsForResource
func (c *MediaConvert) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
	op := &request.Operation{
		Name:       opListTagsForResource,
		HTTPMethod: "GET",
		// {arn} is a URI path placeholder substituted from the input
		// when the request is marshaled.
		HTTPPath: "/2017-08-29/tags/{arn}",
	}

	// Substitute an empty input so newRequest never receives nil params.
	if input == nil {
		input = &ListTagsForResourceInput{}
	}

	output = &ListTagsForResourceOutput{}
	req = c.newRequest(op, input, output)
	return
}
2042
2043// ListTagsForResource API operation for AWS Elemental MediaConvert.
2044//
2045// Retrieve the tags for a MediaConvert resource.
2046//
2047// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2048// with awserr.Error's Code and Message methods to get detailed information about
2049// the error.
2050//
2051// See the AWS API reference guide for AWS Elemental MediaConvert's
2052// API operation ListTagsForResource for usage and error information.
2053//
2054// Returned Error Types:
2055//   * BadRequestException
2056//
2057//   * InternalServerErrorException
2058//
2059//   * ForbiddenException
2060//
2061//   * NotFoundException
2062//
2063//   * TooManyRequestsException
2064//
2065//   * ConflictException
2066//
2067// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListTagsForResource
func (c *MediaConvert) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) {
	// Build the request and send it synchronously; out is populated only
	// when Send returns a nil error.
	req, out := c.ListTagsForResourceRequest(input)
	return out, req.Send()
}
2072
2073// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of
2074// the ability to pass a context and additional request options.
2075//
2076// See ListTagsForResource for details on how to use this API operation.
2077//
2078// The context must be non-nil and will be used for request cancellation. If
2079// the context is nil a panic will occur. In the future the SDK may create
2080// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2081// for more information on using Contexts.
func (c *MediaConvert) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) {
	req, out := c.ListTagsForResourceRequest(input)
	// Attach the caller's context (must be non-nil) and any per-request
	// options before sending.
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
2088
2089const opTagResource = "TagResource"
2090
2091// TagResourceRequest generates a "aws/request.Request" representing the
2092// client's request for the TagResource operation. The "output" return
2093// value will be populated with the request's response once the request completes
2094// successfully.
2095//
2096// Use "Send" method on the returned Request to send the API call to the service.
2097// the "output" return value is not valid until after Send returns without error.
2098//
2099// See TagResource for more information on using the TagResource
2100// API call, and error handling.
2101//
2102// This method is useful when you want to inject custom logic or configuration
2103// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2104//
2105//
2106//    // Example sending a request using the TagResourceRequest method.
2107//    req, resp := client.TagResourceRequest(params)
2108//
2109//    err := req.Send()
2110//    if err == nil { // resp is now filled
2111//        fmt.Println(resp)
2112//    }
2113//
2114// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/TagResource
func (c *MediaConvert) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) {
	op := &request.Operation{
		Name:       opTagResource,
		HTTPMethod: "POST",
		HTTPPath:   "/2017-08-29/tags",
	}

	// Substitute an empty input so newRequest never receives nil params.
	if input == nil {
		input = &TagResourceInput{}
	}

	output = &TagResourceOutput{}
	req = c.newRequest(op, input, output)
	// Replace the default REST-JSON unmarshal handler with one that
	// discards the response body — presumably the output shape carries
	// no payload fields to decode.
	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}
2131
2132// TagResource API operation for AWS Elemental MediaConvert.
2133//
2134// Add tags to a MediaConvert queue, preset, or job template. For information
2135// about tagging, see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/tagging-resources.html
2136//
2137// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2138// with awserr.Error's Code and Message methods to get detailed information about
2139// the error.
2140//
2141// See the AWS API reference guide for AWS Elemental MediaConvert's
2142// API operation TagResource for usage and error information.
2143//
2144// Returned Error Types:
2145//   * BadRequestException
2146//
2147//   * InternalServerErrorException
2148//
2149//   * ForbiddenException
2150//
2151//   * NotFoundException
2152//
2153//   * TooManyRequestsException
2154//
2155//   * ConflictException
2156//
2157// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/TagResource
func (c *MediaConvert) TagResource(input *TagResourceInput) (*TagResourceOutput, error) {
	// Build the request and send it synchronously; out is populated only
	// when Send returns a nil error.
	req, out := c.TagResourceRequest(input)
	return out, req.Send()
}
2162
2163// TagResourceWithContext is the same as TagResource with the addition of
2164// the ability to pass a context and additional request options.
2165//
2166// See TagResource for details on how to use this API operation.
2167//
2168// The context must be non-nil and will be used for request cancellation. If
2169// the context is nil a panic will occur. In the future the SDK may create
2170// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2171// for more information on using Contexts.
func (c *MediaConvert) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) {
	req, out := c.TagResourceRequest(input)
	// Attach the caller's context (must be non-nil) and any per-request
	// options before sending.
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
2178
2179const opUntagResource = "UntagResource"
2180
2181// UntagResourceRequest generates a "aws/request.Request" representing the
2182// client's request for the UntagResource operation. The "output" return
2183// value will be populated with the request's response once the request completes
2184// successfully.
2185//
2186// Use "Send" method on the returned Request to send the API call to the service.
2187// the "output" return value is not valid until after Send returns without error.
2188//
2189// See UntagResource for more information on using the UntagResource
2190// API call, and error handling.
2191//
2192// This method is useful when you want to inject custom logic or configuration
2193// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2194//
2195//
2196//    // Example sending a request using the UntagResourceRequest method.
2197//    req, resp := client.UntagResourceRequest(params)
2198//
2199//    err := req.Send()
2200//    if err == nil { // resp is now filled
2201//        fmt.Println(resp)
2202//    }
2203//
2204// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UntagResource
func (c *MediaConvert) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) {
	op := &request.Operation{
		Name: opUntagResource,
		// Note: the MediaConvert API models UntagResource as PUT (not
		// DELETE); {arn} is a URI path placeholder substituted from the
		// input when the request is marshaled.
		HTTPMethod: "PUT",
		HTTPPath:   "/2017-08-29/tags/{arn}",
	}

	// Substitute an empty input so newRequest never receives nil params.
	if input == nil {
		input = &UntagResourceInput{}
	}

	output = &UntagResourceOutput{}
	req = c.newRequest(op, input, output)
	// Replace the default REST-JSON unmarshal handler with one that
	// discards the response body — presumably the output shape carries
	// no payload fields to decode.
	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}
2221
2222// UntagResource API operation for AWS Elemental MediaConvert.
2223//
2224// Remove tags from a MediaConvert queue, preset, or job template. For information
2225// about tagging, see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/tagging-resources.html
2226//
2227// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2228// with awserr.Error's Code and Message methods to get detailed information about
2229// the error.
2230//
2231// See the AWS API reference guide for AWS Elemental MediaConvert's
2232// API operation UntagResource for usage and error information.
2233//
2234// Returned Error Types:
2235//   * BadRequestException
2236//
2237//   * InternalServerErrorException
2238//
2239//   * ForbiddenException
2240//
2241//   * NotFoundException
2242//
2243//   * TooManyRequestsException
2244//
2245//   * ConflictException
2246//
2247// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UntagResource
func (c *MediaConvert) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) {
	// Build the request and send it synchronously; out is populated only
	// when Send returns a nil error.
	req, out := c.UntagResourceRequest(input)
	return out, req.Send()
}
2252
2253// UntagResourceWithContext is the same as UntagResource with the addition of
2254// the ability to pass a context and additional request options.
2255//
2256// See UntagResource for details on how to use this API operation.
2257//
2258// The context must be non-nil and will be used for request cancellation. If
2259// the context is nil a panic will occur. In the future the SDK may create
2260// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2261// for more information on using Contexts.
func (c *MediaConvert) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
	req, out := c.UntagResourceRequest(input)
	// Attach the caller's context (must be non-nil) and any per-request
	// options before sending.
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
2268
2269const opUpdateJobTemplate = "UpdateJobTemplate"
2270
2271// UpdateJobTemplateRequest generates a "aws/request.Request" representing the
2272// client's request for the UpdateJobTemplate operation. The "output" return
2273// value will be populated with the request's response once the request completes
2274// successfully.
2275//
2276// Use "Send" method on the returned Request to send the API call to the service.
2277// the "output" return value is not valid until after Send returns without error.
2278//
2279// See UpdateJobTemplate for more information on using the UpdateJobTemplate
2280// API call, and error handling.
2281//
2282// This method is useful when you want to inject custom logic or configuration
2283// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2284//
2285//
2286//    // Example sending a request using the UpdateJobTemplateRequest method.
2287//    req, resp := client.UpdateJobTemplateRequest(params)
2288//
2289//    err := req.Send()
2290//    if err == nil { // resp is now filled
2291//        fmt.Println(resp)
2292//    }
2293//
2294// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateJobTemplate
func (c *MediaConvert) UpdateJobTemplateRequest(input *UpdateJobTemplateInput) (req *request.Request, output *UpdateJobTemplateOutput) {
	op := &request.Operation{
		Name:       opUpdateJobTemplate,
		HTTPMethod: "PUT",
		// {name} is a URI path placeholder substituted from the input
		// when the request is marshaled.
		HTTPPath: "/2017-08-29/jobTemplates/{name}",
	}

	// Substitute an empty input so newRequest never receives nil params.
	if input == nil {
		input = &UpdateJobTemplateInput{}
	}

	output = &UpdateJobTemplateOutput{}
	req = c.newRequest(op, input, output)
	return
}
2310
2311// UpdateJobTemplate API operation for AWS Elemental MediaConvert.
2312//
2313// Modify one of your existing job templates.
2314//
2315// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2316// with awserr.Error's Code and Message methods to get detailed information about
2317// the error.
2318//
2319// See the AWS API reference guide for AWS Elemental MediaConvert's
2320// API operation UpdateJobTemplate for usage and error information.
2321//
2322// Returned Error Types:
2323//   * BadRequestException
2324//
2325//   * InternalServerErrorException
2326//
2327//   * ForbiddenException
2328//
2329//   * NotFoundException
2330//
2331//   * TooManyRequestsException
2332//
2333//   * ConflictException
2334//
2335// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateJobTemplate
func (c *MediaConvert) UpdateJobTemplate(input *UpdateJobTemplateInput) (*UpdateJobTemplateOutput, error) {
	// Build the request and send it synchronously; out is populated only
	// when Send returns a nil error.
	req, out := c.UpdateJobTemplateRequest(input)
	return out, req.Send()
}
2340
2341// UpdateJobTemplateWithContext is the same as UpdateJobTemplate with the addition of
2342// the ability to pass a context and additional request options.
2343//
2344// See UpdateJobTemplate for details on how to use this API operation.
2345//
2346// The context must be non-nil and will be used for request cancellation. If
2347// the context is nil a panic will occur. In the future the SDK may create
2348// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2349// for more information on using Contexts.
func (c *MediaConvert) UpdateJobTemplateWithContext(ctx aws.Context, input *UpdateJobTemplateInput, opts ...request.Option) (*UpdateJobTemplateOutput, error) {
	req, out := c.UpdateJobTemplateRequest(input)
	// Attach the caller's context (must be non-nil) and any per-request
	// options before sending.
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
2356
2357const opUpdatePreset = "UpdatePreset"
2358
2359// UpdatePresetRequest generates a "aws/request.Request" representing the
2360// client's request for the UpdatePreset operation. The "output" return
2361// value will be populated with the request's response once the request completes
2362// successfully.
2363//
2364// Use "Send" method on the returned Request to send the API call to the service.
2365// the "output" return value is not valid until after Send returns without error.
2366//
2367// See UpdatePreset for more information on using the UpdatePreset
2368// API call, and error handling.
2369//
2370// This method is useful when you want to inject custom logic or configuration
2371// into the SDK's request lifecycle. Such as custom headers, or retry logic.
2372//
2373//
2374//    // Example sending a request using the UpdatePresetRequest method.
2375//    req, resp := client.UpdatePresetRequest(params)
2376//
2377//    err := req.Send()
2378//    if err == nil { // resp is now filled
2379//        fmt.Println(resp)
2380//    }
2381//
2382// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdatePreset
func (c *MediaConvert) UpdatePresetRequest(input *UpdatePresetInput) (req *request.Request, output *UpdatePresetOutput) {
	op := &request.Operation{
		Name:       opUpdatePreset,
		HTTPMethod: "PUT",
		// {name} is a URI path placeholder substituted from the input
		// when the request is marshaled.
		HTTPPath: "/2017-08-29/presets/{name}",
	}

	// Substitute an empty input so newRequest never receives nil params.
	if input == nil {
		input = &UpdatePresetInput{}
	}

	output = &UpdatePresetOutput{}
	req = c.newRequest(op, input, output)
	return
}
2398
2399// UpdatePreset API operation for AWS Elemental MediaConvert.
2400//
2401// Modify one of your existing presets.
2402//
2403// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2404// with awserr.Error's Code and Message methods to get detailed information about
2405// the error.
2406//
2407// See the AWS API reference guide for AWS Elemental MediaConvert's
2408// API operation UpdatePreset for usage and error information.
2409//
2410// Returned Error Types:
2411//   * BadRequestException
2412//
2413//   * InternalServerErrorException
2414//
2415//   * ForbiddenException
2416//
2417//   * NotFoundException
2418//
2419//   * TooManyRequestsException
2420//
2421//   * ConflictException
2422//
2423// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdatePreset
func (c *MediaConvert) UpdatePreset(input *UpdatePresetInput) (*UpdatePresetOutput, error) {
	// Build the request and send it synchronously; out is populated only
	// when Send returns a nil error.
	req, out := c.UpdatePresetRequest(input)
	return out, req.Send()
}
2428
2429// UpdatePresetWithContext is the same as UpdatePreset with the addition of
2430// the ability to pass a context and additional request options.
2431//
2432// See UpdatePreset for details on how to use this API operation.
2433//
2434// The context must be non-nil and will be used for request cancellation. If
2435// the context is nil a panic will occur. In the future the SDK may create
2436// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2437// for more information on using Contexts.
func (c *MediaConvert) UpdatePresetWithContext(ctx aws.Context, input *UpdatePresetInput, opts ...request.Option) (*UpdatePresetOutput, error) {
	req, out := c.UpdatePresetRequest(input)
	// Attach the caller's context (must be non-nil) and any per-request
	// options before sending.
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
2444
const opUpdateQueue = "UpdateQueue"

// UpdateQueueRequest generates a "aws/request.Request" representing the
// client's request for the UpdateQueue operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateQueue for more information on using the UpdateQueue
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UpdateQueueRequest method.
//    req, resp := client.UpdateQueueRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateQueue
func (c *MediaConvert) UpdateQueueRequest(input *UpdateQueueInput) (req *request.Request, output *UpdateQueueOutput) {
	op := &request.Operation{
		Name:       opUpdateQueue,
		HTTPMethod: "PUT",
		HTTPPath:   "/2017-08-29/queues/{name}",
	}

	if input == nil {
		// Guard against a nil input so request construction never panics;
		// required-field validation will reject the empty input at Send time.
		input = &UpdateQueueInput{}
	}

	output = &UpdateQueueOutput{}
	req = c.newRequest(op, input, output)
	return
}
2486
// UpdateQueue API operation for AWS Elemental MediaConvert.
//
// Modify one of your existing queues.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Elemental MediaConvert's
// API operation UpdateQueue for usage and error information.
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateQueue
func (c *MediaConvert) UpdateQueue(input *UpdateQueueInput) (*UpdateQueueOutput, error) {
	req, out := c.UpdateQueueRequest(input)
	// Send executes the request synchronously; out is only populated when
	// the returned error is nil.
	return out, req.Send()
}
2516
// UpdateQueueWithContext is the same as UpdateQueue with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateQueue for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) UpdateQueueWithContext(ctx aws.Context, input *UpdateQueueInput, opts ...request.Option) (*UpdateQueueOutput, error) {
	req, out := c.UpdateQueueRequest(input)
	// Attach the caller's context and per-request options before sending so
	// cancellation and custom handlers apply to this call only.
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
2532
// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value AAC. The service accepts one of two mutually exclusive groups of
// AAC settings--VBR and CBR. To select one of these modes, set the value of
// Bitrate control mode (rateControlMode) to "VBR" or "CBR". In VBR mode, you
// control the audio quality with the setting VBR quality (vbrQuality). In CBR
// mode, you use the setting Bitrate (bitrate). Defaults and valid values depend
// on the rate control mode.
type AacSettings struct {
	_ struct{} `type:"structure"`

	// Choose BROADCASTER_MIXED_AD when the input contains pre-mixed main audio
	// + audio description (AD) as a stereo pair. The value for AudioType will be
	// set to 3, which signals to downstream systems that this stream contains "broadcaster
	// mixed AD". Note that the input received by the encoder must contain pre-mixed
	// audio; the encoder does not perform the mixing. When you choose BROADCASTER_MIXED_AD,
	// the encoder ignores any values you provide in AudioType and FollowInputAudioType.
	// Choose NORMAL when the input does not contain pre-mixed audio + audio description
	// (AD). In this case, the encoder will use any values you provide for AudioType
	// and FollowInputAudioType.
	AudioDescriptionBroadcasterMix *string `locationName:"audioDescriptionBroadcasterMix" type:"string" enum:"AacAudioDescriptionBroadcasterMix"`

	// Specify the average bitrate in bits per second. The set of valid values for
	// this setting is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000,
	// 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000,
	// 192000, 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000,
	// 768000, 896000, 1024000. The value you set is also constrained by the values
	// that you choose for Profile (codecProfile), Bitrate control mode (codingMode),
	// and Sample rate (sampleRate). Default values depend on Bitrate control mode
	// and Profile.
	Bitrate *int64 `locationName:"bitrate" min:"6000" type:"integer"`

	// AAC Profile.
	CodecProfile *string `locationName:"codecProfile" type:"string" enum:"AacCodecProfile"`

	// Mono (Audio Description), Mono, Stereo, or 5.1 channel layout. Valid values
	// depend on rate control mode and profile. "1.0 - Audio Description (Receiver
	// Mix)" setting receives a stereo description plus control track and emits
	// a mono AAC encode of the description track, with control data emitted in
	// the PES header as per ETSI TS 101 154 Annex E.
	CodingMode *string `locationName:"codingMode" type:"string" enum:"AacCodingMode"`

	// Rate Control Mode.
	RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"AacRateControlMode"`

	// Enables LATM/LOAS AAC output. Note that if you use LATM/LOAS AAC in an output,
	// you must choose "No container" for the output container.
	RawFormat *string `locationName:"rawFormat" type:"string" enum:"AacRawFormat"`

	// Sample rate in Hz. Valid values depend on rate control mode and profile.
	SampleRate *int64 `locationName:"sampleRate" min:"8000" type:"integer"`

	// Use MPEG-2 AAC instead of MPEG-4 AAC audio for raw or MPEG-2 Transport Stream
	// containers.
	Specification *string `locationName:"specification" type:"string" enum:"AacSpecification"`

	// VBR Quality Level - Only used if rate_control_mode is VBR.
	VbrQuality *string `locationName:"vbrQuality" type:"string" enum:"AacVbrQuality"`
}

// String returns the string representation
func (s AacSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AacSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Only the minimum-value constraints from the service model are checked
// client-side (they mirror the `min` struct tags above); enum string values
// are validated by the service.
func (s *AacSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AacSettings"}
	if s.Bitrate != nil && *s.Bitrate < 6000 {
		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 6000))
	}
	if s.SampleRate != nil && *s.SampleRate < 8000 {
		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 8000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAudioDescriptionBroadcasterMix sets the AudioDescriptionBroadcasterMix field's value.
func (s *AacSettings) SetAudioDescriptionBroadcasterMix(v string) *AacSettings {
	s.AudioDescriptionBroadcasterMix = &v
	return s
}

// SetBitrate sets the Bitrate field's value.
func (s *AacSettings) SetBitrate(v int64) *AacSettings {
	s.Bitrate = &v
	return s
}

// SetCodecProfile sets the CodecProfile field's value.
func (s *AacSettings) SetCodecProfile(v string) *AacSettings {
	s.CodecProfile = &v
	return s
}

// SetCodingMode sets the CodingMode field's value.
func (s *AacSettings) SetCodingMode(v string) *AacSettings {
	s.CodingMode = &v
	return s
}

// SetRateControlMode sets the RateControlMode field's value.
func (s *AacSettings) SetRateControlMode(v string) *AacSettings {
	s.RateControlMode = &v
	return s
}

// SetRawFormat sets the RawFormat field's value.
func (s *AacSettings) SetRawFormat(v string) *AacSettings {
	s.RawFormat = &v
	return s
}

// SetSampleRate sets the SampleRate field's value.
func (s *AacSettings) SetSampleRate(v int64) *AacSettings {
	s.SampleRate = &v
	return s
}

// SetSpecification sets the Specification field's value.
func (s *AacSettings) SetSpecification(v string) *AacSettings {
	s.Specification = &v
	return s
}

// SetVbrQuality sets the VbrQuality field's value.
func (s *AacSettings) SetVbrQuality(v string) *AacSettings {
	s.VbrQuality = &v
	return s
}
2671
// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value AC3.
type Ac3Settings struct {
	_ struct{} `type:"structure"`

	// Specify the average bitrate in bits per second. Valid bitrates depend on
	// the coding mode.
	Bitrate *int64 `locationName:"bitrate" min:"64000" type:"integer"`

	// Specify the bitstream mode for the AC-3 stream that the encoder emits. For
	// more information about the AC3 bitstream mode, see ATSC A/52-2012 (Annex
	// E).
	BitstreamMode *string `locationName:"bitstreamMode" type:"string" enum:"Ac3BitstreamMode"`

	// Dolby Digital coding mode. Determines number of channels.
	CodingMode *string `locationName:"codingMode" type:"string" enum:"Ac3CodingMode"`

	// Sets the dialnorm for the output. If blank and input audio is Dolby Digital,
	// dialnorm will be passed through.
	Dialnorm *int64 `locationName:"dialnorm" min:"1" type:"integer"`

	// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
	// uses when encoding the metadata in the Dolby Digital stream for the line
	// operating mode. Related setting: When you use this setting, MediaConvert
	// ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile).
	// For information about the Dolby Digital DRC operating modes and profiles,
	// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
	DynamicRangeCompressionLine *string `locationName:"dynamicRangeCompressionLine" type:"string" enum:"Ac3DynamicRangeCompressionLine"`

	// When you want to add Dolby dynamic range compression (DRC) signaling to your
	// output stream, we recommend that you use the mode-specific settings instead
	// of Dynamic range compression profile (DynamicRangeCompressionProfile). The
	// mode-specific settings are Dynamic range compression profile, line mode (dynamicRangeCompressionLine)
	// and Dynamic range compression profile, RF mode (dynamicRangeCompressionRf).
	// Note that when you specify values for all three settings, MediaConvert ignores
	// the value of this setting in favor of the mode-specific settings. If you
	// do use this setting instead of the mode-specific settings, choose None (NONE)
	// to leave out DRC signaling. Keep the default Film standard (FILM_STANDARD)
	// to set the profile to Dolby's film standard profile for all operating modes.
	DynamicRangeCompressionProfile *string `locationName:"dynamicRangeCompressionProfile" type:"string" enum:"Ac3DynamicRangeCompressionProfile"`

	// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
	// uses when encoding the metadata in the Dolby Digital stream for the RF operating
	// mode. Related setting: When you use this setting, MediaConvert ignores any
	// value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile).
	// For information about the Dolby Digital DRC operating modes and profiles,
	// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
	DynamicRangeCompressionRf *string `locationName:"dynamicRangeCompressionRf" type:"string" enum:"Ac3DynamicRangeCompressionRf"`

	// Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only
	// valid with 3_2_LFE coding mode.
	LfeFilter *string `locationName:"lfeFilter" type:"string" enum:"Ac3LfeFilter"`

	// When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+,
	// or DolbyE decoder that supplied this audio data. If audio was not supplied
	// from one of these streams, then the static metadata settings will be used.
	MetadataControl *string `locationName:"metadataControl" type:"string" enum:"Ac3MetadataControl"`

	// This value is always 48000. It represents the sample rate in Hz.
	SampleRate *int64 `locationName:"sampleRate" min:"48000" type:"integer"`
}

// String returns the string representation
func (s Ac3Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Ac3Settings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Only the minimum-value constraints from the service model are checked
// client-side (they mirror the `min` struct tags above); enum string values
// are validated by the service.
func (s *Ac3Settings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Ac3Settings"}
	if s.Bitrate != nil && *s.Bitrate < 64000 {
		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 64000))
	}
	if s.Dialnorm != nil && *s.Dialnorm < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Dialnorm", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 48000 {
		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 48000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBitrate sets the Bitrate field's value.
func (s *Ac3Settings) SetBitrate(v int64) *Ac3Settings {
	s.Bitrate = &v
	return s
}

// SetBitstreamMode sets the BitstreamMode field's value.
func (s *Ac3Settings) SetBitstreamMode(v string) *Ac3Settings {
	s.BitstreamMode = &v
	return s
}

// SetCodingMode sets the CodingMode field's value.
func (s *Ac3Settings) SetCodingMode(v string) *Ac3Settings {
	s.CodingMode = &v
	return s
}

// SetDialnorm sets the Dialnorm field's value.
func (s *Ac3Settings) SetDialnorm(v int64) *Ac3Settings {
	s.Dialnorm = &v
	return s
}

// SetDynamicRangeCompressionLine sets the DynamicRangeCompressionLine field's value.
func (s *Ac3Settings) SetDynamicRangeCompressionLine(v string) *Ac3Settings {
	s.DynamicRangeCompressionLine = &v
	return s
}

// SetDynamicRangeCompressionProfile sets the DynamicRangeCompressionProfile field's value.
func (s *Ac3Settings) SetDynamicRangeCompressionProfile(v string) *Ac3Settings {
	s.DynamicRangeCompressionProfile = &v
	return s
}

// SetDynamicRangeCompressionRf sets the DynamicRangeCompressionRf field's value.
func (s *Ac3Settings) SetDynamicRangeCompressionRf(v string) *Ac3Settings {
	s.DynamicRangeCompressionRf = &v
	return s
}

// SetLfeFilter sets the LfeFilter field's value.
func (s *Ac3Settings) SetLfeFilter(v string) *Ac3Settings {
	s.LfeFilter = &v
	return s
}

// SetMetadataControl sets the MetadataControl field's value.
func (s *Ac3Settings) SetMetadataControl(v string) *Ac3Settings {
	s.MetadataControl = &v
	return s
}

// SetSampleRate sets the SampleRate field's value.
func (s *Ac3Settings) SetSampleRate(v int64) *Ac3Settings {
	s.SampleRate = &v
	return s
}
2822
// Accelerated transcoding can significantly speed up jobs with long, visually
// complex content.
type AccelerationSettings struct {
	_ struct{} `type:"structure"`

	// Specify the conditions when the service will run your job with accelerated
	// transcoding.
	//
	// Mode is a required field
	Mode *string `locationName:"mode" type:"string" required:"true" enum:"AccelerationMode"`
}

// String returns the string representation
func (s AccelerationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AccelerationSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Mode is the only constraint modeled client-side: it is required; the enum
// value itself is validated by the service.
func (s *AccelerationSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AccelerationSettings"}
	if s.Mode == nil {
		invalidParams.Add(request.NewErrParamRequired("Mode"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetMode sets the Mode field's value.
func (s *AccelerationSettings) SetMode(v string) *AccelerationSettings {
	s.Mode = &v
	return s
}
2863
// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value AIFF.
type AiffSettings struct {
	_ struct{} `type:"structure"`

	// Specify Bit depth (BitDepth), in bits per sample, to choose the encoding
	// quality for this audio track.
	BitDepth *int64 `locationName:"bitDepth" min:"16" type:"integer"`

	// Specify the number of channels in this output audio track. Valid values are
	// 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// Sample rate in hz.
	SampleRate *int64 `locationName:"sampleRate" min:"8000" type:"integer"`
}

// String returns the string representation
func (s AiffSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AiffSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Only the minimum-value constraints from the service model are checked
// client-side (they mirror the `min` struct tags above).
func (s *AiffSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AiffSettings"}
	if s.BitDepth != nil && *s.BitDepth < 16 {
		invalidParams.Add(request.NewErrParamMinValue("BitDepth", 16))
	}
	if s.Channels != nil && *s.Channels < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Channels", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 8000 {
		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 8000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBitDepth sets the BitDepth field's value.
func (s *AiffSettings) SetBitDepth(v int64) *AiffSettings {
	s.BitDepth = &v
	return s
}

// SetChannels sets the Channels field's value.
func (s *AiffSettings) SetChannels(v int64) *AiffSettings {
	s.Channels = &v
	return s
}

// SetSampleRate sets the SampleRate field's value.
func (s *AiffSettings) SetSampleRate(v int64) *AiffSettings {
	s.SampleRate = &v
	return s
}
2927
// Settings for ancillary captions source.
type AncillarySourceSettings struct {
	_ struct{} `type:"structure"`

	// Specify whether this set of input captions appears in your outputs in both
	// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes
	// the captions data in two ways: it passes the 608 data through using the 608
	// compatibility bytes fields of the 708 wrapper, and it also translates the
	// 608 data into 708.
	Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"AncillaryConvert608To708"`

	// Specifies the 608 channel number in the ancillary data track from which to
	// extract captions. Unused for passthrough.
	SourceAncillaryChannelNumber *int64 `locationName:"sourceAncillaryChannelNumber" min:"1" type:"integer"`

	// By default, the service terminates any unterminated captions at the end of
	// each input. If you want the caption to continue onto your next input, disable
	// this setting.
	TerminateCaptions *string `locationName:"terminateCaptions" type:"string" enum:"AncillaryTerminateCaptions"`
}

// String returns the string representation
func (s AncillarySourceSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AncillarySourceSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Only the minimum-value constraint on SourceAncillaryChannelNumber is
// modeled client-side; enum string values are validated by the service.
func (s *AncillarySourceSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AncillarySourceSettings"}
	if s.SourceAncillaryChannelNumber != nil && *s.SourceAncillaryChannelNumber < 1 {
		invalidParams.Add(request.NewErrParamMinValue("SourceAncillaryChannelNumber", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetConvert608To708 sets the Convert608To708 field's value.
func (s *AncillarySourceSettings) SetConvert608To708(v string) *AncillarySourceSettings {
	s.Convert608To708 = &v
	return s
}

// SetSourceAncillaryChannelNumber sets the SourceAncillaryChannelNumber field's value.
func (s *AncillarySourceSettings) SetSourceAncillaryChannelNumber(v int64) *AncillarySourceSettings {
	s.SourceAncillaryChannelNumber = &v
	return s
}

// SetTerminateCaptions sets the TerminateCaptions field's value.
func (s *AncillarySourceSettings) SetTerminateCaptions(v string) *AncillarySourceSettings {
	s.TerminateCaptions = &v
	return s
}
2989
// Associates the Amazon Resource Name (ARN) of an AWS Certificate Manager (ACM)
// certificate with an AWS Elemental MediaConvert resource.
type AssociateCertificateInput struct {
	_ struct{} `type:"structure"`

	// The ARN of the ACM certificate that you want to associate with your MediaConvert
	// resource.
	//
	// Arn is a required field
	Arn *string `locationName:"arn" type:"string" required:"true"`
}

// String returns the string representation
func (s AssociateCertificateInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AssociateCertificateInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Arn is the only client-side constraint: it is required; the ARN format
// itself is validated by the service.
func (s *AssociateCertificateInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AssociateCertificateInput"}
	if s.Arn == nil {
		invalidParams.Add(request.NewErrParamRequired("Arn"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetArn sets the Arn field's value.
func (s *AssociateCertificateInput) SetArn(v string) *AssociateCertificateInput {
	s.Arn = &v
	return s
}
3030
// Successful association of Certificate Manager Amazon Resource Name (ARN)
// with Mediaconvert returns an OK message.
//
// This output shape carries no fields; a successful call is indicated solely
// by the absence of an error.
type AssociateCertificateOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s AssociateCertificateOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AssociateCertificateOutput) GoString() string {
	return s.String()
}
3046
// When you mimic a multi-channel audio layout with multiple mono-channel tracks,
// you can tag each channel layout manually. For example, you would tag the
// tracks that contain your left, right, and center audio with Left (L), Right
// (R), and Center (C), respectively. When you don't specify a value, MediaConvert
// labels your track as Center (C) by default. To use audio layout tagging,
// your output must be in a QuickTime (.mov) container; your audio codec must
// be AAC, WAV, or AIFF; and you must set up your audio track to have only one
// channel.
type AudioChannelTaggingSettings struct {
	_ struct{} `type:"structure"`

	// You can add a tag for this mono-channel audio track to mimic its placement
	// in a multi-channel layout. For example, if this track is the left surround
	// channel, choose Left surround (LS).
	ChannelTag *string `locationName:"channelTag" type:"string" enum:"AudioChannelTag"`
}

// String returns the string representation
func (s AudioChannelTaggingSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AudioChannelTaggingSettings) GoString() string {
	return s.String()
}

// SetChannelTag sets the ChannelTag field's value.
func (s *AudioChannelTaggingSettings) SetChannelTag(v string) *AudioChannelTaggingSettings {
	s.ChannelTag = &v
	return s
}
3079
// Settings related to audio encoding. The settings in this group vary depending
// on the value that you choose for your audio codec.
type AudioCodecSettings struct {
	_ struct{} `type:"structure"`

	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
	// the value AAC. The service accepts one of two mutually exclusive groups of
	// AAC settings--VBR and CBR. To select one of these modes, set the value of
	// Bitrate control mode (rateControlMode) to "VBR" or "CBR". In VBR mode, you
	// control the audio quality with the setting VBR quality (vbrQuality). In CBR
	// mode, you use the setting Bitrate (bitrate). Defaults and valid values depend
	// on the rate control mode.
	AacSettings *AacSettings `locationName:"aacSettings" type:"structure"`

	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
	// the value AC3.
	Ac3Settings *Ac3Settings `locationName:"ac3Settings" type:"structure"`

	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
	// the value AIFF.
	AiffSettings *AiffSettings `locationName:"aiffSettings" type:"structure"`

	// Choose the audio codec for this output. Note that the option Dolby Digital
	// passthrough (PASSTHROUGH) applies only to Dolby Digital and Dolby Digital
	// Plus audio inputs. Make sure that you choose a codec that's supported with
	// your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio
	// For audio-only outputs, make sure that both your input audio codec and your
	// output audio codec are supported for audio-only workflows. For more information,
	// see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only
	// and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output
	Codec *string `locationName:"codec" type:"string" enum:"AudioCodec"`

	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
	// the value EAC3_ATMOS.
	Eac3AtmosSettings *Eac3AtmosSettings `locationName:"eac3AtmosSettings" type:"structure"`

	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
	// the value EAC3.
	Eac3Settings *Eac3Settings `locationName:"eac3Settings" type:"structure"`

	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
	// the value MP2.
	Mp2Settings *Mp2Settings `locationName:"mp2Settings" type:"structure"`

	// Required when you set Codec, under AudioDescriptions>CodecSettings, to the
	// value MP3.
	Mp3Settings *Mp3Settings `locationName:"mp3Settings" type:"structure"`

	// Required when you set Codec, under AudioDescriptions>CodecSettings, to the
	// value OPUS.
	OpusSettings *OpusSettings `locationName:"opusSettings" type:"structure"`

	// Required when you set Codec, under AudioDescriptions>CodecSettings, to the
	// value Vorbis.
	VorbisSettings *VorbisSettings `locationName:"vorbisSettings" type:"structure"`

	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
	// the value WAV.
	WavSettings *WavSettings `locationName:"wavSettings" type:"structure"`
}

// String returns the string representation
func (s AudioCodecSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AudioCodecSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Each non-nil codec settings struct is delegated to its own Validate; any
// nested failures are collected under this struct's context. Nil codec
// settings (the common case, since only one codec is configured) are skipped.
func (s *AudioCodecSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AudioCodecSettings"}
	if s.AacSettings != nil {
		if err := s.AacSettings.Validate(); err != nil {
			invalidParams.AddNested("AacSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.Ac3Settings != nil {
		if err := s.Ac3Settings.Validate(); err != nil {
			invalidParams.AddNested("Ac3Settings", err.(request.ErrInvalidParams))
		}
	}
	if s.AiffSettings != nil {
		if err := s.AiffSettings.Validate(); err != nil {
			invalidParams.AddNested("AiffSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.Eac3AtmosSettings != nil {
		if err := s.Eac3AtmosSettings.Validate(); err != nil {
			invalidParams.AddNested("Eac3AtmosSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.Eac3Settings != nil {
		if err := s.Eac3Settings.Validate(); err != nil {
			invalidParams.AddNested("Eac3Settings", err.(request.ErrInvalidParams))
		}
	}
	if s.Mp2Settings != nil {
		if err := s.Mp2Settings.Validate(); err != nil {
			invalidParams.AddNested("Mp2Settings", err.(request.ErrInvalidParams))
		}
	}
	if s.Mp3Settings != nil {
		if err := s.Mp3Settings.Validate(); err != nil {
			invalidParams.AddNested("Mp3Settings", err.(request.ErrInvalidParams))
		}
	}
	if s.OpusSettings != nil {
		if err := s.OpusSettings.Validate(); err != nil {
			invalidParams.AddNested("OpusSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.VorbisSettings != nil {
		if err := s.VorbisSettings.Validate(); err != nil {
			invalidParams.AddNested("VorbisSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.WavSettings != nil {
		if err := s.WavSettings.Validate(); err != nil {
			invalidParams.AddNested("WavSettings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAacSettings sets the AacSettings field's value.
func (s *AudioCodecSettings) SetAacSettings(v *AacSettings) *AudioCodecSettings {
	s.AacSettings = v
	return s
}

// SetAc3Settings sets the Ac3Settings field's value.
func (s *AudioCodecSettings) SetAc3Settings(v *Ac3Settings) *AudioCodecSettings {
	s.Ac3Settings = v
	return s
}

// SetAiffSettings sets the AiffSettings field's value.
func (s *AudioCodecSettings) SetAiffSettings(v *AiffSettings) *AudioCodecSettings {
	s.AiffSettings = v
	return s
}

// SetCodec sets the Codec field's value.
func (s *AudioCodecSettings) SetCodec(v string) *AudioCodecSettings {
	s.Codec = &v
	return s
}

// SetEac3AtmosSettings sets the Eac3AtmosSettings field's value.
func (s *AudioCodecSettings) SetEac3AtmosSettings(v *Eac3AtmosSettings) *AudioCodecSettings {
	s.Eac3AtmosSettings = v
	return s
}

// SetEac3Settings sets the Eac3Settings field's value.
func (s *AudioCodecSettings) SetEac3Settings(v *Eac3Settings) *AudioCodecSettings {
	s.Eac3Settings = v
	return s
}

// SetMp2Settings sets the Mp2Settings field's value.
func (s *AudioCodecSettings) SetMp2Settings(v *Mp2Settings) *AudioCodecSettings {
	s.Mp2Settings = v
	return s
}

// SetMp3Settings sets the Mp3Settings field's value.
func (s *AudioCodecSettings) SetMp3Settings(v *Mp3Settings) *AudioCodecSettings {
	s.Mp3Settings = v
	return s
}

// SetOpusSettings sets the OpusSettings field's value.
func (s *AudioCodecSettings) SetOpusSettings(v *OpusSettings) *AudioCodecSettings {
	s.OpusSettings = v
	return s
}

// SetVorbisSettings sets the VorbisSettings field's value.
func (s *AudioCodecSettings) SetVorbisSettings(v *VorbisSettings) *AudioCodecSettings {
	s.VorbisSettings = v
	return s
}
3270
3271// SetWavSettings sets the WavSettings field's value.
3272func (s *AudioCodecSettings) SetWavSettings(v *WavSettings) *AudioCodecSettings {
3273	s.WavSettings = v
3274	return s
3275}
3276
// Settings related to one audio tab on the MediaConvert console. In your job
// JSON, an instance of AudioDescription is equivalent to one audio tab in the
// console. Usually, one audio tab corresponds to one output audio track. Depending
// on how you set up your input audio selectors and whether you use audio selector
// groups, one audio tab can correspond to a group of output audio tracks.
type AudioDescription struct {
	_ struct{} `type:"structure"`

	// When you mimic a multi-channel audio layout with multiple mono-channel tracks,
	// you can tag each channel layout manually. For example, you would tag the
	// tracks that contain your left, right, and center audio with Left (L), Right
	// (R), and Center (C), respectively. When you don't specify a value, MediaConvert
	// labels your track as Center (C) by default. To use audio layout tagging,
	// your output must be in a QuickTime (.mov) container; your audio codec must
	// be AAC, WAV, or AIFF; and you must set up your audio track to have only one
	// channel.
	AudioChannelTaggingSettings *AudioChannelTaggingSettings `locationName:"audioChannelTaggingSettings" type:"structure"`

	// Advanced audio normalization settings. Ignore these settings unless you need
	// to comply with a loudness standard.
	AudioNormalizationSettings *AudioNormalizationSettings `locationName:"audioNormalizationSettings" type:"structure"`

	// Specifies which audio data to use from each input. In the simplest case,
	// specify an "Audio Selector":#inputs-audio_selector by name based on its order
	// within each input. For example if you specify "Audio Selector 3", then the
	// third audio selector will be used from each input. If an input does not have
	// an "Audio Selector 3", then the audio selector marked as "default" in that
	// input will be used. If there is no audio selector marked as "default", silence
	// will be inserted for the duration of that input. Alternatively, an "Audio
	// Selector Group":#inputs-audio_selector_group name may be specified, with
	// similar default/silence behavior. If no audio_source_name is specified, then
	// "Audio Selector 1" will be chosen automatically.
	AudioSourceName *string `locationName:"audioSourceName" type:"string"`

	// Applies only if Follow Input Audio Type is unchecked (false). A number between
	// 0 and 255. The following are defined in ISO-IEC 13818-1: 0 = Undefined, 1
	// = Clean Effects, 2 = Hearing Impaired, 3 = Visually Impaired Commentary,
	// 4-255 = Reserved.
	AudioType *int64 `locationName:"audioType" type:"integer"`

	// When set to FOLLOW_INPUT, if the input contains an ISO 639 audio_type, then
	// that value is passed through to the output. If the input contains no ISO
	// 639 audio_type, the value in Audio Type is included in the output. Otherwise
	// the value in Audio Type is included in the output. Note that this field and
	// audioType are both ignored if audioDescriptionBroadcasterMix is set to BROADCASTER_MIXED_AD.
	AudioTypeControl *string `locationName:"audioTypeControl" type:"string" enum:"AudioTypeControl"`

	// Settings related to audio encoding. The settings in this group vary depending
	// on the value that you choose for your audio codec.
	CodecSettings *AudioCodecSettings `locationName:"codecSettings" type:"structure"`

	// Specify the language for this audio output track. The service puts this language
	// code into your output audio track when you set Language code control (AudioLanguageCodeControl)
	// to Use configured (USE_CONFIGURED). The service also uses your specified
	// custom language code when you set Language code control (AudioLanguageCodeControl)
	// to Follow input (FOLLOW_INPUT), but your input file doesn't specify a language
	// code. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming
	// outputs, you can also use any other code in the full RFC-5646 specification.
	// Streaming outputs are those that are in one of the following output groups:
	// CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming.
	CustomLanguageCode *string `locationName:"customLanguageCode" type:"string"`

	// Indicates the language of the audio output track. The ISO 639 language specified
	// in the 'Language Code' drop down will be used when 'Follow Input Language
	// Code' is not selected or when 'Follow Input Language Code' is selected but
	// there is no ISO 639 language code specified by the input.
	LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"`

	// Specify which source for language code takes precedence for this audio track.
	// When you choose Follow input (FOLLOW_INPUT), the service uses the language
	// code from the input track if it's present. If there's no language code on
	// the input track, the service uses the code that you specify in the setting
	// Language code (languageCode or customLanguageCode). When you choose Use configured
	// (USE_CONFIGURED), the service uses the language code that you specify.
	LanguageCodeControl *string `locationName:"languageCodeControl" type:"string" enum:"AudioLanguageCodeControl"`

	// Advanced audio remixing settings.
	RemixSettings *RemixSettings `locationName:"remixSettings" type:"structure"`

	// Specify a label for this output audio stream. For example, "English", "Director
	// commentary", or "track_2". For streaming outputs, MediaConvert passes this
	// information into destination manifests for display on the end-viewer's player
	// device. For outputs in other output groups, the service ignores this setting.
	StreamName *string `locationName:"streamName" type:"string"`
}
3362
// String returns the string representation of the value, rendered with
// awsutil.Prettify; it is intended for debugging and logging.
func (s AudioDescription) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s AudioDescription) GoString() string {
	return s.String()
}
3372
3373// Validate inspects the fields of the type to determine if they are valid.
3374func (s *AudioDescription) Validate() error {
3375	invalidParams := request.ErrInvalidParams{Context: "AudioDescription"}
3376	if s.AudioNormalizationSettings != nil {
3377		if err := s.AudioNormalizationSettings.Validate(); err != nil {
3378			invalidParams.AddNested("AudioNormalizationSettings", err.(request.ErrInvalidParams))
3379		}
3380	}
3381	if s.CodecSettings != nil {
3382		if err := s.CodecSettings.Validate(); err != nil {
3383			invalidParams.AddNested("CodecSettings", err.(request.ErrInvalidParams))
3384		}
3385	}
3386	if s.RemixSettings != nil {
3387		if err := s.RemixSettings.Validate(); err != nil {
3388			invalidParams.AddNested("RemixSettings", err.(request.ErrInvalidParams))
3389		}
3390	}
3391
3392	if invalidParams.Len() > 0 {
3393		return invalidParams
3394	}
3395	return nil
3396}
3397
// SetAudioChannelTaggingSettings sets the AudioChannelTaggingSettings field's value.
// It returns s so that setter calls can be chained.
func (s *AudioDescription) SetAudioChannelTaggingSettings(v *AudioChannelTaggingSettings) *AudioDescription {
	s.AudioChannelTaggingSettings = v
	return s
}

// SetAudioNormalizationSettings sets the AudioNormalizationSettings field's value.
// It returns s so that setter calls can be chained.
func (s *AudioDescription) SetAudioNormalizationSettings(v *AudioNormalizationSettings) *AudioDescription {
	s.AudioNormalizationSettings = v
	return s
}

// SetAudioSourceName sets the AudioSourceName field's value.
// It returns s so that setter calls can be chained.
func (s *AudioDescription) SetAudioSourceName(v string) *AudioDescription {
	s.AudioSourceName = &v
	return s
}

// SetAudioType sets the AudioType field's value.
// It returns s so that setter calls can be chained.
func (s *AudioDescription) SetAudioType(v int64) *AudioDescription {
	s.AudioType = &v
	return s
}

// SetAudioTypeControl sets the AudioTypeControl field's value.
// It returns s so that setter calls can be chained.
func (s *AudioDescription) SetAudioTypeControl(v string) *AudioDescription {
	s.AudioTypeControl = &v
	return s
}

// SetCodecSettings sets the CodecSettings field's value.
// It returns s so that setter calls can be chained.
func (s *AudioDescription) SetCodecSettings(v *AudioCodecSettings) *AudioDescription {
	s.CodecSettings = v
	return s
}

// SetCustomLanguageCode sets the CustomLanguageCode field's value.
// It returns s so that setter calls can be chained.
func (s *AudioDescription) SetCustomLanguageCode(v string) *AudioDescription {
	s.CustomLanguageCode = &v
	return s
}

// SetLanguageCode sets the LanguageCode field's value.
// It returns s so that setter calls can be chained.
func (s *AudioDescription) SetLanguageCode(v string) *AudioDescription {
	s.LanguageCode = &v
	return s
}

// SetLanguageCodeControl sets the LanguageCodeControl field's value.
// It returns s so that setter calls can be chained.
func (s *AudioDescription) SetLanguageCodeControl(v string) *AudioDescription {
	s.LanguageCodeControl = &v
	return s
}

// SetRemixSettings sets the RemixSettings field's value.
// It returns s so that setter calls can be chained.
func (s *AudioDescription) SetRemixSettings(v *RemixSettings) *AudioDescription {
	s.RemixSettings = v
	return s
}

// SetStreamName sets the StreamName field's value.
// It returns s so that setter calls can be chained.
func (s *AudioDescription) SetStreamName(v string) *AudioDescription {
	s.StreamName = &v
	return s
}
3462}
3463
// Advanced audio normalization settings. Ignore these settings unless you need
// to comply with a loudness standard.
type AudioNormalizationSettings struct {
	_ struct{} `type:"structure"`

	// Choose one of the following audio normalization algorithms: ITU-R BS.1770-1:
	// Ungated loudness. A measurement of ungated average loudness for an entire
	// piece of content, suitable for measurement of short-form content under ATSC
	// recommendation A/85. Supports up to 5.1 audio channels. ITU-R BS.1770-2:
	// Gated loudness. A measurement of gated average loudness compliant with the
	// requirements of EBU-R128. Supports up to 5.1 audio channels. ITU-R BS.1770-3:
	// Modified peak. The same loudness measurement algorithm as 1770-2, with an
	// updated true peak measurement. ITU-R BS.1770-4: Higher channel count. Allows
	// for more audio channels than the other algorithms, including configurations
	// such as 7.1.
	Algorithm *string `locationName:"algorithm" type:"string" enum:"AudioNormalizationAlgorithm"`

	// When enabled the output audio is corrected using the chosen algorithm. If
	// disabled, the audio will be measured but not adjusted.
	AlgorithmControl *string `locationName:"algorithmControl" type:"string" enum:"AudioNormalizationAlgorithmControl"`

	// Content measuring above this level will be corrected to the target level.
	// Content measuring below this level will not be corrected.
	// Validate rejects values below -70.
	CorrectionGateLevel *int64 `locationName:"correctionGateLevel" type:"integer"`

	// If set to LOG, log each output's audio track loudness to a CSV file.
	LoudnessLogging *string `locationName:"loudnessLogging" type:"string" enum:"AudioNormalizationLoudnessLogging"`

	// If set to TRUE_PEAK, calculate and log the TruePeak for each output's audio
	// track loudness.
	PeakCalculation *string `locationName:"peakCalculation" type:"string" enum:"AudioNormalizationPeakCalculation"`

	// When you use Audio normalization (AudioNormalizationSettings), optionally
	// use this setting to specify a target loudness. If you don't specify a value
	// here, the encoder chooses a value for you, based on the algorithm that you
	// choose for Algorithm (algorithm). If you choose algorithm 1770-1, the encoder
	// will choose -24 LKFS; otherwise, the encoder will choose -23 LKFS.
	TargetLkfs *float64 `locationName:"targetLkfs" type:"double"`
}
3503
// String returns the string representation of the value, rendered with
// awsutil.Prettify; it is intended for debugging and logging.
func (s AudioNormalizationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s AudioNormalizationSettings) GoString() string {
	return s.String()
}
3513
3514// Validate inspects the fields of the type to determine if they are valid.
3515func (s *AudioNormalizationSettings) Validate() error {
3516	invalidParams := request.ErrInvalidParams{Context: "AudioNormalizationSettings"}
3517	if s.CorrectionGateLevel != nil && *s.CorrectionGateLevel < -70 {
3518		invalidParams.Add(request.NewErrParamMinValue("CorrectionGateLevel", -70))
3519	}
3520
3521	if invalidParams.Len() > 0 {
3522		return invalidParams
3523	}
3524	return nil
3525}
3526
// SetAlgorithm sets the Algorithm field's value.
// It returns s so that setter calls can be chained.
func (s *AudioNormalizationSettings) SetAlgorithm(v string) *AudioNormalizationSettings {
	s.Algorithm = &v
	return s
}

// SetAlgorithmControl sets the AlgorithmControl field's value.
// It returns s so that setter calls can be chained.
func (s *AudioNormalizationSettings) SetAlgorithmControl(v string) *AudioNormalizationSettings {
	s.AlgorithmControl = &v
	return s
}

// SetCorrectionGateLevel sets the CorrectionGateLevel field's value.
// It returns s so that setter calls can be chained.
func (s *AudioNormalizationSettings) SetCorrectionGateLevel(v int64) *AudioNormalizationSettings {
	s.CorrectionGateLevel = &v
	return s
}

// SetLoudnessLogging sets the LoudnessLogging field's value.
// It returns s so that setter calls can be chained.
func (s *AudioNormalizationSettings) SetLoudnessLogging(v string) *AudioNormalizationSettings {
	s.LoudnessLogging = &v
	return s
}

// SetPeakCalculation sets the PeakCalculation field's value.
// It returns s so that setter calls can be chained.
func (s *AudioNormalizationSettings) SetPeakCalculation(v string) *AudioNormalizationSettings {
	s.PeakCalculation = &v
	return s
}

// SetTargetLkfs sets the TargetLkfs field's value.
// It returns s so that setter calls can be chained.
func (s *AudioNormalizationSettings) SetTargetLkfs(v float64) *AudioNormalizationSettings {
	s.TargetLkfs = &v
	return s
}
3562
// Use Audio selectors (AudioSelectors) to specify a track or set of tracks
// from the input that you will use in your outputs. You can use multiple Audio
// selectors per input.
type AudioSelector struct {
	_ struct{} `type:"structure"`

	// Selects a specific language code from within an audio source, using the ISO
	// 639-2 or ISO 639-3 three-letter language code
	CustomLanguageCode *string `locationName:"customLanguageCode" min:"3" type:"string"`

	// Enable this setting on one audio selector to set it as the default for the
	// job. The service uses this default for outputs where it can't find the specified
	// input audio. If you don't set a default, those outputs have no audio.
	DefaultSelection *string `locationName:"defaultSelection" type:"string" enum:"AudioDefaultSelection"`

	// Specifies audio data from an external file source.
	ExternalAudioFileInput *string `locationName:"externalAudioFileInput" type:"string"`

	// Settings specific to audio sources in an HLS alternate rendition group. Specify
	// the properties (renditionGroupId, renditionName or renditionLanguageCode)
	// to identify the unique audio track among the alternative rendition groups
	// present in the HLS manifest. If no unique track is found, or multiple tracks
	// match the properties provided, the job fails. If no properties in hlsRenditionGroupSettings
	// are specified, the default audio track within the video segment is chosen.
	// If there is no audio within video segment, the alternative audio with DEFAULT=YES
	// is chosen instead.
	HlsRenditionGroupSettings *HlsRenditionGroupSettings `locationName:"hlsRenditionGroupSettings" type:"structure"`

	// Selects a specific language code from within an audio source.
	LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"`

	// Specifies a time delta in milliseconds to offset the audio from the input
	// video.
	Offset *int64 `locationName:"offset" type:"integer"`

	// Selects a specific PID from within an audio source (e.g. 257 selects PID
	// 0x101).
	Pids []*int64 `locationName:"pids" type:"list"`

	// Use this setting for input streams that contain Dolby E, to have the service
	// extract specific program data from the track. To select multiple programs,
	// create multiple selectors with the same Track and different Program numbers.
	// In the console, this setting is visible when you set Selector type to Track.
	// Choose the program number from the dropdown list. If you are sending a JSON
	// file, provide the program ID, which is part of the audio metadata. If your
	// input file has incorrect metadata, you can choose All channels instead of
	// a program number to have the service ignore the program IDs and include all
	// the programs in the track.
	ProgramSelection *int64 `locationName:"programSelection" type:"integer"`

	// Use these settings to reorder the audio channels of one input to match those
	// of another input. This allows you to combine the two files into a single
	// output, one after the other.
	RemixSettings *RemixSettings `locationName:"remixSettings" type:"structure"`

	// Specifies the type of the audio selector.
	SelectorType *string `locationName:"selectorType" type:"string" enum:"AudioSelectorType"`

	// Identify a track from the input audio to include in this selector by entering
	// the track index number. To include several tracks in a single audio selector,
	// specify multiple tracks as follows. Using the console, enter a comma-separated
	// list. For example, type "1,2,3" to include tracks 1 through 3. Specifying
	// directly in your JSON job file, provide the track numbers in an array. For
	// example, "tracks": [1,2,3].
	Tracks []*int64 `locationName:"tracks" type:"list"`
}
3629
// String returns the string representation of the value, rendered with
// awsutil.Prettify; it is intended for debugging and logging.
func (s AudioSelector) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s AudioSelector) GoString() string {
	return s.String()
}
3639
3640// Validate inspects the fields of the type to determine if they are valid.
3641func (s *AudioSelector) Validate() error {
3642	invalidParams := request.ErrInvalidParams{Context: "AudioSelector"}
3643	if s.CustomLanguageCode != nil && len(*s.CustomLanguageCode) < 3 {
3644		invalidParams.Add(request.NewErrParamMinLen("CustomLanguageCode", 3))
3645	}
3646	if s.Offset != nil && *s.Offset < -2.147483648e+09 {
3647		invalidParams.Add(request.NewErrParamMinValue("Offset", -2.147483648e+09))
3648	}
3649	if s.RemixSettings != nil {
3650		if err := s.RemixSettings.Validate(); err != nil {
3651			invalidParams.AddNested("RemixSettings", err.(request.ErrInvalidParams))
3652		}
3653	}
3654
3655	if invalidParams.Len() > 0 {
3656		return invalidParams
3657	}
3658	return nil
3659}
3660
// SetCustomLanguageCode sets the CustomLanguageCode field's value.
// It returns s so that setter calls can be chained.
func (s *AudioSelector) SetCustomLanguageCode(v string) *AudioSelector {
	s.CustomLanguageCode = &v
	return s
}

// SetDefaultSelection sets the DefaultSelection field's value.
// It returns s so that setter calls can be chained.
func (s *AudioSelector) SetDefaultSelection(v string) *AudioSelector {
	s.DefaultSelection = &v
	return s
}

// SetExternalAudioFileInput sets the ExternalAudioFileInput field's value.
// It returns s so that setter calls can be chained.
func (s *AudioSelector) SetExternalAudioFileInput(v string) *AudioSelector {
	s.ExternalAudioFileInput = &v
	return s
}

// SetHlsRenditionGroupSettings sets the HlsRenditionGroupSettings field's value.
// It returns s so that setter calls can be chained.
func (s *AudioSelector) SetHlsRenditionGroupSettings(v *HlsRenditionGroupSettings) *AudioSelector {
	s.HlsRenditionGroupSettings = v
	return s
}

// SetLanguageCode sets the LanguageCode field's value.
// It returns s so that setter calls can be chained.
func (s *AudioSelector) SetLanguageCode(v string) *AudioSelector {
	s.LanguageCode = &v
	return s
}

// SetOffset sets the Offset field's value.
// It returns s so that setter calls can be chained.
func (s *AudioSelector) SetOffset(v int64) *AudioSelector {
	s.Offset = &v
	return s
}

// SetPids sets the Pids field's value.
// The slice is stored as-is (not copied); callers should not mutate it afterwards.
func (s *AudioSelector) SetPids(v []*int64) *AudioSelector {
	s.Pids = v
	return s
}

// SetProgramSelection sets the ProgramSelection field's value.
// It returns s so that setter calls can be chained.
func (s *AudioSelector) SetProgramSelection(v int64) *AudioSelector {
	s.ProgramSelection = &v
	return s
}

// SetRemixSettings sets the RemixSettings field's value.
// It returns s so that setter calls can be chained.
func (s *AudioSelector) SetRemixSettings(v *RemixSettings) *AudioSelector {
	s.RemixSettings = v
	return s
}

// SetSelectorType sets the SelectorType field's value.
// It returns s so that setter calls can be chained.
func (s *AudioSelector) SetSelectorType(v string) *AudioSelector {
	s.SelectorType = &v
	return s
}

// SetTracks sets the Tracks field's value.
// The slice is stored as-is (not copied); callers should not mutate it afterwards.
func (s *AudioSelector) SetTracks(v []*int64) *AudioSelector {
	s.Tracks = v
	return s
}
3726
// Use audio selector groups to combine multiple sidecar audio inputs so that
// you can assign them to a single output audio tab (AudioDescription). Note
// that, if you're working with embedded audio, it's simpler to assign multiple
// input tracks into a single audio selector rather than use an audio selector
// group. See AudioDescription.AudioSourceName for how a group is referenced
// from an output.
type AudioSelectorGroup struct {
	_ struct{} `type:"structure"`

	// Name of an Audio Selector within the same input to include in the group.
	// Audio selector names are standardized, based on their order within the input
	// (e.g., "Audio Selector 1"). The audio selector name parameter can be repeated
	// to add any number of audio selectors to the group.
	AudioSelectorNames []*string `locationName:"audioSelectorNames" type:"list"`
}
3741
// String returns the string representation of the value, rendered with
// awsutil.Prettify; it is intended for debugging and logging.
func (s AudioSelectorGroup) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s AudioSelectorGroup) GoString() string {
	return s.String()
}
3751
// SetAudioSelectorNames sets the AudioSelectorNames field's value.
// The slice is stored as-is (not copied); callers should not mutate it afterwards.
func (s *AudioSelectorGroup) SetAudioSelectorNames(v []*string) *AudioSelectorGroup {
	s.AudioSelectorNames = v
	return s
}
3757
// Use automated ABR to have MediaConvert set up the renditions in your ABR
// package for you automatically, based on characteristics of your input video.
// This feature optimizes video quality while minimizing the overall size of
// your ABR package. Referenced from AutomatedEncodingSettings.AbrSettings.
type AutomatedAbrSettings struct {
	_ struct{} `type:"structure"`

	// Optional. The maximum target bit rate used in your automated ABR stack. Use
	// this value to set an upper limit on the bandwidth consumed by the highest-quality
	// rendition. This is the rendition that is delivered to viewers with the fastest
	// internet connections. If you don't specify a value, MediaConvert uses 8,000,000
	// (8 mb/s) by default.
	MaxAbrBitrate *int64 `locationName:"maxAbrBitrate" min:"100000" type:"integer"`

	// Optional. The maximum number of renditions that MediaConvert will create
	// in your automated ABR stack. The number of renditions is determined automatically,
	// based on analysis of each job, but will never exceed this limit. When you
	// set this to Auto in the console, which is equivalent to excluding it from
	// your JSON job specification, MediaConvert defaults to a limit of 15.
	MaxRenditions *int64 `locationName:"maxRenditions" min:"3" type:"integer"`

	// Optional. The minimum target bitrate used in your automated ABR stack. Use
	// this value to set a lower limit on the bitrate of video delivered to viewers
	// with slow internet connections. If you don't specify a value, MediaConvert
	// uses 600,000 (600 kb/s) by default.
	MinAbrBitrate *int64 `locationName:"minAbrBitrate" min:"100000" type:"integer"`
}
3785
// String returns the string representation of the value, rendered with
// awsutil.Prettify; it is intended for debugging and logging.
func (s AutomatedAbrSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s AutomatedAbrSettings) GoString() string {
	return s.String()
}
3795
3796// Validate inspects the fields of the type to determine if they are valid.
3797func (s *AutomatedAbrSettings) Validate() error {
3798	invalidParams := request.ErrInvalidParams{Context: "AutomatedAbrSettings"}
3799	if s.MaxAbrBitrate != nil && *s.MaxAbrBitrate < 100000 {
3800		invalidParams.Add(request.NewErrParamMinValue("MaxAbrBitrate", 100000))
3801	}
3802	if s.MaxRenditions != nil && *s.MaxRenditions < 3 {
3803		invalidParams.Add(request.NewErrParamMinValue("MaxRenditions", 3))
3804	}
3805	if s.MinAbrBitrate != nil && *s.MinAbrBitrate < 100000 {
3806		invalidParams.Add(request.NewErrParamMinValue("MinAbrBitrate", 100000))
3807	}
3808
3809	if invalidParams.Len() > 0 {
3810		return invalidParams
3811	}
3812	return nil
3813}
3814
// SetMaxAbrBitrate sets the MaxAbrBitrate field's value.
// It returns s so that setter calls can be chained.
func (s *AutomatedAbrSettings) SetMaxAbrBitrate(v int64) *AutomatedAbrSettings {
	s.MaxAbrBitrate = &v
	return s
}

// SetMaxRenditions sets the MaxRenditions field's value.
// It returns s so that setter calls can be chained.
func (s *AutomatedAbrSettings) SetMaxRenditions(v int64) *AutomatedAbrSettings {
	s.MaxRenditions = &v
	return s
}

// SetMinAbrBitrate sets the MinAbrBitrate field's value.
// It returns s so that setter calls can be chained.
func (s *AutomatedAbrSettings) SetMinAbrBitrate(v int64) *AutomatedAbrSettings {
	s.MinAbrBitrate = &v
	return s
}
3832
// Use automated encoding to have MediaConvert choose your encoding settings
// for you, based on characteristics of your input video. Automated ABR
// (AbrSettings) is the only setting in this group.
type AutomatedEncodingSettings struct {
	_ struct{} `type:"structure"`

	// Use automated ABR to have MediaConvert set up the renditions in your ABR
	// package for you automatically, based on characteristics of your input video.
	// This feature optimizes video quality while minimizing the overall size of
	// your ABR package.
	AbrSettings *AutomatedAbrSettings `locationName:"abrSettings" type:"structure"`
}
3844
// String returns the string representation of the value, rendered with
// awsutil.Prettify; it is intended for debugging and logging.
func (s AutomatedEncodingSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s AutomatedEncodingSettings) GoString() string {
	return s.String()
}
3854
3855// Validate inspects the fields of the type to determine if they are valid.
3856func (s *AutomatedEncodingSettings) Validate() error {
3857	invalidParams := request.ErrInvalidParams{Context: "AutomatedEncodingSettings"}
3858	if s.AbrSettings != nil {
3859		if err := s.AbrSettings.Validate(); err != nil {
3860			invalidParams.AddNested("AbrSettings", err.(request.ErrInvalidParams))
3861		}
3862	}
3863
3864	if invalidParams.Len() > 0 {
3865		return invalidParams
3866	}
3867	return nil
3868}
3869
// SetAbrSettings sets the AbrSettings field's value.
// It returns s so that setter calls can be chained.
func (s *AutomatedEncodingSettings) SetAbrSettings(v *AutomatedAbrSettings) *AutomatedEncodingSettings {
	s.AbrSettings = v
	return s
}
3875
// Settings for quality-defined variable bitrate encoding with the H.265 codec.
// Use these settings only when you set QVBR for Rate control mode (RateControlMode).
// NOTE(review): the "H.265" mention above comes from the upstream API model,
// but this struct is the AV1 QVBR settings (see Av1Settings) — confirm against
// the MediaConvert API reference before correcting.
type Av1QvbrSettings struct {
	_ struct{} `type:"structure"`

	// Use this setting only when you set Rate control mode (RateControlMode) to
	// QVBR. Specify the target quality level for this output. MediaConvert determines
	// the right number of bits to use for each part of the video to maintain the
	// video quality that you specify. When you keep the default value, AUTO, MediaConvert
	// picks a quality level for you, based on characteristics of your input video.
	// If you prefer to specify a quality level, specify a number from 1 through
	// 10. Use higher numbers for greater quality. Level 10 results in nearly lossless
	// compression. The quality level for most broadcast-quality transcodes is between
	// 6 and 9. Optionally, to specify a value between whole numbers, also provide
	// a value for the setting qvbrQualityLevelFineTune. For example, if you want
	// your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune
	// to .33.
	QvbrQualityLevel *int64 `locationName:"qvbrQualityLevel" min:"1" type:"integer"`

	// Optional. Specify a value here to set the QVBR quality to a level that is
	// between whole numbers. For example, if you want your QVBR quality level to
	// be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.
	// MediaConvert rounds your QVBR quality level to the nearest third of a whole
	// number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune
	// to .25, your actual QVBR quality level is 7.33.
	QvbrQualityLevelFineTune *float64 `locationName:"qvbrQualityLevelFineTune" type:"double"`
}
3903
// String returns the string representation of the value, rendered with
// awsutil.Prettify; it is intended for debugging and logging.
func (s Av1QvbrSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s Av1QvbrSettings) GoString() string {
	return s.String()
}
3913
3914// Validate inspects the fields of the type to determine if they are valid.
3915func (s *Av1QvbrSettings) Validate() error {
3916	invalidParams := request.ErrInvalidParams{Context: "Av1QvbrSettings"}
3917	if s.QvbrQualityLevel != nil && *s.QvbrQualityLevel < 1 {
3918		invalidParams.Add(request.NewErrParamMinValue("QvbrQualityLevel", 1))
3919	}
3920
3921	if invalidParams.Len() > 0 {
3922		return invalidParams
3923	}
3924	return nil
3925}
3926
3927// SetQvbrQualityLevel sets the QvbrQualityLevel field's value.
3928func (s *Av1QvbrSettings) SetQvbrQualityLevel(v int64) *Av1QvbrSettings {
3929	s.QvbrQualityLevel = &v
3930	return s
3931}
3932
3933// SetQvbrQualityLevelFineTune sets the QvbrQualityLevelFineTune field's value.
3934func (s *Av1QvbrSettings) SetQvbrQualityLevelFineTune(v float64) *Av1QvbrSettings {
3935	s.QvbrQualityLevelFineTune = &v
3936	return s
3937}
3938
// Required when you set Codec, under VideoDescription>CodecSettings to the
// value AV1.
type Av1Settings struct {
	_ struct{} `type:"structure"`

	// Specify the strength of any adaptive quantization filters that you enable.
	// The value that you choose here applies to Spatial adaptive quantization (spatialAdaptiveQuantization).
	AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"Av1AdaptiveQuantization"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Av1FramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Av1FramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// Specify the GOP length (keyframe interval) in frames. With AV1, MediaConvert
	// doesn't support GOP length in seconds. This value must be greater than zero
	// and preferably equal to 1 + ((numberBFrames + 1) * x), where x is an integer
	// value.
	GopSize *float64 `locationName:"gopSize" type:"double"`

	// Maximum bitrate in bits/second. For example, enter five megabits per second
	// as 5000000. Required when Rate control mode is QVBR.
	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`

	// Specify the number of B-frames, in the range of 0-15. For AV1 encoding,
	// we recommend using 7 or 15. Choose a larger number for a lower bitrate and
	// smaller file size; choose a smaller number for better video quality.
	NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"`

	// Settings for quality-defined variable bitrate encoding with the AV1 codec.
	// Use these settings only when you set QVBR for Rate control mode (RateControlMode).
	QvbrSettings *Av1QvbrSettings `locationName:"qvbrSettings" type:"structure"`

	// With AV1 outputs, for rate control mode, MediaConvert supports only quality-defined
	// variable bitrate (QVBR). You can't use CBR or VBR.
	RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Av1RateControlMode"`

	// Specify the number of slices per picture. This value must be 1, 2, 4, 8,
	// 16, or 32. For progressive pictures, this value must be less than or equal
	// to the number of macroblock rows. For interlaced pictures, this value must
	// be less than or equal to half the number of macroblock rows.
	Slices *int64 `locationName:"slices" min:"1" type:"integer"`

	// Keep the default value, Enabled (ENABLED), to adjust quantization within
	// each frame based on spatial variation of content complexity. When you enable
	// this feature, the encoder uses fewer bits on areas that can sustain more
	// distortion with no noticeable visual degradation and uses more bits on areas
	// where any small distortion will be noticeable. For example, complex textured
	// blocks are encoded with fewer bits and smooth textured blocks are encoded
	// with more bits. Enabling this feature will almost always improve your video
	// quality. Note, though, that this feature doesn't take into account where
	// the viewer's attention is likely to be. If viewers are likely to be focusing
	// their attention on a part of the screen with a lot of complex texture, you
	// might choose to disable this feature. Related setting: When you enable spatial
	// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization)
	// depending on your content. For homogeneous content, such as cartoons and
	// video games, set it to Low. For content with a wider variety of textures,
	// set it to High or Higher.
	SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"Av1SpatialAdaptiveQuantization"`
}
4035
4036// String returns the string representation
4037func (s Av1Settings) String() string {
4038	return awsutil.Prettify(s)
4039}
4040
4041// GoString returns the string representation
4042func (s Av1Settings) GoString() string {
4043	return s.String()
4044}
4045
4046// Validate inspects the fields of the type to determine if they are valid.
4047func (s *Av1Settings) Validate() error {
4048	invalidParams := request.ErrInvalidParams{Context: "Av1Settings"}
4049	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
4050		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
4051	}
4052	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
4053		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1))
4054	}
4055	if s.MaxBitrate != nil && *s.MaxBitrate < 1000 {
4056		invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000))
4057	}
4058	if s.Slices != nil && *s.Slices < 1 {
4059		invalidParams.Add(request.NewErrParamMinValue("Slices", 1))
4060	}
4061	if s.QvbrSettings != nil {
4062		if err := s.QvbrSettings.Validate(); err != nil {
4063			invalidParams.AddNested("QvbrSettings", err.(request.ErrInvalidParams))
4064		}
4065	}
4066
4067	if invalidParams.Len() > 0 {
4068		return invalidParams
4069	}
4070	return nil
4071}
4072
4073// SetAdaptiveQuantization sets the AdaptiveQuantization field's value.
4074func (s *Av1Settings) SetAdaptiveQuantization(v string) *Av1Settings {
4075	s.AdaptiveQuantization = &v
4076	return s
4077}
4078
4079// SetFramerateControl sets the FramerateControl field's value.
4080func (s *Av1Settings) SetFramerateControl(v string) *Av1Settings {
4081	s.FramerateControl = &v
4082	return s
4083}
4084
4085// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value.
4086func (s *Av1Settings) SetFramerateConversionAlgorithm(v string) *Av1Settings {
4087	s.FramerateConversionAlgorithm = &v
4088	return s
4089}
4090
4091// SetFramerateDenominator sets the FramerateDenominator field's value.
4092func (s *Av1Settings) SetFramerateDenominator(v int64) *Av1Settings {
4093	s.FramerateDenominator = &v
4094	return s
4095}
4096
4097// SetFramerateNumerator sets the FramerateNumerator field's value.
4098func (s *Av1Settings) SetFramerateNumerator(v int64) *Av1Settings {
4099	s.FramerateNumerator = &v
4100	return s
4101}
4102
4103// SetGopSize sets the GopSize field's value.
4104func (s *Av1Settings) SetGopSize(v float64) *Av1Settings {
4105	s.GopSize = &v
4106	return s
4107}
4108
4109// SetMaxBitrate sets the MaxBitrate field's value.
4110func (s *Av1Settings) SetMaxBitrate(v int64) *Av1Settings {
4111	s.MaxBitrate = &v
4112	return s
4113}
4114
4115// SetNumberBFramesBetweenReferenceFrames sets the NumberBFramesBetweenReferenceFrames field's value.
4116func (s *Av1Settings) SetNumberBFramesBetweenReferenceFrames(v int64) *Av1Settings {
4117	s.NumberBFramesBetweenReferenceFrames = &v
4118	return s
4119}
4120
4121// SetQvbrSettings sets the QvbrSettings field's value.
4122func (s *Av1Settings) SetQvbrSettings(v *Av1QvbrSettings) *Av1Settings {
4123	s.QvbrSettings = v
4124	return s
4125}
4126
4127// SetRateControlMode sets the RateControlMode field's value.
4128func (s *Av1Settings) SetRateControlMode(v string) *Av1Settings {
4129	s.RateControlMode = &v
4130	return s
4131}
4132
4133// SetSlices sets the Slices field's value.
4134func (s *Av1Settings) SetSlices(v int64) *Av1Settings {
4135	s.Slices = &v
4136	return s
4137}
4138
4139// SetSpatialAdaptiveQuantization sets the SpatialAdaptiveQuantization field's value.
4140func (s *Av1Settings) SetSpatialAdaptiveQuantization(v string) *Av1Settings {
4141	s.SpatialAdaptiveQuantization = &v
4142	return s
4143}
4144
// Use ad avail blanking settings to specify your output content during SCTE-35
// triggered ad avails. You can blank your video or overlay it with an image.
// MediaConvert also removes any audio and embedded captions during the ad avail.
// For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ad-avail-blanking.html.
type AvailBlanking struct {
	_ struct{} `type:"structure"`

	// Blanking image to be used. Leave empty for solid black. Only bmp and png
	// images are supported. When set, the value must be at least 14 characters
	// long (enforced by Validate via the min:"14" tag).
	AvailBlankingImage *string `locationName:"availBlankingImage" min:"14" type:"string"`
}
4156
4157// String returns the string representation
4158func (s AvailBlanking) String() string {
4159	return awsutil.Prettify(s)
4160}
4161
4162// GoString returns the string representation
4163func (s AvailBlanking) GoString() string {
4164	return s.String()
4165}
4166
4167// Validate inspects the fields of the type to determine if they are valid.
4168func (s *AvailBlanking) Validate() error {
4169	invalidParams := request.ErrInvalidParams{Context: "AvailBlanking"}
4170	if s.AvailBlankingImage != nil && len(*s.AvailBlankingImage) < 14 {
4171		invalidParams.Add(request.NewErrParamMinLen("AvailBlankingImage", 14))
4172	}
4173
4174	if invalidParams.Len() > 0 {
4175		return invalidParams
4176	}
4177	return nil
4178}
4179
4180// SetAvailBlankingImage sets the AvailBlankingImage field's value.
4181func (s *AvailBlanking) SetAvailBlankingImage(v string) *AvailBlanking {
4182	s.AvailBlankingImage = &v
4183	return s
4184}
4185
// Required when you choose AVC-Intra for your output video codec. For more
// information about the AVC-Intra settings, see the relevant specification.
// For detailed information about SD and HD in AVC-Intra, see https://ieeexplore.ieee.org/document/7290936.
// For information about 4K/2K in AVC-Intra, see https://pro-av.panasonic.net/en/avc-ultra/AVC-ULTRAoverview.pdf.
type AvcIntraSettings struct {
	_ struct{} `type:"structure"`

	// Specify the AVC-Intra class of your output. The AVC-Intra class selection
	// determines the output video bit rate depending on the frame rate of the output.
	// Outputs with higher class values have higher bitrates and improved image
	// quality. Note that for Class 4K/2K, MediaConvert supports only 4:2:2 chroma
	// subsampling.
	AvcIntraClass *string `locationName:"avcIntraClass" type:"string" enum:"AvcIntraClass"`

	// Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K (CLASS_4K_2K).
	// When you set AVC-Intra class to a different value, this object isn't allowed.
	AvcIntraUhdSettings *AvcIntraUhdSettings `locationName:"avcIntraUhdSettings" type:"structure"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"AvcIntraFramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"AvcIntraFramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976. The minimum accepted
	// value is 24 (enforced by Validate via the min:"24" tag).
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"24" type:"integer"`

	// Choose the scan line type for the output. Keep the default value, Progressive
	// (PROGRESSIVE) to create a progressive output, regardless of the scan type
	// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
	// to create an output that's interlaced with the same field polarity throughout.
	// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
	// to produce outputs with the same field polarity as the source. For jobs that
	// have multiple inputs, the output field polarity might change over the course
	// of the output. Follow behavior depends on the input scan type. If the source
	// is interlaced, the output will be interlaced with the same polarity as the
	// source. If the source is progressive, the output will be interlaced with
	// either top field or bottom field first, depending on which of the Follow
	// options you choose.
	InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"AvcIntraInterlaceMode"`

	// Use this setting for interlaced outputs, when your output frame rate is half
	// of your input frame rate. In this situation, choose Optimized interlacing
	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
	// case, each progressive frame from the input corresponds to an interlaced
	// field in the output. Keep the default value, Basic interlacing (INTERLACED),
	// for all other output frame rates. With basic interlacing, MediaConvert performs
	// any frame rate conversion first and then interlaces the frames. When you
	// choose Optimized interlacing and you set your output frame rate to a value
	// that isn't suitable for optimized interlacing, MediaConvert automatically
	// falls back to basic interlacing. Required settings: To use optimized interlacing,
	// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
	// use optimized interlacing for hard telecine outputs. You must also set Interlace
	// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
	ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"AvcIntraScanTypeConversionMode"`

	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
	// second (fps). Enable slow PAL to create a 25 fps output. When you enable
	// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
	// your audio to keep it synchronized with the video. Note that enabling this
	// setting will slightly reduce the duration of your video. Required settings:
	// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
	// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
	// 1.
	SlowPal *string `locationName:"slowPal" type:"string" enum:"AvcIntraSlowPal"`

	// When you do frame rate conversion from 23.976 frames per second (fps) to
	// 29.97 fps, and your output scan type is interlaced, you can optionally enable
	// hard telecine (HARD) to create a smoother picture. When you keep the default
	// value, None (NONE), MediaConvert does a standard frame rate conversion to
	// 29.97 without doing anything with the field polarity to create a smoother
	// picture.
	Telecine *string `locationName:"telecine" type:"string" enum:"AvcIntraTelecine"`
}
4292
4293// String returns the string representation
4294func (s AvcIntraSettings) String() string {
4295	return awsutil.Prettify(s)
4296}
4297
4298// GoString returns the string representation
4299func (s AvcIntraSettings) GoString() string {
4300	return s.String()
4301}
4302
4303// Validate inspects the fields of the type to determine if they are valid.
4304func (s *AvcIntraSettings) Validate() error {
4305	invalidParams := request.ErrInvalidParams{Context: "AvcIntraSettings"}
4306	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
4307		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
4308	}
4309	if s.FramerateNumerator != nil && *s.FramerateNumerator < 24 {
4310		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 24))
4311	}
4312
4313	if invalidParams.Len() > 0 {
4314		return invalidParams
4315	}
4316	return nil
4317}
4318
4319// SetAvcIntraClass sets the AvcIntraClass field's value.
4320func (s *AvcIntraSettings) SetAvcIntraClass(v string) *AvcIntraSettings {
4321	s.AvcIntraClass = &v
4322	return s
4323}
4324
4325// SetAvcIntraUhdSettings sets the AvcIntraUhdSettings field's value.
4326func (s *AvcIntraSettings) SetAvcIntraUhdSettings(v *AvcIntraUhdSettings) *AvcIntraSettings {
4327	s.AvcIntraUhdSettings = v
4328	return s
4329}
4330
4331// SetFramerateControl sets the FramerateControl field's value.
4332func (s *AvcIntraSettings) SetFramerateControl(v string) *AvcIntraSettings {
4333	s.FramerateControl = &v
4334	return s
4335}
4336
4337// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value.
4338func (s *AvcIntraSettings) SetFramerateConversionAlgorithm(v string) *AvcIntraSettings {
4339	s.FramerateConversionAlgorithm = &v
4340	return s
4341}
4342
4343// SetFramerateDenominator sets the FramerateDenominator field's value.
4344func (s *AvcIntraSettings) SetFramerateDenominator(v int64) *AvcIntraSettings {
4345	s.FramerateDenominator = &v
4346	return s
4347}
4348
4349// SetFramerateNumerator sets the FramerateNumerator field's value.
4350func (s *AvcIntraSettings) SetFramerateNumerator(v int64) *AvcIntraSettings {
4351	s.FramerateNumerator = &v
4352	return s
4353}
4354
4355// SetInterlaceMode sets the InterlaceMode field's value.
4356func (s *AvcIntraSettings) SetInterlaceMode(v string) *AvcIntraSettings {
4357	s.InterlaceMode = &v
4358	return s
4359}
4360
4361// SetScanTypeConversionMode sets the ScanTypeConversionMode field's value.
4362func (s *AvcIntraSettings) SetScanTypeConversionMode(v string) *AvcIntraSettings {
4363	s.ScanTypeConversionMode = &v
4364	return s
4365}
4366
4367// SetSlowPal sets the SlowPal field's value.
4368func (s *AvcIntraSettings) SetSlowPal(v string) *AvcIntraSettings {
4369	s.SlowPal = &v
4370	return s
4371}
4372
4373// SetTelecine sets the Telecine field's value.
4374func (s *AvcIntraSettings) SetTelecine(v string) *AvcIntraSettings {
4375	s.Telecine = &v
4376	return s
4377}
4378
// Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K (CLASS_4K_2K).
// When you set AVC-Intra class to a different value, this object isn't allowed.
type AvcIntraUhdSettings struct {
	_ struct{} `type:"structure"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how many
	// transcoding passes MediaConvert does with your video. When you choose Multi-pass
	// (MULTI_PASS), your video quality is better and your output bitrate is more
	// accurate. That is, the actual bitrate of your output is closer to the target
	// bitrate defined in the specification. When you choose Single-pass (SINGLE_PASS),
	// your encoding time is faster. The default behavior is Single-pass (SINGLE_PASS).
	// Valid values are defined by the AvcIntraUhdQualityTuningLevel enum.
	QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"AvcIntraUhdQualityTuningLevel"`
}
4392
4393// String returns the string representation
4394func (s AvcIntraUhdSettings) String() string {
4395	return awsutil.Prettify(s)
4396}
4397
4398// GoString returns the string representation
4399func (s AvcIntraUhdSettings) GoString() string {
4400	return s.String()
4401}
4402
4403// SetQualityTuningLevel sets the QualityTuningLevel field's value.
4404func (s *AvcIntraUhdSettings) SetQualityTuningLevel(v string) *AvcIntraUhdSettings {
4405	s.QualityTuningLevel = &v
4406	return s
4407}
4408
// BadRequestException is the service error returned for requests the API
// cannot process (a "bad request" class failure). It exposes the standard
// awserr-style accessors via its Code, Message, OrigErr, StatusCode, and
// RequestID methods defined below.
type BadRequestException struct {
	_ struct{} `type:"structure"`
	// RespMetadata carries the HTTP response metadata; StatusCode and
	// RequestID read from it.
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	// Message_ holds the service-provided error message, if any; read via
	// the Message method.
	Message_ *string `locationName:"message" type:"string"`
}
4415
4416// String returns the string representation
4417func (s BadRequestException) String() string {
4418	return awsutil.Prettify(s)
4419}
4420
4421// GoString returns the string representation
4422func (s BadRequestException) GoString() string {
4423	return s.String()
4424}
4425
4426func newErrorBadRequestException(v protocol.ResponseMetadata) error {
4427	return &BadRequestException{
4428		RespMetadata: v,
4429	}
4430}
4431
// Code returns the exception type name. This is the stable identifier callers
// compare against when classifying service errors.
func (s *BadRequestException) Code() string {
	return "BadRequestException"
}
4436
4437// Message returns the exception's message.
4438func (s *BadRequestException) Message() string {
4439	if s.Message_ != nil {
4440		return *s.Message_
4441	}
4442	return ""
4443}
4444
// OrigErr always returns nil, satisfies awserr.Error interface. There is no
// wrapped underlying error for this exception type.
func (s *BadRequestException) OrigErr() error {
	return nil
}
4449
4450func (s *BadRequestException) Error() string {
4451	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
4452}
4453
// StatusCode returns the HTTP status code for the request's response error.
func (s *BadRequestException) StatusCode() int {
	return s.RespMetadata.StatusCode
}
4458
// RequestID returns the service's response RequestID for the request, useful
// when correlating a failure with AWS-side logs.
func (s *BadRequestException) RequestID() string {
	return s.RespMetadata.RequestID
}
4463
// Settings related to burn-in captions. Set up burn-in captions in the same
// output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set destinationType to BURN_IN.
type BurninDestinationSettings struct {
	_ struct{} `type:"structure"`

	// If no explicit x_position or y_position is provided, setting alignment to
	// centered will place the captions at the bottom center of the output. Similarly,
	// setting a left alignment will align captions to the bottom left of the output.
	// If x and y positions are given in conjunction with the alignment parameter,
	// the font will be justified (either left or centered) relative to those coordinates.
	// This option is not valid for source captions that are STL, 608/embedded or
	// teletext. These source settings are already pre-defined by the caption stream.
	// All burn-in and DVB-Sub font settings must match.
	Alignment *string `locationName:"alignment" type:"string" enum:"BurninSubtitleAlignment"`

	// Specifies the color of the rectangle behind the captions. All burn-in and
	// DVB-Sub font settings must match.
	BackgroundColor *string `locationName:"backgroundColor" type:"string" enum:"BurninSubtitleBackgroundColor"`

	// Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent.
	// Leaving this parameter blank is equivalent to setting it to 0 (transparent).
	// All burn-in and DVB-Sub font settings must match.
	BackgroundOpacity *int64 `locationName:"backgroundOpacity" type:"integer"`

	// Specifies the color of the burned-in captions. This option is not valid for
	// source captions that are STL, 608/embedded or teletext. These source settings
	// are already pre-defined by the caption stream. All burn-in and DVB-Sub font
	// settings must match.
	FontColor *string `locationName:"fontColor" type:"string" enum:"BurninSubtitleFontColor"`

	// Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent.
	// All burn-in and DVB-Sub font settings must match.
	FontOpacity *int64 `locationName:"fontOpacity" type:"integer"`

	// Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and
	// DVB-Sub font settings must match.
	FontResolution *int64 `locationName:"fontResolution" min:"96" type:"integer"`

	// Provide the font script, using an ISO 15924 script code, if the LanguageCode
	// is not sufficient for determining the script type. Where LanguageCode or
	// CustomLanguageCode is sufficient, use "AUTOMATIC" or leave unset. This is
	// used to help determine the appropriate font for rendering burn-in captions.
	FontScript *string `locationName:"fontScript" type:"string" enum:"FontScript"`

	// A positive integer indicates the exact font size in points. Set to 0 for
	// automatic font size selection. All burn-in and DVB-Sub font settings must
	// match.
	FontSize *int64 `locationName:"fontSize" type:"integer"`

	// Specifies font outline color. This option is not valid for source captions
	// that are either 608/embedded or teletext. These source settings are already
	// pre-defined by the caption stream. All burn-in and DVB-Sub font settings
	// must match.
	OutlineColor *string `locationName:"outlineColor" type:"string" enum:"BurninSubtitleOutlineColor"`

	// Specifies font outline size in pixels. This option is not valid for source
	// captions that are either 608/embedded or teletext. These source settings
	// are already pre-defined by the caption stream. All burn-in and DVB-Sub font
	// settings must match.
	OutlineSize *int64 `locationName:"outlineSize" type:"integer"`

	// Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub
	// font settings must match.
	ShadowColor *string `locationName:"shadowColor" type:"string" enum:"BurninSubtitleShadowColor"`

	// Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving
	// this parameter blank is equivalent to setting it to 0 (transparent). All
	// burn-in and DVB-Sub font settings must match.
	ShadowOpacity *int64 `locationName:"shadowOpacity" type:"integer"`

	// Specifies the horizontal offset of the shadow relative to the captions in
	// pixels. A value of -2 would result in a shadow offset 2 pixels to the left.
	// All burn-in and DVB-Sub font settings must match.
	ShadowXOffset *int64 `locationName:"shadowXOffset" type:"integer"`

	// Specifies the vertical offset of the shadow relative to the captions in pixels.
	// A value of -2 would result in a shadow offset 2 pixels above the text. All
	// burn-in and DVB-Sub font settings must match.
	ShadowYOffset *int64 `locationName:"shadowYOffset" type:"integer"`

	// Only applies to jobs with input captions in Teletext or STL formats. Specify
	// whether the spacing between letters in your captions is set by the captions
	// grid or varies depending on letter width. Choose fixed grid to conform to
	// the spacing specified in the captions file more accurately. Choose proportional
	// to make the text easier to read if the captions are closed caption.
	TeletextSpacing *string `locationName:"teletextSpacing" type:"string" enum:"BurninSubtitleTeletextSpacing"`

	// Specifies the horizontal position of the caption relative to the left side
	// of the output in pixels. A value of 10 would result in the captions starting
	// 10 pixels from the left of the output. If no explicit x_position is provided,
	// the horizontal caption position will be determined by the alignment parameter.
	// This option is not valid for source captions that are STL, 608/embedded or
	// teletext. These source settings are already pre-defined by the caption stream.
	// All burn-in and DVB-Sub font settings must match.
	XPosition *int64 `locationName:"xPosition" type:"integer"`

	// Specifies the vertical position of the caption relative to the top of the
	// output in pixels. A value of 10 would result in the captions starting 10
	// pixels from the top of the output. If no explicit y_position is provided,
	// the caption will be positioned towards the bottom of the output. This option
	// is not valid for source captions that are STL, 608/embedded or teletext.
	// These source settings are already pre-defined by the caption stream. All
	// burn-in and DVB-Sub font settings must match.
	YPosition *int64 `locationName:"yPosition" type:"integer"`
}
4571
4572// String returns the string representation
4573func (s BurninDestinationSettings) String() string {
4574	return awsutil.Prettify(s)
4575}
4576
4577// GoString returns the string representation
4578func (s BurninDestinationSettings) GoString() string {
4579	return s.String()
4580}
4581
4582// Validate inspects the fields of the type to determine if they are valid.
4583func (s *BurninDestinationSettings) Validate() error {
4584	invalidParams := request.ErrInvalidParams{Context: "BurninDestinationSettings"}
4585	if s.FontResolution != nil && *s.FontResolution < 96 {
4586		invalidParams.Add(request.NewErrParamMinValue("FontResolution", 96))
4587	}
4588	if s.ShadowXOffset != nil && *s.ShadowXOffset < -2.147483648e+09 {
4589		invalidParams.Add(request.NewErrParamMinValue("ShadowXOffset", -2.147483648e+09))
4590	}
4591	if s.ShadowYOffset != nil && *s.ShadowYOffset < -2.147483648e+09 {
4592		invalidParams.Add(request.NewErrParamMinValue("ShadowYOffset", -2.147483648e+09))
4593	}
4594
4595	if invalidParams.Len() > 0 {
4596		return invalidParams
4597	}
4598	return nil
4599}
4600
4601// SetAlignment sets the Alignment field's value.
4602func (s *BurninDestinationSettings) SetAlignment(v string) *BurninDestinationSettings {
4603	s.Alignment = &v
4604	return s
4605}
4606
4607// SetBackgroundColor sets the BackgroundColor field's value.
4608func (s *BurninDestinationSettings) SetBackgroundColor(v string) *BurninDestinationSettings {
4609	s.BackgroundColor = &v
4610	return s
4611}
4612
4613// SetBackgroundOpacity sets the BackgroundOpacity field's value.
4614func (s *BurninDestinationSettings) SetBackgroundOpacity(v int64) *BurninDestinationSettings {
4615	s.BackgroundOpacity = &v
4616	return s
4617}
4618
4619// SetFontColor sets the FontColor field's value.
4620func (s *BurninDestinationSettings) SetFontColor(v string) *BurninDestinationSettings {
4621	s.FontColor = &v
4622	return s
4623}
4624
4625// SetFontOpacity sets the FontOpacity field's value.
4626func (s *BurninDestinationSettings) SetFontOpacity(v int64) *BurninDestinationSettings {
4627	s.FontOpacity = &v
4628	return s
4629}
4630
4631// SetFontResolution sets the FontResolution field's value.
4632func (s *BurninDestinationSettings) SetFontResolution(v int64) *BurninDestinationSettings {
4633	s.FontResolution = &v
4634	return s
4635}
4636
4637// SetFontScript sets the FontScript field's value.
4638func (s *BurninDestinationSettings) SetFontScript(v string) *BurninDestinationSettings {
4639	s.FontScript = &v
4640	return s
4641}
4642
4643// SetFontSize sets the FontSize field's value.
4644func (s *BurninDestinationSettings) SetFontSize(v int64) *BurninDestinationSettings {
4645	s.FontSize = &v
4646	return s
4647}
4648
4649// SetOutlineColor sets the OutlineColor field's value.
4650func (s *BurninDestinationSettings) SetOutlineColor(v string) *BurninDestinationSettings {
4651	s.OutlineColor = &v
4652	return s
4653}
4654
4655// SetOutlineSize sets the OutlineSize field's value.
4656func (s *BurninDestinationSettings) SetOutlineSize(v int64) *BurninDestinationSettings {
4657	s.OutlineSize = &v
4658	return s
4659}
4660
4661// SetShadowColor sets the ShadowColor field's value.
4662func (s *BurninDestinationSettings) SetShadowColor(v string) *BurninDestinationSettings {
4663	s.ShadowColor = &v
4664	return s
4665}
4666
4667// SetShadowOpacity sets the ShadowOpacity field's value.
4668func (s *BurninDestinationSettings) SetShadowOpacity(v int64) *BurninDestinationSettings {
4669	s.ShadowOpacity = &v
4670	return s
4671}
4672
4673// SetShadowXOffset sets the ShadowXOffset field's value.
4674func (s *BurninDestinationSettings) SetShadowXOffset(v int64) *BurninDestinationSettings {
4675	s.ShadowXOffset = &v
4676	return s
4677}
4678
4679// SetShadowYOffset sets the ShadowYOffset field's value.
4680func (s *BurninDestinationSettings) SetShadowYOffset(v int64) *BurninDestinationSettings {
4681	s.ShadowYOffset = &v
4682	return s
4683}
4684
4685// SetTeletextSpacing sets the TeletextSpacing field's value.
4686func (s *BurninDestinationSettings) SetTeletextSpacing(v string) *BurninDestinationSettings {
4687	s.TeletextSpacing = &v
4688	return s
4689}
4690
4691// SetXPosition sets the XPosition field's value.
4692func (s *BurninDestinationSettings) SetXPosition(v int64) *BurninDestinationSettings {
4693	s.XPosition = &v
4694	return s
4695}
4696
4697// SetYPosition sets the YPosition field's value.
4698func (s *BurninDestinationSettings) SetYPosition(v int64) *BurninDestinationSettings {
4699	s.YPosition = &v
4700	return s
4701}
4702
// Cancel a job by sending a request with the job ID.
type CancelJobInput struct {
	_ struct{} `type:"structure"`

	// The Job ID of the job to be cancelled. Bound to the request URI path
	// (location:"uri"), so it must be non-empty; see Validate.
	//
	// Id is a required field
	Id *string `location:"uri" locationName:"id" type:"string" required:"true"`
}
4712
4713// String returns the string representation
4714func (s CancelJobInput) String() string {
4715	return awsutil.Prettify(s)
4716}
4717
4718// GoString returns the string representation
4719func (s CancelJobInput) GoString() string {
4720	return s.String()
4721}
4722
4723// Validate inspects the fields of the type to determine if they are valid.
4724func (s *CancelJobInput) Validate() error {
4725	invalidParams := request.ErrInvalidParams{Context: "CancelJobInput"}
4726	if s.Id == nil {
4727		invalidParams.Add(request.NewErrParamRequired("Id"))
4728	}
4729	if s.Id != nil && len(*s.Id) < 1 {
4730		invalidParams.Add(request.NewErrParamMinLen("Id", 1))
4731	}
4732
4733	if invalidParams.Len() > 0 {
4734		return invalidParams
4735	}
4736	return nil
4737}
4738
4739// SetId sets the Id field's value.
4740func (s *CancelJobInput) SetId(v string) *CancelJobInput {
4741	s.Id = &v
4742	return s
4743}
4744
// A cancel job request will receive a response with an empty body.
// The struct carries no fields; success is indicated by the HTTP response alone.
type CancelJobOutput struct {
	_ struct{} `type:"structure"`
}
4749
4750// String returns the string representation
4751func (s CancelJobOutput) String() string {
4752	return awsutil.Prettify(s)
4753}
4754
4755// GoString returns the string representation
4756func (s CancelJobOutput) GoString() string {
4757	return s.String()
4758}
4759
// This object holds groups of settings related to captions for one output.
// For each output that has captions, include one instance of CaptionDescriptions.
type CaptionDescription struct {
	_ struct{} `type:"structure"`

	// Specifies which caption selector to use from each input when generating
	// captions. The name should be of the format "Caption Selector <N>", which
	// denotes that the Nth caption selector defined on the input will be used
	// from each input.
	CaptionSelectorName *string `locationName:"captionSelectorName" min:"1" type:"string"`

	// Specify the language for this captions output track. For most captions output
	// formats, the encoder puts this language information in the output captions
	// metadata. If your output captions format is DVB-Sub or Burn in, the encoder
	// uses this language information when automatically selecting the font script
	// for rendering the captions text. For all outputs, you can use an ISO 639-2
	// or ISO 639-3 code. For streaming outputs, you can also use any other code
	// in the full RFC-5646 specification. Streaming outputs are those that are
	// in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft
	// Smooth Streaming.
	CustomLanguageCode *string `locationName:"customLanguageCode" type:"string"`

	// Settings related to one captions tab on the MediaConvert console. In your
	// job JSON, an instance of captions DestinationSettings is equivalent to one
	// captions tab in the console. Usually, one captions tab corresponds to one
	// output captions track. Depending on your output captions format, one tab
	// might correspond to a set of output captions tracks. For more information,
	// see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html.
	DestinationSettings *CaptionDestinationSettings `locationName:"destinationSettings" type:"structure"`

	// Specify the language of this captions output track. For most captions output
	// formats, the encoder puts this language information in the output captions
	// metadata. If your output captions format is DVB-Sub or Burn in, the encoder
	// uses this language information to choose the font language for rendering
	// the captions text.
	LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"`

	// Specify a label for this set of output captions. For example, "English",
	// "Director commentary", or "track_2". For streaming outputs, MediaConvert
	// passes this information into destination manifests for display on the end-viewer's
	// player device. For outputs in other output groups, the service ignores this
	// setting.
	LanguageDescription *string `locationName:"languageDescription" type:"string"`
}
4804
4805// String returns the string representation
4806func (s CaptionDescription) String() string {
4807	return awsutil.Prettify(s)
4808}
4809
4810// GoString returns the string representation
4811func (s CaptionDescription) GoString() string {
4812	return s.String()
4813}
4814
4815// Validate inspects the fields of the type to determine if they are valid.
4816func (s *CaptionDescription) Validate() error {
4817	invalidParams := request.ErrInvalidParams{Context: "CaptionDescription"}
4818	if s.CaptionSelectorName != nil && len(*s.CaptionSelectorName) < 1 {
4819		invalidParams.Add(request.NewErrParamMinLen("CaptionSelectorName", 1))
4820	}
4821	if s.DestinationSettings != nil {
4822		if err := s.DestinationSettings.Validate(); err != nil {
4823			invalidParams.AddNested("DestinationSettings", err.(request.ErrInvalidParams))
4824		}
4825	}
4826
4827	if invalidParams.Len() > 0 {
4828		return invalidParams
4829	}
4830	return nil
4831}
4832
4833// SetCaptionSelectorName sets the CaptionSelectorName field's value.
4834func (s *CaptionDescription) SetCaptionSelectorName(v string) *CaptionDescription {
4835	s.CaptionSelectorName = &v
4836	return s
4837}
4838
4839// SetCustomLanguageCode sets the CustomLanguageCode field's value.
4840func (s *CaptionDescription) SetCustomLanguageCode(v string) *CaptionDescription {
4841	s.CustomLanguageCode = &v
4842	return s
4843}
4844
4845// SetDestinationSettings sets the DestinationSettings field's value.
4846func (s *CaptionDescription) SetDestinationSettings(v *CaptionDestinationSettings) *CaptionDescription {
4847	s.DestinationSettings = v
4848	return s
4849}
4850
4851// SetLanguageCode sets the LanguageCode field's value.
4852func (s *CaptionDescription) SetLanguageCode(v string) *CaptionDescription {
4853	s.LanguageCode = &v
4854	return s
4855}
4856
4857// SetLanguageDescription sets the LanguageDescription field's value.
4858func (s *CaptionDescription) SetLanguageDescription(v string) *CaptionDescription {
4859	s.LanguageDescription = &v
4860	return s
4861}
4862
// Caption Description for preset. Mirrors CaptionDescription but omits the
// CaptionSelectorName field.
type CaptionDescriptionPreset struct {
	_ struct{} `type:"structure"`

	// Specify the language for this captions output track. For most captions output
	// formats, the encoder puts this language information in the output captions
	// metadata. If your output captions format is DVB-Sub or Burn in, the encoder
	// uses this language information when automatically selecting the font script
	// for rendering the captions text. For all outputs, you can use an ISO 639-2
	// or ISO 639-3 code. For streaming outputs, you can also use any other code
	// in the full RFC-5646 specification. Streaming outputs are those that are
	// in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft
	// Smooth Streaming.
	CustomLanguageCode *string `locationName:"customLanguageCode" type:"string"`

	// Settings related to one captions tab on the MediaConvert console. In your
	// job JSON, an instance of captions DestinationSettings is equivalent to one
	// captions tab in the console. Usually, one captions tab corresponds to one
	// output captions track. Depending on your output captions format, one tab
	// might correspond to a set of output captions tracks. For more information,
	// see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html.
	DestinationSettings *CaptionDestinationSettings `locationName:"destinationSettings" type:"structure"`

	// Specify the language of this captions output track. For most captions output
	// formats, the encoder puts this language information in the output captions
	// metadata. If your output captions format is DVB-Sub or Burn in, the encoder
	// uses this language information to choose the font language for rendering
	// the captions text.
	LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"`

	// Specify a label for this set of output captions. For example, "English",
	// "Director commentary", or "track_2". For streaming outputs, MediaConvert
	// passes this information into destination manifests for display on the end-viewer's
	// player device. For outputs in other output groups, the service ignores this
	// setting.
	LanguageDescription *string `locationName:"languageDescription" type:"string"`
}
4900
4901// String returns the string representation
4902func (s CaptionDescriptionPreset) String() string {
4903	return awsutil.Prettify(s)
4904}
4905
4906// GoString returns the string representation
4907func (s CaptionDescriptionPreset) GoString() string {
4908	return s.String()
4909}
4910
4911// Validate inspects the fields of the type to determine if they are valid.
4912func (s *CaptionDescriptionPreset) Validate() error {
4913	invalidParams := request.ErrInvalidParams{Context: "CaptionDescriptionPreset"}
4914	if s.DestinationSettings != nil {
4915		if err := s.DestinationSettings.Validate(); err != nil {
4916			invalidParams.AddNested("DestinationSettings", err.(request.ErrInvalidParams))
4917		}
4918	}
4919
4920	if invalidParams.Len() > 0 {
4921		return invalidParams
4922	}
4923	return nil
4924}
4925
4926// SetCustomLanguageCode sets the CustomLanguageCode field's value.
4927func (s *CaptionDescriptionPreset) SetCustomLanguageCode(v string) *CaptionDescriptionPreset {
4928	s.CustomLanguageCode = &v
4929	return s
4930}
4931
4932// SetDestinationSettings sets the DestinationSettings field's value.
4933func (s *CaptionDescriptionPreset) SetDestinationSettings(v *CaptionDestinationSettings) *CaptionDescriptionPreset {
4934	s.DestinationSettings = v
4935	return s
4936}
4937
4938// SetLanguageCode sets the LanguageCode field's value.
4939func (s *CaptionDescriptionPreset) SetLanguageCode(v string) *CaptionDescriptionPreset {
4940	s.LanguageCode = &v
4941	return s
4942}
4943
4944// SetLanguageDescription sets the LanguageDescription field's value.
4945func (s *CaptionDescriptionPreset) SetLanguageDescription(v string) *CaptionDescriptionPreset {
4946	s.LanguageDescription = &v
4947	return s
4948}
4949
// Settings related to one captions tab on the MediaConvert console. In your
// job JSON, an instance of captions DestinationSettings is equivalent to one
// captions tab in the console. Usually, one captions tab corresponds to one
// output captions track. Depending on your output captions format, one tab
// might correspond to a set of output captions tracks. For more information,
// see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html.
type CaptionDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Settings related to burn-in captions. Set up burn-in captions in the same
	// output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html.
	// When you work directly in your JSON job specification, include this object
	// and any required children when you set destinationType to BURN_IN.
	BurninDestinationSettings *BurninDestinationSettings `locationName:"burninDestinationSettings" type:"structure"`

	// Specify the format for this set of captions on this output. The default format
	// is embedded without SCTE-20. Note that your choice of video output container
	// constrains your choice of output captions format. For more information, see
	// https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html.
	// If you are using SCTE-20 and you want to create an output that complies with
	// the SCTE-43 spec, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED). To
	// create a non-compliant output where the embedded captions come first, choose
	// Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20).
	DestinationType *string `locationName:"destinationType" type:"string" enum:"CaptionDestinationType"`

	// Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same
	// output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html.
	// When you work directly in your JSON job specification, include this object
	// and any required children when you set destinationType to DVB_SUB.
	DvbSubDestinationSettings *DvbSubDestinationSettings `locationName:"dvbSubDestinationSettings" type:"structure"`

	// Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or
	// ancillary) captions. Set up embedded captions in the same output as your
	// video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html.
	// When you work directly in your JSON job specification, include this object
	// and any required children when you set destinationType to EMBEDDED, EMBEDDED_PLUS_SCTE20,
	// or SCTE20_PLUS_EMBEDDED.
	EmbeddedDestinationSettings *EmbeddedDestinationSettings `locationName:"embeddedDestinationSettings" type:"structure"`

	// Settings related to IMSC captions. IMSC is a sidecar format that holds captions
	// in a file that is separate from the video container. Set up sidecar captions
	// in the same output group, but different output from your video. For more
	// information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.
	// When you work directly in your JSON job specification, include this object
	// and any required children when you set destinationType to IMSC.
	ImscDestinationSettings *ImscDestinationSettings `locationName:"imscDestinationSettings" type:"structure"`

	// Settings related to SCC captions. SCC is a sidecar format that holds captions
	// in a file that is separate from the video container. Set up sidecar captions
	// in the same output group, but different output from your video. For more
	// information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html.
	// When you work directly in your JSON job specification, include this object
	// and any required children when you set destinationType to SCC.
	SccDestinationSettings *SccDestinationSettings `locationName:"sccDestinationSettings" type:"structure"`

	// Settings related to SRT captions. Presumably used when destinationType is
	// SRT, by analogy with the sibling settings objects — confirm against the
	// MediaConvert API reference.
	SrtDestinationSettings *SrtDestinationSettings `locationName:"srtDestinationSettings" type:"structure"`

	// Settings related to teletext captions. Set up teletext captions in the same
	// output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html.
	// When you work directly in your JSON job specification, include this object
	// and any required children when you set destinationType to TELETEXT.
	TeletextDestinationSettings *TeletextDestinationSettings `locationName:"teletextDestinationSettings" type:"structure"`

	// Settings related to TTML captions. TTML is a sidecar format that holds captions
	// in a file that is separate from the video container. Set up sidecar captions
	// in the same output group, but different output from your video. For more
	// information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.
	// When you work directly in your JSON job specification, include this object
	// and any required children when you set destinationType to TTML.
	TtmlDestinationSettings *TtmlDestinationSettings `locationName:"ttmlDestinationSettings" type:"structure"`

	// Settings related to WebVTT captions. Presumably used when destinationType
	// is WEBVTT, by analogy with the sibling settings objects — confirm against
	// the MediaConvert API reference.
	WebvttDestinationSettings *WebvttDestinationSettings `locationName:"webvttDestinationSettings" type:"structure"`
}
5025
5026// String returns the string representation
5027func (s CaptionDestinationSettings) String() string {
5028	return awsutil.Prettify(s)
5029}
5030
5031// GoString returns the string representation
5032func (s CaptionDestinationSettings) GoString() string {
5033	return s.String()
5034}
5035
5036// Validate inspects the fields of the type to determine if they are valid.
5037func (s *CaptionDestinationSettings) Validate() error {
5038	invalidParams := request.ErrInvalidParams{Context: "CaptionDestinationSettings"}
5039	if s.BurninDestinationSettings != nil {
5040		if err := s.BurninDestinationSettings.Validate(); err != nil {
5041			invalidParams.AddNested("BurninDestinationSettings", err.(request.ErrInvalidParams))
5042		}
5043	}
5044	if s.DvbSubDestinationSettings != nil {
5045		if err := s.DvbSubDestinationSettings.Validate(); err != nil {
5046			invalidParams.AddNested("DvbSubDestinationSettings", err.(request.ErrInvalidParams))
5047		}
5048	}
5049	if s.EmbeddedDestinationSettings != nil {
5050		if err := s.EmbeddedDestinationSettings.Validate(); err != nil {
5051			invalidParams.AddNested("EmbeddedDestinationSettings", err.(request.ErrInvalidParams))
5052		}
5053	}
5054	if s.TeletextDestinationSettings != nil {
5055		if err := s.TeletextDestinationSettings.Validate(); err != nil {
5056			invalidParams.AddNested("TeletextDestinationSettings", err.(request.ErrInvalidParams))
5057		}
5058	}
5059
5060	if invalidParams.Len() > 0 {
5061		return invalidParams
5062	}
5063	return nil
5064}
5065
5066// SetBurninDestinationSettings sets the BurninDestinationSettings field's value.
5067func (s *CaptionDestinationSettings) SetBurninDestinationSettings(v *BurninDestinationSettings) *CaptionDestinationSettings {
5068	s.BurninDestinationSettings = v
5069	return s
5070}
5071
5072// SetDestinationType sets the DestinationType field's value.
5073func (s *CaptionDestinationSettings) SetDestinationType(v string) *CaptionDestinationSettings {
5074	s.DestinationType = &v
5075	return s
5076}
5077
5078// SetDvbSubDestinationSettings sets the DvbSubDestinationSettings field's value.
5079func (s *CaptionDestinationSettings) SetDvbSubDestinationSettings(v *DvbSubDestinationSettings) *CaptionDestinationSettings {
5080	s.DvbSubDestinationSettings = v
5081	return s
5082}
5083
5084// SetEmbeddedDestinationSettings sets the EmbeddedDestinationSettings field's value.
5085func (s *CaptionDestinationSettings) SetEmbeddedDestinationSettings(v *EmbeddedDestinationSettings) *CaptionDestinationSettings {
5086	s.EmbeddedDestinationSettings = v
5087	return s
5088}
5089
5090// SetImscDestinationSettings sets the ImscDestinationSettings field's value.
5091func (s *CaptionDestinationSettings) SetImscDestinationSettings(v *ImscDestinationSettings) *CaptionDestinationSettings {
5092	s.ImscDestinationSettings = v
5093	return s
5094}
5095
5096// SetSccDestinationSettings sets the SccDestinationSettings field's value.
5097func (s *CaptionDestinationSettings) SetSccDestinationSettings(v *SccDestinationSettings) *CaptionDestinationSettings {
5098	s.SccDestinationSettings = v
5099	return s
5100}
5101
5102// SetSrtDestinationSettings sets the SrtDestinationSettings field's value.
5103func (s *CaptionDestinationSettings) SetSrtDestinationSettings(v *SrtDestinationSettings) *CaptionDestinationSettings {
5104	s.SrtDestinationSettings = v
5105	return s
5106}
5107
5108// SetTeletextDestinationSettings sets the TeletextDestinationSettings field's value.
5109func (s *CaptionDestinationSettings) SetTeletextDestinationSettings(v *TeletextDestinationSettings) *CaptionDestinationSettings {
5110	s.TeletextDestinationSettings = v
5111	return s
5112}
5113
5114// SetTtmlDestinationSettings sets the TtmlDestinationSettings field's value.
5115func (s *CaptionDestinationSettings) SetTtmlDestinationSettings(v *TtmlDestinationSettings) *CaptionDestinationSettings {
5116	s.TtmlDestinationSettings = v
5117	return s
5118}
5119
5120// SetWebvttDestinationSettings sets the WebvttDestinationSettings field's value.
5121func (s *CaptionDestinationSettings) SetWebvttDestinationSettings(v *WebvttDestinationSettings) *CaptionDestinationSettings {
5122	s.WebvttDestinationSettings = v
5123	return s
5124}
5125
// Use captions selectors to specify the captions data from your input that
// you use in your outputs. You can use up to 20 captions selectors per input.
type CaptionSelector struct {
	_ struct{} `type:"structure"`

	// The specific language to extract from source, using the ISO 639-2 or ISO
	// 639-3 three-letter language code. If input is SCTE-27, complete this field
	// and/or PID to select the caption language to extract. If input is DVB-Sub
	// and output is Burn-in or SMPTE-TT, complete this field and/or PID to select
	// the caption language to extract. If input is DVB-Sub that is being passed
	// through, omit this field (and PID field); there is no way to extract a specific
	// language with pass-through captions.
	CustomLanguageCode *string `locationName:"customLanguageCode" min:"3" type:"string"`

	// The specific language to extract from source. If input is SCTE-27, complete
	// this field and/or PID to select the caption language to extract. If input
	// is DVB-Sub and output is Burn-in or SMPTE-TT, complete this field and/or
	// PID to select the caption language to extract. If input is DVB-Sub that is
	// being passed through, omit this field (and PID field); there is no way to
	// extract a specific language with pass-through captions.
	LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"`

	// If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file,
	// specify the URI of the input captions source file. If your input captions
	// are IMSC in an IMF package, use TrackSourceSettings instead of FileSourceSettings.
	SourceSettings *CaptionSourceSettings `locationName:"sourceSettings" type:"structure"`
}
5153
5154// String returns the string representation
5155func (s CaptionSelector) String() string {
5156	return awsutil.Prettify(s)
5157}
5158
5159// GoString returns the string representation
5160func (s CaptionSelector) GoString() string {
5161	return s.String()
5162}
5163
5164// Validate inspects the fields of the type to determine if they are valid.
5165func (s *CaptionSelector) Validate() error {
5166	invalidParams := request.ErrInvalidParams{Context: "CaptionSelector"}
5167	if s.CustomLanguageCode != nil && len(*s.CustomLanguageCode) < 3 {
5168		invalidParams.Add(request.NewErrParamMinLen("CustomLanguageCode", 3))
5169	}
5170	if s.SourceSettings != nil {
5171		if err := s.SourceSettings.Validate(); err != nil {
5172			invalidParams.AddNested("SourceSettings", err.(request.ErrInvalidParams))
5173		}
5174	}
5175
5176	if invalidParams.Len() > 0 {
5177		return invalidParams
5178	}
5179	return nil
5180}
5181
5182// SetCustomLanguageCode sets the CustomLanguageCode field's value.
5183func (s *CaptionSelector) SetCustomLanguageCode(v string) *CaptionSelector {
5184	s.CustomLanguageCode = &v
5185	return s
5186}
5187
5188// SetLanguageCode sets the LanguageCode field's value.
5189func (s *CaptionSelector) SetLanguageCode(v string) *CaptionSelector {
5190	s.LanguageCode = &v
5191	return s
5192}
5193
5194// SetSourceSettings sets the SourceSettings field's value.
5195func (s *CaptionSelector) SetSourceSettings(v *CaptionSourceSettings) *CaptionSelector {
5196	s.SourceSettings = v
5197	return s
5198}
5199
// Ignore this setting unless your input captions format is SCC. To have the
// service compensate for differing frame rates between your input captions
// and input video, specify the frame rate of the captions file. Specify this
// value as a fraction, using the settings Framerate numerator (framerateNumerator)
// and Framerate denominator (framerateDenominator). For example, you might
// specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps,
// or 30000 / 1001 for 29.97 fps. Both values must be at least 1 (see Validate).
type CaptionSourceFramerate struct {
	_ struct{} `type:"structure"`

	// Specify the denominator of the fraction that represents the frame rate for
	// the setting Caption source frame rate (CaptionSourceFramerate). Use this
	// setting along with the setting Framerate numerator (framerateNumerator).
	// Minimum value: 1.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// Specify the numerator of the fraction that represents the frame rate for
	// the setting Caption source frame rate (CaptionSourceFramerate). Use this
	// setting along with the setting Framerate denominator (framerateDenominator).
	// Minimum value: 1.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`
}
5220
5221// String returns the string representation
5222func (s CaptionSourceFramerate) String() string {
5223	return awsutil.Prettify(s)
5224}
5225
5226// GoString returns the string representation
5227func (s CaptionSourceFramerate) GoString() string {
5228	return s.String()
5229}
5230
5231// Validate inspects the fields of the type to determine if they are valid.
5232func (s *CaptionSourceFramerate) Validate() error {
5233	invalidParams := request.ErrInvalidParams{Context: "CaptionSourceFramerate"}
5234	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
5235		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
5236	}
5237	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
5238		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1))
5239	}
5240
5241	if invalidParams.Len() > 0 {
5242		return invalidParams
5243	}
5244	return nil
5245}
5246
5247// SetFramerateDenominator sets the FramerateDenominator field's value.
5248func (s *CaptionSourceFramerate) SetFramerateDenominator(v int64) *CaptionSourceFramerate {
5249	s.FramerateDenominator = &v
5250	return s
5251}
5252
5253// SetFramerateNumerator sets the FramerateNumerator field's value.
5254func (s *CaptionSourceFramerate) SetFramerateNumerator(v int64) *CaptionSourceFramerate {
5255	s.FramerateNumerator = &v
5256	return s
5257}
5258
5259// If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file,
5260// specify the URI of the input captions source file. If your input captions
5261// are IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings.
5262type CaptionSourceSettings struct {
5263	_ struct{} `type:"structure"`
5264
5265	// Settings for ancillary captions source.
5266	AncillarySourceSettings *AncillarySourceSettings `locationName:"ancillarySourceSettings" type:"structure"`
5267
5268	// DVB Sub Source Settings
5269	DvbSubSourceSettings *DvbSubSourceSettings `locationName:"dvbSubSourceSettings" type:"structure"`
5270
5271	// Settings for embedded captions Source
5272	EmbeddedSourceSettings *EmbeddedSourceSettings `locationName:"embeddedSourceSettings" type:"structure"`
5273
5274	// If your input captions are SCC, SMI, SRT, STL, TTML, WebVTT, or IMSC 1.1
5275	// in an xml file, specify the URI of the input caption source file. If your
5276	// caption source is IMSC in an IMF package, use TrackSourceSettings instead
5277	// of FileSoureSettings.
5278	FileSourceSettings *FileSourceSettings `locationName:"fileSourceSettings" type:"structure"`
5279
5280	// Use Source (SourceType) to identify the format of your input captions. The
5281	// service cannot auto-detect caption format.
5282	SourceType *string `locationName:"sourceType" type:"string" enum:"CaptionSourceType"`
5283
5284	// Settings specific to Teletext caption sources, including Page number.
5285	TeletextSourceSettings *TeletextSourceSettings `locationName:"teletextSourceSettings" type:"structure"`
5286
5287	// Settings specific to caption sources that are specified by track number.
5288	// Currently, this is only IMSC captions in an IMF package. If your caption
5289	// source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead
5290	// of TrackSourceSettings.
5291	TrackSourceSettings *TrackSourceSettings `locationName:"trackSourceSettings" type:"structure"`
5292
5293	// Settings specific to WebVTT sources in HLS alternative rendition group. Specify
5294	// the properties (renditionGroupId, renditionName or renditionLanguageCode)
5295	// to identify the unique subtitle track among the alternative rendition groups
5296	// present in the HLS manifest. If no unique track is found, or multiple tracks
5297	// match the specified properties, the job fails. If there is only one subtitle
5298	// track in the rendition group, the settings can be left empty and the default
5299	// subtitle track will be chosen. If your caption source is a sidecar file,
5300	// use FileSourceSettings instead of WebvttHlsSourceSettings.
5301	WebvttHlsSourceSettings *WebvttHlsSourceSettings `locationName:"webvttHlsSourceSettings" type:"structure"`
5302}
5303
5304// String returns the string representation
5305func (s CaptionSourceSettings) String() string {
5306	return awsutil.Prettify(s)
5307}
5308
5309// GoString returns the string representation
5310func (s CaptionSourceSettings) GoString() string {
5311	return s.String()
5312}
5313
5314// Validate inspects the fields of the type to determine if they are valid.
5315func (s *CaptionSourceSettings) Validate() error {
5316	invalidParams := request.ErrInvalidParams{Context: "CaptionSourceSettings"}
5317	if s.AncillarySourceSettings != nil {
5318		if err := s.AncillarySourceSettings.Validate(); err != nil {
5319			invalidParams.AddNested("AncillarySourceSettings", err.(request.ErrInvalidParams))
5320		}
5321	}
5322	if s.DvbSubSourceSettings != nil {
5323		if err := s.DvbSubSourceSettings.Validate(); err != nil {
5324			invalidParams.AddNested("DvbSubSourceSettings", err.(request.ErrInvalidParams))
5325		}
5326	}
5327	if s.EmbeddedSourceSettings != nil {
5328		if err := s.EmbeddedSourceSettings.Validate(); err != nil {
5329			invalidParams.AddNested("EmbeddedSourceSettings", err.(request.ErrInvalidParams))
5330		}
5331	}
5332	if s.FileSourceSettings != nil {
5333		if err := s.FileSourceSettings.Validate(); err != nil {
5334			invalidParams.AddNested("FileSourceSettings", err.(request.ErrInvalidParams))
5335		}
5336	}
5337	if s.TeletextSourceSettings != nil {
5338		if err := s.TeletextSourceSettings.Validate(); err != nil {
5339			invalidParams.AddNested("TeletextSourceSettings", err.(request.ErrInvalidParams))
5340		}
5341	}
5342	if s.TrackSourceSettings != nil {
5343		if err := s.TrackSourceSettings.Validate(); err != nil {
5344			invalidParams.AddNested("TrackSourceSettings", err.(request.ErrInvalidParams))
5345		}
5346	}
5347
5348	if invalidParams.Len() > 0 {
5349		return invalidParams
5350	}
5351	return nil
5352}
5353
5354// SetAncillarySourceSettings sets the AncillarySourceSettings field's value.
5355func (s *CaptionSourceSettings) SetAncillarySourceSettings(v *AncillarySourceSettings) *CaptionSourceSettings {
5356	s.AncillarySourceSettings = v
5357	return s
5358}
5359
5360// SetDvbSubSourceSettings sets the DvbSubSourceSettings field's value.
5361func (s *CaptionSourceSettings) SetDvbSubSourceSettings(v *DvbSubSourceSettings) *CaptionSourceSettings {
5362	s.DvbSubSourceSettings = v
5363	return s
5364}
5365
5366// SetEmbeddedSourceSettings sets the EmbeddedSourceSettings field's value.
5367func (s *CaptionSourceSettings) SetEmbeddedSourceSettings(v *EmbeddedSourceSettings) *CaptionSourceSettings {
5368	s.EmbeddedSourceSettings = v
5369	return s
5370}
5371
5372// SetFileSourceSettings sets the FileSourceSettings field's value.
5373func (s *CaptionSourceSettings) SetFileSourceSettings(v *FileSourceSettings) *CaptionSourceSettings {
5374	s.FileSourceSettings = v
5375	return s
5376}
5377
5378// SetSourceType sets the SourceType field's value.
5379func (s *CaptionSourceSettings) SetSourceType(v string) *CaptionSourceSettings {
5380	s.SourceType = &v
5381	return s
5382}
5383
5384// SetTeletextSourceSettings sets the TeletextSourceSettings field's value.
5385func (s *CaptionSourceSettings) SetTeletextSourceSettings(v *TeletextSourceSettings) *CaptionSourceSettings {
5386	s.TeletextSourceSettings = v
5387	return s
5388}
5389
5390// SetTrackSourceSettings sets the TrackSourceSettings field's value.
5391func (s *CaptionSourceSettings) SetTrackSourceSettings(v *TrackSourceSettings) *CaptionSourceSettings {
5392	s.TrackSourceSettings = v
5393	return s
5394}
5395
5396// SetWebvttHlsSourceSettings sets the WebvttHlsSourceSettings field's value.
5397func (s *CaptionSourceSettings) SetWebvttHlsSourceSettings(v *WebvttHlsSourceSettings) *CaptionSourceSettings {
5398	s.WebvttHlsSourceSettings = v
5399	return s
5400}
5401
5402// Channel mapping (ChannelMapping) contains the group of fields that hold the
5403// remixing value for each channel, in dB. Specify remix values to indicate
5404// how much of the content from your input audio channel you want in your output
5405// audio channels. Each instance of the InputChannels or InputChannelsFineTune
5406// array specifies these values for one output channel. Use one instance of
5407// this array for each output channel. In the console, each array corresponds
5408// to a column in the graphical depiction of the mapping matrix. The rows of
5409// the graphical matrix correspond to input channels. Valid values are within
5410// the range from -60 (mute) through 6. A setting of 0 passes the input channel
5411// unchanged to the output channel (no attenuation or amplification). Use InputChannels
5412// or InputChannelsFineTune to specify your remix values. Don't use both.
5413type ChannelMapping struct {
5414	_ struct{} `type:"structure"`
5415
5416	// In your JSON job specification, include one child of OutputChannels for each
5417	// audio channel that you want in your output. Each child should contain one
5418	// instance of InputChannels or InputChannelsFineTune.
5419	OutputChannels []*OutputChannelMapping `locationName:"outputChannels" type:"list"`
5420}
5421
5422// String returns the string representation
5423func (s ChannelMapping) String() string {
5424	return awsutil.Prettify(s)
5425}
5426
5427// GoString returns the string representation
5428func (s ChannelMapping) GoString() string {
5429	return s.String()
5430}
5431
5432// SetOutputChannels sets the OutputChannels field's value.
5433func (s *ChannelMapping) SetOutputChannels(v []*OutputChannelMapping) *ChannelMapping {
5434	s.OutputChannels = v
5435	return s
5436}
5437
5438// Specify the details for each pair of HLS and DASH additional manifests that
5439// you want the service to generate for this CMAF output group. Each pair of
5440// manifests can reference a different subset of outputs in the group.
5441type CmafAdditionalManifest struct {
5442	_ struct{} `type:"structure"`
5443
5444	// Specify a name modifier that the service adds to the name of this manifest
5445	// to make it different from the file names of the other main manifests in the
5446	// output group. For example, say that the default main manifest for your HLS
5447	// group is film-name.m3u8. If you enter "-no-premium" for this setting, then
5448	// the file name the service generates for this top-level manifest is film-name-no-premium.m3u8.
5449	// For HLS output groups, specify a manifestNameModifier that is different from
5450	// the nameModifier of the output. The service uses the output name modifier
5451	// to create unique names for the individual variant manifests.
5452	ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"`
5453
5454	// Specify the outputs that you want this additional top-level manifest to reference.
5455	SelectedOutputs []*string `locationName:"selectedOutputs" type:"list"`
5456}
5457
5458// String returns the string representation
5459func (s CmafAdditionalManifest) String() string {
5460	return awsutil.Prettify(s)
5461}
5462
5463// GoString returns the string representation
5464func (s CmafAdditionalManifest) GoString() string {
5465	return s.String()
5466}
5467
5468// Validate inspects the fields of the type to determine if they are valid.
5469func (s *CmafAdditionalManifest) Validate() error {
5470	invalidParams := request.ErrInvalidParams{Context: "CmafAdditionalManifest"}
5471	if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 {
5472		invalidParams.Add(request.NewErrParamMinLen("ManifestNameModifier", 1))
5473	}
5474
5475	if invalidParams.Len() > 0 {
5476		return invalidParams
5477	}
5478	return nil
5479}
5480
5481// SetManifestNameModifier sets the ManifestNameModifier field's value.
5482func (s *CmafAdditionalManifest) SetManifestNameModifier(v string) *CmafAdditionalManifest {
5483	s.ManifestNameModifier = &v
5484	return s
5485}
5486
5487// SetSelectedOutputs sets the SelectedOutputs field's value.
5488func (s *CmafAdditionalManifest) SetSelectedOutputs(v []*string) *CmafAdditionalManifest {
5489	s.SelectedOutputs = v
5490	return s
5491}
5492
5493// Settings for CMAF encryption
5494type CmafEncryptionSettings struct {
5495	_ struct{} `type:"structure"`
5496
5497	// This is a 128-bit, 16-byte hex value represented by a 32-character text string.
5498	// If this parameter is not set then the Initialization Vector will follow the
5499	// segment number by default.
5500	ConstantInitializationVector *string `locationName:"constantInitializationVector" min:"32" type:"string"`
5501
5502	// Specify the encryption scheme that you want the service to use when encrypting
5503	// your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR).
5504	EncryptionMethod *string `locationName:"encryptionMethod" type:"string" enum:"CmafEncryptionType"`
5505
5506	// When you use DRM with CMAF outputs, choose whether the service writes the
5507	// 128-bit encryption initialization vector in the HLS and DASH manifests.
5508	InitializationVectorInManifest *string `locationName:"initializationVectorInManifest" type:"string" enum:"CmafInitializationVectorInManifest"`
5509
5510	// If your output group type is CMAF, use these settings when doing DRM encryption
5511	// with a SPEKE-compliant key provider. If your output group type is HLS, DASH,
5512	// or Microsoft Smooth, use the SpekeKeyProvider settings instead.
5513	SpekeKeyProvider *SpekeKeyProviderCmaf `locationName:"spekeKeyProvider" type:"structure"`
5514
5515	// Use these settings to set up encryption with a static key provider.
5516	StaticKeyProvider *StaticKeyProvider `locationName:"staticKeyProvider" type:"structure"`
5517
5518	// Specify whether your DRM encryption key is static or from a key provider
5519	// that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.
5520	Type *string `locationName:"type" type:"string" enum:"CmafKeyProviderType"`
5521}
5522
5523// String returns the string representation
5524func (s CmafEncryptionSettings) String() string {
5525	return awsutil.Prettify(s)
5526}
5527
5528// GoString returns the string representation
5529func (s CmafEncryptionSettings) GoString() string {
5530	return s.String()
5531}
5532
5533// Validate inspects the fields of the type to determine if they are valid.
5534func (s *CmafEncryptionSettings) Validate() error {
5535	invalidParams := request.ErrInvalidParams{Context: "CmafEncryptionSettings"}
5536	if s.ConstantInitializationVector != nil && len(*s.ConstantInitializationVector) < 32 {
5537		invalidParams.Add(request.NewErrParamMinLen("ConstantInitializationVector", 32))
5538	}
5539
5540	if invalidParams.Len() > 0 {
5541		return invalidParams
5542	}
5543	return nil
5544}
5545
5546// SetConstantInitializationVector sets the ConstantInitializationVector field's value.
5547func (s *CmafEncryptionSettings) SetConstantInitializationVector(v string) *CmafEncryptionSettings {
5548	s.ConstantInitializationVector = &v
5549	return s
5550}
5551
5552// SetEncryptionMethod sets the EncryptionMethod field's value.
5553func (s *CmafEncryptionSettings) SetEncryptionMethod(v string) *CmafEncryptionSettings {
5554	s.EncryptionMethod = &v
5555	return s
5556}
5557
5558// SetInitializationVectorInManifest sets the InitializationVectorInManifest field's value.
5559func (s *CmafEncryptionSettings) SetInitializationVectorInManifest(v string) *CmafEncryptionSettings {
5560	s.InitializationVectorInManifest = &v
5561	return s
5562}
5563
5564// SetSpekeKeyProvider sets the SpekeKeyProvider field's value.
5565func (s *CmafEncryptionSettings) SetSpekeKeyProvider(v *SpekeKeyProviderCmaf) *CmafEncryptionSettings {
5566	s.SpekeKeyProvider = v
5567	return s
5568}
5569
5570// SetStaticKeyProvider sets the StaticKeyProvider field's value.
5571func (s *CmafEncryptionSettings) SetStaticKeyProvider(v *StaticKeyProvider) *CmafEncryptionSettings {
5572	s.StaticKeyProvider = v
5573	return s
5574}
5575
5576// SetType sets the Type field's value.
5577func (s *CmafEncryptionSettings) SetType(v string) *CmafEncryptionSettings {
5578	s.Type = &v
5579	return s
5580}
5581
5582// Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
5583// When you work directly in your JSON job specification, include this object
5584// and any required children when you set Type, under OutputGroupSettings, to
5585// CMAF_GROUP_SETTINGS.
5586type CmafGroupSettings struct {
5587	_ struct{} `type:"structure"`
5588
5589	// By default, the service creates one top-level .m3u8 HLS manifest and one
5590	// top -level .mpd DASH manifest for each CMAF output group in your job. These
5591	// default manifests reference every output in the output group. To create additional
5592	// top-level manifests that reference a subset of the outputs in the output
5593	// group, specify a list of them here. For each additional manifest that you
5594	// specify, the service creates one HLS manifest and one DASH manifest.
5595	AdditionalManifests []*CmafAdditionalManifest `locationName:"additionalManifests" type:"list"`
5596
5597	// A partial URI prefix that will be put in the manifest file at the top level
5598	// BaseURL element. Can be used if streams are delivered from a different URL
5599	// than the manifest file.
5600	BaseUrl *string `locationName:"baseUrl" type:"string"`
5601
5602	// Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no
5603	// tag. Otherwise, keep the default value Enabled (ENABLED) and control caching
5604	// in your video distribution set up. For example, use the Cache-Control http
5605	// header.
5606	ClientCache *string `locationName:"clientCache" type:"string" enum:"CmafClientCache"`
5607
5608	// Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist
5609	// generation.
5610	CodecSpecification *string `locationName:"codecSpecification" type:"string" enum:"CmafCodecSpecification"`
5611
5612	// Use Destination (Destination) to specify the S3 output location and the output
5613	// filename base. Destination accepts format identifiers. If you do not specify
5614	// the base filename in the URI, the service will use the filename of the input
5615	// file. If your job has multiple inputs, the service uses the filename of the
5616	// first input file.
5617	Destination *string `locationName:"destination" type:"string"`
5618
5619	// Settings associated with the destination. Will vary based on the type of
5620	// destination
5621	DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"`
5622
5623	// DRM settings.
5624	Encryption *CmafEncryptionSettings `locationName:"encryption" type:"structure"`
5625
5626	// Length of fragments to generate (in seconds). Fragment length must be compatible
5627	// with GOP size and Framerate. Note that fragments will end on the next keyframe
5628	// after this number of seconds, so actual fragment length may be longer. When
5629	// Emit Single File is checked, the fragmentation is internal to a single output
5630	// file and it does not cause the creation of many output files as in other
5631	// output types.
5632	FragmentLength *int64 `locationName:"fragmentLength" min:"1" type:"integer"`
5633
5634	// Specify whether MediaConvert generates images for trick play. Keep the default
5635	// value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL)
5636	// to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME)
5637	// to generate tiled thumbnails and full-resolution images of single frames.
5638	// When you enable Write HLS manifest (WriteHlsManifest), MediaConvert creates
5639	// a child manifest for each set of images that you generate and adds corresponding
5640	// entries to the parent manifest. When you enable Write DASH manifest (WriteDashManifest),
5641	// MediaConvert adds an entry in the .mpd manifest for each set of images that
5642	// you generate. A common application for these images is Roku trick mode. The
5643	// thumbnails and full-frame images that MediaConvert creates with this feature
5644	// are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md
5645	ImageBasedTrickPlay *string `locationName:"imageBasedTrickPlay" type:"string" enum:"CmafImageBasedTrickPlay"`
5646
5647	// When set to GZIP, compresses HLS playlist.
5648	ManifestCompression *string `locationName:"manifestCompression" type:"string" enum:"CmafManifestCompression"`
5649
5650	// Indicates whether the output manifest should use floating point values for
5651	// segment duration.
5652	ManifestDurationFormat *string `locationName:"manifestDurationFormat" type:"string" enum:"CmafManifestDurationFormat"`
5653
5654	// Minimum time of initially buffered media that is needed to ensure smooth
5655	// playout.
5656	MinBufferTime *int64 `locationName:"minBufferTime" type:"integer"`
5657
5658	// Keep this setting at the default value of 0, unless you are troubleshooting
5659	// a problem with how devices play back the end of your video asset. If you
5660	// know that player devices are hanging on the final segment of your video because
5661	// the length of your final segment is too short, use this setting to specify
5662	// a minimum final segment length, in seconds. Choose a value that is greater
5663	// than or equal to 1 and less than your segment length. When you specify a
5664	// value for this setting, the encoder will combine any final segment that is
5665	// shorter than the length that you specify with the previous segment. For example,
5666	// your segment length is 3 seconds and your final segment is .5 seconds without
5667	// a minimum final segment length; when you set the minimum final segment length
5668	// to 1, your final segment is 3.5 seconds.
5669	MinFinalSegmentLength *float64 `locationName:"minFinalSegmentLength" type:"double"`
5670
5671	// Specify whether your DASH profile is on-demand or main. When you choose Main
5672	// profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011
5673	// in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE),
5674	// the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd.
5675	// When you choose On-demand, you must also set the output group setting Segment
5676	// control (SegmentControl) to Single file (SINGLE_FILE).
5677	MpdProfile *string `locationName:"mpdProfile" type:"string" enum:"CmafMpdProfile"`
5678
5679	// Use this setting only when your output video stream has B-frames, which causes
5680	// the initial presentation time stamp (PTS) to be offset from the initial decode
5681	// time stamp (DTS). Specify how MediaConvert handles PTS when writing time
5682	// stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS)
5683	// when you want MediaConvert to use the initial PTS as the first time stamp
5684	// in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore
5685	// the initial PTS in the video stream and instead write the initial time stamp
5686	// as zero in the manifest. For outputs that don't have B-frames, the time stamps
5687	// in your DASH manifests start at zero regardless of your choice here.
5688	PtsOffsetHandlingForBFrames *string `locationName:"ptsOffsetHandlingForBFrames" type:"string" enum:"CmafPtsOffsetHandlingForBFrames"`
5689
5690	// When set to SINGLE_FILE, a single output file is generated, which is internally
5691	// segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES,
5692	// separate segment files will be created.
5693	SegmentControl *string `locationName:"segmentControl" type:"string" enum:"CmafSegmentControl"`
5694
5695	// Use this setting to specify the length, in seconds, of each individual CMAF
5696	// segment. This value applies to the whole package; that is, to every output
5697	// in the output group. Note that segments end on the first keyframe after this
5698	// number of seconds, so the actual segment length might be slightly longer.
5699	// If you set Segment control (CmafSegmentControl) to single file, the service
5700	// puts the content of each output in a single file that has metadata that marks
5701	// these segments. If you set it to segmented files, the service creates multiple
5702	// files for each output, each with the content of one segment.
5703	SegmentLength *int64 `locationName:"segmentLength" min:"1" type:"integer"`
5704
5705	// Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag
5706	// of variant manifest.
5707	StreamInfResolution *string `locationName:"streamInfResolution" type:"string" enum:"CmafStreamInfResolution"`
5708
5709	// When set to LEGACY, the segment target duration is always rounded up to the
5710	// nearest integer value above its current value in seconds. When set to SPEC\\_COMPLIANT,
5711	// the segment target duration is rounded up to the nearest integer value if
5712	// fraction seconds are greater than or equal to 0.5 (>= 0.5) and rounded down
5713	// if less than 0.5 (< 0.5). You may need to use LEGACY if your client needs
5714	// to ensure that the target duration is always longer than the actual duration
5715	// of the segment. Some older players may experience interrupted playback when
5716	// the actual duration of a track in a segment is longer than the target duration.
5717	TargetDurationCompatibilityMode *string `locationName:"targetDurationCompatibilityMode" type:"string" enum:"CmafTargetDurationCompatibilityMode"`
5718
5719	// When set to ENABLED, a DASH MPD manifest will be generated for this output.
5720	WriteDashManifest *string `locationName:"writeDashManifest" type:"string" enum:"CmafWriteDASHManifest"`
5721
5722	// When set to ENABLED, an Apple HLS manifest will be generated for this output.
5723	WriteHlsManifest *string `locationName:"writeHlsManifest" type:"string" enum:"CmafWriteHLSManifest"`
5724
5725	// When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation),
5726	// your DASH manifest shows precise segment durations. The segment duration
5727	// information appears inside the SegmentTimeline element, inside SegmentTemplate
5728	// at the Representation level. When this feature isn't enabled, the segment
5729	// durations in your DASH manifest are approximate. The segment duration information
5730	// appears in the duration attribute of the SegmentTemplate element.
5731	WriteSegmentTimelineInRepresentation *string `locationName:"writeSegmentTimelineInRepresentation" type:"string" enum:"CmafWriteSegmentTimelineInRepresentation"`
5732}
5733
5734// String returns the string representation
5735func (s CmafGroupSettings) String() string {
5736	return awsutil.Prettify(s)
5737}
5738
5739// GoString returns the string representation
5740func (s CmafGroupSettings) GoString() string {
5741	return s.String()
5742}
5743
5744// Validate inspects the fields of the type to determine if they are valid.
5745func (s *CmafGroupSettings) Validate() error {
5746	invalidParams := request.ErrInvalidParams{Context: "CmafGroupSettings"}
5747	if s.FragmentLength != nil && *s.FragmentLength < 1 {
5748		invalidParams.Add(request.NewErrParamMinValue("FragmentLength", 1))
5749	}
5750	if s.SegmentLength != nil && *s.SegmentLength < 1 {
5751		invalidParams.Add(request.NewErrParamMinValue("SegmentLength", 1))
5752	}
5753	if s.AdditionalManifests != nil {
5754		for i, v := range s.AdditionalManifests {
5755			if v == nil {
5756				continue
5757			}
5758			if err := v.Validate(); err != nil {
5759				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(request.ErrInvalidParams))
5760			}
5761		}
5762	}
5763	if s.Encryption != nil {
5764		if err := s.Encryption.Validate(); err != nil {
5765			invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams))
5766		}
5767	}
5768
5769	if invalidParams.Len() > 0 {
5770		return invalidParams
5771	}
5772	return nil
5773}
5774
5775// SetAdditionalManifests sets the AdditionalManifests field's value.
5776func (s *CmafGroupSettings) SetAdditionalManifests(v []*CmafAdditionalManifest) *CmafGroupSettings {
5777	s.AdditionalManifests = v
5778	return s
5779}
5780
5781// SetBaseUrl sets the BaseUrl field's value.
5782func (s *CmafGroupSettings) SetBaseUrl(v string) *CmafGroupSettings {
5783	s.BaseUrl = &v
5784	return s
5785}
5786
5787// SetClientCache sets the ClientCache field's value.
5788func (s *CmafGroupSettings) SetClientCache(v string) *CmafGroupSettings {
5789	s.ClientCache = &v
5790	return s
5791}
5792
5793// SetCodecSpecification sets the CodecSpecification field's value.
5794func (s *CmafGroupSettings) SetCodecSpecification(v string) *CmafGroupSettings {
5795	s.CodecSpecification = &v
5796	return s
5797}
5798
5799// SetDestination sets the Destination field's value.
5800func (s *CmafGroupSettings) SetDestination(v string) *CmafGroupSettings {
5801	s.Destination = &v
5802	return s
5803}
5804
5805// SetDestinationSettings sets the DestinationSettings field's value.
5806func (s *CmafGroupSettings) SetDestinationSettings(v *DestinationSettings) *CmafGroupSettings {
5807	s.DestinationSettings = v
5808	return s
5809}
5810
5811// SetEncryption sets the Encryption field's value.
5812func (s *CmafGroupSettings) SetEncryption(v *CmafEncryptionSettings) *CmafGroupSettings {
5813	s.Encryption = v
5814	return s
5815}
5816
5817// SetFragmentLength sets the FragmentLength field's value.
5818func (s *CmafGroupSettings) SetFragmentLength(v int64) *CmafGroupSettings {
5819	s.FragmentLength = &v
5820	return s
5821}
5822
5823// SetImageBasedTrickPlay sets the ImageBasedTrickPlay field's value.
5824func (s *CmafGroupSettings) SetImageBasedTrickPlay(v string) *CmafGroupSettings {
5825	s.ImageBasedTrickPlay = &v
5826	return s
5827}
5828
5829// SetManifestCompression sets the ManifestCompression field's value.
5830func (s *CmafGroupSettings) SetManifestCompression(v string) *CmafGroupSettings {
5831	s.ManifestCompression = &v
5832	return s
5833}
5834
5835// SetManifestDurationFormat sets the ManifestDurationFormat field's value.
5836func (s *CmafGroupSettings) SetManifestDurationFormat(v string) *CmafGroupSettings {
5837	s.ManifestDurationFormat = &v
5838	return s
5839}
5840
5841// SetMinBufferTime sets the MinBufferTime field's value.
5842func (s *CmafGroupSettings) SetMinBufferTime(v int64) *CmafGroupSettings {
5843	s.MinBufferTime = &v
5844	return s
5845}
5846
5847// SetMinFinalSegmentLength sets the MinFinalSegmentLength field's value.
5848func (s *CmafGroupSettings) SetMinFinalSegmentLength(v float64) *CmafGroupSettings {
5849	s.MinFinalSegmentLength = &v
5850	return s
5851}
5852
5853// SetMpdProfile sets the MpdProfile field's value.
5854func (s *CmafGroupSettings) SetMpdProfile(v string) *CmafGroupSettings {
5855	s.MpdProfile = &v
5856	return s
5857}
5858
5859// SetPtsOffsetHandlingForBFrames sets the PtsOffsetHandlingForBFrames field's value.
5860func (s *CmafGroupSettings) SetPtsOffsetHandlingForBFrames(v string) *CmafGroupSettings {
5861	s.PtsOffsetHandlingForBFrames = &v
5862	return s
5863}
5864
5865// SetSegmentControl sets the SegmentControl field's value.
5866func (s *CmafGroupSettings) SetSegmentControl(v string) *CmafGroupSettings {
5867	s.SegmentControl = &v
5868	return s
5869}
5870
5871// SetSegmentLength sets the SegmentLength field's value.
5872func (s *CmafGroupSettings) SetSegmentLength(v int64) *CmafGroupSettings {
5873	s.SegmentLength = &v
5874	return s
5875}
5876
5877// SetStreamInfResolution sets the StreamInfResolution field's value.
5878func (s *CmafGroupSettings) SetStreamInfResolution(v string) *CmafGroupSettings {
5879	s.StreamInfResolution = &v
5880	return s
5881}
5882
5883// SetTargetDurationCompatibilityMode sets the TargetDurationCompatibilityMode field's value.
5884func (s *CmafGroupSettings) SetTargetDurationCompatibilityMode(v string) *CmafGroupSettings {
5885	s.TargetDurationCompatibilityMode = &v
5886	return s
5887}
5888
5889// SetWriteDashManifest sets the WriteDashManifest field's value.
5890func (s *CmafGroupSettings) SetWriteDashManifest(v string) *CmafGroupSettings {
5891	s.WriteDashManifest = &v
5892	return s
5893}
5894
5895// SetWriteHlsManifest sets the WriteHlsManifest field's value.
5896func (s *CmafGroupSettings) SetWriteHlsManifest(v string) *CmafGroupSettings {
5897	s.WriteHlsManifest = &v
5898	return s
5899}
5900
5901// SetWriteSegmentTimelineInRepresentation sets the WriteSegmentTimelineInRepresentation field's value.
5902func (s *CmafGroupSettings) SetWriteSegmentTimelineInRepresentation(v string) *CmafGroupSettings {
5903	s.WriteSegmentTimelineInRepresentation = &v
5904	return s
5905}
5906
// These settings relate to the fragmented MP4 container for the segments in
// your CMAF outputs. This shape appears as the CmfcSettings member of
// ContainerSettings. All fields are optional enum/string pointers; a nil
// field means "use the service default".
type CmfcSettings struct {
	_ struct{} `type:"structure"`

	// Specify this setting only when your output will be consumed by a downstream
	// repackaging workflow that is sensitive to very small duration differences
	// between video and audio. For this situation, choose Match video duration
	// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
	// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
	// MediaConvert pads the output audio streams with silence or trims them to
	// ensure that the total duration of each audio stream is at least as long as
	// the total duration of the video stream. After padding or trimming, the audio
	// stream duration is no more than one frame longer than the video stream. MediaConvert
	// applies audio padding or trimming only to the end of the last segment of
	// the output. For unsegmented outputs, MediaConvert adds padding only to the
	// end of the file. When you keep the default value, any minor discrepancies
	// between audio and video duration will depend on your output audio codec.
	AudioDuration *string `locationName:"audioDuration" type:"string" enum:"CmfcAudioDuration"`

	// Specify the audio rendition group for this audio rendition. Specify up to
	// one value for each audio output in your output group. This value appears
	// in your HLS parent manifest in the EXT-X-MEDIA tag of TYPE=AUDIO, as the
	// value for the GROUP-ID attribute. For example, if you specify "audio_aac_1"
	// for Audio group ID, it appears in your manifest like this: #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio_aac_1".
	// Related setting: To associate the rendition group that this audio track belongs
	// to with a video rendition, include the same value that you provide here for
	// that video output's setting Audio rendition sets (audioRenditionSets).
	AudioGroupId *string `locationName:"audioGroupId" type:"string"`

	// List the audio rendition groups that you want included with this video rendition.
	// Use a comma-separated list. For example, say you want to include the audio
	// rendition groups that have the audio group IDs "audio_aac_1" and "audio_dolby".
	// Then you would specify this value: "audio_aac_1, audio_dolby". Related setting:
	// The rendition groups that you include in your comma-separated list should
	// all match values that you specify in the setting Audio group ID (AudioGroupId)
	// for audio renditions in the same output group as this video rendition. Default
	// behavior: If you don't specify anything here and for Audio group ID, MediaConvert
	// puts each audio variant in its own audio rendition group and associates it
	// with every video variant. Each value in your list appears in your HLS parent
	// manifest in the EXT-X-STREAM-INF tag as the value for the AUDIO attribute.
	// To continue the previous example, say that the file name for the child manifest
	// for your video rendition is "amazing_video_1.m3u8". Then, in your parent
	// manifest, each value will appear on separate lines, like this: #EXT-X-STREAM-INF:AUDIO="audio_aac_1"...
	// amazing_video_1.m3u8 #EXT-X-STREAM-INF:AUDIO="audio_dolby"... amazing_video_1.m3u8
	AudioRenditionSets *string `locationName:"audioRenditionSets" type:"string"`

	// Use this setting to control the values that MediaConvert puts in your HLS
	// parent playlist to control how the client player selects which audio track
	// to play. The other options for this setting determine the values that MediaConvert
	// writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry
	// for the audio variant. For more information about these attributes, see the
	// Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist.
	// Choose Alternate audio, auto select, default (ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT)
	// to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant
	// in your output group. Choose Alternate audio, auto select, not default (ALTERNATE_AUDIO_AUTO_SELECT)
	// to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select
	// to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this
	// setting, MediaConvert defaults to Alternate audio, auto select, default.
	// When there is more than one variant in your output group, you must explicitly
	// choose a value for this setting.
	AudioTrackType *string `locationName:"audioTrackType" type:"string" enum:"CmfcAudioTrackType"`

	// Specify whether to flag this audio track as descriptive video service (DVS)
	// in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes
	// the parameter CHARACTERISTICS="public.accessibility.describes-video" in the
	// EXT-X-MEDIA entry for this track. When you keep the default choice, Don't
	// flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can
	// help with accessibility on Apple devices. For more information, see the Apple
	// documentation.
	DescriptiveVideoServiceFlag *string `locationName:"descriptiveVideoServiceFlag" type:"string" enum:"CmfcDescriptiveVideoServiceFlag"`

	// Choose Include (INCLUDE) to have MediaConvert generate an HLS child manifest
	// that lists only the I-frames for this rendition, in addition to your regular
	// manifest for this rendition. You might use this manifest as part of a workflow
	// that creates preview functions for your video. MediaConvert adds both the
	// I-frame only child manifest and the regular child manifest to the parent
	// manifest. When you don't need the I-frame only child manifest, keep the default
	// value Exclude (EXCLUDE).
	IFrameOnlyManifest *string `locationName:"iFrameOnlyManifest" type:"string" enum:"CmfcIFrameOnlyManifest"`

	// Use this setting only when you specify SCTE-35 markers from ESAM. Choose
	// INSERT to put SCTE-35 markers in this output at the insertion points that
	// you specify in an ESAM XML document. Provide the document in the setting
	// SCC XML (sccXml).
	Scte35Esam *string `locationName:"scte35Esam" type:"string" enum:"CmfcScte35Esam"`

	// Ignore this setting unless you have SCTE-35 markers in your input video file.
	// Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear
	// in your input to also appear in this output. Choose None (NONE) if you don't
	// want those SCTE-35 markers in this output.
	Scte35Source *string `locationName:"scte35Source" type:"string" enum:"CmfcScte35Source"`
}
6000
6001// String returns the string representation
6002func (s CmfcSettings) String() string {
6003	return awsutil.Prettify(s)
6004}
6005
6006// GoString returns the string representation
6007func (s CmfcSettings) GoString() string {
6008	return s.String()
6009}
6010
6011// SetAudioDuration sets the AudioDuration field's value.
6012func (s *CmfcSettings) SetAudioDuration(v string) *CmfcSettings {
6013	s.AudioDuration = &v
6014	return s
6015}
6016
6017// SetAudioGroupId sets the AudioGroupId field's value.
6018func (s *CmfcSettings) SetAudioGroupId(v string) *CmfcSettings {
6019	s.AudioGroupId = &v
6020	return s
6021}
6022
6023// SetAudioRenditionSets sets the AudioRenditionSets field's value.
6024func (s *CmfcSettings) SetAudioRenditionSets(v string) *CmfcSettings {
6025	s.AudioRenditionSets = &v
6026	return s
6027}
6028
6029// SetAudioTrackType sets the AudioTrackType field's value.
6030func (s *CmfcSettings) SetAudioTrackType(v string) *CmfcSettings {
6031	s.AudioTrackType = &v
6032	return s
6033}
6034
6035// SetDescriptiveVideoServiceFlag sets the DescriptiveVideoServiceFlag field's value.
6036func (s *CmfcSettings) SetDescriptiveVideoServiceFlag(v string) *CmfcSettings {
6037	s.DescriptiveVideoServiceFlag = &v
6038	return s
6039}
6040
6041// SetIFrameOnlyManifest sets the IFrameOnlyManifest field's value.
6042func (s *CmfcSettings) SetIFrameOnlyManifest(v string) *CmfcSettings {
6043	s.IFrameOnlyManifest = &v
6044	return s
6045}
6046
6047// SetScte35Esam sets the Scte35Esam field's value.
6048func (s *CmfcSettings) SetScte35Esam(v string) *CmfcSettings {
6049	s.Scte35Esam = &v
6050	return s
6051}
6052
6053// SetScte35Source sets the Scte35Source field's value.
6054func (s *CmfcSettings) SetScte35Source(v string) *CmfcSettings {
6055	s.Scte35Source = &v
6056	return s
6057}
6058
// Settings for color correction.
type ColorCorrector struct {
	_ struct{} `type:"structure"`

	// Brightness level. Minimum value is 1, per the field's min tag (enforced
	// by Validate).
	Brightness *int64 `locationName:"brightness" min:"1" type:"integer"`

	// Specify the color space you want for this output. The service supports conversion
	// between HDR formats, between SDR formats, from SDR to HDR, and from HDR to
	// SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted
	// video has an HDR format, but visually appears the same as an unconverted
	// output. HDR to SDR conversion uses Elemental tone mapping technology to approximate
	// the outcome of manually regrading from HDR to SDR.
	ColorSpaceConversion *string `locationName:"colorSpaceConversion" type:"string" enum:"ColorSpaceConversion"`

	// Contrast level. Minimum value is 1, per the field's min tag (enforced by
	// Validate).
	Contrast *int64 `locationName:"contrast" min:"1" type:"integer"`

	// Use these settings when you convert to the HDR 10 color space. Specify the
	// SMPTE ST 2086 Mastering Display Color Volume static metadata that you want
	// signaled in the output. These values don't affect the pixel values that are
	// encoded in the video stream. They are intended to help the downstream video
	// player display content in a way that reflects the intentions of the content
	// creator. When you set Color space conversion (ColorSpaceConversion) to HDR
	// 10 (FORCE_HDR10), these settings are required. You must set values for Max
	// frame average light level (maxFrameAverageLightLevel) and Max content light
	// level (maxContentLightLevel); these settings don't have a default value.
	// The default values for the other HDR 10 metadata settings are defined by
	// the P3D65 color space. For more information about MediaConvert HDR jobs,
	// see https://docs.aws.amazon.com/console/mediaconvert/hdr.
	Hdr10Metadata *Hdr10Metadata `locationName:"hdr10Metadata" type:"structure"`

	// Hue in degrees. Validate rejects values below -180.
	Hue *int64 `locationName:"hue" type:"integer"`

	// Specify the video color sample range for this output. To create a full range
	// output, you must start with a full range YUV input and keep the default value,
	// None (NONE). To create a limited range output from a full range input, choose
	// Limited range (LIMITED_RANGE_SQUEEZE). With RGB inputs, your output is always
	// limited range, regardless of your choice here. When you create a limited
	// range output from a full range input, MediaConvert limits the active pixel
	// values in a way that depends on the output's bit depth: 8-bit outputs contain
	// only values from 16 through 235 and 10-bit outputs contain only values from
	// 64 through 940. With this conversion, MediaConvert also changes the output
	// metadata to note the limited range.
	SampleRangeConversion *string `locationName:"sampleRangeConversion" type:"string" enum:"SampleRangeConversion"`

	// Saturation level. Minimum value is 1, per the field's min tag (enforced by
	// Validate).
	Saturation *int64 `locationName:"saturation" min:"1" type:"integer"`
}
6109
6110// String returns the string representation
6111func (s ColorCorrector) String() string {
6112	return awsutil.Prettify(s)
6113}
6114
6115// GoString returns the string representation
6116func (s ColorCorrector) GoString() string {
6117	return s.String()
6118}
6119
6120// Validate inspects the fields of the type to determine if they are valid.
6121func (s *ColorCorrector) Validate() error {
6122	invalidParams := request.ErrInvalidParams{Context: "ColorCorrector"}
6123	if s.Brightness != nil && *s.Brightness < 1 {
6124		invalidParams.Add(request.NewErrParamMinValue("Brightness", 1))
6125	}
6126	if s.Contrast != nil && *s.Contrast < 1 {
6127		invalidParams.Add(request.NewErrParamMinValue("Contrast", 1))
6128	}
6129	if s.Hue != nil && *s.Hue < -180 {
6130		invalidParams.Add(request.NewErrParamMinValue("Hue", -180))
6131	}
6132	if s.Saturation != nil && *s.Saturation < 1 {
6133		invalidParams.Add(request.NewErrParamMinValue("Saturation", 1))
6134	}
6135
6136	if invalidParams.Len() > 0 {
6137		return invalidParams
6138	}
6139	return nil
6140}
6141
6142// SetBrightness sets the Brightness field's value.
6143func (s *ColorCorrector) SetBrightness(v int64) *ColorCorrector {
6144	s.Brightness = &v
6145	return s
6146}
6147
6148// SetColorSpaceConversion sets the ColorSpaceConversion field's value.
6149func (s *ColorCorrector) SetColorSpaceConversion(v string) *ColorCorrector {
6150	s.ColorSpaceConversion = &v
6151	return s
6152}
6153
6154// SetContrast sets the Contrast field's value.
6155func (s *ColorCorrector) SetContrast(v int64) *ColorCorrector {
6156	s.Contrast = &v
6157	return s
6158}
6159
6160// SetHdr10Metadata sets the Hdr10Metadata field's value.
6161func (s *ColorCorrector) SetHdr10Metadata(v *Hdr10Metadata) *ColorCorrector {
6162	s.Hdr10Metadata = v
6163	return s
6164}
6165
6166// SetHue sets the Hue field's value.
6167func (s *ColorCorrector) SetHue(v int64) *ColorCorrector {
6168	s.Hue = &v
6169	return s
6170}
6171
6172// SetSampleRangeConversion sets the SampleRangeConversion field's value.
6173func (s *ColorCorrector) SetSampleRangeConversion(v string) *ColorCorrector {
6174	s.SampleRangeConversion = &v
6175	return s
6176}
6177
6178// SetSaturation sets the Saturation field's value.
6179func (s *ColorCorrector) SetSaturation(v int64) *ColorCorrector {
6180	s.Saturation = &v
6181	return s
6182}
6183
// ConflictException is a modeled service exception. Message_ holds the
// message text returned by the service, and RespMetadata carries the HTTP
// response metadata (status code and request ID) exposed through the type's
// StatusCode and RequestID methods.
type ConflictException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	// Message_ is the service-supplied error message; may be nil.
	Message_ *string `locationName:"message" type:"string"`
}
6190
6191// String returns the string representation
6192func (s ConflictException) String() string {
6193	return awsutil.Prettify(s)
6194}
6195
6196// GoString returns the string representation
6197func (s ConflictException) GoString() string {
6198	return s.String()
6199}
6200
6201func newErrorConflictException(v protocol.ResponseMetadata) error {
6202	return &ConflictException{
6203		RespMetadata: v,
6204	}
6205}
6206
6207// Code returns the exception type name.
6208func (s *ConflictException) Code() string {
6209	return "ConflictException"
6210}
6211
6212// Message returns the exception's message.
6213func (s *ConflictException) Message() string {
6214	if s.Message_ != nil {
6215		return *s.Message_
6216	}
6217	return ""
6218}
6219
6220// OrigErr always returns nil, satisfies awserr.Error interface.
6221func (s *ConflictException) OrigErr() error {
6222	return nil
6223}
6224
6225func (s *ConflictException) Error() string {
6226	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
6227}
6228
6229// Status code returns the HTTP status code for the request's response error.
6230func (s *ConflictException) StatusCode() int {
6231	return s.RespMetadata.StatusCode
6232}
6233
6234// RequestID returns the service's response RequestID for request.
6235func (s *ConflictException) RequestID() string {
6236	return s.RespMetadata.RequestID
6237}
6238
// Container specific settings. Exactly one of the nested *Settings structs is
// typically relevant, chosen by the Container field. Only M2tsSettings and
// M3u8Settings carry nested validation (see Validate).
type ContainerSettings struct {
	_ struct{} `type:"structure"`

	// These settings relate to the fragmented MP4 container for the segments in
	// your CMAF outputs.
	CmfcSettings *CmfcSettings `locationName:"cmfcSettings" type:"structure"`

	// Container for this output. Some containers require a container settings object.
	// If not specified, the default object will be created.
	Container *string `locationName:"container" type:"string" enum:"ContainerType"`

	// Settings for F4v container
	F4vSettings *F4vSettings `locationName:"f4vSettings" type:"structure"`

	// MPEG-2 TS container settings. These apply to outputs in a File output group
	// when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS).
	// In these assets, data is organized by the program map table (PMT). Each transport
	// stream program contains subsets of data, including audio, video, and metadata.
	// Each of these subsets of data has a numerical label called a packet identifier
	// (PID). Each transport stream program corresponds to one MediaConvert output.
	// The PMT lists the types of data in a program along with their PID. Downstream
	// systems and players use the program map table to look up the PID for each
	// type of data it accesses and then uses the PIDs to locate specific data within
	// the asset.
	M2tsSettings *M2tsSettings `locationName:"m2tsSettings" type:"structure"`

	// These settings relate to the MPEG-2 transport stream (MPEG2-TS) container
	// for the MPEG2-TS segments in your HLS outputs.
	M3u8Settings *M3u8Settings `locationName:"m3u8Settings" type:"structure"`

	// These settings relate to your QuickTime MOV output container.
	MovSettings *MovSettings `locationName:"movSettings" type:"structure"`

	// These settings relate to your MP4 output container. You can create audio
	// only outputs with this container. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/supported-codecs-containers-audio-only.html#output-codecs-and-containers-supported-for-audio-only.
	Mp4Settings *Mp4Settings `locationName:"mp4Settings" type:"structure"`

	// These settings relate to the fragmented MP4 container for the segments in
	// your DASH outputs.
	MpdSettings *MpdSettings `locationName:"mpdSettings" type:"structure"`

	// These settings relate to your MXF output container.
	MxfSettings *MxfSettings `locationName:"mxfSettings" type:"structure"`
}
6284
6285// String returns the string representation
6286func (s ContainerSettings) String() string {
6287	return awsutil.Prettify(s)
6288}
6289
6290// GoString returns the string representation
6291func (s ContainerSettings) GoString() string {
6292	return s.String()
6293}
6294
6295// Validate inspects the fields of the type to determine if they are valid.
6296func (s *ContainerSettings) Validate() error {
6297	invalidParams := request.ErrInvalidParams{Context: "ContainerSettings"}
6298	if s.M2tsSettings != nil {
6299		if err := s.M2tsSettings.Validate(); err != nil {
6300			invalidParams.AddNested("M2tsSettings", err.(request.ErrInvalidParams))
6301		}
6302	}
6303	if s.M3u8Settings != nil {
6304		if err := s.M3u8Settings.Validate(); err != nil {
6305			invalidParams.AddNested("M3u8Settings", err.(request.ErrInvalidParams))
6306		}
6307	}
6308
6309	if invalidParams.Len() > 0 {
6310		return invalidParams
6311	}
6312	return nil
6313}
6314
6315// SetCmfcSettings sets the CmfcSettings field's value.
6316func (s *ContainerSettings) SetCmfcSettings(v *CmfcSettings) *ContainerSettings {
6317	s.CmfcSettings = v
6318	return s
6319}
6320
6321// SetContainer sets the Container field's value.
6322func (s *ContainerSettings) SetContainer(v string) *ContainerSettings {
6323	s.Container = &v
6324	return s
6325}
6326
6327// SetF4vSettings sets the F4vSettings field's value.
6328func (s *ContainerSettings) SetF4vSettings(v *F4vSettings) *ContainerSettings {
6329	s.F4vSettings = v
6330	return s
6331}
6332
6333// SetM2tsSettings sets the M2tsSettings field's value.
6334func (s *ContainerSettings) SetM2tsSettings(v *M2tsSettings) *ContainerSettings {
6335	s.M2tsSettings = v
6336	return s
6337}
6338
6339// SetM3u8Settings sets the M3u8Settings field's value.
6340func (s *ContainerSettings) SetM3u8Settings(v *M3u8Settings) *ContainerSettings {
6341	s.M3u8Settings = v
6342	return s
6343}
6344
6345// SetMovSettings sets the MovSettings field's value.
6346func (s *ContainerSettings) SetMovSettings(v *MovSettings) *ContainerSettings {
6347	s.MovSettings = v
6348	return s
6349}
6350
6351// SetMp4Settings sets the Mp4Settings field's value.
6352func (s *ContainerSettings) SetMp4Settings(v *Mp4Settings) *ContainerSettings {
6353	s.Mp4Settings = v
6354	return s
6355}
6356
6357// SetMpdSettings sets the MpdSettings field's value.
6358func (s *ContainerSettings) SetMpdSettings(v *MpdSettings) *ContainerSettings {
6359	s.MpdSettings = v
6360	return s
6361}
6362
6363// SetMxfSettings sets the MxfSettings field's value.
6364func (s *ContainerSettings) SetMxfSettings(v *MxfSettings) *ContainerSettings {
6365	s.MxfSettings = v
6366	return s
6367}
6368
// Send your create job request with your job settings and IAM role. Optionally,
// include user metadata and the ARN for the queue. Role and Settings are the
// only required fields (enforced by Validate).
type CreateJobInput struct {
	_ struct{} `type:"structure"`

	// Optional. Accelerated transcoding can significantly speed up jobs with long,
	// visually complex content. Outputs that use this feature incur pro-tier pricing.
	// For information about feature limitations, see the AWS Elemental MediaConvert
	// User Guide.
	AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"`

	// Optional. Choose a tag type that AWS Billing and Cost Management will use
	// to sort your AWS Elemental MediaConvert costs on any billing report that
	// you set up. Any transcoding outputs that don't have an associated tag will
	// appear in your billing report unsorted. If you don't choose a valid value
	// for this field, your job outputs will appear on the billing report unsorted.
	BillingTagsSource *string `locationName:"billingTagsSource" type:"string" enum:"BillingTagsSource"`

	// Optional. Idempotency token for CreateJob operation. The SDK auto-fills
	// this when unset (idempotencyToken tag).
	ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"`

	// Optional. Use queue hopping to avoid overly long waits in the backlog of
	// the queue that you submit your job to. Specify an alternate queue and the
	// maximum time that your job will wait in the initial queue before hopping.
	// For more information about this feature, see the AWS Elemental MediaConvert
	// User Guide.
	HopDestinations []*HopDestination `locationName:"hopDestinations" type:"list"`

	// Optional. When you create a job, you can either specify a job template or
	// specify the transcoding settings individually.
	JobTemplate *string `locationName:"jobTemplate" type:"string"`

	// Optional. Specify the relative priority for this job. In any given queue,
	// the service begins processing the job with the highest value first. When
	// more than one job has the same priority, the service begins processing the
	// job that you submitted first. If you don't specify a priority, the service
	// uses the default value 0. Validate rejects values below -50.
	Priority *int64 `locationName:"priority" type:"integer"`

	// Optional. When you create a job, you can specify a queue to send it to. If
	// you don't specify, the job will go to the default queue. For more about queues,
	// see the User Guide topic at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html.
	Queue *string `locationName:"queue" type:"string"`

	// Required. The IAM role you use for creating this job. For details about permissions,
	// see the User Guide topic at the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html.
	//
	// Role is a required field
	Role *string `locationName:"role" type:"string" required:"true"`

	// JobSettings contains all the transcode settings for a job.
	//
	// Settings is a required field
	Settings *JobSettings `locationName:"settings" type:"structure" required:"true"`

	// Optional. Enable this setting when you run a test job to estimate how many
	// reserved transcoding slots (RTS) you need. When this is enabled, MediaConvert
	// runs your job from an on-demand queue with similar performance to what you
	// will see with one RTS in a reserved queue. This setting is disabled by default.
	SimulateReservedQueue *string `locationName:"simulateReservedQueue" type:"string" enum:"SimulateReservedQueue"`

	// Optional. Specify how often MediaConvert sends STATUS_UPDATE events to Amazon
	// CloudWatch Events. Set the interval, in seconds, between status updates.
	// MediaConvert sends an update at this interval from the time the service begins
	// processing your job to the time it completes the transcode or encounters
	// an error.
	StatusUpdateInterval *string `locationName:"statusUpdateInterval" type:"string" enum:"StatusUpdateInterval"`

	// Optional. The tags that you want to add to the resource. You can tag resources
	// with a key-value pair or with only a key. Use standard AWS tags on your job
	// for automatic integration with AWS services and for custom integrations and
	// workflows.
	Tags map[string]*string `locationName:"tags" type:"map"`

	// Optional. User-defined metadata that you want to associate with an MediaConvert
	// job. You specify metadata in key/value pairs. Use only for existing integrations
	// or workflows that rely on job metadata tags. Otherwise, we recommend that
	// you use standard AWS tags.
	UserMetadata map[string]*string `locationName:"userMetadata" type:"map"`
}
6449
6450// String returns the string representation
6451func (s CreateJobInput) String() string {
6452	return awsutil.Prettify(s)
6453}
6454
6455// GoString returns the string representation
6456func (s CreateJobInput) GoString() string {
6457	return s.String()
6458}
6459
6460// Validate inspects the fields of the type to determine if they are valid.
6461func (s *CreateJobInput) Validate() error {
6462	invalidParams := request.ErrInvalidParams{Context: "CreateJobInput"}
6463	if s.Priority != nil && *s.Priority < -50 {
6464		invalidParams.Add(request.NewErrParamMinValue("Priority", -50))
6465	}
6466	if s.Role == nil {
6467		invalidParams.Add(request.NewErrParamRequired("Role"))
6468	}
6469	if s.Settings == nil {
6470		invalidParams.Add(request.NewErrParamRequired("Settings"))
6471	}
6472	if s.AccelerationSettings != nil {
6473		if err := s.AccelerationSettings.Validate(); err != nil {
6474			invalidParams.AddNested("AccelerationSettings", err.(request.ErrInvalidParams))
6475		}
6476	}
6477	if s.HopDestinations != nil {
6478		for i, v := range s.HopDestinations {
6479			if v == nil {
6480				continue
6481			}
6482			if err := v.Validate(); err != nil {
6483				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "HopDestinations", i), err.(request.ErrInvalidParams))
6484			}
6485		}
6486	}
6487	if s.Settings != nil {
6488		if err := s.Settings.Validate(); err != nil {
6489			invalidParams.AddNested("Settings", err.(request.ErrInvalidParams))
6490		}
6491	}
6492
6493	if invalidParams.Len() > 0 {
6494		return invalidParams
6495	}
6496	return nil
6497}
6498
6499// SetAccelerationSettings sets the AccelerationSettings field's value.
6500func (s *CreateJobInput) SetAccelerationSettings(v *AccelerationSettings) *CreateJobInput {
6501	s.AccelerationSettings = v
6502	return s
6503}
6504
6505// SetBillingTagsSource sets the BillingTagsSource field's value.
6506func (s *CreateJobInput) SetBillingTagsSource(v string) *CreateJobInput {
6507	s.BillingTagsSource = &v
6508	return s
6509}
6510
6511// SetClientRequestToken sets the ClientRequestToken field's value.
6512func (s *CreateJobInput) SetClientRequestToken(v string) *CreateJobInput {
6513	s.ClientRequestToken = &v
6514	return s
6515}
6516
6517// SetHopDestinations sets the HopDestinations field's value.
6518func (s *CreateJobInput) SetHopDestinations(v []*HopDestination) *CreateJobInput {
6519	s.HopDestinations = v
6520	return s
6521}
6522
6523// SetJobTemplate sets the JobTemplate field's value.
6524func (s *CreateJobInput) SetJobTemplate(v string) *CreateJobInput {
6525	s.JobTemplate = &v
6526	return s
6527}
6528
6529// SetPriority sets the Priority field's value.
6530func (s *CreateJobInput) SetPriority(v int64) *CreateJobInput {
6531	s.Priority = &v
6532	return s
6533}
6534
6535// SetQueue sets the Queue field's value.
6536func (s *CreateJobInput) SetQueue(v string) *CreateJobInput {
6537	s.Queue = &v
6538	return s
6539}
6540
6541// SetRole sets the Role field's value.
6542func (s *CreateJobInput) SetRole(v string) *CreateJobInput {
6543	s.Role = &v
6544	return s
6545}
6546
6547// SetSettings sets the Settings field's value.
6548func (s *CreateJobInput) SetSettings(v *JobSettings) *CreateJobInput {
6549	s.Settings = v
6550	return s
6551}
6552
6553// SetSimulateReservedQueue sets the SimulateReservedQueue field's value.
6554func (s *CreateJobInput) SetSimulateReservedQueue(v string) *CreateJobInput {
6555	s.SimulateReservedQueue = &v
6556	return s
6557}
6558
6559// SetStatusUpdateInterval sets the StatusUpdateInterval field's value.
6560func (s *CreateJobInput) SetStatusUpdateInterval(v string) *CreateJobInput {
6561	s.StatusUpdateInterval = &v
6562	return s
6563}
6564
6565// SetTags sets the Tags field's value.
6566func (s *CreateJobInput) SetTags(v map[string]*string) *CreateJobInput {
6567	s.Tags = v
6568	return s
6569}
6570
6571// SetUserMetadata sets the UserMetadata field's value.
6572func (s *CreateJobInput) SetUserMetadata(v map[string]*string) *CreateJobInput {
6573	s.UserMetadata = v
6574	return s
6575}
6576
// Successful create job requests will return the job JSON.
type CreateJobOutput struct {
	_ struct{} `type:"structure"`

	// Each job converts an input file into an output file or files. For more information,
	// see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
	// Job holds the job description returned for a successful request.
	Job *Job `locationName:"job" type:"structure"`
}
6585
6586// String returns the string representation
6587func (s CreateJobOutput) String() string {
6588	return awsutil.Prettify(s)
6589}
6590
6591// GoString returns the string representation
6592func (s CreateJobOutput) GoString() string {
6593	return s.String()
6594}
6595
6596// SetJob sets the Job field's value.
6597func (s *CreateJobOutput) SetJob(v *Job) *CreateJobOutput {
6598	s.Job = v
6599	return s
6600}
6601
// Send your create job template request with the name of the template and the
// JSON for the template. The template JSON should include everything in a valid
// job, except for input location and filename, IAM role, and user metadata.
// Name and Settings are required; all other fields are optional.
type CreateJobTemplateInput struct {
	_ struct{} `type:"structure"`

	// Accelerated transcoding can significantly speed up jobs with long, visually
	// complex content. Outputs that use this feature incur pro-tier pricing. For
	// information about feature limitations, see the AWS Elemental MediaConvert
	// User Guide.
	AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"`

	// Optional. A category for the job template you are creating.
	Category *string `locationName:"category" type:"string"`

	// Optional. A description of the job template you are creating.
	Description *string `locationName:"description" type:"string"`

	// Optional. Use queue hopping to avoid overly long waits in the backlog of
	// the queue that you submit your job to. Specify an alternate queue and the
	// maximum time that your job will wait in the initial queue before hopping.
	// For more information about this feature, see the AWS Elemental MediaConvert
	// User Guide.
	HopDestinations []*HopDestination `locationName:"hopDestinations" type:"list"`

	// The name of the job template you are creating.
	//
	// Name is a required field
	Name *string `locationName:"name" type:"string" required:"true"`

	// Specify the relative priority for this job. In any given queue, the service
	// begins processing the job with the highest value first. When more than one
	// job has the same priority, the service begins processing the job that you
	// submitted first. If you don't specify a priority, the service uses the default
	// value 0. Validate rejects values below -50.
	Priority *int64 `locationName:"priority" type:"integer"`

	// Optional. The queue that jobs created from this template are assigned to.
	// If you don't specify this, jobs will go to the default queue.
	Queue *string `locationName:"queue" type:"string"`

	// JobTemplateSettings contains all the transcode settings saved in the template
	// that will be applied to jobs created from it.
	//
	// Settings is a required field
	Settings *JobTemplateSettings `locationName:"settings" type:"structure" required:"true"`

	// Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch
	// Events. Set the interval, in seconds, between status updates. MediaConvert
	// sends an update at this interval from the time the service begins processing
	// your job to the time it completes the transcode or encounters an error.
	StatusUpdateInterval *string `locationName:"statusUpdateInterval" type:"string" enum:"StatusUpdateInterval"`

	// The tags that you want to add to the resource. You can tag resources with
	// a key-value pair or with only a key.
	Tags map[string]*string `locationName:"tags" type:"map"`
}
6659
6660// String returns the string representation
6661func (s CreateJobTemplateInput) String() string {
6662	return awsutil.Prettify(s)
6663}
6664
6665// GoString returns the string representation
6666func (s CreateJobTemplateInput) GoString() string {
6667	return s.String()
6668}
6669
6670// Validate inspects the fields of the type to determine if they are valid.
6671func (s *CreateJobTemplateInput) Validate() error {
6672	invalidParams := request.ErrInvalidParams{Context: "CreateJobTemplateInput"}
6673	if s.Name == nil {
6674		invalidParams.Add(request.NewErrParamRequired("Name"))
6675	}
6676	if s.Priority != nil && *s.Priority < -50 {
6677		invalidParams.Add(request.NewErrParamMinValue("Priority", -50))
6678	}
6679	if s.Settings == nil {
6680		invalidParams.Add(request.NewErrParamRequired("Settings"))
6681	}
6682	if s.AccelerationSettings != nil {
6683		if err := s.AccelerationSettings.Validate(); err != nil {
6684			invalidParams.AddNested("AccelerationSettings", err.(request.ErrInvalidParams))
6685		}
6686	}
6687	if s.HopDestinations != nil {
6688		for i, v := range s.HopDestinations {
6689			if v == nil {
6690				continue
6691			}
6692			if err := v.Validate(); err != nil {
6693				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "HopDestinations", i), err.(request.ErrInvalidParams))
6694			}
6695		}
6696	}
6697	if s.Settings != nil {
6698		if err := s.Settings.Validate(); err != nil {
6699			invalidParams.AddNested("Settings", err.(request.ErrInvalidParams))
6700		}
6701	}
6702
6703	if invalidParams.Len() > 0 {
6704		return invalidParams
6705	}
6706	return nil
6707}
6708
6709// SetAccelerationSettings sets the AccelerationSettings field's value.
6710func (s *CreateJobTemplateInput) SetAccelerationSettings(v *AccelerationSettings) *CreateJobTemplateInput {
6711	s.AccelerationSettings = v
6712	return s
6713}
6714
6715// SetCategory sets the Category field's value.
6716func (s *CreateJobTemplateInput) SetCategory(v string) *CreateJobTemplateInput {
6717	s.Category = &v
6718	return s
6719}
6720
6721// SetDescription sets the Description field's value.
6722func (s *CreateJobTemplateInput) SetDescription(v string) *CreateJobTemplateInput {
6723	s.Description = &v
6724	return s
6725}
6726
6727// SetHopDestinations sets the HopDestinations field's value.
6728func (s *CreateJobTemplateInput) SetHopDestinations(v []*HopDestination) *CreateJobTemplateInput {
6729	s.HopDestinations = v
6730	return s
6731}
6732
6733// SetName sets the Name field's value.
6734func (s *CreateJobTemplateInput) SetName(v string) *CreateJobTemplateInput {
6735	s.Name = &v
6736	return s
6737}
6738
6739// SetPriority sets the Priority field's value.
6740func (s *CreateJobTemplateInput) SetPriority(v int64) *CreateJobTemplateInput {
6741	s.Priority = &v
6742	return s
6743}
6744
6745// SetQueue sets the Queue field's value.
6746func (s *CreateJobTemplateInput) SetQueue(v string) *CreateJobTemplateInput {
6747	s.Queue = &v
6748	return s
6749}
6750
6751// SetSettings sets the Settings field's value.
6752func (s *CreateJobTemplateInput) SetSettings(v *JobTemplateSettings) *CreateJobTemplateInput {
6753	s.Settings = v
6754	return s
6755}
6756
6757// SetStatusUpdateInterval sets the StatusUpdateInterval field's value.
6758func (s *CreateJobTemplateInput) SetStatusUpdateInterval(v string) *CreateJobTemplateInput {
6759	s.StatusUpdateInterval = &v
6760	return s
6761}
6762
6763// SetTags sets the Tags field's value.
6764func (s *CreateJobTemplateInput) SetTags(v map[string]*string) *CreateJobTemplateInput {
6765	s.Tags = v
6766	return s
6767}
6768
// Successful create job template requests will return the template JSON.
type CreateJobTemplateOutput struct {
	_ struct{} `type:"structure"`

	// The job template that you created. A job template is a pre-made set of
	// encoding instructions that you can use to quickly create a job.
	JobTemplate *JobTemplate `locationName:"jobTemplate" type:"structure"`
}
6777
6778// String returns the string representation
6779func (s CreateJobTemplateOutput) String() string {
6780	return awsutil.Prettify(s)
6781}
6782
6783// GoString returns the string representation
6784func (s CreateJobTemplateOutput) GoString() string {
6785	return s.String()
6786}
6787
6788// SetJobTemplate sets the JobTemplate field's value.
6789func (s *CreateJobTemplateOutput) SetJobTemplate(v *JobTemplate) *CreateJobTemplateOutput {
6790	s.JobTemplate = v
6791	return s
6792}
6793
// Send your create preset request with the name of the preset and the JSON
// for the output settings specified by the preset. Name and Settings are
// required; all other fields are optional.
type CreatePresetInput struct {
	_ struct{} `type:"structure"`

	// Optional. A category for the preset you are creating.
	Category *string `locationName:"category" type:"string"`

	// Optional. A description of the preset you are creating.
	Description *string `locationName:"description" type:"string"`

	// The name of the preset you are creating.
	//
	// Name is a required field
	Name *string `locationName:"name" type:"string" required:"true"`

	// Settings for the preset.
	//
	// Settings is a required field
	Settings *PresetSettings `locationName:"settings" type:"structure" required:"true"`

	// The tags that you want to add to the resource. You can tag resources with
	// a key-value pair or with only a key.
	Tags map[string]*string `locationName:"tags" type:"map"`
}
6819
6820// String returns the string representation
6821func (s CreatePresetInput) String() string {
6822	return awsutil.Prettify(s)
6823}
6824
6825// GoString returns the string representation
6826func (s CreatePresetInput) GoString() string {
6827	return s.String()
6828}
6829
6830// Validate inspects the fields of the type to determine if they are valid.
6831func (s *CreatePresetInput) Validate() error {
6832	invalidParams := request.ErrInvalidParams{Context: "CreatePresetInput"}
6833	if s.Name == nil {
6834		invalidParams.Add(request.NewErrParamRequired("Name"))
6835	}
6836	if s.Settings == nil {
6837		invalidParams.Add(request.NewErrParamRequired("Settings"))
6838	}
6839	if s.Settings != nil {
6840		if err := s.Settings.Validate(); err != nil {
6841			invalidParams.AddNested("Settings", err.(request.ErrInvalidParams))
6842		}
6843	}
6844
6845	if invalidParams.Len() > 0 {
6846		return invalidParams
6847	}
6848	return nil
6849}
6850
6851// SetCategory sets the Category field's value.
6852func (s *CreatePresetInput) SetCategory(v string) *CreatePresetInput {
6853	s.Category = &v
6854	return s
6855}
6856
6857// SetDescription sets the Description field's value.
6858func (s *CreatePresetInput) SetDescription(v string) *CreatePresetInput {
6859	s.Description = &v
6860	return s
6861}
6862
6863// SetName sets the Name field's value.
6864func (s *CreatePresetInput) SetName(v string) *CreatePresetInput {
6865	s.Name = &v
6866	return s
6867}
6868
6869// SetSettings sets the Settings field's value.
6870func (s *CreatePresetInput) SetSettings(v *PresetSettings) *CreatePresetInput {
6871	s.Settings = v
6872	return s
6873}
6874
6875// SetTags sets the Tags field's value.
6876func (s *CreatePresetInput) SetTags(v map[string]*string) *CreatePresetInput {
6877	s.Tags = v
6878	return s
6879}
6880
// Successful create preset requests will return the preset JSON.
type CreatePresetOutput struct {
	_ struct{} `type:"structure"`

	// The preset that you created. A preset is a collection of preconfigured media
	// conversion settings that you want MediaConvert to apply to the output during
	// the conversion process.
	Preset *Preset `locationName:"preset" type:"structure"`
}
6889
6890// String returns the string representation
6891func (s CreatePresetOutput) String() string {
6892	return awsutil.Prettify(s)
6893}
6894
6895// GoString returns the string representation
6896func (s CreatePresetOutput) GoString() string {
6897	return s.String()
6898}
6899
6900// SetPreset sets the Preset field's value.
6901func (s *CreatePresetOutput) SetPreset(v *Preset) *CreatePresetOutput {
6902	s.Preset = v
6903	return s
6904}
6905
// Create an on-demand queue by sending a CreateQueue request with the name
// of the queue. Create a reserved queue by sending a CreateQueue request with
// the pricing plan set to RESERVED and with values specified for the settings
// under reservationPlanSettings. When you create a reserved queue, you enter
// into a 12-month commitment to purchase the RTS that you specify. You can't
// cancel this commitment. Name is required; all other fields are optional.
type CreateQueueInput struct {
	_ struct{} `type:"structure"`

	// Optional. A description of the queue that you are creating.
	Description *string `locationName:"description" type:"string"`

	// The name of the queue that you are creating.
	//
	// Name is a required field
	Name *string `locationName:"name" type:"string" required:"true"`

	// Specifies whether the pricing plan for the queue is on-demand or reserved.
	// For on-demand, you pay per minute, billed in increments of .01 minute. For
	// reserved, you pay for the transcoding capacity of the entire queue, regardless
	// of how much or how little you use it. Reserved pricing requires a 12-month
	// commitment. When you use the API to create a queue, the default is on-demand.
	PricingPlan *string `locationName:"pricingPlan" type:"string" enum:"PricingPlan"`

	// Details about the pricing plan for your reserved queue. Required for reserved
	// queues and not applicable to on-demand queues.
	ReservationPlanSettings *ReservationPlanSettings `locationName:"reservationPlanSettings" type:"structure"`

	// Initial state of the queue. If you create a paused queue, then jobs in that
	// queue won't begin.
	Status *string `locationName:"status" type:"string" enum:"QueueStatus"`

	// The tags that you want to add to the resource. You can tag resources with
	// a key-value pair or with only a key.
	Tags map[string]*string `locationName:"tags" type:"map"`
}
6942
6943// String returns the string representation
6944func (s CreateQueueInput) String() string {
6945	return awsutil.Prettify(s)
6946}
6947
6948// GoString returns the string representation
6949func (s CreateQueueInput) GoString() string {
6950	return s.String()
6951}
6952
6953// Validate inspects the fields of the type to determine if they are valid.
6954func (s *CreateQueueInput) Validate() error {
6955	invalidParams := request.ErrInvalidParams{Context: "CreateQueueInput"}
6956	if s.Name == nil {
6957		invalidParams.Add(request.NewErrParamRequired("Name"))
6958	}
6959	if s.ReservationPlanSettings != nil {
6960		if err := s.ReservationPlanSettings.Validate(); err != nil {
6961			invalidParams.AddNested("ReservationPlanSettings", err.(request.ErrInvalidParams))
6962		}
6963	}
6964
6965	if invalidParams.Len() > 0 {
6966		return invalidParams
6967	}
6968	return nil
6969}
6970
6971// SetDescription sets the Description field's value.
6972func (s *CreateQueueInput) SetDescription(v string) *CreateQueueInput {
6973	s.Description = &v
6974	return s
6975}
6976
6977// SetName sets the Name field's value.
6978func (s *CreateQueueInput) SetName(v string) *CreateQueueInput {
6979	s.Name = &v
6980	return s
6981}
6982
6983// SetPricingPlan sets the PricingPlan field's value.
6984func (s *CreateQueueInput) SetPricingPlan(v string) *CreateQueueInput {
6985	s.PricingPlan = &v
6986	return s
6987}
6988
6989// SetReservationPlanSettings sets the ReservationPlanSettings field's value.
6990func (s *CreateQueueInput) SetReservationPlanSettings(v *ReservationPlanSettings) *CreateQueueInput {
6991	s.ReservationPlanSettings = v
6992	return s
6993}
6994
6995// SetStatus sets the Status field's value.
6996func (s *CreateQueueInput) SetStatus(v string) *CreateQueueInput {
6997	s.Status = &v
6998	return s
6999}
7000
7001// SetTags sets the Tags field's value.
7002func (s *CreateQueueInput) SetTags(v map[string]*string) *CreateQueueInput {
7003	s.Tags = v
7004	return s
7005}
7006
// Successful create queue requests return the name of the queue that you just
// created and information about it.
type CreateQueueOutput struct {
	_ struct{} `type:"structure"`

	// The queue that you just created. You can use queues to manage the resources
	// that are available to your AWS account for running multiple transcoding jobs
	// at the same time. If you don't specify a queue, the service sends all jobs
	// through the default queue. For more information, see
	// https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html.
	Queue *Queue `locationName:"queue" type:"structure"`
}
7018
7019// String returns the string representation
7020func (s CreateQueueOutput) String() string {
7021	return awsutil.Prettify(s)
7022}
7023
7024// GoString returns the string representation
7025func (s CreateQueueOutput) GoString() string {
7026	return s.String()
7027}
7028
7029// SetQueue sets the Queue field's value.
7030func (s *CreateQueueOutput) SetQueue(v *Queue) *CreateQueueOutput {
7031	s.Queue = v
7032	return s
7033}
7034
// Specify the details for each additional DASH manifest that you want the service
// to generate for this output group. Each manifest can reference a different
// subset of outputs in the group. Both fields are optional, but
// ManifestNameModifier, when set, must be at least 1 character long.
type DashAdditionalManifest struct {
	_ struct{} `type:"structure"`

	// Specify a name modifier that the service adds to the name of this manifest
	// to make it different from the file names of the other main manifests in the
	// output group. For example, say that the default main manifest for your DASH
	// group is film-name.mpd. If you enter "-no-premium" for this setting, then
	// the file name the service generates for this top-level manifest is film-name-no-premium.mpd.
	ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"`

	// Specify the outputs that you want this additional top-level manifest to reference.
	SelectedOutputs []*string `locationName:"selectedOutputs" type:"list"`
}
7051
7052// String returns the string representation
7053func (s DashAdditionalManifest) String() string {
7054	return awsutil.Prettify(s)
7055}
7056
7057// GoString returns the string representation
7058func (s DashAdditionalManifest) GoString() string {
7059	return s.String()
7060}
7061
7062// Validate inspects the fields of the type to determine if they are valid.
7063func (s *DashAdditionalManifest) Validate() error {
7064	invalidParams := request.ErrInvalidParams{Context: "DashAdditionalManifest"}
7065	if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 {
7066		invalidParams.Add(request.NewErrParamMinLen("ManifestNameModifier", 1))
7067	}
7068
7069	if invalidParams.Len() > 0 {
7070		return invalidParams
7071	}
7072	return nil
7073}
7074
7075// SetManifestNameModifier sets the ManifestNameModifier field's value.
7076func (s *DashAdditionalManifest) SetManifestNameModifier(v string) *DashAdditionalManifest {
7077	s.ManifestNameModifier = &v
7078	return s
7079}
7080
7081// SetSelectedOutputs sets the SelectedOutputs field's value.
7082func (s *DashAdditionalManifest) SetSelectedOutputs(v []*string) *DashAdditionalManifest {
7083	s.SelectedOutputs = v
7084	return s
7085}
7086
// Specifies DRM settings for DASH outputs. Both fields are optional.
type DashIsoEncryptionSettings struct {
	_ struct{} `type:"structure"`

	// This setting can improve the compatibility of your output with video players
	// on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption.
	// Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback
	// on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1).
	// If you choose Unencrypted SEI, for that output, the service will exclude
	// the access unit delimiter and will leave the SEI NAL units unencrypted.
	PlaybackDeviceCompatibility *string `locationName:"playbackDeviceCompatibility" type:"string" enum:"DashIsoPlaybackDeviceCompatibility"`

	// If your output group type is HLS, DASH, or Microsoft Smooth, use these settings
	// when doing DRM encryption with a SPEKE-compliant key provider. If your output
	// group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
	SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure"`
}
7104
7105// String returns the string representation
7106func (s DashIsoEncryptionSettings) String() string {
7107	return awsutil.Prettify(s)
7108}
7109
7110// GoString returns the string representation
7111func (s DashIsoEncryptionSettings) GoString() string {
7112	return s.String()
7113}
7114
7115// SetPlaybackDeviceCompatibility sets the PlaybackDeviceCompatibility field's value.
7116func (s *DashIsoEncryptionSettings) SetPlaybackDeviceCompatibility(v string) *DashIsoEncryptionSettings {
7117	s.PlaybackDeviceCompatibility = &v
7118	return s
7119}
7120
7121// SetSpekeKeyProvider sets the SpekeKeyProvider field's value.
7122func (s *DashIsoEncryptionSettings) SetSpekeKeyProvider(v *SpekeKeyProvider) *DashIsoEncryptionSettings {
7123	s.SpekeKeyProvider = v
7124	return s
7125}
7126
// Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set Type, under OutputGroupSettings, to
// DASH_ISO_GROUP_SETTINGS. All fields are optional; FragmentLength and
// SegmentLength, when set, must be at least 1.
type DashIsoGroupSettings struct {
	_ struct{} `type:"structure"`

	// By default, the service creates one .mpd DASH manifest for each DASH ISO
	// output group in your job. This default manifest references every output in
	// the output group. To create additional DASH manifests that reference a subset
	// of the outputs in the output group, specify a list of them here.
	AdditionalManifests []*DashAdditionalManifest `locationName:"additionalManifests" type:"list"`

	// Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or
	// Atmos) and your downstream workflow requires that your DASH manifest use
	// the Dolby channel configuration tag, rather than the MPEG one. For example,
	// you might need to use this to make dynamic ad insertion work. Specify which
	// audio channel configuration scheme ID URI MediaConvert writes in your DASH
	// manifest. Keep the default value, MPEG channel configuration (MPEG_CHANNEL_CONFIGURATION),
	// to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration.
	// Choose Dolby channel configuration (DOLBY_CHANNEL_CONFIGURATION) to have
	// MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011.
	AudioChannelConfigSchemeIdUri *string `locationName:"audioChannelConfigSchemeIdUri" type:"string" enum:"DashIsoGroupAudioChannelConfigSchemeIdUri"`

	// A partial URI prefix that will be put in the manifest (.mpd) file at the
	// top level BaseURL element. Can be used if streams are delivered from a different
	// URL than the manifest file.
	BaseUrl *string `locationName:"baseUrl" type:"string"`

	// Use Destination (Destination) to specify the S3 output location and the output
	// filename base. Destination accepts format identifiers. If you do not specify
	// the base filename in the URI, the service will use the filename of the input
	// file. If your job has multiple inputs, the service uses the filename of the
	// first input file.
	Destination *string `locationName:"destination" type:"string"`

	// Settings associated with the destination. Will vary based on the type of
	// destination.
	DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"`

	// DRM settings.
	Encryption *DashIsoEncryptionSettings `locationName:"encryption" type:"structure"`

	// Length of fragments to generate (in seconds). Fragment length must be compatible
	// with GOP size and Framerate. Note that fragments will end on the next keyframe
	// after this number of seconds, so actual fragment length may be longer. When
	// Emit Single File is checked, the fragmentation is internal to a single output
	// file and it does not cause the creation of many output files as in other
	// output types. Minimum value is 1.
	FragmentLength *int64 `locationName:"fragmentLength" min:"1" type:"integer"`

	// Supports HbbTV specification as indicated.
	HbbtvCompliance *string `locationName:"hbbtvCompliance" type:"string" enum:"DashIsoHbbtvCompliance"`

	// Specify whether MediaConvert generates images for trick play. Keep the default
	// value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL)
	// to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME)
	// to generate tiled thumbnails and full-resolution images of single frames.
	// MediaConvert adds an entry in the .mpd manifest for each set of images that
	// you generate. A common application for these images is Roku trick mode. The
	// thumbnails and full-frame images that MediaConvert creates with this feature
	// are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md
	ImageBasedTrickPlay *string `locationName:"imageBasedTrickPlay" type:"string" enum:"DashIsoImageBasedTrickPlay"`

	// Minimum time of initially buffered media that is needed to ensure smooth
	// playout.
	MinBufferTime *int64 `locationName:"minBufferTime" type:"integer"`

	// Keep this setting at the default value of 0, unless you are troubleshooting
	// a problem with how devices play back the end of your video asset. If you
	// know that player devices are hanging on the final segment of your video because
	// the length of your final segment is too short, use this setting to specify
	// a minimum final segment length, in seconds. Choose a value that is greater
	// than or equal to 1 and less than your segment length. When you specify a
	// value for this setting, the encoder will combine any final segment that is
	// shorter than the length that you specify with the previous segment. For example,
	// your segment length is 3 seconds and your final segment is .5 seconds without
	// a minimum final segment length; when you set the minimum final segment length
	// to 1, your final segment is 3.5 seconds.
	MinFinalSegmentLength *float64 `locationName:"minFinalSegmentLength" type:"double"`

	// Specify whether your DASH profile is on-demand or main. When you choose Main
	// profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011
	// in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE),
	// the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd.
	// When you choose On-demand, you must also set the output group setting Segment
	// control (SegmentControl) to Single file (SINGLE_FILE).
	MpdProfile *string `locationName:"mpdProfile" type:"string" enum:"DashIsoMpdProfile"`

	// Use this setting only when your output video stream has B-frames, which causes
	// the initial presentation time stamp (PTS) to be offset from the initial decode
	// time stamp (DTS). Specify how MediaConvert handles PTS when writing time
	// stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS)
	// when you want MediaConvert to use the initial PTS as the first time stamp
	// in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore
	// the initial PTS in the video stream and instead write the initial time stamp
	// as zero in the manifest. For outputs that don't have B-frames, the time stamps
	// in your DASH manifests start at zero regardless of your choice here.
	PtsOffsetHandlingForBFrames *string `locationName:"ptsOffsetHandlingForBFrames" type:"string" enum:"DashIsoPtsOffsetHandlingForBFrames"`

	// When set to SINGLE_FILE, a single output file is generated, which is internally
	// segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES,
	// separate segment files will be created.
	SegmentControl *string `locationName:"segmentControl" type:"string" enum:"DashIsoSegmentControl"`

	// Length of mpd segments to create (in seconds). Note that segments will end
	// on the next keyframe after this number of seconds, so actual segment length
	// may be longer. When Emit Single File is checked, the segmentation is internal
	// to a single output file and it does not cause the creation of many output
	// files as in other output types. Minimum value is 1.
	SegmentLength *int64 `locationName:"segmentLength" min:"1" type:"integer"`

	// If you get an HTTP error in the 400 range when you play back your DASH output,
	// enable this setting and run your transcoding job again. When you enable this
	// setting, the service writes precise segment durations in the DASH manifest.
	// The segment duration information appears inside the SegmentTimeline element,
	// inside SegmentTemplate at the Representation level. When you don't enable
	// this setting, the service writes approximate segment durations in your DASH
	// manifest.
	WriteSegmentTimelineInRepresentation *string `locationName:"writeSegmentTimelineInRepresentation" type:"string" enum:"DashIsoWriteSegmentTimelineInRepresentation"`
}
7248
7249// String returns the string representation
7250func (s DashIsoGroupSettings) String() string {
7251	return awsutil.Prettify(s)
7252}
7253
7254// GoString returns the string representation
7255func (s DashIsoGroupSettings) GoString() string {
7256	return s.String()
7257}
7258
7259// Validate inspects the fields of the type to determine if they are valid.
7260func (s *DashIsoGroupSettings) Validate() error {
7261	invalidParams := request.ErrInvalidParams{Context: "DashIsoGroupSettings"}
7262	if s.FragmentLength != nil && *s.FragmentLength < 1 {
7263		invalidParams.Add(request.NewErrParamMinValue("FragmentLength", 1))
7264	}
7265	if s.SegmentLength != nil && *s.SegmentLength < 1 {
7266		invalidParams.Add(request.NewErrParamMinValue("SegmentLength", 1))
7267	}
7268	if s.AdditionalManifests != nil {
7269		for i, v := range s.AdditionalManifests {
7270			if v == nil {
7271				continue
7272			}
7273			if err := v.Validate(); err != nil {
7274				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(request.ErrInvalidParams))
7275			}
7276		}
7277	}
7278
7279	if invalidParams.Len() > 0 {
7280		return invalidParams
7281	}
7282	return nil
7283}
7284
7285// SetAdditionalManifests sets the AdditionalManifests field's value.
7286func (s *DashIsoGroupSettings) SetAdditionalManifests(v []*DashAdditionalManifest) *DashIsoGroupSettings {
7287	s.AdditionalManifests = v
7288	return s
7289}
7290
7291// SetAudioChannelConfigSchemeIdUri sets the AudioChannelConfigSchemeIdUri field's value.
7292func (s *DashIsoGroupSettings) SetAudioChannelConfigSchemeIdUri(v string) *DashIsoGroupSettings {
7293	s.AudioChannelConfigSchemeIdUri = &v
7294	return s
7295}
7296
7297// SetBaseUrl sets the BaseUrl field's value.
7298func (s *DashIsoGroupSettings) SetBaseUrl(v string) *DashIsoGroupSettings {
7299	s.BaseUrl = &v
7300	return s
7301}
7302
7303// SetDestination sets the Destination field's value.
7304func (s *DashIsoGroupSettings) SetDestination(v string) *DashIsoGroupSettings {
7305	s.Destination = &v
7306	return s
7307}
7308
7309// SetDestinationSettings sets the DestinationSettings field's value.
7310func (s *DashIsoGroupSettings) SetDestinationSettings(v *DestinationSettings) *DashIsoGroupSettings {
7311	s.DestinationSettings = v
7312	return s
7313}
7314
7315// SetEncryption sets the Encryption field's value.
7316func (s *DashIsoGroupSettings) SetEncryption(v *DashIsoEncryptionSettings) *DashIsoGroupSettings {
7317	s.Encryption = v
7318	return s
7319}
7320
7321// SetFragmentLength sets the FragmentLength field's value.
7322func (s *DashIsoGroupSettings) SetFragmentLength(v int64) *DashIsoGroupSettings {
7323	s.FragmentLength = &v
7324	return s
7325}
7326
7327// SetHbbtvCompliance sets the HbbtvCompliance field's value.
7328func (s *DashIsoGroupSettings) SetHbbtvCompliance(v string) *DashIsoGroupSettings {
7329	s.HbbtvCompliance = &v
7330	return s
7331}
7332
7333// SetImageBasedTrickPlay sets the ImageBasedTrickPlay field's value.
7334func (s *DashIsoGroupSettings) SetImageBasedTrickPlay(v string) *DashIsoGroupSettings {
7335	s.ImageBasedTrickPlay = &v
7336	return s
7337}
7338
7339// SetMinBufferTime sets the MinBufferTime field's value.
7340func (s *DashIsoGroupSettings) SetMinBufferTime(v int64) *DashIsoGroupSettings {
7341	s.MinBufferTime = &v
7342	return s
7343}
7344
7345// SetMinFinalSegmentLength sets the MinFinalSegmentLength field's value.
7346func (s *DashIsoGroupSettings) SetMinFinalSegmentLength(v float64) *DashIsoGroupSettings {
7347	s.MinFinalSegmentLength = &v
7348	return s
7349}
7350
7351// SetMpdProfile sets the MpdProfile field's value.
7352func (s *DashIsoGroupSettings) SetMpdProfile(v string) *DashIsoGroupSettings {
7353	s.MpdProfile = &v
7354	return s
7355}
7356
7357// SetPtsOffsetHandlingForBFrames sets the PtsOffsetHandlingForBFrames field's value.
7358func (s *DashIsoGroupSettings) SetPtsOffsetHandlingForBFrames(v string) *DashIsoGroupSettings {
7359	s.PtsOffsetHandlingForBFrames = &v
7360	return s
7361}
7362
7363// SetSegmentControl sets the SegmentControl field's value.
7364func (s *DashIsoGroupSettings) SetSegmentControl(v string) *DashIsoGroupSettings {
7365	s.SegmentControl = &v
7366	return s
7367}
7368
7369// SetSegmentLength sets the SegmentLength field's value.
7370func (s *DashIsoGroupSettings) SetSegmentLength(v int64) *DashIsoGroupSettings {
7371	s.SegmentLength = &v
7372	return s
7373}
7374
7375// SetWriteSegmentTimelineInRepresentation sets the WriteSegmentTimelineInRepresentation field's value.
7376func (s *DashIsoGroupSettings) SetWriteSegmentTimelineInRepresentation(v string) *DashIsoGroupSettings {
7377	s.WriteSegmentTimelineInRepresentation = &v
7378	return s
7379}
7380
// Deinterlacer holds the settings for the deinterlacer video preprocessor.
// All fields are optional enum strings; a nil field leaves the service default
// in effect.
type Deinterlacer struct {
	_ struct{} `type:"structure"`

	// Only applies when you set Deinterlacer (DeinterlaceMode) to Deinterlace (DEINTERLACE)
	// or Adaptive (ADAPTIVE). Motion adaptive interpolate (INTERPOLATE) produces
	// sharper pictures, while blend (BLEND) produces smoother motion. Use (INTERPOLATE_TICKER)
	// OR (BLEND_TICKER) if your source file includes a ticker, such as a scrolling
	// headline at the bottom of the frame.
	Algorithm *string `locationName:"algorithm" type:"string" enum:"DeinterlaceAlgorithm"`

	// - When set to NORMAL (default), the deinterlacer does not convert frames
	// that are tagged in metadata as progressive. It will only convert those that
	// are tagged as some other type. - When set to FORCE_ALL_FRAMES, the deinterlacer
	// converts every frame to progressive - even those that are already tagged
	// as progressive. Turn Force mode on only if there is a good chance that the
	// metadata has tagged frames as progressive when they are not progressive.
	// Do not turn on otherwise; processing frames that are already progressive
	// into progressive will probably result in lower quality video.
	Control *string `locationName:"control" type:"string" enum:"DeinterlacerControl"`

	// Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing.
	// Default is Deinterlace. - Deinterlace converts interlaced to progressive.
	// - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p.
	// - Adaptive auto-detects and converts to progressive.
	Mode *string `locationName:"mode" type:"string" enum:"DeinterlacerMode"`
}
7408
7409// String returns the string representation
7410func (s Deinterlacer) String() string {
7411	return awsutil.Prettify(s)
7412}
7413
7414// GoString returns the string representation
7415func (s Deinterlacer) GoString() string {
7416	return s.String()
7417}
7418
7419// SetAlgorithm sets the Algorithm field's value.
7420func (s *Deinterlacer) SetAlgorithm(v string) *Deinterlacer {
7421	s.Algorithm = &v
7422	return s
7423}
7424
7425// SetControl sets the Control field's value.
7426func (s *Deinterlacer) SetControl(v string) *Deinterlacer {
7427	s.Control = &v
7428	return s
7429}
7430
7431// SetMode sets the Mode field's value.
7432func (s *Deinterlacer) SetMode(v string) *Deinterlacer {
7433	s.Mode = &v
7434	return s
7435}
7436
// DeleteJobTemplateInput deletes a job template by sending a request with the
// job template name.
type DeleteJobTemplateInput struct {
	_ struct{} `type:"structure"`

	// The name of the job template to be deleted. Sent as a URI path parameter
	// (see the location:"uri" tag).
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`
}
7446
7447// String returns the string representation
7448func (s DeleteJobTemplateInput) String() string {
7449	return awsutil.Prettify(s)
7450}
7451
7452// GoString returns the string representation
7453func (s DeleteJobTemplateInput) GoString() string {
7454	return s.String()
7455}
7456
7457// Validate inspects the fields of the type to determine if they are valid.
7458func (s *DeleteJobTemplateInput) Validate() error {
7459	invalidParams := request.ErrInvalidParams{Context: "DeleteJobTemplateInput"}
7460	if s.Name == nil {
7461		invalidParams.Add(request.NewErrParamRequired("Name"))
7462	}
7463	if s.Name != nil && len(*s.Name) < 1 {
7464		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
7465	}
7466
7467	if invalidParams.Len() > 0 {
7468		return invalidParams
7469	}
7470	return nil
7471}
7472
7473// SetName sets the Name field's value.
7474func (s *DeleteJobTemplateInput) SetName(v string) *DeleteJobTemplateInput {
7475	s.Name = &v
7476	return s
7477}
7478
// DeleteJobTemplateOutput is the intentionally empty response shape: delete
// job template requests will return an OK message or error message with an
// empty body.
type DeleteJobTemplateOutput struct {
	_ struct{} `type:"structure"`
}
7484
7485// String returns the string representation
7486func (s DeleteJobTemplateOutput) String() string {
7487	return awsutil.Prettify(s)
7488}
7489
7490// GoString returns the string representation
7491func (s DeleteJobTemplateOutput) GoString() string {
7492	return s.String()
7493}
7494
// DeletePresetInput deletes a preset by sending a request with the preset name.
type DeletePresetInput struct {
	_ struct{} `type:"structure"`

	// The name of the preset to be deleted. Sent as a URI path parameter (see
	// the location:"uri" tag).
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`
}
7504
7505// String returns the string representation
7506func (s DeletePresetInput) String() string {
7507	return awsutil.Prettify(s)
7508}
7509
7510// GoString returns the string representation
7511func (s DeletePresetInput) GoString() string {
7512	return s.String()
7513}
7514
7515// Validate inspects the fields of the type to determine if they are valid.
7516func (s *DeletePresetInput) Validate() error {
7517	invalidParams := request.ErrInvalidParams{Context: "DeletePresetInput"}
7518	if s.Name == nil {
7519		invalidParams.Add(request.NewErrParamRequired("Name"))
7520	}
7521	if s.Name != nil && len(*s.Name) < 1 {
7522		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
7523	}
7524
7525	if invalidParams.Len() > 0 {
7526		return invalidParams
7527	}
7528	return nil
7529}
7530
7531// SetName sets the Name field's value.
7532func (s *DeletePresetInput) SetName(v string) *DeletePresetInput {
7533	s.Name = &v
7534	return s
7535}
7536
// DeletePresetOutput is the intentionally empty response shape: delete preset
// requests will return an OK message or error message with an empty body.
type DeletePresetOutput struct {
	_ struct{} `type:"structure"`
}
7542
7543// String returns the string representation
7544func (s DeletePresetOutput) String() string {
7545	return awsutil.Prettify(s)
7546}
7547
7548// GoString returns the string representation
7549func (s DeletePresetOutput) GoString() string {
7550	return s.String()
7551}
7552
// DeleteQueueInput deletes a queue by sending a request with the queue name.
// You can't delete a queue with an active pricing plan or one that has
// unprocessed jobs in it.
type DeleteQueueInput struct {
	_ struct{} `type:"structure"`

	// The name of the queue that you want to delete. Sent as a URI path parameter
	// (see the location:"uri" tag).
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`
}
7563
7564// String returns the string representation
7565func (s DeleteQueueInput) String() string {
7566	return awsutil.Prettify(s)
7567}
7568
7569// GoString returns the string representation
7570func (s DeleteQueueInput) GoString() string {
7571	return s.String()
7572}
7573
7574// Validate inspects the fields of the type to determine if they are valid.
7575func (s *DeleteQueueInput) Validate() error {
7576	invalidParams := request.ErrInvalidParams{Context: "DeleteQueueInput"}
7577	if s.Name == nil {
7578		invalidParams.Add(request.NewErrParamRequired("Name"))
7579	}
7580	if s.Name != nil && len(*s.Name) < 1 {
7581		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
7582	}
7583
7584	if invalidParams.Len() > 0 {
7585		return invalidParams
7586	}
7587	return nil
7588}
7589
7590// SetName sets the Name field's value.
7591func (s *DeleteQueueInput) SetName(v string) *DeleteQueueInput {
7592	s.Name = &v
7593	return s
7594}
7595
// DeleteQueueOutput is the intentionally empty response shape: delete queue
// requests return an OK message or error message with an empty body.
type DeleteQueueOutput struct {
	_ struct{} `type:"structure"`
}
7601
7602// String returns the string representation
7603func (s DeleteQueueOutput) String() string {
7604	return awsutil.Prettify(s)
7605}
7606
7607// GoString returns the string representation
7608func (s DeleteQueueOutput) GoString() string {
7609	return s.String()
7610}
7611
// DescribeEndpointsInput: send a request with an empty body to the regional
// API endpoint to get your account API endpoint.
type DescribeEndpointsInput struct {
	_ struct{} `type:"structure"`

	// Optional. Max number of endpoints, up to twenty, that will be returned at
	// one time.
	MaxResults *int64 `locationName:"maxResults" type:"integer"`

	// Optional field, defaults to DEFAULT. Specify DEFAULT for this operation to
	// return your endpoints if any exist, or to create an endpoint for you and
	// return it if one doesn't already exist. Specify GET_ONLY to return your endpoints
	// if any exist, or an empty list if none exist.
	Mode *string `locationName:"mode" type:"string" enum:"DescribeEndpointsMode"`

	// Use this string, provided with the response to a previous request, to request
	// the next batch of endpoints.
	NextToken *string `locationName:"nextToken" type:"string"`
}
7631
7632// String returns the string representation
7633func (s DescribeEndpointsInput) String() string {
7634	return awsutil.Prettify(s)
7635}
7636
7637// GoString returns the string representation
7638func (s DescribeEndpointsInput) GoString() string {
7639	return s.String()
7640}
7641
7642// SetMaxResults sets the MaxResults field's value.
7643func (s *DescribeEndpointsInput) SetMaxResults(v int64) *DescribeEndpointsInput {
7644	s.MaxResults = &v
7645	return s
7646}
7647
7648// SetMode sets the Mode field's value.
7649func (s *DescribeEndpointsInput) SetMode(v string) *DescribeEndpointsInput {
7650	s.Mode = &v
7651	return s
7652}
7653
7654// SetNextToken sets the NextToken field's value.
7655func (s *DescribeEndpointsInput) SetNextToken(v string) *DescribeEndpointsInput {
7656	s.NextToken = &v
7657	return s
7658}
7659
// DescribeEndpointsOutput is the response shape: successful describe endpoints
// requests will return your account API endpoint.
type DescribeEndpointsOutput struct {
	_ struct{} `type:"structure"`

	// List of endpoints
	Endpoints []*Endpoint `locationName:"endpoints" type:"list"`

	// Use this string to request the next batch of endpoints. Nil when there are
	// no further pages.
	NextToken *string `locationName:"nextToken" type:"string"`
}
7670
7671// String returns the string representation
7672func (s DescribeEndpointsOutput) String() string {
7673	return awsutil.Prettify(s)
7674}
7675
7676// GoString returns the string representation
7677func (s DescribeEndpointsOutput) GoString() string {
7678	return s.String()
7679}
7680
7681// SetEndpoints sets the Endpoints field's value.
7682func (s *DescribeEndpointsOutput) SetEndpoints(v []*Endpoint) *DescribeEndpointsOutput {
7683	s.Endpoints = v
7684	return s
7685}
7686
7687// SetNextToken sets the NextToken field's value.
7688func (s *DescribeEndpointsOutput) SetNextToken(v string) *DescribeEndpointsOutput {
7689	s.NextToken = &v
7690	return s
7691}
7692
// DestinationSettings holds settings associated with the destination. Will
// vary based on the type of destination; currently only S3 settings are
// represented here.
type DestinationSettings struct {
	_ struct{} `type:"structure"`

	// Settings associated with S3 destination
	S3Settings *S3DestinationSettings `locationName:"s3Settings" type:"structure"`
}
7701
7702// String returns the string representation
7703func (s DestinationSettings) String() string {
7704	return awsutil.Prettify(s)
7705}
7706
7707// GoString returns the string representation
7708func (s DestinationSettings) GoString() string {
7709	return s.String()
7710}
7711
7712// SetS3Settings sets the S3Settings field's value.
7713func (s *DestinationSettings) SetS3Settings(v *S3DestinationSettings) *DestinationSettings {
7714	s.S3Settings = v
7715	return s
7716}
7717
// DisassociateCertificateInput removes an association between the Amazon
// Resource Name (ARN) of an AWS Certificate Manager (ACM) certificate and an
// AWS Elemental MediaConvert resource.
type DisassociateCertificateInput struct {
	_ struct{} `type:"structure"`

	// The ARN of the ACM certificate that you want to disassociate from your MediaConvert
	// resource. Sent as a URI path parameter (see the location:"uri" tag).
	//
	// Arn is a required field
	Arn *string `location:"uri" locationName:"arn" type:"string" required:"true"`
}
7729
7730// String returns the string representation
7731func (s DisassociateCertificateInput) String() string {
7732	return awsutil.Prettify(s)
7733}
7734
7735// GoString returns the string representation
7736func (s DisassociateCertificateInput) GoString() string {
7737	return s.String()
7738}
7739
7740// Validate inspects the fields of the type to determine if they are valid.
7741func (s *DisassociateCertificateInput) Validate() error {
7742	invalidParams := request.ErrInvalidParams{Context: "DisassociateCertificateInput"}
7743	if s.Arn == nil {
7744		invalidParams.Add(request.NewErrParamRequired("Arn"))
7745	}
7746	if s.Arn != nil && len(*s.Arn) < 1 {
7747		invalidParams.Add(request.NewErrParamMinLen("Arn", 1))
7748	}
7749
7750	if invalidParams.Len() > 0 {
7751		return invalidParams
7752	}
7753	return nil
7754}
7755
7756// SetArn sets the Arn field's value.
7757func (s *DisassociateCertificateInput) SetArn(v string) *DisassociateCertificateInput {
7758	s.Arn = &v
7759	return s
7760}
7761
// DisassociateCertificateOutput is the intentionally empty response shape:
// successful disassociation of a Certificate Manager Amazon Resource Name
// (ARN) with MediaConvert returns an OK message.
type DisassociateCertificateOutput struct {
	_ struct{} `type:"structure"`
}
7767
7768// String returns the string representation
7769func (s DisassociateCertificateOutput) String() string {
7770	return awsutil.Prettify(s)
7771}
7772
7773// GoString returns the string representation
7774func (s DisassociateCertificateOutput) GoString() string {
7775	return s.String()
7776}
7777
// DolbyVision: with AWS Elemental MediaConvert, you can create profile 5 Dolby
// Vision outputs from MXF and IMF sources that contain mastering information
// as frame-interleaved Dolby Vision metadata.
type DolbyVision struct {
	_ struct{} `type:"structure"`

	// Use these settings when you set DolbyVisionLevel6Mode to SPECIFY to override
	// the MaxCLL and MaxFALL values in your input with new values.
	L6Metadata *DolbyVisionLevel6Metadata `locationName:"l6Metadata" type:"structure"`

	// Use Dolby Vision Mode to choose how the service will handle Dolby Vision
	// MaxCLL and MaxFALL properties.
	L6Mode *string `locationName:"l6Mode" type:"string" enum:"DolbyVisionLevel6Mode"`

	// In the current MediaConvert implementation, the Dolby Vision profile is always
	// 5 (PROFILE_5). Therefore, all of your inputs must contain Dolby Vision frame
	// interleaved data.
	Profile *string `locationName:"profile" type:"string" enum:"DolbyVisionProfile"`
}
7797
7798// String returns the string representation
7799func (s DolbyVision) String() string {
7800	return awsutil.Prettify(s)
7801}
7802
7803// GoString returns the string representation
7804func (s DolbyVision) GoString() string {
7805	return s.String()
7806}
7807
7808// SetL6Metadata sets the L6Metadata field's value.
7809func (s *DolbyVision) SetL6Metadata(v *DolbyVisionLevel6Metadata) *DolbyVision {
7810	s.L6Metadata = v
7811	return s
7812}
7813
7814// SetL6Mode sets the L6Mode field's value.
7815func (s *DolbyVision) SetL6Mode(v string) *DolbyVision {
7816	s.L6Mode = &v
7817	return s
7818}
7819
7820// SetProfile sets the Profile field's value.
7821func (s *DolbyVision) SetProfile(v string) *DolbyVision {
7822	s.Profile = &v
7823	return s
7824}
7825
// DolbyVisionLevel6Metadata: use these settings when you set
// DolbyVisionLevel6Mode to SPECIFY to override the MaxCLL and MaxFALL values
// in your input with new values.
type DolbyVisionLevel6Metadata struct {
	_ struct{} `type:"structure"`

	// Maximum Content Light Level. Static HDR metadata that corresponds to the
	// brightest pixel in the entire stream. Measured in nits.
	MaxCll *int64 `locationName:"maxCll" type:"integer"`

	// Maximum Frame-Average Light Level. Static HDR metadata that corresponds to
	// the highest frame-average brightness in the entire stream. Measured in nits.
	MaxFall *int64 `locationName:"maxFall" type:"integer"`
}
7839
7840// String returns the string representation
7841func (s DolbyVisionLevel6Metadata) String() string {
7842	return awsutil.Prettify(s)
7843}
7844
7845// GoString returns the string representation
7846func (s DolbyVisionLevel6Metadata) GoString() string {
7847	return s.String()
7848}
7849
7850// SetMaxCll sets the MaxCll field's value.
7851func (s *DolbyVisionLevel6Metadata) SetMaxCll(v int64) *DolbyVisionLevel6Metadata {
7852	s.MaxCll = &v
7853	return s
7854}
7855
7856// SetMaxFall sets the MaxFall field's value.
7857func (s *DolbyVisionLevel6Metadata) SetMaxFall(v int64) *DolbyVisionLevel6Metadata {
7858	s.MaxFall = &v
7859	return s
7860}
7861
// DvbNitSettings: use these settings to insert a DVB Network Information Table
// (NIT) in the transport stream of this output. When you work directly in your
// JSON job specification, include this object only when your job has a
// transport stream output and the container settings contain the object
// M2tsSettings.
type DvbNitSettings struct {
	_ struct{} `type:"structure"`

	// The numeric value placed in the Network Information Table (NIT).
	NetworkId *int64 `locationName:"networkId" type:"integer"`

	// The network name text placed in the network_name_descriptor inside the Network
	// Information Table. Maximum length is 256 characters. Minimum length of 1 is
	// enforced by Validate.
	NetworkName *string `locationName:"networkName" min:"1" type:"string"`

	// The number of milliseconds between instances of this table in the output
	// transport stream. Minimum value of 25 is enforced by Validate.
	NitInterval *int64 `locationName:"nitInterval" min:"25" type:"integer"`
}
7880
7881// String returns the string representation
7882func (s DvbNitSettings) String() string {
7883	return awsutil.Prettify(s)
7884}
7885
7886// GoString returns the string representation
7887func (s DvbNitSettings) GoString() string {
7888	return s.String()
7889}
7890
7891// Validate inspects the fields of the type to determine if they are valid.
7892func (s *DvbNitSettings) Validate() error {
7893	invalidParams := request.ErrInvalidParams{Context: "DvbNitSettings"}
7894	if s.NetworkName != nil && len(*s.NetworkName) < 1 {
7895		invalidParams.Add(request.NewErrParamMinLen("NetworkName", 1))
7896	}
7897	if s.NitInterval != nil && *s.NitInterval < 25 {
7898		invalidParams.Add(request.NewErrParamMinValue("NitInterval", 25))
7899	}
7900
7901	if invalidParams.Len() > 0 {
7902		return invalidParams
7903	}
7904	return nil
7905}
7906
7907// SetNetworkId sets the NetworkId field's value.
7908func (s *DvbNitSettings) SetNetworkId(v int64) *DvbNitSettings {
7909	s.NetworkId = &v
7910	return s
7911}
7912
7913// SetNetworkName sets the NetworkName field's value.
7914func (s *DvbNitSettings) SetNetworkName(v string) *DvbNitSettings {
7915	s.NetworkName = &v
7916	return s
7917}
7918
7919// SetNitInterval sets the NitInterval field's value.
7920func (s *DvbNitSettings) SetNitInterval(v int64) *DvbNitSettings {
7921	s.NitInterval = &v
7922	return s
7923}
7924
// DvbSdtSettings: use these settings to insert a DVB Service Description Table
// (SDT) in the transport stream of this output. When you work directly in your
// JSON job specification, include this object only when your job has a
// transport stream output and the container settings contain the object
// M2tsSettings.
type DvbSdtSettings struct {
	_ struct{} `type:"structure"`

	// Selects method of inserting SDT information into output stream. "Follow input
	// SDT" copies SDT information from input stream to output stream. "Follow input
	// SDT if present" copies SDT information from input stream to output stream
	// if SDT information is present in the input, otherwise it will fall back on
	// the user-defined values. Enter "SDT Manually" means user will enter the SDT
	// information. "No SDT" means output stream will not contain SDT information.
	OutputSdt *string `locationName:"outputSdt" type:"string" enum:"OutputSdt"`

	// The number of milliseconds between instances of this table in the output
	// transport stream. Minimum value of 25 is enforced by Validate.
	SdtInterval *int64 `locationName:"sdtInterval" min:"25" type:"integer"`

	// The service name placed in the service_descriptor in the Service Description
	// Table. Maximum length is 256 characters. Minimum length of 1 is enforced by
	// Validate.
	ServiceName *string `locationName:"serviceName" min:"1" type:"string"`

	// The service provider name placed in the service_descriptor in the Service
	// Description Table. Maximum length is 256 characters. Minimum length of 1 is
	// enforced by Validate.
	ServiceProviderName *string `locationName:"serviceProviderName" min:"1" type:"string"`
}
7952
7953// String returns the string representation
7954func (s DvbSdtSettings) String() string {
7955	return awsutil.Prettify(s)
7956}
7957
7958// GoString returns the string representation
7959func (s DvbSdtSettings) GoString() string {
7960	return s.String()
7961}
7962
7963// Validate inspects the fields of the type to determine if they are valid.
7964func (s *DvbSdtSettings) Validate() error {
7965	invalidParams := request.ErrInvalidParams{Context: "DvbSdtSettings"}
7966	if s.SdtInterval != nil && *s.SdtInterval < 25 {
7967		invalidParams.Add(request.NewErrParamMinValue("SdtInterval", 25))
7968	}
7969	if s.ServiceName != nil && len(*s.ServiceName) < 1 {
7970		invalidParams.Add(request.NewErrParamMinLen("ServiceName", 1))
7971	}
7972	if s.ServiceProviderName != nil && len(*s.ServiceProviderName) < 1 {
7973		invalidParams.Add(request.NewErrParamMinLen("ServiceProviderName", 1))
7974	}
7975
7976	if invalidParams.Len() > 0 {
7977		return invalidParams
7978	}
7979	return nil
7980}
7981
7982// SetOutputSdt sets the OutputSdt field's value.
7983func (s *DvbSdtSettings) SetOutputSdt(v string) *DvbSdtSettings {
7984	s.OutputSdt = &v
7985	return s
7986}
7987
7988// SetSdtInterval sets the SdtInterval field's value.
7989func (s *DvbSdtSettings) SetSdtInterval(v int64) *DvbSdtSettings {
7990	s.SdtInterval = &v
7991	return s
7992}
7993
7994// SetServiceName sets the ServiceName field's value.
7995func (s *DvbSdtSettings) SetServiceName(v string) *DvbSdtSettings {
7996	s.ServiceName = &v
7997	return s
7998}
7999
8000// SetServiceProviderName sets the ServiceProviderName field's value.
8001func (s *DvbSdtSettings) SetServiceProviderName(v string) *DvbSdtSettings {
8002	s.ServiceProviderName = &v
8003	return s
8004}
8005
// DvbSubDestinationSettings holds settings related to DVB-Sub captions. Set
// up DVB-Sub captions in the same output as your video. For more information,
// see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set destinationType to DVB_SUB.
type DvbSubDestinationSettings struct {
	_ struct{} `type:"structure"`

	// If no explicit x_position or y_position is provided, setting alignment to
	// centered will place the captions at the bottom center of the output. Similarly,
	// setting a left alignment will align captions to the bottom left of the output.
	// If x and y positions are given in conjunction with the alignment parameter,
	// the font will be justified (either left or centered) relative to those coordinates.
	// This option is not valid for source captions that are STL, 608/embedded or
	// teletext. These source settings are already pre-defined by the caption stream.
	// All burn-in and DVB-Sub font settings must match.
	Alignment *string `locationName:"alignment" type:"string" enum:"DvbSubtitleAlignment"`

	// Specifies the color of the rectangle behind the captions. All burn-in and
	// DVB-Sub font settings must match.
	BackgroundColor *string `locationName:"backgroundColor" type:"string" enum:"DvbSubtitleBackgroundColor"`

	// Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent.
	// Leaving this parameter blank is equivalent to setting it to 0 (transparent).
	// All burn-in and DVB-Sub font settings must match.
	BackgroundOpacity *int64 `locationName:"backgroundOpacity" type:"integer"`

	// Specify how MediaConvert handles the display definition segment (DDS). Keep
	// the default, None (NONE), to exclude the DDS from this set of captions. Choose
	// No display window (NO_DISPLAY_WINDOW) to have MediaConvert include the DDS
	// but not include display window data. In this case, MediaConvert writes that
	// information to the page composition segment (PCS) instead. Choose Specify
	// (SPECIFIED) to have MediaConvert set up the display window based on the values
	// that you specify in related job settings. For video resolutions that are
	// 576 pixels or smaller in height, MediaConvert doesn't include the DDS, regardless
	// of the value you choose for DDS handling (ddsHandling). In this case, it
	// doesn't write the display window data to the PCS either. Related settings:
	// Use the settings DDS x-coordinate (ddsXCoordinate) and DDS y-coordinate (ddsYCoordinate)
	// to specify the offset between the top left corner of the display window and
	// the top left corner of the video frame. All burn-in and DVB-Sub font settings
	// must match.
	DdsHandling *string `locationName:"ddsHandling" type:"string" enum:"DvbddsHandling"`

	// Use this setting, along with DDS y-coordinate (ddsYCoordinate), to specify
	// the upper left corner of the display definition segment (DDS) display window.
	// With this setting, specify the distance, in pixels, between the left side
	// of the frame and the left side of the DDS display window. Keep the default
	// value, 0, to have MediaConvert automatically choose this offset. Related
	// setting: When you use this setting, you must set DDS handling (ddsHandling)
	// to a value other than None (NONE). MediaConvert uses these values to determine
	// whether to write page position data to the DDS or to the page composition
	// segment (PCS). All burn-in and DVB-Sub font settings must match.
	DdsXCoordinate *int64 `locationName:"ddsXCoordinate" type:"integer"`

	// Use this setting, along with DDS x-coordinate (ddsXCoordinate), to specify
	// the upper left corner of the display definition segment (DDS) display window.
	// With this setting, specify the distance, in pixels, between the top of the
	// frame and the top of the DDS display window. Keep the default value, 0, to
	// have MediaConvert automatically choose this offset. Related setting: When
	// you use this setting, you must set DDS handling (ddsHandling) to a value
	// other than None (NONE). MediaConvert uses these values to determine whether
	// to write page position data to the DDS or to the page composition segment
	// (PCS). All burn-in and DVB-Sub font settings must match.
	DdsYCoordinate *int64 `locationName:"ddsYCoordinate" type:"integer"`

	// Specifies the color of the DVB-SUB captions. This option is not valid for
	// source captions that are STL, 608/embedded or teletext. These source settings
	// are already pre-defined by the caption stream. All burn-in and DVB-Sub font
	// settings must match.
	FontColor *string `locationName:"fontColor" type:"string" enum:"DvbSubtitleFontColor"`

	// Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent.
	// All burn-in and DVB-Sub font settings must match.
	FontOpacity *int64 `locationName:"fontOpacity" type:"integer"`

	// Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and
	// DVB-Sub font settings must match.
	FontResolution *int64 `locationName:"fontResolution" min:"96" type:"integer"`

	// Provide the font script, using an ISO 15924 script code, if the LanguageCode
	// is not sufficient for determining the script type. Where LanguageCode or
	// CustomLanguageCode is sufficient, use "AUTOMATIC" or leave unset. This is
	// used to help determine the appropriate font for rendering DVB-Sub captions.
	FontScript *string `locationName:"fontScript" type:"string" enum:"FontScript"`

	// A positive integer indicates the exact font size in points. Set to 0 for
	// automatic font size selection. All burn-in and DVB-Sub font settings must
	// match.
	FontSize *int64 `locationName:"fontSize" type:"integer"`

	// Specify the height, in pixels, of this set of DVB-Sub captions. The default
	// value is 576 pixels. Related setting: When you use this setting, you must
	// set DDS handling (ddsHandling) to a value other than None (NONE). All burn-in
	// and DVB-Sub font settings must match.
	Height *int64 `locationName:"height" min:"1" type:"integer"`

	// Specifies font outline color. This option is not valid for source captions
	// that are either 608/embedded or teletext. These source settings are already
	// pre-defined by the caption stream. All burn-in and DVB-Sub font settings
	// must match.
	OutlineColor *string `locationName:"outlineColor" type:"string" enum:"DvbSubtitleOutlineColor"`

	// Specifies font outline size in pixels. This option is not valid for source
	// captions that are either 608/embedded or teletext. These source settings
	// are already pre-defined by the caption stream. All burn-in and DVB-Sub font
	// settings must match.
	OutlineSize *int64 `locationName:"outlineSize" type:"integer"`

	// Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub
	// font settings must match.
	ShadowColor *string `locationName:"shadowColor" type:"string" enum:"DvbSubtitleShadowColor"`

	// Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving
	// this parameter blank is equivalent to setting it to 0 (transparent). All
	// burn-in and DVB-Sub font settings must match.
	ShadowOpacity *int64 `locationName:"shadowOpacity" type:"integer"`

	// Specifies the horizontal offset of the shadow relative to the captions in
	// pixels. A value of -2 would result in a shadow offset 2 pixels to the left.
	// All burn-in and DVB-Sub font settings must match.
	ShadowXOffset *int64 `locationName:"shadowXOffset" type:"integer"`

	// Specifies the vertical offset of the shadow relative to the captions in pixels.
	// A value of -2 would result in a shadow offset 2 pixels above the text. All
	// burn-in and DVB-Sub font settings must match.
	ShadowYOffset *int64 `locationName:"shadowYOffset" type:"integer"`

	// Specify whether your DVB subtitles are standard or for hearing impaired.
	// Choose hearing impaired if your subtitles include audio descriptions and
	// dialogue. Choose standard if your subtitles include only dialogue.
	SubtitlingType *string `locationName:"subtitlingType" type:"string" enum:"DvbSubtitlingType"`

	// Only applies to jobs with input captions in Teletext or STL formats. Specify
	// whether the spacing between letters in your captions is set by the captions
	// grid or varies depending on letter width. Choose fixed grid to conform to
	// the spacing specified in the captions file more accurately. Choose proportional
	// to make the text easier to read if the captions are closed caption.
	TeletextSpacing *string `locationName:"teletextSpacing" type:"string" enum:"DvbSubtitleTeletextSpacing"`

	// Specify the width, in pixels, of this set of DVB-Sub captions. The default
	// value is 720 pixels. Related setting: When you use this setting, you must
	// set DDS handling (ddsHandling) to a value other than None (NONE). All burn-in
	// and DVB-Sub font settings must match.
	Width *int64 `locationName:"width" min:"1" type:"integer"`

	// Specifies the horizontal position of the caption relative to the left side
	// of the output in pixels. A value of 10 would result in the captions starting
	// 10 pixels from the left of the output. If no explicit x_position is provided,
	// the horizontal caption position will be determined by the alignment parameter.
	// This option is not valid for source captions that are STL, 608/embedded or
	// teletext. These source settings are already pre-defined by the caption stream.
	// All burn-in and DVB-Sub font settings must match.
	XPosition *int64 `locationName:"xPosition" type:"integer"`

	// Specifies the vertical position of the caption relative to the top of the
	// output in pixels. A value of 10 would result in the captions starting 10
	// pixels from the top of the output. If no explicit y_position is provided,
	// the caption will be positioned towards the bottom of the output. This option
	// is not valid for source captions that are STL, 608/embedded or teletext.
	// These source settings are already pre-defined by the caption stream. All
	// burn-in and DVB-Sub font settings must match.
	YPosition *int64 `locationName:"yPosition" type:"integer"`
}
8168
8169// String returns the string representation
8170func (s DvbSubDestinationSettings) String() string {
8171	return awsutil.Prettify(s)
8172}
8173
8174// GoString returns the string representation
8175func (s DvbSubDestinationSettings) GoString() string {
8176	return s.String()
8177}
8178
8179// Validate inspects the fields of the type to determine if they are valid.
8180func (s *DvbSubDestinationSettings) Validate() error {
8181	invalidParams := request.ErrInvalidParams{Context: "DvbSubDestinationSettings"}
8182	if s.FontResolution != nil && *s.FontResolution < 96 {
8183		invalidParams.Add(request.NewErrParamMinValue("FontResolution", 96))
8184	}
8185	if s.Height != nil && *s.Height < 1 {
8186		invalidParams.Add(request.NewErrParamMinValue("Height", 1))
8187	}
8188	if s.ShadowXOffset != nil && *s.ShadowXOffset < -2.147483648e+09 {
8189		invalidParams.Add(request.NewErrParamMinValue("ShadowXOffset", -2.147483648e+09))
8190	}
8191	if s.ShadowYOffset != nil && *s.ShadowYOffset < -2.147483648e+09 {
8192		invalidParams.Add(request.NewErrParamMinValue("ShadowYOffset", -2.147483648e+09))
8193	}
8194	if s.Width != nil && *s.Width < 1 {
8195		invalidParams.Add(request.NewErrParamMinValue("Width", 1))
8196	}
8197
8198	if invalidParams.Len() > 0 {
8199		return invalidParams
8200	}
8201	return nil
8202}
8203
8204// SetAlignment sets the Alignment field's value.
8205func (s *DvbSubDestinationSettings) SetAlignment(v string) *DvbSubDestinationSettings {
8206	s.Alignment = &v
8207	return s
8208}
8209
8210// SetBackgroundColor sets the BackgroundColor field's value.
8211func (s *DvbSubDestinationSettings) SetBackgroundColor(v string) *DvbSubDestinationSettings {
8212	s.BackgroundColor = &v
8213	return s
8214}
8215
8216// SetBackgroundOpacity sets the BackgroundOpacity field's value.
8217func (s *DvbSubDestinationSettings) SetBackgroundOpacity(v int64) *DvbSubDestinationSettings {
8218	s.BackgroundOpacity = &v
8219	return s
8220}
8221
8222// SetDdsHandling sets the DdsHandling field's value.
8223func (s *DvbSubDestinationSettings) SetDdsHandling(v string) *DvbSubDestinationSettings {
8224	s.DdsHandling = &v
8225	return s
8226}
8227
8228// SetDdsXCoordinate sets the DdsXCoordinate field's value.
8229func (s *DvbSubDestinationSettings) SetDdsXCoordinate(v int64) *DvbSubDestinationSettings {
8230	s.DdsXCoordinate = &v
8231	return s
8232}
8233
8234// SetDdsYCoordinate sets the DdsYCoordinate field's value.
8235func (s *DvbSubDestinationSettings) SetDdsYCoordinate(v int64) *DvbSubDestinationSettings {
8236	s.DdsYCoordinate = &v
8237	return s
8238}
8239
8240// SetFontColor sets the FontColor field's value.
8241func (s *DvbSubDestinationSettings) SetFontColor(v string) *DvbSubDestinationSettings {
8242	s.FontColor = &v
8243	return s
8244}
8245
8246// SetFontOpacity sets the FontOpacity field's value.
8247func (s *DvbSubDestinationSettings) SetFontOpacity(v int64) *DvbSubDestinationSettings {
8248	s.FontOpacity = &v
8249	return s
8250}
8251
8252// SetFontResolution sets the FontResolution field's value.
8253func (s *DvbSubDestinationSettings) SetFontResolution(v int64) *DvbSubDestinationSettings {
8254	s.FontResolution = &v
8255	return s
8256}
8257
8258// SetFontScript sets the FontScript field's value.
8259func (s *DvbSubDestinationSettings) SetFontScript(v string) *DvbSubDestinationSettings {
8260	s.FontScript = &v
8261	return s
8262}
8263
8264// SetFontSize sets the FontSize field's value.
8265func (s *DvbSubDestinationSettings) SetFontSize(v int64) *DvbSubDestinationSettings {
8266	s.FontSize = &v
8267	return s
8268}
8269
8270// SetHeight sets the Height field's value.
8271func (s *DvbSubDestinationSettings) SetHeight(v int64) *DvbSubDestinationSettings {
8272	s.Height = &v
8273	return s
8274}
8275
8276// SetOutlineColor sets the OutlineColor field's value.
8277func (s *DvbSubDestinationSettings) SetOutlineColor(v string) *DvbSubDestinationSettings {
8278	s.OutlineColor = &v
8279	return s
8280}
8281
8282// SetOutlineSize sets the OutlineSize field's value.
8283func (s *DvbSubDestinationSettings) SetOutlineSize(v int64) *DvbSubDestinationSettings {
8284	s.OutlineSize = &v
8285	return s
8286}
8287
8288// SetShadowColor sets the ShadowColor field's value.
8289func (s *DvbSubDestinationSettings) SetShadowColor(v string) *DvbSubDestinationSettings {
8290	s.ShadowColor = &v
8291	return s
8292}
8293
8294// SetShadowOpacity sets the ShadowOpacity field's value.
8295func (s *DvbSubDestinationSettings) SetShadowOpacity(v int64) *DvbSubDestinationSettings {
8296	s.ShadowOpacity = &v
8297	return s
8298}
8299
8300// SetShadowXOffset sets the ShadowXOffset field's value.
8301func (s *DvbSubDestinationSettings) SetShadowXOffset(v int64) *DvbSubDestinationSettings {
8302	s.ShadowXOffset = &v
8303	return s
8304}
8305
8306// SetShadowYOffset sets the ShadowYOffset field's value.
8307func (s *DvbSubDestinationSettings) SetShadowYOffset(v int64) *DvbSubDestinationSettings {
8308	s.ShadowYOffset = &v
8309	return s
8310}
8311
8312// SetSubtitlingType sets the SubtitlingType field's value.
8313func (s *DvbSubDestinationSettings) SetSubtitlingType(v string) *DvbSubDestinationSettings {
8314	s.SubtitlingType = &v
8315	return s
8316}
8317
8318// SetTeletextSpacing sets the TeletextSpacing field's value.
8319func (s *DvbSubDestinationSettings) SetTeletextSpacing(v string) *DvbSubDestinationSettings {
8320	s.TeletextSpacing = &v
8321	return s
8322}
8323
8324// SetWidth sets the Width field's value.
8325func (s *DvbSubDestinationSettings) SetWidth(v int64) *DvbSubDestinationSettings {
8326	s.Width = &v
8327	return s
8328}
8329
8330// SetXPosition sets the XPosition field's value.
8331func (s *DvbSubDestinationSettings) SetXPosition(v int64) *DvbSubDestinationSettings {
8332	s.XPosition = &v
8333	return s
8334}
8335
8336// SetYPosition sets the YPosition field's value.
8337func (s *DvbSubDestinationSettings) SetYPosition(v int64) *DvbSubDestinationSettings {
8338	s.YPosition = &v
8339	return s
8340}
8341
// DVB Sub Source Settings
type DvbSubSourceSettings struct {
	_ struct{} `type:"structure"`

	// When using DVB-Sub with Burn-In or SMPTE-TT, use this PID for the source
	// content. Unused for DVB-Sub passthrough. All DVB-Sub content is passed through,
	// regardless of selectors.
	// Minimum accepted value is 1 (per the min struct tag); Validate enforces this.
	Pid *int64 `locationName:"pid" min:"1" type:"integer"`
}
8351
8352// String returns the string representation
8353func (s DvbSubSourceSettings) String() string {
8354	return awsutil.Prettify(s)
8355}
8356
8357// GoString returns the string representation
8358func (s DvbSubSourceSettings) GoString() string {
8359	return s.String()
8360}
8361
8362// Validate inspects the fields of the type to determine if they are valid.
8363func (s *DvbSubSourceSettings) Validate() error {
8364	invalidParams := request.ErrInvalidParams{Context: "DvbSubSourceSettings"}
8365	if s.Pid != nil && *s.Pid < 1 {
8366		invalidParams.Add(request.NewErrParamMinValue("Pid", 1))
8367	}
8368
8369	if invalidParams.Len() > 0 {
8370		return invalidParams
8371	}
8372	return nil
8373}
8374
8375// SetPid sets the Pid field's value.
8376func (s *DvbSubSourceSettings) SetPid(v int64) *DvbSubSourceSettings {
8377	s.Pid = &v
8378	return s
8379}
8380
// Use these settings to insert a DVB Time and Date Table (TDT) in the transport
// stream of this output. When you work directly in your JSON job specification,
// include this object only when your job has a transport stream output and
// the container settings contain the object M2tsSettings.
type DvbTdtSettings struct {
	_ struct{} `type:"structure"`

	// The number of milliseconds between instances of this table in the output
	// transport stream.
	// Minimum accepted value is 1000 (per the min struct tag); Validate enforces this.
	TdtInterval *int64 `locationName:"tdtInterval" min:"1000" type:"integer"`
}
8392
8393// String returns the string representation
8394func (s DvbTdtSettings) String() string {
8395	return awsutil.Prettify(s)
8396}
8397
8398// GoString returns the string representation
8399func (s DvbTdtSettings) GoString() string {
8400	return s.String()
8401}
8402
8403// Validate inspects the fields of the type to determine if they are valid.
8404func (s *DvbTdtSettings) Validate() error {
8405	invalidParams := request.ErrInvalidParams{Context: "DvbTdtSettings"}
8406	if s.TdtInterval != nil && *s.TdtInterval < 1000 {
8407		invalidParams.Add(request.NewErrParamMinValue("TdtInterval", 1000))
8408	}
8409
8410	if invalidParams.Len() > 0 {
8411		return invalidParams
8412	}
8413	return nil
8414}
8415
8416// SetTdtInterval sets the TdtInterval field's value.
8417func (s *DvbTdtSettings) SetTdtInterval(v int64) *DvbTdtSettings {
8418	s.TdtInterval = &v
8419	return s
8420}
8421
// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value EAC3_ATMOS.
// All fields are pointers so that unset (nil) values can be omitted from the
// serialized API request.
type Eac3AtmosSettings struct {
	_ struct{} `type:"structure"`

	// Specify the average bitrate for this output in bits per second. Valid values:
	// 384k, 448k, 576k, 640k, 768k, 1024k Default value: 448k Note that MediaConvert
	// supports 384k only with channel-based immersive (CBI) 7.1.4 and 5.1.4 inputs.
	// For CBI 9.1.6 and other input types, MediaConvert automatically increases
	// your output bitrate to 448k.
	// Minimum accepted value is 384000 (per the min struct tag); Validate enforces this.
	Bitrate *int64 `locationName:"bitrate" min:"384000" type:"integer"`

	// Specify the bitstream mode for the E-AC-3 stream that the encoder emits.
	// For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex
	// E).
	BitstreamMode *string `locationName:"bitstreamMode" type:"string" enum:"Eac3AtmosBitstreamMode"`

	// The coding mode for Dolby Digital Plus JOC (Atmos).
	CodingMode *string `locationName:"codingMode" type:"string" enum:"Eac3AtmosCodingMode"`

	// Enable Dolby Dialogue Intelligence to adjust loudness based on dialogue analysis.
	DialogueIntelligence *string `locationName:"dialogueIntelligence" type:"string" enum:"Eac3AtmosDialogueIntelligence"`

	// Specify whether MediaConvert should use any downmix metadata from your input
	// file. Keep the default value, Custom (SPECIFIED) to provide downmix values
	// in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use
	// the metadata from your input. Related settings--Use these settings to specify
	// your downmix values: Left only/Right only surround (LoRoSurroundMixLevel),
	// Left total/Right total surround (LtRtSurroundMixLevel), Left total/Right
	// total center (LtRtCenterMixLevel), Left only/Right only center (LoRoCenterMixLevel),
	// and Stereo downmix (StereoDownmix). When you keep Custom (SPECIFIED) for
	// Downmix control (DownmixControl) and you don't specify values for the related
	// settings, MediaConvert uses default values for those settings.
	DownmixControl *string `locationName:"downmixControl" type:"string" enum:"Eac3AtmosDownmixControl"`

	// Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses
	// when encoding the metadata in the Dolby stream for the line operating mode.
	// Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting:
	// To have MediaConvert use the value you specify here, keep the default value,
	// Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl).
	// Otherwise, MediaConvert ignores Dynamic range compression line (DynamicRangeCompressionLine).
	// For information about the Dolby DRC operating modes and profiles, see the
	// Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
	DynamicRangeCompressionLine *string `locationName:"dynamicRangeCompressionLine" type:"string" enum:"Eac3AtmosDynamicRangeCompressionLine"`

	// Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses
	// when encoding the metadata in the Dolby stream for the RF operating mode.
	// Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting:
	// To have MediaConvert use the value you specify here, keep the default value,
	// Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl).
	// Otherwise, MediaConvert ignores Dynamic range compression RF (DynamicRangeCompressionRf).
	// For information about the Dolby DRC operating modes and profiles, see the
	// Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
	DynamicRangeCompressionRf *string `locationName:"dynamicRangeCompressionRf" type:"string" enum:"Eac3AtmosDynamicRangeCompressionRf"`

	// Specify whether MediaConvert should use any dynamic range control metadata
	// from your input file. Keep the default value, Custom (SPECIFIED), to provide
	// dynamic range control values in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE)
	// to use the metadata from your input. Related settings--Use these settings
	// to specify your dynamic range control values: Dynamic range compression line
	// (DynamicRangeCompressionLine) and Dynamic range compression RF (DynamicRangeCompressionRf).
	// When you keep the value Custom (SPECIFIED) for Dynamic range control (DynamicRangeControl)
	// and you don't specify values for the related settings, MediaConvert uses
	// default values for those settings.
	DynamicRangeControl *string `locationName:"dynamicRangeControl" type:"string" enum:"Eac3AtmosDynamicRangeControl"`

	// Specify a value for the following Dolby Atmos setting: Left only/Right only
	// center mix (Lo/Ro center). MediaConvert uses this value for downmixing. Default
	// value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB). Valid values: 3.0, 1.5,
	// 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this
	// value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix).
	// Related setting: To have MediaConvert use this value, keep the default value,
	// Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise,
	// MediaConvert ignores Left only/Right only center (LoRoCenterMixLevel).
	LoRoCenterMixLevel *float64 `locationName:"loRoCenterMixLevel" type:"double"`

	// Specify a value for the following Dolby Atmos setting: Left only/Right only
	// (Lo/Ro surround). MediaConvert uses this value for downmixing. Default value:
	// -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB). Valid values: -1.5, -3.0, -4.5,
	// -6.0, and -60. The value -60 mutes the channel. Related setting: How the
	// service uses this value depends on the value that you choose for Stereo downmix
	// (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this
	// value, keep the default value, Custom (SPECIFIED) for the setting Downmix
	// control (DownmixControl). Otherwise, MediaConvert ignores Left only/Right
	// only surround (LoRoSurroundMixLevel).
	LoRoSurroundMixLevel *float64 `locationName:"loRoSurroundMixLevel" type:"double"`

	// Specify a value for the following Dolby Atmos setting: Left total/Right total
	// center mix (Lt/Rt center). MediaConvert uses this value for downmixing. Default
	// value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB) Valid values: 3.0, 1.5,
	// 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this
	// value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix).
	// Related setting: To have MediaConvert use this value, keep the default value,
	// Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise,
	// MediaConvert ignores Left total/Right total center (LtRtCenterMixLevel).
	LtRtCenterMixLevel *float64 `locationName:"ltRtCenterMixLevel" type:"double"`

	// Specify a value for the following Dolby Atmos setting: Left total/Right total
	// surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing.
	// Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB) Valid values:
	// -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related
	// setting: How the service uses this value depends on the value that you choose
	// for Stereo downmix (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert
	// use this value, keep the default value, Custom (SPECIFIED) for the setting
	// Downmix control (DownmixControl). Otherwise, the service ignores Left total/Right
	// total surround (LtRtSurroundMixLevel).
	LtRtSurroundMixLevel *float64 `locationName:"ltRtSurroundMixLevel" type:"double"`

	// Choose how the service meters the loudness of your audio.
	MeteringMode *string `locationName:"meteringMode" type:"string" enum:"Eac3AtmosMeteringMode"`

	// This value is always 48000. It represents the sample rate in Hz.
	// Minimum accepted value is 48000 (per the min struct tag); Validate enforces this.
	SampleRate *int64 `locationName:"sampleRate" min:"48000" type:"integer"`

	// Specify the percentage of audio content, from 0% to 100%, that must be speech
	// in order for the encoder to use the measured speech loudness as the overall
	// program loudness. Default value: 15%
	SpeechThreshold *int64 `locationName:"speechThreshold" type:"integer"`

	// Choose how the service does stereo downmixing. Default value: Not indicated
	// (ATMOS_STORAGE_DDP_DMIXMOD_NOT_INDICATED) Related setting: To have MediaConvert
	// use this value, keep the default value, Custom (SPECIFIED) for the setting
	// Downmix control (DownmixControl). Otherwise, MediaConvert ignores Stereo
	// downmix (StereoDownmix).
	StereoDownmix *string `locationName:"stereoDownmix" type:"string" enum:"Eac3AtmosStereoDownmix"`

	// Specify whether your input audio has an additional center rear surround channel
	// matrix encoded into your left and right surround channels.
	SurroundExMode *string `locationName:"surroundExMode" type:"string" enum:"Eac3AtmosSurroundExMode"`
}
8552
8553// String returns the string representation
8554func (s Eac3AtmosSettings) String() string {
8555	return awsutil.Prettify(s)
8556}
8557
8558// GoString returns the string representation
8559func (s Eac3AtmosSettings) GoString() string {
8560	return s.String()
8561}
8562
8563// Validate inspects the fields of the type to determine if they are valid.
8564func (s *Eac3AtmosSettings) Validate() error {
8565	invalidParams := request.ErrInvalidParams{Context: "Eac3AtmosSettings"}
8566	if s.Bitrate != nil && *s.Bitrate < 384000 {
8567		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 384000))
8568	}
8569	if s.SampleRate != nil && *s.SampleRate < 48000 {
8570		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 48000))
8571	}
8572
8573	if invalidParams.Len() > 0 {
8574		return invalidParams
8575	}
8576	return nil
8577}
8578
8579// SetBitrate sets the Bitrate field's value.
8580func (s *Eac3AtmosSettings) SetBitrate(v int64) *Eac3AtmosSettings {
8581	s.Bitrate = &v
8582	return s
8583}
8584
8585// SetBitstreamMode sets the BitstreamMode field's value.
8586func (s *Eac3AtmosSettings) SetBitstreamMode(v string) *Eac3AtmosSettings {
8587	s.BitstreamMode = &v
8588	return s
8589}
8590
8591// SetCodingMode sets the CodingMode field's value.
8592func (s *Eac3AtmosSettings) SetCodingMode(v string) *Eac3AtmosSettings {
8593	s.CodingMode = &v
8594	return s
8595}
8596
8597// SetDialogueIntelligence sets the DialogueIntelligence field's value.
8598func (s *Eac3AtmosSettings) SetDialogueIntelligence(v string) *Eac3AtmosSettings {
8599	s.DialogueIntelligence = &v
8600	return s
8601}
8602
8603// SetDownmixControl sets the DownmixControl field's value.
8604func (s *Eac3AtmosSettings) SetDownmixControl(v string) *Eac3AtmosSettings {
8605	s.DownmixControl = &v
8606	return s
8607}
8608
8609// SetDynamicRangeCompressionLine sets the DynamicRangeCompressionLine field's value.
8610func (s *Eac3AtmosSettings) SetDynamicRangeCompressionLine(v string) *Eac3AtmosSettings {
8611	s.DynamicRangeCompressionLine = &v
8612	return s
8613}
8614
8615// SetDynamicRangeCompressionRf sets the DynamicRangeCompressionRf field's value.
8616func (s *Eac3AtmosSettings) SetDynamicRangeCompressionRf(v string) *Eac3AtmosSettings {
8617	s.DynamicRangeCompressionRf = &v
8618	return s
8619}
8620
8621// SetDynamicRangeControl sets the DynamicRangeControl field's value.
8622func (s *Eac3AtmosSettings) SetDynamicRangeControl(v string) *Eac3AtmosSettings {
8623	s.DynamicRangeControl = &v
8624	return s
8625}
8626
8627// SetLoRoCenterMixLevel sets the LoRoCenterMixLevel field's value.
8628func (s *Eac3AtmosSettings) SetLoRoCenterMixLevel(v float64) *Eac3AtmosSettings {
8629	s.LoRoCenterMixLevel = &v
8630	return s
8631}
8632
8633// SetLoRoSurroundMixLevel sets the LoRoSurroundMixLevel field's value.
8634func (s *Eac3AtmosSettings) SetLoRoSurroundMixLevel(v float64) *Eac3AtmosSettings {
8635	s.LoRoSurroundMixLevel = &v
8636	return s
8637}
8638
8639// SetLtRtCenterMixLevel sets the LtRtCenterMixLevel field's value.
8640func (s *Eac3AtmosSettings) SetLtRtCenterMixLevel(v float64) *Eac3AtmosSettings {
8641	s.LtRtCenterMixLevel = &v
8642	return s
8643}
8644
8645// SetLtRtSurroundMixLevel sets the LtRtSurroundMixLevel field's value.
8646func (s *Eac3AtmosSettings) SetLtRtSurroundMixLevel(v float64) *Eac3AtmosSettings {
8647	s.LtRtSurroundMixLevel = &v
8648	return s
8649}
8650
8651// SetMeteringMode sets the MeteringMode field's value.
8652func (s *Eac3AtmosSettings) SetMeteringMode(v string) *Eac3AtmosSettings {
8653	s.MeteringMode = &v
8654	return s
8655}
8656
8657// SetSampleRate sets the SampleRate field's value.
8658func (s *Eac3AtmosSettings) SetSampleRate(v int64) *Eac3AtmosSettings {
8659	s.SampleRate = &v
8660	return s
8661}
8662
8663// SetSpeechThreshold sets the SpeechThreshold field's value.
8664func (s *Eac3AtmosSettings) SetSpeechThreshold(v int64) *Eac3AtmosSettings {
8665	s.SpeechThreshold = &v
8666	return s
8667}
8668
8669// SetStereoDownmix sets the StereoDownmix field's value.
8670func (s *Eac3AtmosSettings) SetStereoDownmix(v string) *Eac3AtmosSettings {
8671	s.StereoDownmix = &v
8672	return s
8673}
8674
8675// SetSurroundExMode sets the SurroundExMode field's value.
8676func (s *Eac3AtmosSettings) SetSurroundExMode(v string) *Eac3AtmosSettings {
8677	s.SurroundExMode = &v
8678	return s
8679}
8680
// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value EAC3.
// All fields are pointers so that unset (nil) values can be omitted from the
// serialized API request.
type Eac3Settings struct {
	_ struct{} `type:"structure"`

	// If set to ATTENUATE_3_DB, applies a 3 dB attenuation to the surround channels.
	// Only used for 3/2 coding mode.
	AttenuationControl *string `locationName:"attenuationControl" type:"string" enum:"Eac3AttenuationControl"`

	// Specify the average bitrate in bits per second. Valid bitrates depend on
	// the coding mode.
	// Minimum accepted value is 64000 (per the min struct tag); Validate enforces this.
	Bitrate *int64 `locationName:"bitrate" min:"64000" type:"integer"`

	// Specify the bitstream mode for the E-AC-3 stream that the encoder emits.
	// For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex
	// E).
	BitstreamMode *string `locationName:"bitstreamMode" type:"string" enum:"Eac3BitstreamMode"`

	// Dolby Digital Plus coding mode. Determines number of channels.
	CodingMode *string `locationName:"codingMode" type:"string" enum:"Eac3CodingMode"`

	// Activates a DC highpass filter for all input channels.
	DcFilter *string `locationName:"dcFilter" type:"string" enum:"Eac3DcFilter"`

	// Sets the dialnorm for the output. If blank and input audio is Dolby Digital
	// Plus, dialnorm will be passed through.
	// Minimum accepted value is 1 (per the min struct tag); Validate enforces this.
	Dialnorm *int64 `locationName:"dialnorm" min:"1" type:"integer"`

	// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
	// uses when encoding the metadata in the Dolby Digital stream for the line
	// operating mode. Related setting: When you use this setting, MediaConvert
	// ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile).
	// For information about the Dolby Digital DRC operating modes and profiles,
	// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
	DynamicRangeCompressionLine *string `locationName:"dynamicRangeCompressionLine" type:"string" enum:"Eac3DynamicRangeCompressionLine"`

	// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
	// uses when encoding the metadata in the Dolby Digital stream for the RF operating
	// mode. Related setting: When you use this setting, MediaConvert ignores any
	// value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile).
	// For information about the Dolby Digital DRC operating modes and profiles,
	// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
	DynamicRangeCompressionRf *string `locationName:"dynamicRangeCompressionRf" type:"string" enum:"Eac3DynamicRangeCompressionRf"`

	// When encoding 3/2 audio, controls whether the LFE channel is enabled
	LfeControl *string `locationName:"lfeControl" type:"string" enum:"Eac3LfeControl"`

	// Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only
	// valid with 3_2_LFE coding mode.
	LfeFilter *string `locationName:"lfeFilter" type:"string" enum:"Eac3LfeFilter"`

	// Specify a value for the following Dolby Digital Plus setting: Left only/Right
	// only center mix (Lo/Ro center). MediaConvert uses this value for downmixing.
	// How the service uses this value depends on the value that you choose for
	// Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0,
	// -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies
	// only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2)
	// for the setting Coding mode (Eac3CodingMode). If you choose a different value
	// for Coding mode, the service ignores Left only/Right only center (loRoCenterMixLevel).
	LoRoCenterMixLevel *float64 `locationName:"loRoCenterMixLevel" type:"double"`

	// Specify a value for the following Dolby Digital Plus setting: Left only/Right
	// only (Lo/Ro surround). MediaConvert uses this value for downmixing. How the
	// service uses this value depends on the value that you choose for Stereo downmix
	// (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value
	// -60 mutes the channel. This setting applies only if you keep the default
	// value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode
	// (Eac3CodingMode). If you choose a different value for Coding mode, the service
	// ignores Left only/Right only surround (loRoSurroundMixLevel).
	LoRoSurroundMixLevel *float64 `locationName:"loRoSurroundMixLevel" type:"double"`

	// Specify a value for the following Dolby Digital Plus setting: Left total/Right
	// total center mix (Lt/Rt center). MediaConvert uses this value for downmixing.
	// How the service uses this value depends on the value that you choose for
	// Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0,
	// -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies
	// only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2)
	// for the setting Coding mode (Eac3CodingMode). If you choose a different value
	// for Coding mode, the service ignores Left total/Right total center (ltRtCenterMixLevel).
	LtRtCenterMixLevel *float64 `locationName:"ltRtCenterMixLevel" type:"double"`

	// Specify a value for the following Dolby Digital Plus setting: Left total/Right
	// total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing.
	// How the service uses this value depends on the value that you choose for
	// Stereo downmix (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0,
	// and -60. The value -60 mutes the channel. This setting applies only if you
	// keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the
	// setting Coding mode (Eac3CodingMode). If you choose a different value for
	// Coding mode, the service ignores Left total/Right total surround (ltRtSurroundMixLevel).
	LtRtSurroundMixLevel *float64 `locationName:"ltRtSurroundMixLevel" type:"double"`

	// When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+,
	// or DolbyE decoder that supplied this audio data. If audio was not supplied
	// from one of these streams, then the static metadata settings will be used.
	MetadataControl *string `locationName:"metadataControl" type:"string" enum:"Eac3MetadataControl"`

	// When set to WHEN_POSSIBLE, input DD+ audio will be passed through if it is
	// present on the input. this detection is dynamic over the life of the transcode.
	// Inputs that alternate between DD+ and non-DD+ content will have a consistent
	// DD+ output as the system alternates between passthrough and encoding.
	PassthroughControl *string `locationName:"passthroughControl" type:"string" enum:"Eac3PassthroughControl"`

	// Controls the amount of phase-shift applied to the surround channels. Only
	// used for 3/2 coding mode.
	PhaseControl *string `locationName:"phaseControl" type:"string" enum:"Eac3PhaseControl"`

	// This value is always 48000. It represents the sample rate in Hz.
	// Minimum accepted value is 48000 (per the min struct tag); Validate enforces this.
	SampleRate *int64 `locationName:"sampleRate" min:"48000" type:"integer"`

	// Choose how the service does stereo downmixing. This setting only applies
	// if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2)
	// for the setting Coding mode (Eac3CodingMode). If you choose a different value
	// for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix).
	StereoDownmix *string `locationName:"stereoDownmix" type:"string" enum:"Eac3StereoDownmix"`

	// When encoding 3/2 audio, sets whether an extra center back surround channel
	// is matrix encoded into the left and right surround channels.
	SurroundExMode *string `locationName:"surroundExMode" type:"string" enum:"Eac3SurroundExMode"`

	// When encoding 2/0 audio, sets whether Dolby Surround is matrix encoded into
	// the two channels.
	SurroundMode *string `locationName:"surroundMode" type:"string" enum:"Eac3SurroundMode"`
}
8804
// String returns a human-readable representation of the settings, built with
// awsutil.Prettify.
func (s Eac3Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s Eac3Settings) GoString() string {
	return s.String()
}
8814
8815// Validate inspects the fields of the type to determine if they are valid.
8816func (s *Eac3Settings) Validate() error {
8817	invalidParams := request.ErrInvalidParams{Context: "Eac3Settings"}
8818	if s.Bitrate != nil && *s.Bitrate < 64000 {
8819		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 64000))
8820	}
8821	if s.Dialnorm != nil && *s.Dialnorm < 1 {
8822		invalidParams.Add(request.NewErrParamMinValue("Dialnorm", 1))
8823	}
8824	if s.SampleRate != nil && *s.SampleRate < 48000 {
8825		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 48000))
8826	}
8827
8828	if invalidParams.Len() > 0 {
8829		return invalidParams
8830	}
8831	return nil
8832}
8833
8834// SetAttenuationControl sets the AttenuationControl field's value.
8835func (s *Eac3Settings) SetAttenuationControl(v string) *Eac3Settings {
8836	s.AttenuationControl = &v
8837	return s
8838}
8839
8840// SetBitrate sets the Bitrate field's value.
8841func (s *Eac3Settings) SetBitrate(v int64) *Eac3Settings {
8842	s.Bitrate = &v
8843	return s
8844}
8845
8846// SetBitstreamMode sets the BitstreamMode field's value.
8847func (s *Eac3Settings) SetBitstreamMode(v string) *Eac3Settings {
8848	s.BitstreamMode = &v
8849	return s
8850}
8851
8852// SetCodingMode sets the CodingMode field's value.
8853func (s *Eac3Settings) SetCodingMode(v string) *Eac3Settings {
8854	s.CodingMode = &v
8855	return s
8856}
8857
8858// SetDcFilter sets the DcFilter field's value.
8859func (s *Eac3Settings) SetDcFilter(v string) *Eac3Settings {
8860	s.DcFilter = &v
8861	return s
8862}
8863
8864// SetDialnorm sets the Dialnorm field's value.
8865func (s *Eac3Settings) SetDialnorm(v int64) *Eac3Settings {
8866	s.Dialnorm = &v
8867	return s
8868}
8869
8870// SetDynamicRangeCompressionLine sets the DynamicRangeCompressionLine field's value.
8871func (s *Eac3Settings) SetDynamicRangeCompressionLine(v string) *Eac3Settings {
8872	s.DynamicRangeCompressionLine = &v
8873	return s
8874}
8875
8876// SetDynamicRangeCompressionRf sets the DynamicRangeCompressionRf field's value.
8877func (s *Eac3Settings) SetDynamicRangeCompressionRf(v string) *Eac3Settings {
8878	s.DynamicRangeCompressionRf = &v
8879	return s
8880}
8881
8882// SetLfeControl sets the LfeControl field's value.
8883func (s *Eac3Settings) SetLfeControl(v string) *Eac3Settings {
8884	s.LfeControl = &v
8885	return s
8886}
8887
8888// SetLfeFilter sets the LfeFilter field's value.
8889func (s *Eac3Settings) SetLfeFilter(v string) *Eac3Settings {
8890	s.LfeFilter = &v
8891	return s
8892}
8893
8894// SetLoRoCenterMixLevel sets the LoRoCenterMixLevel field's value.
8895func (s *Eac3Settings) SetLoRoCenterMixLevel(v float64) *Eac3Settings {
8896	s.LoRoCenterMixLevel = &v
8897	return s
8898}
8899
8900// SetLoRoSurroundMixLevel sets the LoRoSurroundMixLevel field's value.
8901func (s *Eac3Settings) SetLoRoSurroundMixLevel(v float64) *Eac3Settings {
8902	s.LoRoSurroundMixLevel = &v
8903	return s
8904}
8905
8906// SetLtRtCenterMixLevel sets the LtRtCenterMixLevel field's value.
8907func (s *Eac3Settings) SetLtRtCenterMixLevel(v float64) *Eac3Settings {
8908	s.LtRtCenterMixLevel = &v
8909	return s
8910}
8911
8912// SetLtRtSurroundMixLevel sets the LtRtSurroundMixLevel field's value.
8913func (s *Eac3Settings) SetLtRtSurroundMixLevel(v float64) *Eac3Settings {
8914	s.LtRtSurroundMixLevel = &v
8915	return s
8916}
8917
8918// SetMetadataControl sets the MetadataControl field's value.
8919func (s *Eac3Settings) SetMetadataControl(v string) *Eac3Settings {
8920	s.MetadataControl = &v
8921	return s
8922}
8923
8924// SetPassthroughControl sets the PassthroughControl field's value.
8925func (s *Eac3Settings) SetPassthroughControl(v string) *Eac3Settings {
8926	s.PassthroughControl = &v
8927	return s
8928}
8929
8930// SetPhaseControl sets the PhaseControl field's value.
8931func (s *Eac3Settings) SetPhaseControl(v string) *Eac3Settings {
8932	s.PhaseControl = &v
8933	return s
8934}
8935
8936// SetSampleRate sets the SampleRate field's value.
8937func (s *Eac3Settings) SetSampleRate(v int64) *Eac3Settings {
8938	s.SampleRate = &v
8939	return s
8940}
8941
8942// SetStereoDownmix sets the StereoDownmix field's value.
8943func (s *Eac3Settings) SetStereoDownmix(v string) *Eac3Settings {
8944	s.StereoDownmix = &v
8945	return s
8946}
8947
8948// SetSurroundExMode sets the SurroundExMode field's value.
8949func (s *Eac3Settings) SetSurroundExMode(v string) *Eac3Settings {
8950	s.SurroundExMode = &v
8951	return s
8952}
8953
8954// SetSurroundMode sets the SurroundMode field's value.
8955func (s *Eac3Settings) SetSurroundMode(v string) *Eac3Settings {
8956	s.SurroundMode = &v
8957	return s
8958}
8959
// Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or
// ancillary) captions. Set up embedded captions in the same output as your
// video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set destinationType to EMBEDDED, EMBEDDED_PLUS_SCTE20,
// or SCTE20_PLUS_EMBEDDED.
type EmbeddedDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Ignore this setting unless your input captions are SCC format and your output
	// captions are embedded in the video stream. Specify a CC number for each captions
	// channel in this output. If you have two channels, choose CC numbers that
	// aren't in the same field. For example, choose 1 and 3. For more information,
	// see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded.
	// The minimum accepted value is 1; Validate enforces this.
	Destination608ChannelNumber *int64 `locationName:"destination608ChannelNumber" min:"1" type:"integer"`

	// Ignore this setting unless your input captions are SCC format and you want
	// both 608 and 708 captions embedded in your output stream. Optionally, specify
	// the 708 service number for each output captions channel. Choose a different
	// number for each channel. To use this setting, also set Force 608 to 708 upconvert
	// (Convert608To708) to Upconvert (UPCONVERT) in your input captions selector
	// settings. If you choose to upconvert but don't specify a 708 service number,
	// MediaConvert uses the number that you specify for CC channel number (destination608ChannelNumber)
	// for the 708 service number. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded.
	// The minimum accepted value is 1; Validate enforces this.
	Destination708ServiceNumber *int64 `locationName:"destination708ServiceNumber" min:"1" type:"integer"`
}

// String returns a human-readable representation of the settings, built with
// awsutil.Prettify.
func (s EmbeddedDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s EmbeddedDestinationSettings) GoString() string {
	return s.String()
}
8996
8997// Validate inspects the fields of the type to determine if they are valid.
8998func (s *EmbeddedDestinationSettings) Validate() error {
8999	invalidParams := request.ErrInvalidParams{Context: "EmbeddedDestinationSettings"}
9000	if s.Destination608ChannelNumber != nil && *s.Destination608ChannelNumber < 1 {
9001		invalidParams.Add(request.NewErrParamMinValue("Destination608ChannelNumber", 1))
9002	}
9003	if s.Destination708ServiceNumber != nil && *s.Destination708ServiceNumber < 1 {
9004		invalidParams.Add(request.NewErrParamMinValue("Destination708ServiceNumber", 1))
9005	}
9006
9007	if invalidParams.Len() > 0 {
9008		return invalidParams
9009	}
9010	return nil
9011}
9012
9013// SetDestination608ChannelNumber sets the Destination608ChannelNumber field's value.
9014func (s *EmbeddedDestinationSettings) SetDestination608ChannelNumber(v int64) *EmbeddedDestinationSettings {
9015	s.Destination608ChannelNumber = &v
9016	return s
9017}
9018
9019// SetDestination708ServiceNumber sets the Destination708ServiceNumber field's value.
9020func (s *EmbeddedDestinationSettings) SetDestination708ServiceNumber(v int64) *EmbeddedDestinationSettings {
9021	s.Destination708ServiceNumber = &v
9022	return s
9023}
9024
// Settings for embedded captions Source.
type EmbeddedSourceSettings struct {
	_ struct{} `type:"structure"`

	// Specify whether this set of input captions appears in your outputs in both
	// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes
	// the captions data in two ways: it passes the 608 data through using the 608
	// compatibility bytes fields of the 708 wrapper, and it also translates the
	// 608 data into 708.
	Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"EmbeddedConvert608To708"`

	// Specifies the 608/708 channel number within the video track from which to
	// extract captions. Unused for passthrough. The minimum accepted value is 1;
	// Validate enforces this.
	Source608ChannelNumber *int64 `locationName:"source608ChannelNumber" min:"1" type:"integer"`

	// Specifies the video track index used for extracting captions. The system
	// only supports one input video track, so this should always be set to '1'.
	Source608TrackNumber *int64 `locationName:"source608TrackNumber" min:"1" type:"integer"`

	// By default, the service terminates any unterminated captions at the end of
	// each input. If you want the caption to continue onto your next input, disable
	// this setting.
	TerminateCaptions *string `locationName:"terminateCaptions" type:"string" enum:"EmbeddedTerminateCaptions"`
}

// String returns a human-readable representation of the settings, built with
// awsutil.Prettify.
func (s EmbeddedSourceSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s EmbeddedSourceSettings) GoString() string {
	return s.String()
}
9059
9060// Validate inspects the fields of the type to determine if they are valid.
9061func (s *EmbeddedSourceSettings) Validate() error {
9062	invalidParams := request.ErrInvalidParams{Context: "EmbeddedSourceSettings"}
9063	if s.Source608ChannelNumber != nil && *s.Source608ChannelNumber < 1 {
9064		invalidParams.Add(request.NewErrParamMinValue("Source608ChannelNumber", 1))
9065	}
9066	if s.Source608TrackNumber != nil && *s.Source608TrackNumber < 1 {
9067		invalidParams.Add(request.NewErrParamMinValue("Source608TrackNumber", 1))
9068	}
9069
9070	if invalidParams.Len() > 0 {
9071		return invalidParams
9072	}
9073	return nil
9074}
9075
9076// SetConvert608To708 sets the Convert608To708 field's value.
9077func (s *EmbeddedSourceSettings) SetConvert608To708(v string) *EmbeddedSourceSettings {
9078	s.Convert608To708 = &v
9079	return s
9080}
9081
9082// SetSource608ChannelNumber sets the Source608ChannelNumber field's value.
9083func (s *EmbeddedSourceSettings) SetSource608ChannelNumber(v int64) *EmbeddedSourceSettings {
9084	s.Source608ChannelNumber = &v
9085	return s
9086}
9087
9088// SetSource608TrackNumber sets the Source608TrackNumber field's value.
9089func (s *EmbeddedSourceSettings) SetSource608TrackNumber(v int64) *EmbeddedSourceSettings {
9090	s.Source608TrackNumber = &v
9091	return s
9092}
9093
9094// SetTerminateCaptions sets the TerminateCaptions field's value.
9095func (s *EmbeddedSourceSettings) SetTerminateCaptions(v string) *EmbeddedSourceSettings {
9096	s.TerminateCaptions = &v
9097	return s
9098}
9099
// Describes an account-specific API endpoint.
type Endpoint struct {
	_ struct{} `type:"structure"`

	// URL of the endpoint.
	Url *string `locationName:"url" type:"string"`
}

// String returns a human-readable representation of the endpoint, built with
// awsutil.Prettify.
func (s Endpoint) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s Endpoint) GoString() string {
	return s.String()
}
9117
9118// SetUrl sets the Url field's value.
9119func (s *Endpoint) SetUrl(v string) *Endpoint {
9120	s.Url = &v
9121	return s
9122}
9123
// ESAM ManifestConfirmConditionNotification defined by OC-SP-ESAM-API-I03-131025.
type EsamManifestConfirmConditionNotification struct {
	_ struct{} `type:"structure"`

	// Provide your ESAM ManifestConfirmConditionNotification XML document inside
	// your JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025.
	// The transcoder will use the Manifest Conditioning instructions in the message
	// that you supply.
	MccXml *string `locationName:"mccXml" type:"string"`
}

// String returns a human-readable representation of the notification, built
// with awsutil.Prettify.
func (s EsamManifestConfirmConditionNotification) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s EsamManifestConfirmConditionNotification) GoString() string {
	return s.String()
}
9144
9145// SetMccXml sets the MccXml field's value.
9146func (s *EsamManifestConfirmConditionNotification) SetMccXml(v string) *EsamManifestConfirmConditionNotification {
9147	s.MccXml = &v
9148	return s
9149}
9150
// Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion,
// you can ignore these settings.
type EsamSettings struct {
	_ struct{} `type:"structure"`

	// Specifies an ESAM ManifestConfirmConditionNotification XML as per OC-SP-ESAM-API-I03-131025.
	// The transcoder uses the manifest conditioning instructions that you provide
	// in the setting MCC XML (mccXml).
	ManifestConfirmConditionNotification *EsamManifestConfirmConditionNotification `locationName:"manifestConfirmConditionNotification" type:"structure"`

	// Specifies the stream distance, in milliseconds, between the SCTE 35 messages
	// that the transcoder places and the splice points that they refer to. If the
	// time between the start of the asset and the SCTE-35 message is less than
	// this value, then the transcoder places the SCTE-35 marker at the beginning
	// of the stream.
	ResponseSignalPreroll *int64 `locationName:"responseSignalPreroll" type:"integer"`

	// Specifies an ESAM SignalProcessingNotification XML as per OC-SP-ESAM-API-I03-131025.
	// The transcoder uses the signal processing instructions that you provide in
	// the setting SCC XML (sccXml).
	SignalProcessingNotification *EsamSignalProcessingNotification `locationName:"signalProcessingNotification" type:"structure"`
}

// String returns a human-readable representation of the settings, built with
// awsutil.Prettify.
func (s EsamSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s EsamSettings) GoString() string {
	return s.String()
}
9183
// SetManifestConfirmConditionNotification sets the ManifestConfirmConditionNotification
// field's value. It returns s so that setter calls can be chained.
func (s *EsamSettings) SetManifestConfirmConditionNotification(v *EsamManifestConfirmConditionNotification) *EsamSettings {
	s.ManifestConfirmConditionNotification = v
	return s
}

// SetResponseSignalPreroll sets the ResponseSignalPreroll field's value.
// It returns s so that setter calls can be chained.
func (s *EsamSettings) SetResponseSignalPreroll(v int64) *EsamSettings {
	s.ResponseSignalPreroll = &v
	return s
}

// SetSignalProcessingNotification sets the SignalProcessingNotification field's
// value. It returns s so that setter calls can be chained.
func (s *EsamSettings) SetSignalProcessingNotification(v *EsamSignalProcessingNotification) *EsamSettings {
	s.SignalProcessingNotification = v
	return s
}
9201
// ESAM SignalProcessingNotification data defined by OC-SP-ESAM-API-I03-131025.
type EsamSignalProcessingNotification struct {
	_ struct{} `type:"structure"`

	// Provide your ESAM SignalProcessingNotification XML document inside your JSON
	// job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The
	// transcoder will use the signal processing instructions in the message that
	// you supply. Provide your ESAM SignalProcessingNotification XML document inside
	// your JSON job settings. For your MPEG2-TS file outputs, if you want the service
	// to place SCTE-35 markers at the insertion points you specify in the XML document,
	// you must also enable SCTE-35 ESAM (scte35Esam). Note that you can either
	// specify an ESAM XML document or enable SCTE-35 passthrough. You can't do
	// both.
	SccXml *string `locationName:"sccXml" type:"string"`
}

// String returns a human-readable representation of the notification, built
// with awsutil.Prettify.
func (s EsamSignalProcessingNotification) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s EsamSignalProcessingNotification) GoString() string {
	return s.String()
}
9227
9228// SetSccXml sets the SccXml field's value.
9229func (s *EsamSignalProcessingNotification) SetSccXml(v string) *EsamSignalProcessingNotification {
9230	s.SccXml = &v
9231	return s
9232}
9233
// Hexadecimal value as per EIA-608 Line 21 Data Services, section 9.5.1.5 05h
// Content Advisory.
type ExtendedDataServices struct {
	_ struct{} `type:"structure"`

	// The action to take on copy and redistribution control XDS packets. If you
	// select PASSTHROUGH, packets will not be changed. If you select STRIP, any
	// packets will be removed in output captions.
	CopyProtectionAction *string `locationName:"copyProtectionAction" type:"string" enum:"CopyProtectionAction"`

	// The action to take on content advisory XDS packets. If you select PASSTHROUGH,
	// packets will not be changed. If you select STRIP, any packets will be removed
	// in output captions.
	VchipAction *string `locationName:"vchipAction" type:"string" enum:"VchipAction"`
}

// String returns a human-readable representation of the settings, built with
// awsutil.Prettify.
func (s ExtendedDataServices) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s ExtendedDataServices) GoString() string {
	return s.String()
}
9259
9260// SetCopyProtectionAction sets the CopyProtectionAction field's value.
9261func (s *ExtendedDataServices) SetCopyProtectionAction(v string) *ExtendedDataServices {
9262	s.CopyProtectionAction = &v
9263	return s
9264}
9265
9266// SetVchipAction sets the VchipAction field's value.
9267func (s *ExtendedDataServices) SetVchipAction(v string) *ExtendedDataServices {
9268	s.VchipAction = &v
9269	return s
9270}
9271
// Settings for F4v container.
type F4vSettings struct {
	_ struct{} `type:"structure"`

	// If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning
	// of the archive as required for progressive downloading. Otherwise it is placed
	// normally at the end.
	MoovPlacement *string `locationName:"moovPlacement" type:"string" enum:"F4vMoovPlacement"`
}

// String returns a human-readable representation of the settings, built with
// awsutil.Prettify.
func (s F4vSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s F4vSettings) GoString() string {
	return s.String()
}
9291
9292// SetMoovPlacement sets the MoovPlacement field's value.
9293func (s *F4vSettings) SetMoovPlacement(v string) *F4vSettings {
9294	s.MoovPlacement = &v
9295	return s
9296}
9297
// Settings related to your File output group. MediaConvert uses this group
// of settings to generate a single standalone file, rather than a streaming
// package. When you work directly in your JSON job specification, include this
// object and any required children when you set Type, under OutputGroupSettings,
// to FILE_GROUP_SETTINGS.
type FileGroupSettings struct {
	_ struct{} `type:"structure"`

	// Use Destination (Destination) to specify the S3 output location and the output
	// filename base. Destination accepts format identifiers. If you do not specify
	// the base filename in the URI, the service will use the filename of the input
	// file. If your job has multiple inputs, the service uses the filename of the
	// first input file.
	Destination *string `locationName:"destination" type:"string"`

	// Settings associated with the destination. Will vary based on the type of
	// destination.
	DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"`
}

// String returns a human-readable representation of the settings, built with
// awsutil.Prettify.
func (s FileGroupSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s FileGroupSettings) GoString() string {
	return s.String()
}
9327
// SetDestination sets the Destination field's value. It returns s so that
// setter calls can be chained.
func (s *FileGroupSettings) SetDestination(v string) *FileGroupSettings {
	s.Destination = &v
	return s
}

// SetDestinationSettings sets the DestinationSettings field's value. It returns
// s so that setter calls can be chained.
func (s *FileGroupSettings) SetDestinationSettings(v *DestinationSettings) *FileGroupSettings {
	s.DestinationSettings = v
	return s
}
9339
// If your input captions are SCC, SMI, SRT, STL, TTML, WebVTT, or IMSC 1.1
// in an xml file, specify the URI of the input caption source file. If your
// caption source is IMSC in an IMF package, use TrackSourceSettings instead
// of FileSourceSettings.
type FileSourceSettings struct {
	_ struct{} `type:"structure"`

	// Specify whether this set of input captions appears in your outputs in both
	// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes
	// the captions data in two ways: it passes the 608 data through using the 608
	// compatibility bytes fields of the 708 wrapper, and it also translates the
	// 608 data into 708.
	Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"FileSourceConvert608To708"`

	// Ignore this setting unless your input captions format is SCC. To have the
	// service compensate for differing frame rates between your input captions
	// and input video, specify the frame rate of the captions file. Specify this
	// value as a fraction, using the settings Framerate numerator (framerateNumerator)
	// and Framerate denominator (framerateDenominator). For example, you might
	// specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps,
	// or 30000 / 1001 for 29.97 fps.
	Framerate *CaptionSourceFramerate `locationName:"framerate" type:"structure"`

	// External caption file used for loading captions. Accepted file extensions
	// are 'scc', 'ttml', 'dfxp', 'stl', 'srt', 'xml', 'smi', 'webvtt', and 'vtt'.
	// The minimum accepted length is 14 characters; Validate enforces this.
	SourceFile *string `locationName:"sourceFile" min:"14" type:"string"`

	// Specifies a time delta in seconds to offset the captions from the source
	// file.
	TimeDelta *int64 `locationName:"timeDelta" type:"integer"`
}

// String returns a human-readable representation of the settings, built with
// awsutil.Prettify.
func (s FileSourceSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s FileSourceSettings) GoString() string {
	return s.String()
}
9381
9382// Validate inspects the fields of the type to determine if they are valid.
9383func (s *FileSourceSettings) Validate() error {
9384	invalidParams := request.ErrInvalidParams{Context: "FileSourceSettings"}
9385	if s.SourceFile != nil && len(*s.SourceFile) < 14 {
9386		invalidParams.Add(request.NewErrParamMinLen("SourceFile", 14))
9387	}
9388	if s.TimeDelta != nil && *s.TimeDelta < -2.147483648e+09 {
9389		invalidParams.Add(request.NewErrParamMinValue("TimeDelta", -2.147483648e+09))
9390	}
9391	if s.Framerate != nil {
9392		if err := s.Framerate.Validate(); err != nil {
9393			invalidParams.AddNested("Framerate", err.(request.ErrInvalidParams))
9394		}
9395	}
9396
9397	if invalidParams.Len() > 0 {
9398		return invalidParams
9399	}
9400	return nil
9401}
9402
// SetConvert608To708 sets the Convert608To708 field's value. It returns s
// so that setter calls can be chained.
func (s *FileSourceSettings) SetConvert608To708(v string) *FileSourceSettings {
	s.Convert608To708 = &v
	return s
}

// SetFramerate sets the Framerate field's value. It returns s so that setter
// calls can be chained.
func (s *FileSourceSettings) SetFramerate(v *CaptionSourceFramerate) *FileSourceSettings {
	s.Framerate = v
	return s
}

// SetSourceFile sets the SourceFile field's value. It returns s so that setter
// calls can be chained.
func (s *FileSourceSettings) SetSourceFile(v string) *FileSourceSettings {
	s.SourceFile = &v
	return s
}

// SetTimeDelta sets the TimeDelta field's value. It returns s so that setter
// calls can be chained.
func (s *FileSourceSettings) SetTimeDelta(v int64) *FileSourceSettings {
	s.TimeDelta = &v
	return s
}
9426
// ForbiddenException is returned by the service when the caller is not
// authorized to perform the requested operation.
type ForbiddenException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	// Message_ holds the service-provided error message, if any.
	Message_ *string `locationName:"message" type:"string"`
}

// String returns a human-readable representation of the exception, built with
// awsutil.Prettify.
func (s ForbiddenException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s ForbiddenException) GoString() string {
	return s.String()
}
9443
9444func newErrorForbiddenException(v protocol.ResponseMetadata) error {
9445	return &ForbiddenException{
9446		RespMetadata: v,
9447	}
9448}
9449
9450// Code returns the exception type name.
9451func (s *ForbiddenException) Code() string {
9452	return "ForbiddenException"
9453}
9454
9455// Message returns the exception's message.
9456func (s *ForbiddenException) Message() string {
9457	if s.Message_ != nil {
9458		return *s.Message_
9459	}
9460	return ""
9461}
9462
9463// OrigErr always returns nil, satisfies awserr.Error interface.
9464func (s *ForbiddenException) OrigErr() error {
9465	return nil
9466}
9467
9468func (s *ForbiddenException) Error() string {
9469	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
9470}
9471
9472// Status code returns the HTTP status code for the request's response error.
9473func (s *ForbiddenException) StatusCode() int {
9474	return s.RespMetadata.StatusCode
9475}
9476
9477// RequestID returns the service's response RequestID for request.
9478func (s *ForbiddenException) RequestID() string {
9479	return s.RespMetadata.RequestID
9480}
9481
// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value FRAME_CAPTURE.
type FrameCaptureSettings struct {
	_ struct{} `type:"structure"`

	// Frame capture will encode the first frame of the output stream, then one
	// frame every framerateDenominator/framerateNumerator seconds. For example,
	// settings of framerateNumerator = 1 and framerateDenominator = 3 (a rate of
	// 1/3 frame per second) will capture the first frame, then 1 frame every 3s.
	// Files will be named as filename.n.jpg where n is the 0-based sequence number
	// of each Capture.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// Frame capture will encode the first frame of the output stream, then one
	// frame every framerateDenominator/framerateNumerator seconds. For example,
	// settings of framerateNumerator = 1 and framerateDenominator = 3 (a rate of
	// 1/3 frame per second) will capture the first frame, then 1 frame every 3s.
	// Files will be named as filename.NNNNNNN.jpg where N is the 0-based frame
	// sequence number zero padded to 7 decimal places.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// Maximum number of captures (encoded jpg output files). The minimum accepted
	// value is 1; Validate enforces this.
	MaxCaptures *int64 `locationName:"maxCaptures" min:"1" type:"integer"`

	// JPEG Quality - a higher value equals higher quality. The minimum accepted
	// value is 1; Validate enforces this.
	Quality *int64 `locationName:"quality" min:"1" type:"integer"`
}

// String returns a human-readable representation of the settings, built with
// awsutil.Prettify.
func (s FrameCaptureSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s FrameCaptureSettings) GoString() string {
	return s.String()
}
9519
9520// Validate inspects the fields of the type to determine if they are valid.
9521func (s *FrameCaptureSettings) Validate() error {
9522	invalidParams := request.ErrInvalidParams{Context: "FrameCaptureSettings"}
9523	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
9524		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
9525	}
9526	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
9527		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1))
9528	}
9529	if s.MaxCaptures != nil && *s.MaxCaptures < 1 {
9530		invalidParams.Add(request.NewErrParamMinValue("MaxCaptures", 1))
9531	}
9532	if s.Quality != nil && *s.Quality < 1 {
9533		invalidParams.Add(request.NewErrParamMinValue("Quality", 1))
9534	}
9535
9536	if invalidParams.Len() > 0 {
9537		return invalidParams
9538	}
9539	return nil
9540}
9541
9542// SetFramerateDenominator sets the FramerateDenominator field's value.
9543func (s *FrameCaptureSettings) SetFramerateDenominator(v int64) *FrameCaptureSettings {
9544	s.FramerateDenominator = &v
9545	return s
9546}
9547
9548// SetFramerateNumerator sets the FramerateNumerator field's value.
9549func (s *FrameCaptureSettings) SetFramerateNumerator(v int64) *FrameCaptureSettings {
9550	s.FramerateNumerator = &v
9551	return s
9552}
9553
9554// SetMaxCaptures sets the MaxCaptures field's value.
9555func (s *FrameCaptureSettings) SetMaxCaptures(v int64) *FrameCaptureSettings {
9556	s.MaxCaptures = &v
9557	return s
9558}
9559
9560// SetQuality sets the Quality field's value.
9561func (s *FrameCaptureSettings) SetQuality(v int64) *FrameCaptureSettings {
9562	s.Quality = &v
9563	return s
9564}
9565
// Query a job by sending a request with the job ID.
type GetJobInput struct {
	_ struct{} `type:"structure"`

	// The job ID of the job to retrieve.
	//
	// Id is a required field
	Id *string `location:"uri" locationName:"id" type:"string" required:"true"`
}

// String returns a human-readable representation of the input, built with
// awsutil.Prettify.
func (s GetJobInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s GetJobInput) GoString() string {
	return s.String()
}
9585
9586// Validate inspects the fields of the type to determine if they are valid.
9587func (s *GetJobInput) Validate() error {
9588	invalidParams := request.ErrInvalidParams{Context: "GetJobInput"}
9589	if s.Id == nil {
9590		invalidParams.Add(request.NewErrParamRequired("Id"))
9591	}
9592	if s.Id != nil && len(*s.Id) < 1 {
9593		invalidParams.Add(request.NewErrParamMinLen("Id", 1))
9594	}
9595
9596	if invalidParams.Len() > 0 {
9597		return invalidParams
9598	}
9599	return nil
9600}
9601
9602// SetId sets the Id field's value.
9603func (s *GetJobInput) SetId(v string) *GetJobInput {
9604	s.Id = &v
9605	return s
9606}
9607
// Successful get job requests will return an OK message and the job JSON.
type GetJobOutput struct {
	_ struct{} `type:"structure"`

	// Each job converts an input file into an output file or files. For more information,
	// see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
	Job *Job `locationName:"job" type:"structure"`
}

// String returns a human-readable representation of the output, built with
// awsutil.Prettify.
func (s GetJobOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s GetJobOutput) GoString() string {
	return s.String()
}

// SetJob sets the Job field's value. It returns s so that setter calls can
// be chained.
func (s *GetJobOutput) SetJob(v *Job) *GetJobOutput {
	s.Job = v
	return s
}
9632
// Query a job template by sending a request with the job template name.
type GetJobTemplateInput struct {
	_ struct{} `type:"structure"`

	// The name of the job template to query.
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`
}
9642
9643// String returns the string representation
9644func (s GetJobTemplateInput) String() string {
9645	return awsutil.Prettify(s)
9646}
9647
9648// GoString returns the string representation
9649func (s GetJobTemplateInput) GoString() string {
9650	return s.String()
9651}
9652
9653// Validate inspects the fields of the type to determine if they are valid.
9654func (s *GetJobTemplateInput) Validate() error {
9655	invalidParams := request.ErrInvalidParams{Context: "GetJobTemplateInput"}
9656	if s.Name == nil {
9657		invalidParams.Add(request.NewErrParamRequired("Name"))
9658	}
9659	if s.Name != nil && len(*s.Name) < 1 {
9660		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
9661	}
9662
9663	if invalidParams.Len() > 0 {
9664		return invalidParams
9665	}
9666	return nil
9667}
9668
9669// SetName sets the Name field's value.
9670func (s *GetJobTemplateInput) SetName(v string) *GetJobTemplateInput {
9671	s.Name = &v
9672	return s
9673}
9674
// Successful get job template requests will return an OK message and the job
// template JSON.
type GetJobTemplateOutput struct {
	_ struct{} `type:"structure"`

	// The job template that was queried. A job template is a pre-made set of
	// encoding instructions that you can use to quickly create a job.
	JobTemplate *JobTemplate `locationName:"jobTemplate" type:"structure"`
}
9684
9685// String returns the string representation
9686func (s GetJobTemplateOutput) String() string {
9687	return awsutil.Prettify(s)
9688}
9689
9690// GoString returns the string representation
9691func (s GetJobTemplateOutput) GoString() string {
9692	return s.String()
9693}
9694
9695// SetJobTemplate sets the JobTemplate field's value.
9696func (s *GetJobTemplateOutput) SetJobTemplate(v *JobTemplate) *GetJobTemplateOutput {
9697	s.JobTemplate = v
9698	return s
9699}
9700
// Query a preset by sending a request with the preset name.
type GetPresetInput struct {
	_ struct{} `type:"structure"`

	// The name of the preset to query.
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`
}
9710
9711// String returns the string representation
9712func (s GetPresetInput) String() string {
9713	return awsutil.Prettify(s)
9714}
9715
9716// GoString returns the string representation
9717func (s GetPresetInput) GoString() string {
9718	return s.String()
9719}
9720
9721// Validate inspects the fields of the type to determine if they are valid.
9722func (s *GetPresetInput) Validate() error {
9723	invalidParams := request.ErrInvalidParams{Context: "GetPresetInput"}
9724	if s.Name == nil {
9725		invalidParams.Add(request.NewErrParamRequired("Name"))
9726	}
9727	if s.Name != nil && len(*s.Name) < 1 {
9728		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
9729	}
9730
9731	if invalidParams.Len() > 0 {
9732		return invalidParams
9733	}
9734	return nil
9735}
9736
9737// SetName sets the Name field's value.
9738func (s *GetPresetInput) SetName(v string) *GetPresetInput {
9739	s.Name = &v
9740	return s
9741}
9742
// Successful get preset requests will return an OK message and the preset JSON.
type GetPresetOutput struct {
	_ struct{} `type:"structure"`

	// The preset that was queried. A preset is a collection of preconfigured media
	// conversion settings that you want MediaConvert to apply to the output during
	// the conversion process.
	Preset *Preset `locationName:"preset" type:"structure"`
}
9751
9752// String returns the string representation
9753func (s GetPresetOutput) String() string {
9754	return awsutil.Prettify(s)
9755}
9756
9757// GoString returns the string representation
9758func (s GetPresetOutput) GoString() string {
9759	return s.String()
9760}
9761
9762// SetPreset sets the Preset field's value.
9763func (s *GetPresetOutput) SetPreset(v *Preset) *GetPresetOutput {
9764	s.Preset = v
9765	return s
9766}
9767
// Get information about a queue by sending a request with the queue name.
type GetQueueInput struct {
	_ struct{} `type:"structure"`

	// The name of the queue that you want information about.
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`
}
9777
9778// String returns the string representation
9779func (s GetQueueInput) String() string {
9780	return awsutil.Prettify(s)
9781}
9782
9783// GoString returns the string representation
9784func (s GetQueueInput) GoString() string {
9785	return s.String()
9786}
9787
9788// Validate inspects the fields of the type to determine if they are valid.
9789func (s *GetQueueInput) Validate() error {
9790	invalidParams := request.ErrInvalidParams{Context: "GetQueueInput"}
9791	if s.Name == nil {
9792		invalidParams.Add(request.NewErrParamRequired("Name"))
9793	}
9794	if s.Name != nil && len(*s.Name) < 1 {
9795		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
9796	}
9797
9798	if invalidParams.Len() > 0 {
9799		return invalidParams
9800	}
9801	return nil
9802}
9803
9804// SetName sets the Name field's value.
9805func (s *GetQueueInput) SetName(v string) *GetQueueInput {
9806	s.Name = &v
9807	return s
9808}
9809
// Successful get queue requests return an OK message and information about
// the queue in JSON.
type GetQueueOutput struct {
	_ struct{} `type:"structure"`

	// The queue that was queried. You can use queues to manage the resources that
	// are available to your AWS account for running multiple transcoding jobs at
	// the same time. If you don't specify a queue, the service sends all jobs through
	// the default queue. For more information, see
	// https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html.
	Queue *Queue `locationName:"queue" type:"structure"`
}
9821
9822// String returns the string representation
9823func (s GetQueueOutput) String() string {
9824	return awsutil.Prettify(s)
9825}
9826
9827// GoString returns the string representation
9828func (s GetQueueOutput) GoString() string {
9829	return s.String()
9830}
9831
9832// SetQueue sets the Queue field's value.
9833func (s *GetQueueOutput) SetQueue(v *Queue) *GetQueueOutput {
9834	s.Queue = v
9835	return s
9836}
9837
// Settings for quality-defined variable bitrate encoding with the H.264 codec.
// Use these settings only when you set QVBR for Rate control mode (RateControlMode).
// NOTE(review): the generated comment previously said "H.265"; this type is
// used by H264Settings, so "H.264" is correct.
type H264QvbrSettings struct {
	_ struct{} `type:"structure"`

	// Use this setting only when Rate control mode is QVBR and Quality tuning level
	// is Multi-pass HQ. For Max average bitrate values suited to the complexity
	// of your input video, the service limits the average bitrate of the video
	// part of this output to the value that you choose. That is, the total size
	// of the video element is less than or equal to the value you set multiplied
	// by the number of seconds of encoded output.
	MaxAverageBitrate *int64 `locationName:"maxAverageBitrate" min:"1000" type:"integer"`

	// Use this setting only when you set Rate control mode (RateControlMode) to
	// QVBR. Specify the target quality level for this output. MediaConvert determines
	// the right number of bits to use for each part of the video to maintain the
	// video quality that you specify. When you keep the default value, AUTO, MediaConvert
	// picks a quality level for you, based on characteristics of your input video.
	// If you prefer to specify a quality level, specify a number from 1 through
	// 10. Use higher numbers for greater quality. Level 10 results in nearly lossless
	// compression. The quality level for most broadcast-quality transcodes is between
	// 6 and 9. Optionally, to specify a value between whole numbers, also provide
	// a value for the setting qvbrQualityLevelFineTune. For example, if you want
	// your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune
	// to .33.
	QvbrQualityLevel *int64 `locationName:"qvbrQualityLevel" min:"1" type:"integer"`

	// Optional. Specify a value here to set the QVBR quality to a level that is
	// between whole numbers. For example, if you want your QVBR quality level to
	// be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.
	// MediaConvert rounds your QVBR quality level to the nearest third of a whole
	// number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune
	// to .25, your actual QVBR quality level is 7.33.
	QvbrQualityLevelFineTune *float64 `locationName:"qvbrQualityLevelFineTune" type:"double"`
}
9873
9874// String returns the string representation
9875func (s H264QvbrSettings) String() string {
9876	return awsutil.Prettify(s)
9877}
9878
9879// GoString returns the string representation
9880func (s H264QvbrSettings) GoString() string {
9881	return s.String()
9882}
9883
9884// Validate inspects the fields of the type to determine if they are valid.
9885func (s *H264QvbrSettings) Validate() error {
9886	invalidParams := request.ErrInvalidParams{Context: "H264QvbrSettings"}
9887	if s.MaxAverageBitrate != nil && *s.MaxAverageBitrate < 1000 {
9888		invalidParams.Add(request.NewErrParamMinValue("MaxAverageBitrate", 1000))
9889	}
9890	if s.QvbrQualityLevel != nil && *s.QvbrQualityLevel < 1 {
9891		invalidParams.Add(request.NewErrParamMinValue("QvbrQualityLevel", 1))
9892	}
9893
9894	if invalidParams.Len() > 0 {
9895		return invalidParams
9896	}
9897	return nil
9898}
9899
9900// SetMaxAverageBitrate sets the MaxAverageBitrate field's value.
9901func (s *H264QvbrSettings) SetMaxAverageBitrate(v int64) *H264QvbrSettings {
9902	s.MaxAverageBitrate = &v
9903	return s
9904}
9905
9906// SetQvbrQualityLevel sets the QvbrQualityLevel field's value.
9907func (s *H264QvbrSettings) SetQvbrQualityLevel(v int64) *H264QvbrSettings {
9908	s.QvbrQualityLevel = &v
9909	return s
9910}
9911
9912// SetQvbrQualityLevelFineTune sets the QvbrQualityLevelFineTune field's value.
9913func (s *H264QvbrSettings) SetQvbrQualityLevelFineTune(v float64) *H264QvbrSettings {
9914	s.QvbrQualityLevelFineTune = &v
9915	return s
9916}
9917
// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value H_264.
type H264Settings struct {
	_ struct{} `type:"structure"`

	// Keep the default value, Auto (AUTO), for this setting to have MediaConvert
	// automatically apply the best types of quantization for your video content.
	// When you want to apply your quantization settings manually, you must set
	// H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting
	// to specify the strength of any adaptive quantization filters that you enable.
	// If you don't want MediaConvert to do any adaptive quantization in this transcode,
	// set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF). Related
	// settings: The value that you choose here applies to the following settings:
	// H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization.
	AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"H264AdaptiveQuantization"`

	// Specify the average bitrate in bits per second. Required for VBR and CBR.
	// For MS Smooth outputs, bitrates must be unique when rounded down to the nearest
	// multiple of 1000.
	Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"`

	// Specify an H.264 level that is consistent with your output video settings.
	// If you aren't sure what level to specify, choose Auto (AUTO).
	CodecLevel *string `locationName:"codecLevel" type:"string" enum:"H264CodecLevel"`

	// H.264 Profile. High 4:2:2 and 10-bit profiles are only available with the
	// AVC-I License.
	CodecProfile *string `locationName:"codecProfile" type:"string" enum:"H264CodecProfile"`

	// Choose Adaptive to improve subjective video quality for high-motion content.
	// This will cause the service to use fewer B-frames (which infer information
	// based on other frames) for high-motion portions of the video and more B-frames
	// for low-motion portions. The maximum number of B-frames is limited by the
	// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).
	DynamicSubGop *string `locationName:"dynamicSubGop" type:"string" enum:"H264DynamicSubGop"`

	// Entropy encoding mode. Use CABAC (must be in Main or High profile) or CAVLC.
	EntropyEncoding *string `locationName:"entropyEncoding" type:"string" enum:"H264EntropyEncoding"`

	// Keep the default value, PAFF, to have MediaConvert use PAFF encoding for
	// interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding
	// and create separate interlaced fields.
	FieldEncoding *string `locationName:"fieldEncoding" type:"string" enum:"H264FieldEncoding"`

	// Only use this setting when you change the default value, AUTO, for the setting
	// H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization
	// and all other adaptive quantization from your JSON job specification, MediaConvert
	// automatically applies the best types of quantization for your video content.
	// When you set H264AdaptiveQuantization to a value other than AUTO, the default
	// value for H264FlickerAdaptiveQuantization is Disabled (DISABLED). Change
	// this value to Enabled (ENABLED) to reduce I-frame pop. I-frame pop appears
	// as a visual flicker that can arise when the encoder saves bits by copying
	// some macroblocks many times from frame to frame, and then refreshes them
	// at the I-frame. When you enable this setting, the encoder updates these macroblocks
	// slightly more often to smooth out the flicker. To manually enable or disable
	// H264FlickerAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization)
	// to a value other than AUTO.
	FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"H264FlickerAdaptiveQuantization"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"H264FramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"H264FramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// If enabled, use reference B frames for GOP structures that have B frames >
	// 1.
	GopBReference *string `locationName:"gopBReference" type:"string" enum:"H264GopBReference"`

	// Frequency of closed GOPs. In streaming applications, it is recommended that
	// this be set to 1 so a decoder joining mid-stream will receive an IDR frame
	// as quickly as possible. Setting this value to 0 will break output segmenting.
	GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"`

	// GOP Length (keyframe interval) in frames or seconds. Must be greater than
	// zero.
	GopSize *float64 `locationName:"gopSize" type:"double"`

	// Indicates if the GOP Size in H264 is specified in frames or seconds. If seconds
	// the system will convert the GOP Size into a frame count at run time.
	GopSizeUnits *string `locationName:"gopSizeUnits" type:"string" enum:"H264GopSizeUnits"`

	// Percentage of the buffer that should initially be filled (HRD buffer model).
	HrdBufferInitialFillPercentage *int64 `locationName:"hrdBufferInitialFillPercentage" type:"integer"`

	// Size of buffer (HRD buffer model) in bits. For example, enter five megabits
	// as 5000000.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Choose the scan line type for the output. Keep the default value, Progressive
	// (PROGRESSIVE) to create a progressive output, regardless of the scan type
	// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
	// to create an output that's interlaced with the same field polarity throughout.
	// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
	// to produce outputs with the same field polarity as the source. For jobs that
	// have multiple inputs, the output field polarity might change over the course
	// of the output. Follow behavior depends on the input scan type. If the source
	// is interlaced, the output will be interlaced with the same polarity as the
	// source. If the source is progressive, the output will be interlaced with
	// top field bottom field first, depending on which of the Follow options you
	// choose.
	InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"H264InterlaceMode"`

	// Maximum bitrate in bits/second. For example, enter five megabits per second
	// as 5000000. Required when Rate control mode is QVBR.
	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`

	// Enforces separation between repeated (cadence) I-frames and I-frames inserted
	// by Scene Change Detection. If a scene change I-frame is within I-interval
	// frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene
	// change I-frame. GOP stretch requires enabling lookahead as well as setting
	// I-interval. The normal cadence resumes for the next GOP. This setting is
	// only used when Scene Change Detect is enabled. Note: Maximum GOP stretch
	// = GOP size + Min-I-interval - 1
	MinIInterval *int64 `locationName:"minIInterval" type:"integer"`

	// Number of B-frames between reference frames.
	NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"`

	// Number of reference frames to use. The encoder may use more than requested
	// if using B-frames and/or interlaced encoding.
	NumberReferenceFrames *int64 `locationName:"numberReferenceFrames" min:"1" type:"integer"`

	// Optional. Specify how the service determines the pixel aspect ratio (PAR)
	// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
	// uses the PAR from your input video for your output. To specify a different
	// PAR in the console, choose any value other than Follow source. To specify
	// a different PAR by editing the JSON job specification, choose SPECIFIED.
	// When you choose SPECIFIED for this setting, you must also specify values
	// for the parNumerator and parDenominator settings.
	ParControl *string `locationName:"parControl" type:"string" enum:"H264ParControl"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parDenominator is 33.
	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parNumerator is 40.
	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, single-pass encoding.
	QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"H264QualityTuningLevel"`

	// Settings for quality-defined variable bitrate encoding with the H.264 codec.
	// Use these settings only when you set QVBR for Rate control mode (RateControlMode).
	QvbrSettings *H264QvbrSettings `locationName:"qvbrSettings" type:"structure"`

	// Use this setting to specify whether this output has a variable bitrate (VBR),
	// constant bitrate (CBR) or quality-defined variable bitrate (QVBR).
	RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"H264RateControlMode"`

	// Places a PPS header on each encoded picture, even if repeated.
	RepeatPps *string `locationName:"repeatPps" type:"string" enum:"H264RepeatPps"`

	// Use this setting for interlaced outputs, when your output frame rate is half
	// of your input frame rate. In this situation, choose Optimized interlacing
	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
	// case, each progressive frame from the input corresponds to an interlaced
	// field in the output. Keep the default value, Basic interlacing (INTERLACED),
	// for all other output frame rates. With basic interlacing, MediaConvert performs
	// any frame rate conversion first and then interlaces the frames. When you
	// choose Optimized interlacing and you set your output frame rate to a value
	// that isn't suitable for optimized interlacing, MediaConvert automatically
	// falls back to basic interlacing. Required settings: To use optimized interlacing,
	// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
	// use optimized interlacing for hard telecine outputs. You must also set Interlace
	// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
	ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"H264ScanTypeConversionMode"`

	// Enable this setting to insert I-frames at scene changes that the service
	// automatically detects. This improves video quality and is enabled by default.
	// If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION)
	// for further video quality improvement. For more information about QVBR, see
	// https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.
	SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"H264SceneChangeDetect"`

	// Number of slices per picture. Must be less than or equal to the number of
	// macroblock rows for progressive pictures, and less than or equal to half
	// the number of macroblock rows for interlaced pictures.
	Slices *int64 `locationName:"slices" min:"1" type:"integer"`

	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
	// second (fps). Enable slow PAL to create a 25 fps output. When you enable
	// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
	// your audio to keep it synchronized with the video. Note that enabling this
	// setting will slightly reduce the duration of your video. Required settings:
	// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
	// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
	// 1.
	SlowPal *string `locationName:"slowPal" type:"string" enum:"H264SlowPal"`

	// Ignore this setting unless you need to comply with a specification that requires
	// a specific value. If you don't have a specification requirement, we recommend
	// that you adjust the softness of your output by using a lower value for the
	// setting Sharpness (sharpness) or by enabling a noise reducer filter (noiseReducerFilter).
	// The Softness (softness) setting specifies the quantization matrices that
	// the encoder uses. Keep the default value, 0, for flat quantization. Choose
	// the value 1 or 16 to use the default JVT softening quantization matrices
	// from the H.264 specification. Choose a value from 17 to 128 to use planar
	// interpolation. Increasing values from 17 to 128 result in increasing reduction
	// of high-frequency data. The value 128 results in the softest video.
	Softness *int64 `locationName:"softness" type:"integer"`

	// Only use this setting when you change the default value, Auto (AUTO), for
	// the setting H264AdaptiveQuantization. When you keep all defaults, excluding
	// H264AdaptiveQuantization and all other adaptive quantization from your JSON
	// job specification, MediaConvert automatically applies the best types of quantization
	// for your video content. When you set H264AdaptiveQuantization to a value
	// other than AUTO, the default value for H264SpatialAdaptiveQuantization is
	// Enabled (ENABLED). Keep this default value to adjust quantization within
	// each frame based on spatial variation of content complexity. When you enable
	// this feature, the encoder uses fewer bits on areas that can sustain more
	// distortion with no noticeable visual degradation and uses more bits on areas
	// where any small distortion will be noticeable. For example, complex textured
	// blocks are encoded with fewer bits and smooth textured blocks are encoded
	// with more bits. Enabling this feature will almost always improve your video
	// quality. Note, though, that this feature doesn't take into account where
	// the viewer's attention is likely to be. If viewers are likely to be focusing
	// their attention on a part of the screen with a lot of complex texture, you
	// might choose to set H264SpatialAdaptiveQuantization to Disabled (DISABLED).
	// Related setting: When you enable spatial adaptive quantization, set the value
	// for Adaptive quantization (H264AdaptiveQuantization) depending on your content.
	// For homogeneous content, such as cartoons and video games, set it to Low.
	// For content with a wider variety of textures, set it to High or Higher. To
	// manually enable or disable H264SpatialAdaptiveQuantization, you must set
	// Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO.
	SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"H264SpatialAdaptiveQuantization"`

	// Produces a bitstream compliant with SMPTE RP-2027.
	Syntax *string `locationName:"syntax" type:"string" enum:"H264Syntax"`

	// When you do frame rate conversion from 23.976 frames per second (fps) to
	// 29.97 fps, and your output scan type is interlaced, you can optionally enable
	// hard or soft telecine to create a smoother picture. Hard telecine (HARD)
	// produces a 29.97i output. Soft telecine (SOFT) produces an output with a
	// 23.976 output that signals to the video player device to do the conversion
	// during play back. When you keep the default value, None (NONE), MediaConvert
	// does a standard frame rate conversion to 29.97 without doing anything with
	// the field polarity to create a smoother picture.
	Telecine *string `locationName:"telecine" type:"string" enum:"H264Telecine"`

	// Only use this setting when you change the default value, AUTO, for the setting
	// H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization
	// and all other adaptive quantization from your JSON job specification, MediaConvert
	// automatically applies the best types of quantization for your video content.
	// When you set H264AdaptiveQuantization to a value other than AUTO, the default
	// value for H264TemporalAdaptiveQuantization is Enabled (ENABLED). Keep this
	// default value to adjust quantization within each frame based on temporal
	// variation of content complexity. When you enable this feature, the encoder
	// uses fewer bits on areas of the frame that aren't moving and uses more bits
	// on complex objects with sharp edges that move a lot. For example, this feature
	// improves the readability of text tickers on newscasts and scoreboards on
	// sports matches. Enabling this feature will almost always improve your video
	// quality. Note, though, that this feature doesn't take into account where
	// the viewer's attention is likely to be. If viewers are likely to be focusing
	// their attention on a part of the screen that doesn't have moving objects
	// with sharp edges, such as sports athletes' faces, you might choose to set
	// H264TemporalAdaptiveQuantization to Disabled (DISABLED). Related setting:
	// When you enable temporal quantization, adjust the strength of the filter
	// with the setting Adaptive quantization (adaptiveQuantization). To manually
	// enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive
	// quantization (H264AdaptiveQuantization) to a value other than AUTO.
	TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"H264TemporalAdaptiveQuantization"`

	// Inserts timecode for each frame as 4 bytes of an unregistered SEI message.
	UnregisteredSeiTimecode *string `locationName:"unregisteredSeiTimecode" type:"string" enum:"H264UnregisteredSeiTimecode"`
}
10230
// String returns the string representation of the H264Settings value.
// It uses awsutil.Prettify to produce a human-readable, multi-line dump
// of the populated fields, suitable for logging and debugging.
func (s H264Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
// It delegates to String and satisfies fmt.GoStringer, so %#v formatting
// produces the same prettified output.
func (s H264Settings) GoString() string {
	return s.String()
}
10240
// Validate inspects the fields of the type to determine if they are valid.
// It enforces the minimum-value constraints on the numeric fields (each
// check is skipped when the field pointer is nil, since unset optional
// fields are always valid) and recursively validates the nested
// QvbrSettings. All violations are aggregated into a single
// request.ErrInvalidParams, which is returned when at least one check
// fails; otherwise Validate returns nil.
func (s *H264Settings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "H264Settings"}
	if s.Bitrate != nil && *s.Bitrate < 1000 {
		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 1000))
	}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1))
	}
	if s.MaxBitrate != nil && *s.MaxBitrate < 1000 {
		invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000))
	}
	if s.NumberReferenceFrames != nil && *s.NumberReferenceFrames < 1 {
		invalidParams.Add(request.NewErrParamMinValue("NumberReferenceFrames", 1))
	}
	if s.ParDenominator != nil && *s.ParDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1))
	}
	if s.ParNumerator != nil && *s.ParNumerator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1))
	}
	if s.Slices != nil && *s.Slices < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Slices", 1))
	}
	// Nested struct errors are attached under the "QvbrSettings" prefix so
	// the caller can see which sub-structure failed validation.
	if s.QvbrSettings != nil {
		if err := s.QvbrSettings.Validate(); err != nil {
			invalidParams.AddNested("QvbrSettings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
10279
// The Set* methods below are generated fluent setters for H264Settings.
// Each one stores a pointer to (a copy of) the supplied value and returns
// the receiver, so calls can be chained when building a settings struct.

// SetAdaptiveQuantization sets the AdaptiveQuantization field's value.
func (s *H264Settings) SetAdaptiveQuantization(v string) *H264Settings {
	s.AdaptiveQuantization = &v
	return s
}

// SetBitrate sets the Bitrate field's value.
func (s *H264Settings) SetBitrate(v int64) *H264Settings {
	s.Bitrate = &v
	return s
}

// SetCodecLevel sets the CodecLevel field's value.
func (s *H264Settings) SetCodecLevel(v string) *H264Settings {
	s.CodecLevel = &v
	return s
}

// SetCodecProfile sets the CodecProfile field's value.
func (s *H264Settings) SetCodecProfile(v string) *H264Settings {
	s.CodecProfile = &v
	return s
}

// SetDynamicSubGop sets the DynamicSubGop field's value.
func (s *H264Settings) SetDynamicSubGop(v string) *H264Settings {
	s.DynamicSubGop = &v
	return s
}

// SetEntropyEncoding sets the EntropyEncoding field's value.
func (s *H264Settings) SetEntropyEncoding(v string) *H264Settings {
	s.EntropyEncoding = &v
	return s
}

// SetFieldEncoding sets the FieldEncoding field's value.
func (s *H264Settings) SetFieldEncoding(v string) *H264Settings {
	s.FieldEncoding = &v
	return s
}

// SetFlickerAdaptiveQuantization sets the FlickerAdaptiveQuantization field's value.
func (s *H264Settings) SetFlickerAdaptiveQuantization(v string) *H264Settings {
	s.FlickerAdaptiveQuantization = &v
	return s
}

// SetFramerateControl sets the FramerateControl field's value.
func (s *H264Settings) SetFramerateControl(v string) *H264Settings {
	s.FramerateControl = &v
	return s
}

// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value.
func (s *H264Settings) SetFramerateConversionAlgorithm(v string) *H264Settings {
	s.FramerateConversionAlgorithm = &v
	return s
}

// SetFramerateDenominator sets the FramerateDenominator field's value.
func (s *H264Settings) SetFramerateDenominator(v int64) *H264Settings {
	s.FramerateDenominator = &v
	return s
}

// SetFramerateNumerator sets the FramerateNumerator field's value.
func (s *H264Settings) SetFramerateNumerator(v int64) *H264Settings {
	s.FramerateNumerator = &v
	return s
}

// SetGopBReference sets the GopBReference field's value.
func (s *H264Settings) SetGopBReference(v string) *H264Settings {
	s.GopBReference = &v
	return s
}

// SetGopClosedCadence sets the GopClosedCadence field's value.
func (s *H264Settings) SetGopClosedCadence(v int64) *H264Settings {
	s.GopClosedCadence = &v
	return s
}

// SetGopSize sets the GopSize field's value.
func (s *H264Settings) SetGopSize(v float64) *H264Settings {
	s.GopSize = &v
	return s
}

// SetGopSizeUnits sets the GopSizeUnits field's value.
func (s *H264Settings) SetGopSizeUnits(v string) *H264Settings {
	s.GopSizeUnits = &v
	return s
}

// SetHrdBufferInitialFillPercentage sets the HrdBufferInitialFillPercentage field's value.
func (s *H264Settings) SetHrdBufferInitialFillPercentage(v int64) *H264Settings {
	s.HrdBufferInitialFillPercentage = &v
	return s
}

// SetHrdBufferSize sets the HrdBufferSize field's value.
func (s *H264Settings) SetHrdBufferSize(v int64) *H264Settings {
	s.HrdBufferSize = &v
	return s
}

// SetInterlaceMode sets the InterlaceMode field's value.
func (s *H264Settings) SetInterlaceMode(v string) *H264Settings {
	s.InterlaceMode = &v
	return s
}

// SetMaxBitrate sets the MaxBitrate field's value.
func (s *H264Settings) SetMaxBitrate(v int64) *H264Settings {
	s.MaxBitrate = &v
	return s
}

// SetMinIInterval sets the MinIInterval field's value.
func (s *H264Settings) SetMinIInterval(v int64) *H264Settings {
	s.MinIInterval = &v
	return s
}

// SetNumberBFramesBetweenReferenceFrames sets the NumberBFramesBetweenReferenceFrames field's value.
func (s *H264Settings) SetNumberBFramesBetweenReferenceFrames(v int64) *H264Settings {
	s.NumberBFramesBetweenReferenceFrames = &v
	return s
}

// SetNumberReferenceFrames sets the NumberReferenceFrames field's value.
func (s *H264Settings) SetNumberReferenceFrames(v int64) *H264Settings {
	s.NumberReferenceFrames = &v
	return s
}

// SetParControl sets the ParControl field's value.
func (s *H264Settings) SetParControl(v string) *H264Settings {
	s.ParControl = &v
	return s
}

// SetParDenominator sets the ParDenominator field's value.
func (s *H264Settings) SetParDenominator(v int64) *H264Settings {
	s.ParDenominator = &v
	return s
}

// SetParNumerator sets the ParNumerator field's value.
func (s *H264Settings) SetParNumerator(v int64) *H264Settings {
	s.ParNumerator = &v
	return s
}

// SetQualityTuningLevel sets the QualityTuningLevel field's value.
func (s *H264Settings) SetQualityTuningLevel(v string) *H264Settings {
	s.QualityTuningLevel = &v
	return s
}

// SetQvbrSettings sets the QvbrSettings field's value.
// Note: unlike the scalar setters, this stores the pointer it is given,
// so the caller and the struct share the same H264QvbrSettings value.
func (s *H264Settings) SetQvbrSettings(v *H264QvbrSettings) *H264Settings {
	s.QvbrSettings = v
	return s
}

// SetRateControlMode sets the RateControlMode field's value.
func (s *H264Settings) SetRateControlMode(v string) *H264Settings {
	s.RateControlMode = &v
	return s
}

// SetRepeatPps sets the RepeatPps field's value.
func (s *H264Settings) SetRepeatPps(v string) *H264Settings {
	s.RepeatPps = &v
	return s
}

// SetScanTypeConversionMode sets the ScanTypeConversionMode field's value.
func (s *H264Settings) SetScanTypeConversionMode(v string) *H264Settings {
	s.ScanTypeConversionMode = &v
	return s
}

// SetSceneChangeDetect sets the SceneChangeDetect field's value.
func (s *H264Settings) SetSceneChangeDetect(v string) *H264Settings {
	s.SceneChangeDetect = &v
	return s
}

// SetSlices sets the Slices field's value.
func (s *H264Settings) SetSlices(v int64) *H264Settings {
	s.Slices = &v
	return s
}

// SetSlowPal sets the SlowPal field's value.
func (s *H264Settings) SetSlowPal(v string) *H264Settings {
	s.SlowPal = &v
	return s
}

// SetSoftness sets the Softness field's value.
func (s *H264Settings) SetSoftness(v int64) *H264Settings {
	s.Softness = &v
	return s
}

// SetSpatialAdaptiveQuantization sets the SpatialAdaptiveQuantization field's value.
func (s *H264Settings) SetSpatialAdaptiveQuantization(v string) *H264Settings {
	s.SpatialAdaptiveQuantization = &v
	return s
}

// SetSyntax sets the Syntax field's value.
func (s *H264Settings) SetSyntax(v string) *H264Settings {
	s.Syntax = &v
	return s
}

// SetTelecine sets the Telecine field's value.
func (s *H264Settings) SetTelecine(v string) *H264Settings {
	s.Telecine = &v
	return s
}

// SetTemporalAdaptiveQuantization sets the TemporalAdaptiveQuantization field's value.
func (s *H264Settings) SetTemporalAdaptiveQuantization(v string) *H264Settings {
	s.TemporalAdaptiveQuantization = &v
	return s
}

// SetUnregisteredSeiTimecode sets the UnregisteredSeiTimecode field's value.
func (s *H264Settings) SetUnregisteredSeiTimecode(v string) *H264Settings {
	s.UnregisteredSeiTimecode = &v
	return s
}
10519
// Settings for quality-defined variable bitrate encoding with the H.265 codec.
// Use these settings only when you set QVBR for Rate control mode (RateControlMode).
type H265QvbrSettings struct {
	_ struct{} `type:"structure"`

	// Use this setting only when Rate control mode is QVBR and Quality tuning level
	// is Multi-pass HQ. For Max average bitrate values suited to the complexity
	// of your input video, the service limits the average bitrate of the video
	// part of this output to the value that you choose. That is, the total size
	// of the video element is less than or equal to the value you set multiplied
	// by the number of seconds of encoded output.
	// Values below 1000 are rejected by Validate (see the min tag).
	MaxAverageBitrate *int64 `locationName:"maxAverageBitrate" min:"1000" type:"integer"`

	// Use this setting only when you set Rate control mode (RateControlMode) to
	// QVBR. Specify the target quality level for this output. MediaConvert determines
	// the right number of bits to use for each part of the video to maintain the
	// video quality that you specify. When you keep the default value, AUTO, MediaConvert
	// picks a quality level for you, based on characteristics of your input video.
	// If you prefer to specify a quality level, specify a number from 1 through
	// 10. Use higher numbers for greater quality. Level 10 results in nearly lossless
	// compression. The quality level for most broadcast-quality transcodes is between
	// 6 and 9. Optionally, to specify a value between whole numbers, also provide
	// a value for the setting qvbrQualityLevelFineTune. For example, if you want
	// your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune
	// to .33.
	// Values below 1 are rejected by Validate (see the min tag).
	QvbrQualityLevel *int64 `locationName:"qvbrQualityLevel" min:"1" type:"integer"`

	// Optional. Specify a value here to set the QVBR quality to a level that is
	// between whole numbers. For example, if you want your QVBR quality level to
	// be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.
	// MediaConvert rounds your QVBR quality level to the nearest third of a whole
	// number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune
	// to .25, your actual QVBR quality level is 7.33.
	QvbrQualityLevelFineTune *float64 `locationName:"qvbrQualityLevelFineTune" type:"double"`
}
10555
// String returns the string representation of the H265QvbrSettings value.
// It uses awsutil.Prettify to produce a human-readable, multi-line dump
// of the populated fields, suitable for logging and debugging.
func (s H265QvbrSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
// It delegates to String and satisfies fmt.GoStringer, so %#v formatting
// produces the same prettified output.
func (s H265QvbrSettings) GoString() string {
	return s.String()
}
10565
// Validate inspects the fields of the type to determine if they are valid.
// It enforces the minimum-value constraints on MaxAverageBitrate (>= 1000)
// and QvbrQualityLevel (>= 1); nil (unset) fields are always considered
// valid. All violations are aggregated into a single
// request.ErrInvalidParams, which is returned when at least one check
// fails; otherwise Validate returns nil.
func (s *H265QvbrSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "H265QvbrSettings"}
	if s.MaxAverageBitrate != nil && *s.MaxAverageBitrate < 1000 {
		invalidParams.Add(request.NewErrParamMinValue("MaxAverageBitrate", 1000))
	}
	if s.QvbrQualityLevel != nil && *s.QvbrQualityLevel < 1 {
		invalidParams.Add(request.NewErrParamMinValue("QvbrQualityLevel", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
10581
// The Set* methods below are generated fluent setters for H265QvbrSettings.
// Each one stores a pointer to (a copy of) the supplied value and returns
// the receiver, so calls can be chained when building a settings struct.

// SetMaxAverageBitrate sets the MaxAverageBitrate field's value.
func (s *H265QvbrSettings) SetMaxAverageBitrate(v int64) *H265QvbrSettings {
	s.MaxAverageBitrate = &v
	return s
}

// SetQvbrQualityLevel sets the QvbrQualityLevel field's value.
func (s *H265QvbrSettings) SetQvbrQualityLevel(v int64) *H265QvbrSettings {
	s.QvbrQualityLevel = &v
	return s
}

// SetQvbrQualityLevelFineTune sets the QvbrQualityLevelFineTune field's value.
func (s *H265QvbrSettings) SetQvbrQualityLevelFineTune(v float64) *H265QvbrSettings {
	s.QvbrQualityLevelFineTune = &v
	return s
}
10599
10600// Settings for H265 codec
10601type H265Settings struct {
10602	_ struct{} `type:"structure"`
10603
10604	// Specify the strength of any adaptive quantization filters that you enable.
10605	// The value that you choose here applies to the following settings: Flicker
10606	// adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization
10607	// (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization).
10608	AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"H265AdaptiveQuantization"`
10609
10610	// Enables Alternate Transfer Function SEI message for outputs using Hybrid
10611	// Log Gamma (HLG) Electro-Optical Transfer Function (EOTF).
10612	AlternateTransferFunctionSei *string `locationName:"alternateTransferFunctionSei" type:"string" enum:"H265AlternateTransferFunctionSei"`
10613
10614	// Specify the average bitrate in bits per second. Required for VBR and CBR.
10615	// For MS Smooth outputs, bitrates must be unique when rounded down to the nearest
10616	// multiple of 1000.
10617	Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"`
10618
10619	// H.265 Level.
10620	CodecLevel *string `locationName:"codecLevel" type:"string" enum:"H265CodecLevel"`
10621
10622	// Represents the Profile and Tier, per the HEVC (H.265) specification. Selections
10623	// are grouped as [Profile] / [Tier], so "Main/High" represents Main Profile
10624	// with High Tier. 4:2:2 profiles are only available with the HEVC 4:2:2 License.
10625	CodecProfile *string `locationName:"codecProfile" type:"string" enum:"H265CodecProfile"`
10626
10627	// Choose Adaptive to improve subjective video quality for high-motion content.
10628	// This will cause the service to use fewer B-frames (which infer information
10629	// based on other frames) for high-motion portions of the video and more B-frames
10630	// for low-motion portions. The maximum number of B-frames is limited by the
10631	// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).
10632	DynamicSubGop *string `locationName:"dynamicSubGop" type:"string" enum:"H265DynamicSubGop"`
10633
10634	// Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears
10635	// as a visual flicker that can arise when the encoder saves bits by copying
10636	// some macroblocks many times from frame to frame, and then refreshes them
10637	// at the I-frame. When you enable this setting, the encoder updates these macroblocks
10638	// slightly more often to smooth out the flicker. This setting is disabled by
10639	// default. Related setting: In addition to enabling this setting, you must
10640	// also set adaptiveQuantization to a value other than Off (OFF).
10641	FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"H265FlickerAdaptiveQuantization"`
10642
10643	// If you are using the console, use the Framerate setting to specify the frame
10644	// rate for this output. If you want to keep the same frame rate as the input
10645	// video, choose Follow source. If you want to do frame rate conversion, choose
10646	// a frame rate from the dropdown list or choose Custom. The framerates shown
10647	// in the dropdown list are decimal approximations of fractions. If you choose
10648	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
10649	// job specification as a JSON file without the console, use FramerateControl
10650	// to specify which value the service uses for the frame rate for this output.
10651	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
10652	// from the input. Choose SPECIFIED if you want the service to use the frame
10653	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
10654	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"H265FramerateControl"`
10655
10656	// Choose the method that you want MediaConvert to use when increasing or decreasing
10657	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
10658	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
10659	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
10660	// smooth picture, but might introduce undesirable video artifacts. For complex
10661	// frame rate conversions, especially if your source video has already been
10662	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
10663	// motion-compensated interpolation. FrameFormer chooses the best conversion
10664	// method frame by frame. Note that using FrameFormer increases the transcoding
10665	// time and incurs a significant add-on cost.
10666	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"H265FramerateConversionAlgorithm"`
10667
10668	// When you use the API for transcode jobs that use frame rate conversion, specify
10669	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
10670	// FramerateDenominator to specify the denominator of this fraction. In this
10671	// example, use 1001 for the value of FramerateDenominator. When you use the
10672	// console for transcode jobs that use frame rate conversion, provide the value
10673	// as a decimal number for Framerate. In this example, specify 23.976.
10674	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`
10675
10676	// When you use the API for transcode jobs that use frame rate conversion, specify
10677	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
10678	// FramerateNumerator to specify the numerator of this fraction. In this example,
10679	// use 24000 for the value of FramerateNumerator. When you use the console for
10680	// transcode jobs that use frame rate conversion, provide the value as a decimal
10681	// number for Framerate. In this example, specify 23.976.
10682	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`
10683
10684	// If enable, use reference B frames for GOP structures that have B frames >
10685	// 1.
10686	GopBReference *string `locationName:"gopBReference" type:"string" enum:"H265GopBReference"`
10687
10688	// Frequency of closed GOPs. In streaming applications, it is recommended that
10689	// this be set to 1 so a decoder joining mid-stream will receive an IDR frame
10690	// as quickly as possible. Setting this value to 0 will break output segmenting.
10691	GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"`
10692
10693	// GOP Length (keyframe interval) in frames or seconds. Must be greater than
10694	// zero.
10695	GopSize *float64 `locationName:"gopSize" type:"double"`
10696
10697	// Indicates if the GOP Size in H265 is specified in frames or seconds. If seconds
10698	// the system will convert the GOP Size into a frame count at run time.
10699	GopSizeUnits *string `locationName:"gopSizeUnits" type:"string" enum:"H265GopSizeUnits"`
10700
10701	// Percentage of the buffer that should initially be filled (HRD buffer model).
10702	HrdBufferInitialFillPercentage *int64 `locationName:"hrdBufferInitialFillPercentage" type:"integer"`
10703
10704	// Size of buffer (HRD buffer model) in bits. For example, enter five megabits
10705	// as 5000000.
10706	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`
10707
10708	// Choose the scan line type for the output. Keep the default value, Progressive
10709	// (PROGRESSIVE) to create a progressive output, regardless of the scan type
10710	// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
10711	// to create an output that's interlaced with the same field polarity throughout.
10712	// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
10713	// to produce outputs with the same field polarity as the source. For jobs that
10714	// have multiple inputs, the output field polarity might change over the course
10715	// of the output. Follow behavior depends on the input scan type. If the source
10716	// is interlaced, the output will be interlaced with the same polarity as the
10717	// source. If the source is progressive, the output will be interlaced with
10718	// top field bottom field first, depending on which of the Follow options you
10719	// choose.
10720	InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"H265InterlaceMode"`
10721
10722	// Maximum bitrate in bits/second. For example, enter five megabits per second
10723	// as 5000000. Required when Rate control mode is QVBR.
10724	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`
10725
10726	// Enforces separation between repeated (cadence) I-frames and I-frames inserted
10727	// by Scene Change Detection. If a scene change I-frame is within I-interval
10728	// frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene
10729	// change I-frame. GOP stretch requires enabling lookahead as well as setting
10730	// I-interval. The normal cadence resumes for the next GOP. This setting is
10731	// only used when Scene Change Detect is enabled. Note: Maximum GOP stretch
10732	// = GOP size + Min-I-interval - 1
10733	MinIInterval *int64 `locationName:"minIInterval" type:"integer"`
10734
10735	// Number of B-frames between reference frames.
10736	NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"`
10737
10738	// Number of reference frames to use. The encoder may use more than requested
10739	// if using B-frames and/or interlaced encoding.
10740	NumberReferenceFrames *int64 `locationName:"numberReferenceFrames" min:"1" type:"integer"`
10741
10742	// Optional. Specify how the service determines the pixel aspect ratio (PAR)
10743	// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
10744	// uses the PAR from your input video for your output. To specify a different
10745	// PAR in the console, choose any value other than Follow source. To specify
10746	// a different PAR by editing the JSON job specification, choose SPECIFIED.
10747	// When you choose SPECIFIED for this setting, you must also specify values
10748	// for the parNumerator and parDenominator settings.
10749	ParControl *string `locationName:"parControl" type:"string" enum:"H265ParControl"`
10750
10751	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
10752	// console, this corresponds to any value other than Follow source. When you
10753	// specify an output pixel aspect ratio (PAR) that is different from your input
10754	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
10755	// widescreen, you would specify the ratio 40:33. In this example, the value
10756	// for parDenominator is 33.
10757	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`
10758
10759	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
10760	// console, this corresponds to any value other than Follow source. When you
10761	// specify an output pixel aspect ratio (PAR) that is different from your input
10762	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
10763	// widescreen, you would specify the ratio 40:33. In this example, the value
10764	// for parNumerator is 40.
10765	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`
10766
10767	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
10768	// want to trade off encoding speed for output video quality. The default behavior
10769	// is faster, lower quality, single-pass encoding.
10770	QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"H265QualityTuningLevel"`
10771
10772	// Settings for quality-defined variable bitrate encoding with the H.265 codec.
10773	// Use these settings only when you set QVBR for Rate control mode (RateControlMode).
10774	QvbrSettings *H265QvbrSettings `locationName:"qvbrSettings" type:"structure"`
10775
10776	// Use this setting to specify whether this output has a variable bitrate (VBR),
10777	// constant bitrate (CBR) or quality-defined variable bitrate (QVBR).
10778	RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"H265RateControlMode"`
10779
10780	// Specify Sample Adaptive Offset (SAO) filter strength. Adaptive mode dynamically
10781	// selects best strength based on content
10782	SampleAdaptiveOffsetFilterMode *string `locationName:"sampleAdaptiveOffsetFilterMode" type:"string" enum:"H265SampleAdaptiveOffsetFilterMode"`
10783
10784	// Use this setting for interlaced outputs, when your output frame rate is half
10785	// of your input frame rate. In this situation, choose Optimized interlacing
10786	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
10787	// case, each progressive frame from the input corresponds to an interlaced
10788	// field in the output. Keep the default value, Basic interlacing (INTERLACED),
10789	// for all other output frame rates. With basic interlacing, MediaConvert performs
10790	// any frame rate conversion first and then interlaces the frames. When you
10791	// choose Optimized interlacing and you set your output frame rate to a value
10792	// that isn't suitable for optimized interlacing, MediaConvert automatically
10793	// falls back to basic interlacing. Required settings: To use optimized interlacing,
10794	// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
10795	// use optimized interlacing for hard telecine outputs. You must also set Interlace
10796	// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
10797	ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"H265ScanTypeConversionMode"`
10798
10799	// Enable this setting to insert I-frames at scene changes that the service
10800	// automatically detects. This improves video quality and is enabled by default.
10801	// If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION)
10802	// for further video quality improvement. For more information about QVBR, see
10803	// https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.
10804	SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"H265SceneChangeDetect"`
10805
10806	// Number of slices per picture. Must be less than or equal to the number of
10807	// macroblock rows for progressive pictures, and less than or equal to half
10808	// the number of macroblock rows for interlaced pictures.
10809	Slices *int64 `locationName:"slices" min:"1" type:"integer"`
10810
10811	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
10812	// second (fps). Enable slow PAL to create a 25 fps output. When you enable
10813	// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
10814	// your audio to keep it synchronized with the video. Note that enabling this
10815	// setting will slightly reduce the duration of your video. Required settings:
10816	// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
10817	// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
10818	// 1.
10819	SlowPal *string `locationName:"slowPal" type:"string" enum:"H265SlowPal"`
10820
10821	// Keep the default value, Enabled (ENABLED), to adjust quantization within
10822	// each frame based on spatial variation of content complexity. When you enable
10823	// this feature, the encoder uses fewer bits on areas that can sustain more
10824	// distortion with no noticeable visual degradation and uses more bits on areas
10825	// where any small distortion will be noticeable. For example, complex textured
10826	// blocks are encoded with fewer bits and smooth textured blocks are encoded
10827	// with more bits. Enabling this feature will almost always improve your video
10828	// quality. Note, though, that this feature doesn't take into account where
10829	// the viewer's attention is likely to be. If viewers are likely to be focusing
10830	// their attention on a part of the screen with a lot of complex texture, you
10831	// might choose to disable this feature. Related setting: When you enable spatial
10832	// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization)
10833	// depending on your content. For homogeneous content, such as cartoons and
10834	// video games, set it to Low. For content with a wider variety of textures,
10835	// set it to High or Higher.
10836	SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"H265SpatialAdaptiveQuantization"`
10837
10838	// This field applies only if the Streams > Advanced > Framerate (framerate)
10839	// field is set to 29.970. This field works with the Streams > Advanced > Preprocessors
10840	// > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced
10841	// Mode field (interlace_mode) to identify the scan type for the output: Progressive,
10842	// Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output
10843	// from 23.976 input. - Soft: produces 23.976; the player converts this output
10844	// to 29.97i.
10845	Telecine *string `locationName:"telecine" type:"string" enum:"H265Telecine"`
10846
10847	// Keep the default value, Enabled (ENABLED), to adjust quantization within
10848	// each frame based on temporal variation of content complexity. When you enable
10849	// this feature, the encoder uses fewer bits on areas of the frame that aren't
10850	// moving and uses more bits on complex objects with sharp edges that move a
10851	// lot. For example, this feature improves the readability of text tickers on
10852	// newscasts and scoreboards on sports matches. Enabling this feature will almost
10853	// always improve your video quality. Note, though, that this feature doesn't
10854	// take into account where the viewer's attention is likely to be. If viewers
10855	// are likely to be focusing their attention on a part of the screen that doesn't
10856	// have moving objects with sharp edges, such as sports athletes' faces, you
10857	// might choose to disable this feature. Related setting: When you enable temporal
10858	// quantization, adjust the strength of the filter with the setting Adaptive
10859	// quantization (adaptiveQuantization).
10860	TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"H265TemporalAdaptiveQuantization"`
10861
10862	// Enables temporal layer identifiers in the encoded bitstream. Up to 3 layers
10863	// are supported depending on GOP structure: I- and P-frames form one layer,
10864	// reference B-frames can form a second layer and non-reference b-frames can
10865	// form a third layer. Decoders can optionally decode only the lower temporal
10866	// layers to generate a lower frame rate output. For example, given a bitstream
10867	// with temporal IDs and with b-frames = 1 (i.e. IbPbPb display order), a decoder
10868	// could decode all the frames for full frame rate output or only the I and
10869	// P frames (lowest temporal layer) for a half frame rate output.
10870	TemporalIds *string `locationName:"temporalIds" type:"string" enum:"H265TemporalIds"`
10871
10872	// Enable use of tiles, allowing horizontal as well as vertical subdivision
10873	// of the encoded pictures.
10874	Tiles *string `locationName:"tiles" type:"string" enum:"H265Tiles"`
10875
10876	// Inserts timecode for each frame as 4 bytes of an unregistered SEI message.
10877	UnregisteredSeiTimecode *string `locationName:"unregisteredSeiTimecode" type:"string" enum:"H265UnregisteredSeiTimecode"`
10878
10879	// If the location of parameter set NAL units doesn't matter in your workflow,
10880	// ignore this setting. Use this setting only with CMAF or DASH outputs, or
10881	// with standalone file outputs in an MPEG-4 container (MP4 outputs). Choose
10882	// HVC1 to mark your output as HVC1. This makes your output compliant with the
10883	// following specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15
10884	// 3rd Edition. For these outputs, the service stores parameter set NAL units
10885	// in the sample headers but not in the samples directly. For MP4 outputs, when
10886	// you choose HVC1, your output video might not work properly with some downstream
10887	// systems and video players. The service defaults to marking your output as
10888	// HEV1. For these outputs, the service writes parameter set NAL units directly
10889	// into the samples.
10890	WriteMp4PackagingType *string `locationName:"writeMp4PackagingType" type:"string" enum:"H265WriteMp4PackagingType"`
10891}
10892
// String returns a human-readable representation of the H265Settings value,
// rendered by awsutil.Prettify.
func (s H265Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is identical to String().
func (s H265Settings) GoString() string {
	return s.String()
}
10902
// Validate inspects the fields of the type to determine if they are valid.
// It enforces the minimum-value constraints from the API model (mirroring the
// `min` struct tags on the fields) and recursively validates the nested
// QvbrSettings. Unset (nil) fields are skipped. It returns a
// request.ErrInvalidParams aggregating every violation, or nil when all set
// fields are within range.
func (s *H265Settings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "H265Settings"}
	if s.Bitrate != nil && *s.Bitrate < 1000 {
		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 1000))
	}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1))
	}
	if s.MaxBitrate != nil && *s.MaxBitrate < 1000 {
		invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000))
	}
	if s.NumberReferenceFrames != nil && *s.NumberReferenceFrames < 1 {
		invalidParams.Add(request.NewErrParamMinValue("NumberReferenceFrames", 1))
	}
	if s.ParDenominator != nil && *s.ParDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1))
	}
	if s.ParNumerator != nil && *s.ParNumerator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1))
	}
	if s.Slices != nil && *s.Slices < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Slices", 1))
	}
	// Nested structures validate themselves; their errors are wrapped under
	// the field name so the caller sees the full parameter path.
	if s.QvbrSettings != nil {
		if err := s.QvbrSettings.Validate(); err != nil {
			invalidParams.AddNested("QvbrSettings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
10941
// SetAdaptiveQuantization sets the AdaptiveQuantization field's value and returns s to support call chaining.
func (s *H265Settings) SetAdaptiveQuantization(v string) *H265Settings {
	s.AdaptiveQuantization = &v
	return s
}

// SetAlternateTransferFunctionSei sets the AlternateTransferFunctionSei field's value and returns s to support call chaining.
func (s *H265Settings) SetAlternateTransferFunctionSei(v string) *H265Settings {
	s.AlternateTransferFunctionSei = &v
	return s
}

// SetBitrate sets the Bitrate field's value and returns s to support call chaining.
func (s *H265Settings) SetBitrate(v int64) *H265Settings {
	s.Bitrate = &v
	return s
}

// SetCodecLevel sets the CodecLevel field's value and returns s to support call chaining.
func (s *H265Settings) SetCodecLevel(v string) *H265Settings {
	s.CodecLevel = &v
	return s
}

// SetCodecProfile sets the CodecProfile field's value and returns s to support call chaining.
func (s *H265Settings) SetCodecProfile(v string) *H265Settings {
	s.CodecProfile = &v
	return s
}

// SetDynamicSubGop sets the DynamicSubGop field's value and returns s to support call chaining.
func (s *H265Settings) SetDynamicSubGop(v string) *H265Settings {
	s.DynamicSubGop = &v
	return s
}

// SetFlickerAdaptiveQuantization sets the FlickerAdaptiveQuantization field's value and returns s to support call chaining.
func (s *H265Settings) SetFlickerAdaptiveQuantization(v string) *H265Settings {
	s.FlickerAdaptiveQuantization = &v
	return s
}

// SetFramerateControl sets the FramerateControl field's value and returns s to support call chaining.
func (s *H265Settings) SetFramerateControl(v string) *H265Settings {
	s.FramerateControl = &v
	return s
}

// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value and returns s to support call chaining.
func (s *H265Settings) SetFramerateConversionAlgorithm(v string) *H265Settings {
	s.FramerateConversionAlgorithm = &v
	return s
}

// SetFramerateDenominator sets the FramerateDenominator field's value and returns s to support call chaining.
func (s *H265Settings) SetFramerateDenominator(v int64) *H265Settings {
	s.FramerateDenominator = &v
	return s
}

// SetFramerateNumerator sets the FramerateNumerator field's value and returns s to support call chaining.
func (s *H265Settings) SetFramerateNumerator(v int64) *H265Settings {
	s.FramerateNumerator = &v
	return s
}

// SetGopBReference sets the GopBReference field's value and returns s to support call chaining.
func (s *H265Settings) SetGopBReference(v string) *H265Settings {
	s.GopBReference = &v
	return s
}

// SetGopClosedCadence sets the GopClosedCadence field's value and returns s to support call chaining.
func (s *H265Settings) SetGopClosedCadence(v int64) *H265Settings {
	s.GopClosedCadence = &v
	return s
}

// SetGopSize sets the GopSize field's value and returns s to support call chaining.
func (s *H265Settings) SetGopSize(v float64) *H265Settings {
	s.GopSize = &v
	return s
}

// SetGopSizeUnits sets the GopSizeUnits field's value and returns s to support call chaining.
func (s *H265Settings) SetGopSizeUnits(v string) *H265Settings {
	s.GopSizeUnits = &v
	return s
}

// SetHrdBufferInitialFillPercentage sets the HrdBufferInitialFillPercentage field's value and returns s to support call chaining.
func (s *H265Settings) SetHrdBufferInitialFillPercentage(v int64) *H265Settings {
	s.HrdBufferInitialFillPercentage = &v
	return s
}

// SetHrdBufferSize sets the HrdBufferSize field's value and returns s to support call chaining.
func (s *H265Settings) SetHrdBufferSize(v int64) *H265Settings {
	s.HrdBufferSize = &v
	return s
}

// SetInterlaceMode sets the InterlaceMode field's value and returns s to support call chaining.
func (s *H265Settings) SetInterlaceMode(v string) *H265Settings {
	s.InterlaceMode = &v
	return s
}

// SetMaxBitrate sets the MaxBitrate field's value and returns s to support call chaining.
func (s *H265Settings) SetMaxBitrate(v int64) *H265Settings {
	s.MaxBitrate = &v
	return s
}

// SetMinIInterval sets the MinIInterval field's value and returns s to support call chaining.
func (s *H265Settings) SetMinIInterval(v int64) *H265Settings {
	s.MinIInterval = &v
	return s
}

// SetNumberBFramesBetweenReferenceFrames sets the NumberBFramesBetweenReferenceFrames field's value and returns s to support call chaining.
func (s *H265Settings) SetNumberBFramesBetweenReferenceFrames(v int64) *H265Settings {
	s.NumberBFramesBetweenReferenceFrames = &v
	return s
}

// SetNumberReferenceFrames sets the NumberReferenceFrames field's value and returns s to support call chaining.
func (s *H265Settings) SetNumberReferenceFrames(v int64) *H265Settings {
	s.NumberReferenceFrames = &v
	return s
}

// SetParControl sets the ParControl field's value and returns s to support call chaining.
func (s *H265Settings) SetParControl(v string) *H265Settings {
	s.ParControl = &v
	return s
}

// SetParDenominator sets the ParDenominator field's value and returns s to support call chaining.
func (s *H265Settings) SetParDenominator(v int64) *H265Settings {
	s.ParDenominator = &v
	return s
}

// SetParNumerator sets the ParNumerator field's value and returns s to support call chaining.
func (s *H265Settings) SetParNumerator(v int64) *H265Settings {
	s.ParNumerator = &v
	return s
}

// SetQualityTuningLevel sets the QualityTuningLevel field's value and returns s to support call chaining.
func (s *H265Settings) SetQualityTuningLevel(v string) *H265Settings {
	s.QualityTuningLevel = &v
	return s
}

// SetQvbrSettings sets the QvbrSettings field's value and returns s to support
// call chaining. The pointer is stored as-is (the settings are not copied).
func (s *H265Settings) SetQvbrSettings(v *H265QvbrSettings) *H265Settings {
	s.QvbrSettings = v
	return s
}

// SetRateControlMode sets the RateControlMode field's value and returns s to support call chaining.
func (s *H265Settings) SetRateControlMode(v string) *H265Settings {
	s.RateControlMode = &v
	return s
}

// SetSampleAdaptiveOffsetFilterMode sets the SampleAdaptiveOffsetFilterMode field's value and returns s to support call chaining.
func (s *H265Settings) SetSampleAdaptiveOffsetFilterMode(v string) *H265Settings {
	s.SampleAdaptiveOffsetFilterMode = &v
	return s
}

// SetScanTypeConversionMode sets the ScanTypeConversionMode field's value and returns s to support call chaining.
func (s *H265Settings) SetScanTypeConversionMode(v string) *H265Settings {
	s.ScanTypeConversionMode = &v
	return s
}

// SetSceneChangeDetect sets the SceneChangeDetect field's value and returns s to support call chaining.
func (s *H265Settings) SetSceneChangeDetect(v string) *H265Settings {
	s.SceneChangeDetect = &v
	return s
}

// SetSlices sets the Slices field's value and returns s to support call chaining.
func (s *H265Settings) SetSlices(v int64) *H265Settings {
	s.Slices = &v
	return s
}

// SetSlowPal sets the SlowPal field's value and returns s to support call chaining.
func (s *H265Settings) SetSlowPal(v string) *H265Settings {
	s.SlowPal = &v
	return s
}

// SetSpatialAdaptiveQuantization sets the SpatialAdaptiveQuantization field's value and returns s to support call chaining.
func (s *H265Settings) SetSpatialAdaptiveQuantization(v string) *H265Settings {
	s.SpatialAdaptiveQuantization = &v
	return s
}

// SetTelecine sets the Telecine field's value and returns s to support call chaining.
func (s *H265Settings) SetTelecine(v string) *H265Settings {
	s.Telecine = &v
	return s
}

// SetTemporalAdaptiveQuantization sets the TemporalAdaptiveQuantization field's value and returns s to support call chaining.
func (s *H265Settings) SetTemporalAdaptiveQuantization(v string) *H265Settings {
	s.TemporalAdaptiveQuantization = &v
	return s
}

// SetTemporalIds sets the TemporalIds field's value and returns s to support call chaining.
func (s *H265Settings) SetTemporalIds(v string) *H265Settings {
	s.TemporalIds = &v
	return s
}

// SetTiles sets the Tiles field's value and returns s to support call chaining.
func (s *H265Settings) SetTiles(v string) *H265Settings {
	s.Tiles = &v
	return s
}

// SetUnregisteredSeiTimecode sets the UnregisteredSeiTimecode field's value and returns s to support call chaining.
func (s *H265Settings) SetUnregisteredSeiTimecode(v string) *H265Settings {
	s.UnregisteredSeiTimecode = &v
	return s
}

// SetWriteMp4PackagingType sets the WriteMp4PackagingType field's value and returns s to support call chaining.
func (s *H265Settings) SetWriteMp4PackagingType(v string) *H265Settings {
	s.WriteMp4PackagingType = &v
	return s
}
11181
// Use these settings to specify static color calibration metadata, as defined
// by SMPTE ST 2086. These values don't affect the pixel values that are encoded
// in the video stream. They are intended to help the downstream video player
// display content in a way that reflects the intentions of the content
// creator.
type Hdr10Metadata struct {
	_ struct{} `type:"structure"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	BluePrimaryX *int64 `locationName:"bluePrimaryX" type:"integer"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	BluePrimaryY *int64 `locationName:"bluePrimaryY" type:"integer"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	GreenPrimaryX *int64 `locationName:"greenPrimaryX" type:"integer"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	GreenPrimaryY *int64 `locationName:"greenPrimaryY" type:"integer"`

	// Maximum light level among all samples in the coded video sequence, in units
	// of candelas per square meter. This setting doesn't have a default value;
	// you must specify a value that is suitable for the content.
	MaxContentLightLevel *int64 `locationName:"maxContentLightLevel" type:"integer"`

	// Maximum average light level of any frame in the coded video sequence, in
	// units of candelas per square meter. This setting doesn't have a default value;
	// you must specify a value that is suitable for the content.
	MaxFrameAverageLightLevel *int64 `locationName:"maxFrameAverageLightLevel" type:"integer"`

	// Nominal maximum mastering display luminance in units of 0.0001 candelas
	// per square meter.
	MaxLuminance *int64 `locationName:"maxLuminance" type:"integer"`

	// Nominal minimum mastering display luminance in units of 0.0001 candelas
	// per square meter.
	MinLuminance *int64 `locationName:"minLuminance" type:"integer"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	RedPrimaryX *int64 `locationName:"redPrimaryX" type:"integer"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	RedPrimaryY *int64 `locationName:"redPrimaryY" type:"integer"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	WhitePointX *int64 `locationName:"whitePointX" type:"integer"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	WhitePointY *int64 `locationName:"whitePointY" type:"integer"`
}
11248
// String returns a human-readable representation of the Hdr10Metadata value,
// rendered by awsutil.Prettify.
func (s Hdr10Metadata) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is identical to String().
func (s Hdr10Metadata) GoString() string {
	return s.String()
}
11258
// SetBluePrimaryX sets the BluePrimaryX field's value and returns s to support call chaining.
func (s *Hdr10Metadata) SetBluePrimaryX(v int64) *Hdr10Metadata {
	s.BluePrimaryX = &v
	return s
}

// SetBluePrimaryY sets the BluePrimaryY field's value and returns s to support call chaining.
func (s *Hdr10Metadata) SetBluePrimaryY(v int64) *Hdr10Metadata {
	s.BluePrimaryY = &v
	return s
}

// SetGreenPrimaryX sets the GreenPrimaryX field's value and returns s to support call chaining.
func (s *Hdr10Metadata) SetGreenPrimaryX(v int64) *Hdr10Metadata {
	s.GreenPrimaryX = &v
	return s
}

// SetGreenPrimaryY sets the GreenPrimaryY field's value and returns s to support call chaining.
func (s *Hdr10Metadata) SetGreenPrimaryY(v int64) *Hdr10Metadata {
	s.GreenPrimaryY = &v
	return s
}

// SetMaxContentLightLevel sets the MaxContentLightLevel field's value and returns s to support call chaining.
func (s *Hdr10Metadata) SetMaxContentLightLevel(v int64) *Hdr10Metadata {
	s.MaxContentLightLevel = &v
	return s
}

// SetMaxFrameAverageLightLevel sets the MaxFrameAverageLightLevel field's value and returns s to support call chaining.
func (s *Hdr10Metadata) SetMaxFrameAverageLightLevel(v int64) *Hdr10Metadata {
	s.MaxFrameAverageLightLevel = &v
	return s
}

// SetMaxLuminance sets the MaxLuminance field's value and returns s to support call chaining.
func (s *Hdr10Metadata) SetMaxLuminance(v int64) *Hdr10Metadata {
	s.MaxLuminance = &v
	return s
}

// SetMinLuminance sets the MinLuminance field's value and returns s to support call chaining.
func (s *Hdr10Metadata) SetMinLuminance(v int64) *Hdr10Metadata {
	s.MinLuminance = &v
	return s
}

// SetRedPrimaryX sets the RedPrimaryX field's value and returns s to support call chaining.
func (s *Hdr10Metadata) SetRedPrimaryX(v int64) *Hdr10Metadata {
	s.RedPrimaryX = &v
	return s
}

// SetRedPrimaryY sets the RedPrimaryY field's value and returns s to support call chaining.
func (s *Hdr10Metadata) SetRedPrimaryY(v int64) *Hdr10Metadata {
	s.RedPrimaryY = &v
	return s
}

// SetWhitePointX sets the WhitePointX field's value and returns s to support call chaining.
func (s *Hdr10Metadata) SetWhitePointX(v int64) *Hdr10Metadata {
	s.WhitePointX = &v
	return s
}

// SetWhitePointY sets the WhitePointY field's value and returns s to support call chaining.
func (s *Hdr10Metadata) SetWhitePointY(v int64) *Hdr10Metadata {
	s.WhitePointY = &v
	return s
}
11330
// Hdr10Plus holds the settings for HDR10+ metadata insertion. Both luminance
// values are expressed in nits, per SMPTE ST 2094-40.
type Hdr10Plus struct {
	_ struct{} `type:"structure"`

	// Specify the HDR10+ mastering display normalized peak luminance, in nits.
	// This is the normalized actual peak luminance of the mastering display, as
	// defined by ST 2094-40.
	MasteringMonitorNits *int64 `locationName:"masteringMonitorNits" type:"integer"`

	// Specify the HDR10+ target display nominal peak luminance, in nits. This is
	// the nominal maximum luminance of the target display as defined by ST 2094-40.
	TargetMonitorNits *int64 `locationName:"targetMonitorNits" type:"integer"`
}
11344
// String returns a human-readable representation of the Hdr10Plus value,
// rendered by awsutil.Prettify.
func (s Hdr10Plus) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is identical to String().
func (s Hdr10Plus) GoString() string {
	return s.String()
}
11354
// SetMasteringMonitorNits sets the MasteringMonitorNits field's value and returns s to support call chaining.
func (s *Hdr10Plus) SetMasteringMonitorNits(v int64) *Hdr10Plus {
	s.MasteringMonitorNits = &v
	return s
}

// SetTargetMonitorNits sets the TargetMonitorNits field's value and returns s to support call chaining.
func (s *Hdr10Plus) SetTargetMonitorNits(v int64) *Hdr10Plus {
	s.TargetMonitorNits = &v
	return s
}
11366
// HlsAdditionalManifest specifies the details for each additional HLS manifest
// that you want the service to generate for this output group. Each manifest
// can reference a different subset of outputs in the group.
type HlsAdditionalManifest struct {
	_ struct{} `type:"structure"`

	// Specify a name modifier that the service adds to the name of this manifest
	// to make it different from the file names of the other main manifests in the
	// output group. For example, say that the default main manifest for your HLS
	// group is film-name.m3u8. If you enter "-no-premium" for this setting, then
	// the file name the service generates for this top-level manifest is film-name-no-premium.m3u8.
	// For HLS output groups, specify a manifestNameModifier that is different from
	// the nameModifier of the output. The service uses the output name modifier
	// to create unique names for the individual variant manifests.
	// When set, the modifier must be at least 1 character (enforced by Validate).
	ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"`

	// Specify the outputs that you want this additional top-level manifest to reference.
	SelectedOutputs []*string `locationName:"selectedOutputs" type:"list"`
}
11386
// String returns a human-readable representation of the HlsAdditionalManifest
// value, rendered by awsutil.Prettify.
func (s HlsAdditionalManifest) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is identical to String().
func (s HlsAdditionalManifest) GoString() string {
	return s.String()
}
11396
// Validate inspects the fields of the type to determine if they are valid.
// The only constraint is the 1-character minimum length of ManifestNameModifier
// (mirroring its `min:"1"` struct tag); nil fields are skipped. It returns a
// request.ErrInvalidParams describing any violation, or nil if the value is valid.
func (s *HlsAdditionalManifest) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "HlsAdditionalManifest"}
	if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ManifestNameModifier", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
11409
// SetManifestNameModifier sets the ManifestNameModifier field's value and returns s to support call chaining.
func (s *HlsAdditionalManifest) SetManifestNameModifier(v string) *HlsAdditionalManifest {
	s.ManifestNameModifier = &v
	return s
}

// SetSelectedOutputs sets the SelectedOutputs field's value and returns s to
// support call chaining. The slice is stored as-is (not copied).
func (s *HlsAdditionalManifest) SetSelectedOutputs(v []*string) *HlsAdditionalManifest {
	s.SelectedOutputs = v
	return s
}
11421
// HlsCaptionLanguageMapping maps one caption channel to a language code and
// an optional description for HLS output (API model name: Caption Language
// Mapping).
type HlsCaptionLanguageMapping struct {
	_ struct{} `type:"structure"`

	// Caption channel.
	CaptionChannel *int64 `locationName:"captionChannel" type:"integer"`

	// Specify the language for this captions channel, using the ISO 639-2 or ISO
	// 639-3 three-letter language code
	CustomLanguageCode *string `locationName:"customLanguageCode" min:"3" type:"string"`

	// Specify the language, using the ISO 639-2 three-letter code listed at https://www.loc.gov/standards/iso639-2/php/code_list.php.
	LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"`

	// Caption language description.
	LanguageDescription *string `locationName:"languageDescription" type:"string"`
}
11439
// String returns a human-readable representation of the HlsCaptionLanguageMapping
// value, rendered by awsutil.Prettify.
func (s HlsCaptionLanguageMapping) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is identical to String().
func (s HlsCaptionLanguageMapping) GoString() string {
	return s.String()
}
11449
// Validate inspects the fields of the type to determine if they are valid.
// CaptionChannel is checked against its generated lower bound and
// CustomLanguageCode against its 3-character minimum length (mirroring its
// `min:"3"` struct tag); nil fields are skipped. It returns a
// request.ErrInvalidParams aggregating any violations, or nil if valid.
func (s *HlsCaptionLanguageMapping) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "HlsCaptionLanguageMapping"}
	// -2.147483648e+09 is exactly -2147483648, i.e. the int32 minimum that the
	// code generator emits as the lower bound for this integer field.
	if s.CaptionChannel != nil && *s.CaptionChannel < -2.147483648e+09 {
		invalidParams.Add(request.NewErrParamMinValue("CaptionChannel", -2.147483648e+09))
	}
	if s.CustomLanguageCode != nil && len(*s.CustomLanguageCode) < 3 {
		invalidParams.Add(request.NewErrParamMinLen("CustomLanguageCode", 3))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
11465
// SetCaptionChannel sets the CaptionChannel field's value and returns s to support call chaining.
func (s *HlsCaptionLanguageMapping) SetCaptionChannel(v int64) *HlsCaptionLanguageMapping {
	s.CaptionChannel = &v
	return s
}

// SetCustomLanguageCode sets the CustomLanguageCode field's value and returns s to support call chaining.
func (s *HlsCaptionLanguageMapping) SetCustomLanguageCode(v string) *HlsCaptionLanguageMapping {
	s.CustomLanguageCode = &v
	return s
}

// SetLanguageCode sets the LanguageCode field's value and returns s to support call chaining.
func (s *HlsCaptionLanguageMapping) SetLanguageCode(v string) *HlsCaptionLanguageMapping {
	s.LanguageCode = &v
	return s
}

// SetLanguageDescription sets the LanguageDescription field's value and returns s to support call chaining.
func (s *HlsCaptionLanguageMapping) SetLanguageDescription(v string) *HlsCaptionLanguageMapping {
	s.LanguageDescription = &v
	return s
}
11489
// HlsEncryptionSettings holds the settings for HLS segment encryption and DRM
// key acquisition (static key or SPEKE key provider).
type HlsEncryptionSettings struct {
	_ struct{} `type:"structure"`

	// This is a 128-bit, 16-byte hex value represented by a 32-character text string.
	// If this parameter is not set then the Initialization Vector will follow the
	// segment number by default. When set, the string must be at least 32 characters
	// (enforced by Validate).
	ConstantInitializationVector *string `locationName:"constantInitializationVector" min:"32" type:"string"`

	// Encrypts the segments with the given encryption scheme. Leave blank to disable.
	// Selecting 'Disabled' in the web interface also disables encryption.
	EncryptionMethod *string `locationName:"encryptionMethod" type:"string" enum:"HlsEncryptionType"`

	// The Initialization Vector is a 128-bit number used in conjunction with the
	// key for encrypting blocks. If set to INCLUDE, Initialization Vector is listed
	// in the manifest. Otherwise Initialization Vector is not in the manifest.
	InitializationVectorInManifest *string `locationName:"initializationVectorInManifest" type:"string" enum:"HlsInitializationVectorInManifest"`

	// Enable this setting to insert the EXT-X-SESSION-KEY element into the master
	// playlist. This allows for offline Apple HLS FairPlay content protection.
	OfflineEncrypted *string `locationName:"offlineEncrypted" type:"string" enum:"HlsOfflineEncrypted"`

	// If your output group type is HLS, DASH, or Microsoft Smooth, use these settings
	// when doing DRM encryption with a SPEKE-compliant key provider. If your output
	// group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
	SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure"`

	// Use these settings to set up encryption with a static key provider.
	StaticKeyProvider *StaticKeyProvider `locationName:"staticKeyProvider" type:"structure"`

	// Specify whether your DRM encryption key is static or from a key provider
	// that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.
	Type *string `locationName:"type" type:"string" enum:"HlsKeyProviderType"`
}
11524
// String returns a human-readable representation of the HlsEncryptionSettings
// value, rendered by awsutil.Prettify.
func (s HlsEncryptionSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is identical to String().
func (s HlsEncryptionSettings) GoString() string {
	return s.String()
}
11534
// Validate inspects the fields of the type to determine if they are valid.
// ConstantInitializationVector, when set, must be at least 32 characters (a
// 128-bit value in hex, mirroring the field's `min:"32"` struct tag); nil
// fields are skipped. It returns a request.ErrInvalidParams describing any
// violation, or nil if the value is valid.
func (s *HlsEncryptionSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "HlsEncryptionSettings"}
	if s.ConstantInitializationVector != nil && len(*s.ConstantInitializationVector) < 32 {
		invalidParams.Add(request.NewErrParamMinLen("ConstantInitializationVector", 32))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
11547
11548// SetConstantInitializationVector sets the ConstantInitializationVector field's value.
11549func (s *HlsEncryptionSettings) SetConstantInitializationVector(v string) *HlsEncryptionSettings {
11550	s.ConstantInitializationVector = &v
11551	return s
11552}
11553
11554// SetEncryptionMethod sets the EncryptionMethod field's value.
11555func (s *HlsEncryptionSettings) SetEncryptionMethod(v string) *HlsEncryptionSettings {
11556	s.EncryptionMethod = &v
11557	return s
11558}
11559
11560// SetInitializationVectorInManifest sets the InitializationVectorInManifest field's value.
11561func (s *HlsEncryptionSettings) SetInitializationVectorInManifest(v string) *HlsEncryptionSettings {
11562	s.InitializationVectorInManifest = &v
11563	return s
11564}
11565
11566// SetOfflineEncrypted sets the OfflineEncrypted field's value.
11567func (s *HlsEncryptionSettings) SetOfflineEncrypted(v string) *HlsEncryptionSettings {
11568	s.OfflineEncrypted = &v
11569	return s
11570}
11571
// SetSpekeKeyProvider sets the SpekeKeyProvider field's value and returns s
// so that setter calls can be chained.
func (s *HlsEncryptionSettings) SetSpekeKeyProvider(v *SpekeKeyProvider) *HlsEncryptionSettings {
	s.SpekeKeyProvider = v
	return s
}
11577
// SetStaticKeyProvider sets the StaticKeyProvider field's value and returns s
// so that setter calls can be chained.
func (s *HlsEncryptionSettings) SetStaticKeyProvider(v *StaticKeyProvider) *HlsEncryptionSettings {
	s.StaticKeyProvider = v
	return s
}
11583
11584// SetType sets the Type field's value.
11585func (s *HlsEncryptionSettings) SetType(v string) *HlsEncryptionSettings {
11586	s.Type = &v
11587	return s
11588}
11589
// Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set Type, under OutputGroupSettings, to
// HLS_GROUP_SETTINGS.
type HlsGroupSettings struct {
	_ struct{} `type:"structure"`

	// Choose one or more ad marker types to decorate your Apple HLS manifest. This
	// setting does not determine whether SCTE-35 markers appear in the outputs
	// themselves.
	AdMarkers []*string `locationName:"adMarkers" type:"list"`

	// By default, the service creates one top-level .m3u8 HLS manifest for each
	// HLS output group in your job. This default manifest references every output
	// in the output group. To create additional top-level manifests that reference
	// a subset of the outputs in the output group, specify a list of them here.
	AdditionalManifests []*HlsAdditionalManifest `locationName:"additionalManifests" type:"list"`

	// Ignore this setting unless you are using FairPlay DRM with Verimatrix and
	// you encounter playback issues. Keep the default value, Include (INCLUDE),
	// to output audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only
	// headers from your audio segments.
	AudioOnlyHeader *string `locationName:"audioOnlyHeader" type:"string" enum:"HlsAudioOnlyHeader"`

	// A partial URI prefix that will be prepended to each output in the media .m3u8
	// file. Can be used if base manifest is delivered from a different URL than
	// the main .m3u8 file.
	BaseUrl *string `locationName:"baseUrl" type:"string"`

	// Language to be used on Caption outputs.
	CaptionLanguageMappings []*HlsCaptionLanguageMapping `locationName:"captionLanguageMappings" type:"list"`

	// Applies only to 608 Embedded output captions. Insert: Include CLOSED-CAPTIONS
	// lines in the manifest. Specify at least one language in the CC1 Language
	// Code field. One CLOSED-CAPTION line is added for each Language Code you specify.
	// Make sure to specify the languages in the order in which they appear in the
	// original source (if the source is embedded format) or the order of the caption
	// selectors (if the source is other than embedded). Otherwise, languages in
	// the manifest will not match up properly with the output captions. None: Include
	// CLOSED-CAPTIONS=NONE line in the manifest. Omit: Omit any CLOSED-CAPTIONS
	// line from the manifest.
	CaptionLanguageSetting *string `locationName:"captionLanguageSetting" type:"string" enum:"HlsCaptionLanguageSetting"`

	// Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no
	// tag. Otherwise, keep the default value Enabled (ENABLED) and control caching
	// in your video distribution set up. For example, use the Cache-Control http
	// header.
	ClientCache *string `locationName:"clientCache" type:"string" enum:"HlsClientCache"`

	// Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist
	// generation.
	CodecSpecification *string `locationName:"codecSpecification" type:"string" enum:"HlsCodecSpecification"`

	// Use Destination (Destination) to specify the S3 output location and the output
	// filename base. Destination accepts format identifiers. If you do not specify
	// the base filename in the URI, the service will use the filename of the input
	// file. If your job has multiple inputs, the service uses the filename of the
	// first input file.
	Destination *string `locationName:"destination" type:"string"`

	// Settings associated with the destination. Will vary based on the type of
	// destination.
	DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"`

	// Indicates whether segments should be placed in subdirectories.
	DirectoryStructure *string `locationName:"directoryStructure" type:"string" enum:"HlsDirectoryStructure"`

	// DRM settings.
	Encryption *HlsEncryptionSettings `locationName:"encryption" type:"structure"`

	// Specify whether MediaConvert generates images for trick play. Keep the default
	// value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL)
	// to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME)
	// to generate tiled thumbnails and full-resolution images of single frames.
	// MediaConvert creates a child manifest for each set of images that you generate
	// and adds corresponding entries to the parent manifest. A common application
	// for these images is Roku trick mode. The thumbnails and full-frame images
	// that MediaConvert creates with this feature are compatible with this Roku
	// specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md
	ImageBasedTrickPlay *string `locationName:"imageBasedTrickPlay" type:"string" enum:"HlsImageBasedTrickPlay"`

	// When set to GZIP, compresses HLS playlist.
	ManifestCompression *string `locationName:"manifestCompression" type:"string" enum:"HlsManifestCompression"`

	// Indicates whether the output manifest should use floating point values for
	// segment duration.
	ManifestDurationFormat *string `locationName:"manifestDurationFormat" type:"string" enum:"HlsManifestDurationFormat"`

	// Keep this setting at the default value of 0, unless you are troubleshooting
	// a problem with how devices play back the end of your video asset. If you
	// know that player devices are hanging on the final segment of your video because
	// the length of your final segment is too short, use this setting to specify
	// a minimum final segment length, in seconds. Choose a value that is greater
	// than or equal to 1 and less than your segment length. When you specify a
	// value for this setting, the encoder will combine any final segment that is
	// shorter than the length that you specify with the previous segment. For example,
	// your segment length is 3 seconds and your final segment is .5 seconds without
	// a minimum final segment length; when you set the minimum final segment length
	// to 1, your final segment is 3.5 seconds.
	MinFinalSegmentLength *float64 `locationName:"minFinalSegmentLength" type:"double"`

	// When set, Minimum Segment Size is enforced by looking ahead and back within
	// the specified range for a nearby avail and extending the segment size if
	// needed.
	MinSegmentLength *int64 `locationName:"minSegmentLength" type:"integer"`

	// Indicates whether the .m3u8 manifest file should be generated for this HLS
	// output group.
	OutputSelection *string `locationName:"outputSelection" type:"string" enum:"HlsOutputSelection"`

	// Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files.
	// The value is calculated as follows: either the program date and time are
	// initialized using the input timecode source, or the time is initialized using
	// the input timecode source and the date is initialized using the timestamp_offset.
	ProgramDateTime *string `locationName:"programDateTime" type:"string" enum:"HlsProgramDateTime"`

	// Period of insertion of EXT-X-PROGRAM-DATE-TIME entry, in seconds.
	ProgramDateTimePeriod *int64 `locationName:"programDateTimePeriod" type:"integer"`

	// When set to SINGLE_FILE, emits program as a single media resource (.ts) file,
	// uses #EXT-X-BYTERANGE tags to index segment for playback.
	SegmentControl *string `locationName:"segmentControl" type:"string" enum:"HlsSegmentControl"`

	// Length of MPEG-2 Transport Stream segments to create (in seconds). Note that
	// segments will end on the next keyframe after this number of seconds, so actual
	// segment length may be longer.
	SegmentLength *int64 `locationName:"segmentLength" min:"1" type:"integer"`

	// Number of segments to write to a subdirectory before starting a new one.
	// directoryStructure must be SINGLE_DIRECTORY for this setting to have an effect.
	SegmentsPerSubdirectory *int64 `locationName:"segmentsPerSubdirectory" min:"1" type:"integer"`

	// Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag
	// of variant manifest.
	StreamInfResolution *string `locationName:"streamInfResolution" type:"string" enum:"HlsStreamInfResolution"`

	// When set to LEGACY, the segment target duration is always rounded up to the
	// nearest integer value above its current value in seconds. When set to SPEC\\_COMPLIANT,
	// the segment target duration is rounded up to the nearest integer value if
	// fraction seconds are greater than or equal to 0.5 (>= 0.5) and rounded down
	// if less than 0.5 (< 0.5). You may need to use LEGACY if your client needs
	// to ensure that the target duration is always longer than the actual duration
	// of the segment. Some older players may experience interrupted playback when
	// the actual duration of a track in a segment is longer than the target duration.
	TargetDurationCompatibilityMode *string `locationName:"targetDurationCompatibilityMode" type:"string" enum:"HlsTargetDurationCompatibilityMode"`

	// Indicates ID3 frame that has the timecode.
	TimedMetadataId3Frame *string `locationName:"timedMetadataId3Frame" type:"string" enum:"HlsTimedMetadataId3Frame"`

	// Timed Metadata interval in seconds.
	TimedMetadataId3Period *int64 `locationName:"timedMetadataId3Period" type:"integer"`

	// Provides an extra millisecond delta offset to fine tune the timestamps.
	TimestampDeltaMilliseconds *int64 `locationName:"timestampDeltaMilliseconds" type:"integer"`
}
11745
// String returns the string representation of s, rendered with awsutil.Prettify;
// intended for logging and debugging.
func (s HlsGroupSettings) String() string {
	return awsutil.Prettify(s)
}
11750
// GoString returns the string representation of s. It satisfies fmt.GoStringer
// (the "%#v" verb) and produces the same output as String.
func (s HlsGroupSettings) GoString() string {
	return s.String()
}
11755
11756// Validate inspects the fields of the type to determine if they are valid.
11757func (s *HlsGroupSettings) Validate() error {
11758	invalidParams := request.ErrInvalidParams{Context: "HlsGroupSettings"}
11759	if s.SegmentLength != nil && *s.SegmentLength < 1 {
11760		invalidParams.Add(request.NewErrParamMinValue("SegmentLength", 1))
11761	}
11762	if s.SegmentsPerSubdirectory != nil && *s.SegmentsPerSubdirectory < 1 {
11763		invalidParams.Add(request.NewErrParamMinValue("SegmentsPerSubdirectory", 1))
11764	}
11765	if s.TimedMetadataId3Period != nil && *s.TimedMetadataId3Period < -2.147483648e+09 {
11766		invalidParams.Add(request.NewErrParamMinValue("TimedMetadataId3Period", -2.147483648e+09))
11767	}
11768	if s.TimestampDeltaMilliseconds != nil && *s.TimestampDeltaMilliseconds < -2.147483648e+09 {
11769		invalidParams.Add(request.NewErrParamMinValue("TimestampDeltaMilliseconds", -2.147483648e+09))
11770	}
11771	if s.AdditionalManifests != nil {
11772		for i, v := range s.AdditionalManifests {
11773			if v == nil {
11774				continue
11775			}
11776			if err := v.Validate(); err != nil {
11777				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(request.ErrInvalidParams))
11778			}
11779		}
11780	}
11781	if s.CaptionLanguageMappings != nil {
11782		for i, v := range s.CaptionLanguageMappings {
11783			if v == nil {
11784				continue
11785			}
11786			if err := v.Validate(); err != nil {
11787				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionLanguageMappings", i), err.(request.ErrInvalidParams))
11788			}
11789		}
11790	}
11791	if s.Encryption != nil {
11792		if err := s.Encryption.Validate(); err != nil {
11793			invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams))
11794		}
11795	}
11796
11797	if invalidParams.Len() > 0 {
11798		return invalidParams
11799	}
11800	return nil
11801}
11802
// SetAdMarkers sets the AdMarkers field's value and returns s so that setter
// calls can be chained. The slice is stored as-is (not copied).
func (s *HlsGroupSettings) SetAdMarkers(v []*string) *HlsGroupSettings {
	s.AdMarkers = v
	return s
}
11808
// SetAdditionalManifests sets the AdditionalManifests field's value and returns
// s so that setter calls can be chained. The slice is stored as-is (not copied).
func (s *HlsGroupSettings) SetAdditionalManifests(v []*HlsAdditionalManifest) *HlsGroupSettings {
	s.AdditionalManifests = v
	return s
}
11814
11815// SetAudioOnlyHeader sets the AudioOnlyHeader field's value.
11816func (s *HlsGroupSettings) SetAudioOnlyHeader(v string) *HlsGroupSettings {
11817	s.AudioOnlyHeader = &v
11818	return s
11819}
11820
11821// SetBaseUrl sets the BaseUrl field's value.
11822func (s *HlsGroupSettings) SetBaseUrl(v string) *HlsGroupSettings {
11823	s.BaseUrl = &v
11824	return s
11825}
11826
// SetCaptionLanguageMappings sets the CaptionLanguageMappings field's value and
// returns s so that setter calls can be chained. The slice is stored as-is.
func (s *HlsGroupSettings) SetCaptionLanguageMappings(v []*HlsCaptionLanguageMapping) *HlsGroupSettings {
	s.CaptionLanguageMappings = v
	return s
}
11832
11833// SetCaptionLanguageSetting sets the CaptionLanguageSetting field's value.
11834func (s *HlsGroupSettings) SetCaptionLanguageSetting(v string) *HlsGroupSettings {
11835	s.CaptionLanguageSetting = &v
11836	return s
11837}
11838
11839// SetClientCache sets the ClientCache field's value.
11840func (s *HlsGroupSettings) SetClientCache(v string) *HlsGroupSettings {
11841	s.ClientCache = &v
11842	return s
11843}
11844
11845// SetCodecSpecification sets the CodecSpecification field's value.
11846func (s *HlsGroupSettings) SetCodecSpecification(v string) *HlsGroupSettings {
11847	s.CodecSpecification = &v
11848	return s
11849}
11850
11851// SetDestination sets the Destination field's value.
11852func (s *HlsGroupSettings) SetDestination(v string) *HlsGroupSettings {
11853	s.Destination = &v
11854	return s
11855}
11856
// SetDestinationSettings sets the DestinationSettings field's value and returns
// s so that setter calls can be chained.
func (s *HlsGroupSettings) SetDestinationSettings(v *DestinationSettings) *HlsGroupSettings {
	s.DestinationSettings = v
	return s
}
11862
11863// SetDirectoryStructure sets the DirectoryStructure field's value.
11864func (s *HlsGroupSettings) SetDirectoryStructure(v string) *HlsGroupSettings {
11865	s.DirectoryStructure = &v
11866	return s
11867}
11868
// SetEncryption sets the Encryption field's value and returns s so that setter
// calls can be chained.
func (s *HlsGroupSettings) SetEncryption(v *HlsEncryptionSettings) *HlsGroupSettings {
	s.Encryption = v
	return s
}
11874
11875// SetImageBasedTrickPlay sets the ImageBasedTrickPlay field's value.
11876func (s *HlsGroupSettings) SetImageBasedTrickPlay(v string) *HlsGroupSettings {
11877	s.ImageBasedTrickPlay = &v
11878	return s
11879}
11880
11881// SetManifestCompression sets the ManifestCompression field's value.
11882func (s *HlsGroupSettings) SetManifestCompression(v string) *HlsGroupSettings {
11883	s.ManifestCompression = &v
11884	return s
11885}
11886
11887// SetManifestDurationFormat sets the ManifestDurationFormat field's value.
11888func (s *HlsGroupSettings) SetManifestDurationFormat(v string) *HlsGroupSettings {
11889	s.ManifestDurationFormat = &v
11890	return s
11891}
11892
11893// SetMinFinalSegmentLength sets the MinFinalSegmentLength field's value.
11894func (s *HlsGroupSettings) SetMinFinalSegmentLength(v float64) *HlsGroupSettings {
11895	s.MinFinalSegmentLength = &v
11896	return s
11897}
11898
11899// SetMinSegmentLength sets the MinSegmentLength field's value.
11900func (s *HlsGroupSettings) SetMinSegmentLength(v int64) *HlsGroupSettings {
11901	s.MinSegmentLength = &v
11902	return s
11903}
11904
11905// SetOutputSelection sets the OutputSelection field's value.
11906func (s *HlsGroupSettings) SetOutputSelection(v string) *HlsGroupSettings {
11907	s.OutputSelection = &v
11908	return s
11909}
11910
11911// SetProgramDateTime sets the ProgramDateTime field's value.
11912func (s *HlsGroupSettings) SetProgramDateTime(v string) *HlsGroupSettings {
11913	s.ProgramDateTime = &v
11914	return s
11915}
11916
11917// SetProgramDateTimePeriod sets the ProgramDateTimePeriod field's value.
11918func (s *HlsGroupSettings) SetProgramDateTimePeriod(v int64) *HlsGroupSettings {
11919	s.ProgramDateTimePeriod = &v
11920	return s
11921}
11922
11923// SetSegmentControl sets the SegmentControl field's value.
11924func (s *HlsGroupSettings) SetSegmentControl(v string) *HlsGroupSettings {
11925	s.SegmentControl = &v
11926	return s
11927}
11928
11929// SetSegmentLength sets the SegmentLength field's value.
11930func (s *HlsGroupSettings) SetSegmentLength(v int64) *HlsGroupSettings {
11931	s.SegmentLength = &v
11932	return s
11933}
11934
11935// SetSegmentsPerSubdirectory sets the SegmentsPerSubdirectory field's value.
11936func (s *HlsGroupSettings) SetSegmentsPerSubdirectory(v int64) *HlsGroupSettings {
11937	s.SegmentsPerSubdirectory = &v
11938	return s
11939}
11940
11941// SetStreamInfResolution sets the StreamInfResolution field's value.
11942func (s *HlsGroupSettings) SetStreamInfResolution(v string) *HlsGroupSettings {
11943	s.StreamInfResolution = &v
11944	return s
11945}
11946
11947// SetTargetDurationCompatibilityMode sets the TargetDurationCompatibilityMode field's value.
11948func (s *HlsGroupSettings) SetTargetDurationCompatibilityMode(v string) *HlsGroupSettings {
11949	s.TargetDurationCompatibilityMode = &v
11950	return s
11951}
11952
11953// SetTimedMetadataId3Frame sets the TimedMetadataId3Frame field's value.
11954func (s *HlsGroupSettings) SetTimedMetadataId3Frame(v string) *HlsGroupSettings {
11955	s.TimedMetadataId3Frame = &v
11956	return s
11957}
11958
11959// SetTimedMetadataId3Period sets the TimedMetadataId3Period field's value.
11960func (s *HlsGroupSettings) SetTimedMetadataId3Period(v int64) *HlsGroupSettings {
11961	s.TimedMetadataId3Period = &v
11962	return s
11963}
11964
11965// SetTimestampDeltaMilliseconds sets the TimestampDeltaMilliseconds field's value.
11966func (s *HlsGroupSettings) SetTimestampDeltaMilliseconds(v int64) *HlsGroupSettings {
11967	s.TimestampDeltaMilliseconds = &v
11968	return s
11969}
11970
// Settings specific to audio sources in an HLS alternate rendition group. Specify
// the properties (renditionGroupId, renditionName or renditionLanguageCode)
// to identify the unique audio track among the alternative rendition groups
// present in the HLS manifest. If no unique track is found, or multiple tracks
// match the properties provided, the job fails. If no properties in hlsRenditionGroupSettings
// are specified, the default audio track within the video segment is chosen.
// If there is no audio within video segment, the alternative audio with DEFAULT=YES
// is chosen instead.
type HlsRenditionGroupSettings struct {
	_ struct{} `type:"structure"`

	// Optional. Specify alternative group ID.
	RenditionGroupId *string `locationName:"renditionGroupId" type:"string"`

	// Optional. Specify ISO 639-2 or ISO 639-3 code in the language property.
	RenditionLanguageCode *string `locationName:"renditionLanguageCode" type:"string" enum:"LanguageCode"`

	// Optional. Specify media name.
	RenditionName *string `locationName:"renditionName" type:"string"`
}
11991
// String returns the string representation of s, rendered with awsutil.Prettify;
// intended for logging and debugging.
func (s HlsRenditionGroupSettings) String() string {
	return awsutil.Prettify(s)
}
11996
// GoString returns the string representation of s. It satisfies fmt.GoStringer
// (the "%#v" verb) and produces the same output as String.
func (s HlsRenditionGroupSettings) GoString() string {
	return s.String()
}
12001
12002// SetRenditionGroupId sets the RenditionGroupId field's value.
12003func (s *HlsRenditionGroupSettings) SetRenditionGroupId(v string) *HlsRenditionGroupSettings {
12004	s.RenditionGroupId = &v
12005	return s
12006}
12007
12008// SetRenditionLanguageCode sets the RenditionLanguageCode field's value.
12009func (s *HlsRenditionGroupSettings) SetRenditionLanguageCode(v string) *HlsRenditionGroupSettings {
12010	s.RenditionLanguageCode = &v
12011	return s
12012}
12013
12014// SetRenditionName sets the RenditionName field's value.
12015func (s *HlsRenditionGroupSettings) SetRenditionName(v string) *HlsRenditionGroupSettings {
12016	s.RenditionName = &v
12017	return s
12018}
12019
// Settings for HLS output groups.
type HlsSettings struct {
	_ struct{} `type:"structure"`

	// Specifies the group to which the audio rendition belongs.
	AudioGroupId *string `locationName:"audioGroupId" type:"string"`

	// Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream
	// (M2TS) to create a file in an MPEG2-TS container. Keep the default value
	// Automatic (AUTOMATIC) to create an audio-only file in a raw container. Regardless
	// of the value that you specify here, if this output has video, the service
	// will place the output into an MPEG2-TS container.
	AudioOnlyContainer *string `locationName:"audioOnlyContainer" type:"string" enum:"HlsAudioOnlyContainer"`

	// List all the audio groups that are used with the video output stream. Input
	// all the audio GROUP-IDs that are associated to the video, separated by ','.
	AudioRenditionSets *string `locationName:"audioRenditionSets" type:"string"`

	// Four types of audio-only tracks are supported: Audio-Only Variant Stream
	// The client can play back this audio-only stream instead of video in low-bandwidth
	// scenarios. Represented as an EXT-X-STREAM-INF in the HLS manifest. Alternate
	// Audio, Auto Select, Default Alternate rendition that the client should try
	// to play back by default. Represented as an EXT-X-MEDIA in the HLS manifest
	// with DEFAULT=YES, AUTOSELECT=YES Alternate Audio, Auto Select, Not Default
	// Alternate rendition that the client may try to play back by default. Represented
	// as an EXT-X-MEDIA in the HLS manifest with DEFAULT=NO, AUTOSELECT=YES Alternate
	// Audio, not Auto Select Alternate rendition that the client will not try to
	// play back by default. Represented as an EXT-X-MEDIA in the HLS manifest with
	// DEFAULT=NO, AUTOSELECT=NO
	AudioTrackType *string `locationName:"audioTrackType" type:"string" enum:"HlsAudioTrackType"`

	// Specify whether to flag this audio track as descriptive video service (DVS)
	// in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes
	// the parameter CHARACTERISTICS="public.accessibility.describes-video" in the
	// EXT-X-MEDIA entry for this track. When you keep the default choice, Don't
	// flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can
	// help with accessibility on Apple devices. For more information, see the Apple
	// documentation.
	DescriptiveVideoServiceFlag *string `locationName:"descriptiveVideoServiceFlag" type:"string" enum:"HlsDescriptiveVideoServiceFlag"`

	// Choose Include (INCLUDE) to have MediaConvert generate a child manifest that
	// lists only the I-frames for this rendition, in addition to your regular manifest
	// for this rendition. You might use this manifest as part of a workflow that
	// creates preview functions for your video. MediaConvert adds both the I-frame
	// only child manifest and the regular child manifest to the parent manifest.
	// When you don't need the I-frame only child manifest, keep the default value
	// Exclude (EXCLUDE).
	IFrameOnlyManifest *string `locationName:"iFrameOnlyManifest" type:"string" enum:"HlsIFrameOnlyManifest"`

	// Use this setting to add an identifying string to the filename of each segment.
	// The service adds this string between the name modifier and segment index
	// number. You can use format identifiers in the string. For more information,
	// see https://docs.aws.amazon.com/mediaconvert/latest/ug/using-variables-in-your-job-settings.html
	SegmentModifier *string `locationName:"segmentModifier" type:"string"`
}
12075
// String returns the string representation of s, rendered with awsutil.Prettify;
// intended for logging and debugging.
func (s HlsSettings) String() string {
	return awsutil.Prettify(s)
}
12080
// GoString returns the string representation of s. It satisfies fmt.GoStringer
// (the "%#v" verb) and produces the same output as String.
func (s HlsSettings) GoString() string {
	return s.String()
}
12085
12086// SetAudioGroupId sets the AudioGroupId field's value.
12087func (s *HlsSettings) SetAudioGroupId(v string) *HlsSettings {
12088	s.AudioGroupId = &v
12089	return s
12090}
12091
12092// SetAudioOnlyContainer sets the AudioOnlyContainer field's value.
12093func (s *HlsSettings) SetAudioOnlyContainer(v string) *HlsSettings {
12094	s.AudioOnlyContainer = &v
12095	return s
12096}
12097
12098// SetAudioRenditionSets sets the AudioRenditionSets field's value.
12099func (s *HlsSettings) SetAudioRenditionSets(v string) *HlsSettings {
12100	s.AudioRenditionSets = &v
12101	return s
12102}
12103
12104// SetAudioTrackType sets the AudioTrackType field's value.
12105func (s *HlsSettings) SetAudioTrackType(v string) *HlsSettings {
12106	s.AudioTrackType = &v
12107	return s
12108}
12109
12110// SetDescriptiveVideoServiceFlag sets the DescriptiveVideoServiceFlag field's value.
12111func (s *HlsSettings) SetDescriptiveVideoServiceFlag(v string) *HlsSettings {
12112	s.DescriptiveVideoServiceFlag = &v
12113	return s
12114}
12115
12116// SetIFrameOnlyManifest sets the IFrameOnlyManifest field's value.
12117func (s *HlsSettings) SetIFrameOnlyManifest(v string) *HlsSettings {
12118	s.IFrameOnlyManifest = &v
12119	return s
12120}
12121
12122// SetSegmentModifier sets the SegmentModifier field's value.
12123func (s *HlsSettings) SetSegmentModifier(v string) *HlsSettings {
12124	s.SegmentModifier = &v
12125	return s
12126}
12127
// Optional. Configuration for a destination queue to which the job can hop
// once a customer-defined minimum wait time has passed.
type HopDestination struct {
	_ struct{} `type:"structure"`

	// Optional. When you set up a job to use queue hopping, you can specify a different
	// relative priority for the job in the destination queue. If you don't specify,
	// the relative priority will remain the same as in the previous queue.
	Priority *int64 `locationName:"priority" type:"integer"`

	// Optional unless the job is submitted on the default queue. When you set up
	// a job to use queue hopping, you can specify a destination queue. This queue
	// cannot be the original queue to which the job is submitted. If the original
	// queue isn't the default queue and you don't specify the destination queue,
	// the job will move to the default queue.
	Queue *string `locationName:"queue" type:"string"`

	// Required for setting up a job to use queue hopping. Minimum wait time in
	// minutes until the job can hop to the destination queue. Valid range is 1
	// to 1440 minutes, inclusive.
	// NOTE(review): the 1-1440 range is documented here but is not enforced
	// client-side by HopDestination.Validate, which only checks Priority.
	WaitMinutes *int64 `locationName:"waitMinutes" type:"integer"`
}
12150
// String returns the string representation of s, rendered with awsutil.Prettify;
// intended for logging and debugging.
func (s HopDestination) String() string {
	return awsutil.Prettify(s)
}
12155
// GoString returns the string representation of s. It satisfies fmt.GoStringer
// (the "%#v" verb) and produces the same output as String.
func (s HopDestination) GoString() string {
	return s.String()
}
12160
12161// Validate inspects the fields of the type to determine if they are valid.
12162func (s *HopDestination) Validate() error {
12163	invalidParams := request.ErrInvalidParams{Context: "HopDestination"}
12164	if s.Priority != nil && *s.Priority < -50 {
12165		invalidParams.Add(request.NewErrParamMinValue("Priority", -50))
12166	}
12167
12168	if invalidParams.Len() > 0 {
12169		return invalidParams
12170	}
12171	return nil
12172}
12173
12174// SetPriority sets the Priority field's value.
12175func (s *HopDestination) SetPriority(v int64) *HopDestination {
12176	s.Priority = &v
12177	return s
12178}
12179
12180// SetQueue sets the Queue field's value.
12181func (s *HopDestination) SetQueue(v string) *HopDestination {
12182	s.Queue = &v
12183	return s
12184}
12185
12186// SetWaitMinutes sets the WaitMinutes field's value.
12187func (s *HopDestination) SetWaitMinutes(v int64) *HopDestination {
12188	s.WaitMinutes = &v
12189	return s
12190}
12191
// To insert ID3 tags in your output, specify two values. Use ID3 tag (Id3)
// to specify the base 64 encoded string and use Timecode (TimeCode) to specify
// the time when the tag should be inserted. To insert multiple ID3 tags in
// your output, create multiple instances of ID3 insertion (Id3Insertion).
type Id3Insertion struct {
	_ struct{} `type:"structure"`

	// Use ID3 tag (Id3) to provide a tag value in base64-encoded format.
	Id3 *string `locationName:"id3" type:"string"`

	// Provide a Timecode (TimeCode) in HH:MM:SS:FF or HH:MM:SS;FF format.
	Timecode *string `locationName:"timecode" type:"string"`
}
12205
// String returns the string representation of s, rendered with awsutil.Prettify;
// intended for logging and debugging.
func (s Id3Insertion) String() string {
	return awsutil.Prettify(s)
}
12210
// GoString returns the string representation of s. It satisfies fmt.GoStringer
// (the "%#v" verb) and produces the same output as String.
func (s Id3Insertion) GoString() string {
	return s.String()
}
12215
12216// SetId3 sets the Id3 field's value.
12217func (s *Id3Insertion) SetId3(v string) *Id3Insertion {
12218	s.Id3 = &v
12219	return s
12220}
12221
12222// SetTimecode sets the Timecode field's value.
12223func (s *Id3Insertion) SetTimecode(v string) *Id3Insertion {
12224	s.Timecode = &v
12225	return s
12226}
12227
// Use the image inserter feature to include a graphic overlay on your video.
// Enable or disable this feature for each input or output individually. For
// more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/graphic-overlay.html.
// This setting is disabled by default.
type ImageInserter struct {
	_ struct{} `type:"structure"`

	// Specify the images that you want to overlay on your video. The images must
	// be PNG or TGA files. Each non-nil entry is checked client-side by
	// ImageInserter.Validate.
	InsertableImages []*InsertableImage `locationName:"insertableImages" type:"list"`
}
12239
// String returns the string representation of s, rendered with awsutil.Prettify;
// intended for logging and debugging.
func (s ImageInserter) String() string {
	return awsutil.Prettify(s)
}
12244
// GoString returns the string representation of s. It satisfies fmt.GoStringer
// (the "%#v" verb) and produces the same output as String.
func (s ImageInserter) GoString() string {
	return s.String()
}
12249
12250// Validate inspects the fields of the type to determine if they are valid.
12251func (s *ImageInserter) Validate() error {
12252	invalidParams := request.ErrInvalidParams{Context: "ImageInserter"}
12253	if s.InsertableImages != nil {
12254		for i, v := range s.InsertableImages {
12255			if v == nil {
12256				continue
12257			}
12258			if err := v.Validate(); err != nil {
12259				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InsertableImages", i), err.(request.ErrInvalidParams))
12260			}
12261		}
12262	}
12263
12264	if invalidParams.Len() > 0 {
12265		return invalidParams
12266	}
12267	return nil
12268}
12269
12270// SetInsertableImages sets the InsertableImages field's value.
12271func (s *ImageInserter) SetInsertableImages(v []*InsertableImage) *ImageInserter {
12272	s.InsertableImages = v
12273	return s
12274}
12275
// Settings related to IMSC captions. IMSC is a sidecar format that holds captions
// in a file that is separate from the video container. Set up sidecar captions
// in the same output group, but different output from your video. For more
// information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set destinationType to IMSC.
type ImscDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Keep this setting enabled to have MediaConvert use the font style and position
	// information from the captions source in the output. This option is available
	// only when your input captions are IMSC, SMPTE-TT, or TTML. Disable this setting
	// for simplified output captions.
	StylePassthrough *string `locationName:"stylePassthrough" type:"string" enum:"ImscStylePassthrough"`
}

// String returns a human-readable representation of the ImscDestinationSettings
// value, built with awsutil.Prettify.
func (s ImscDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s ImscDestinationSettings) GoString() string {
	return s.String()
}

// SetStylePassthrough sets the StylePassthrough field's value and returns s
// for call chaining.
func (s *ImscDestinationSettings) SetStylePassthrough(v string) *ImscDestinationSettings {
	s.StylePassthrough = &v
	return s
}
12307
// Use inputs to define the source files used in your transcoding job. For more
// information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/specify-input-settings.html.
// You can use multiple video inputs to do input stitching. For more information,
// see https://docs.aws.amazon.com/mediaconvert/latest/ug/assembling-multiple-inputs-and-input-clips.html
type Input struct {
	_ struct{} `type:"structure"`

	// Use audio selector groups to combine multiple sidecar audio inputs so that
	// you can assign them to a single output audio tab (AudioDescription). Note
	// that, if you're working with embedded audio, it's simpler to assign multiple
	// input tracks into a single audio selector rather than use an audio selector
	// group.
	AudioSelectorGroups map[string]*AudioSelectorGroup `locationName:"audioSelectorGroups" type:"map"`

	// Use Audio selectors (AudioSelectors) to specify a track or set of tracks
	// from the input that you will use in your outputs. You can use multiple Audio
	// selectors per input.
	AudioSelectors map[string]*AudioSelector `locationName:"audioSelectors" type:"map"`

	// Use captions selectors to specify the captions data from your input that
	// you use in your outputs. You can use up to 20 captions selectors per input.
	CaptionSelectors map[string]*CaptionSelector `locationName:"captionSelectors" type:"map"`

	// Use Cropping selection (crop) to specify the video area that the service
	// will include in the output video frame. If you specify a value here, it will
	// override any value that you specify in the output setting Cropping selection
	// (crop).
	Crop *Rectangle `locationName:"crop" type:"structure"`

	// Enable Deblock (InputDeblockFilter) to produce smoother motion in the output.
	// Default is disabled. Only manually controllable for MPEG2 and uncompressed
	// video inputs.
	DeblockFilter *string `locationName:"deblockFilter" type:"string" enum:"InputDeblockFilter"`

	// Settings for decrypting any input files that you encrypt before you upload
	// them to Amazon S3. MediaConvert can decrypt files only when you use AWS Key
	// Management Service (KMS) to encrypt the data key that you use to encrypt
	// your content.
	DecryptionSettings *InputDecryptionSettings `locationName:"decryptionSettings" type:"structure"`

	// Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default
	// is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video
	// inputs.
	DenoiseFilter *string `locationName:"denoiseFilter" type:"string" enum:"InputDenoiseFilter"`

	// Specify the source file for your transcoding job. You can use multiple inputs
	// in a single job. The service concatenates these inputs, in the order that
	// you specify them in the job, to create the outputs. If your input format
	// is IMF, specify your input by providing the path to your CPL. For example,
	// "s3://bucket/vf/cpl.xml". If the CPL is in an incomplete IMP, make sure to
	// use *Supplemental IMPs* (SupplementalImps) to specify any supplemental IMPs
	// that contain assets referenced by the CPL.
	FileInput *string `locationName:"fileInput" type:"string"`

	// Specify how the transcoding service applies the denoise and deblock filters.
	// You must also enable the filters separately, with Denoise (InputDenoiseFilter)
	// and Deblock (InputDeblockFilter). * Auto - The transcoding service determines
	// whether to apply filtering, depending on input type and quality. * Disable
	// - The input is not filtered. This is true even if you use the API to enable
	// them in (InputDeblockFilter) and (InputDenoiseFilter). * Force - The input
	// is filtered regardless of input type.
	FilterEnable *string `locationName:"filterEnable" type:"string" enum:"InputFilterEnable"`

	// Use Filter strength (FilterStrength) to adjust the magnitude of the input
	// filter settings (Deblock and Denoise). The range is -5 to 5. Default is 0.
	FilterStrength *int64 `locationName:"filterStrength" type:"integer"`

	// Enable the image inserter feature to include a graphic overlay on your video.
	// Enable or disable this feature for each input individually. This setting
	// is disabled by default.
	ImageInserter *ImageInserter `locationName:"imageInserter" type:"structure"`

	// (InputClippings) contains sets of start and end times that together specify
	// a portion of the input to be used in the outputs. If you provide only a start
	// time, the clip will be the entire input from that point to the end. If you
	// provide only an end time, it will be the entire input up to that point. When
	// you specify more than one input clip, the transcoding service creates the
	// job outputs by stringing the clips together in the order you specify them.
	InputClippings []*InputClipping `locationName:"inputClippings" type:"list"`

	// When you have a progressive segmented frame (PsF) input, use this setting
	// to flag the input as PsF. MediaConvert doesn't automatically detect PsF.
	// Therefore, flagging your input as PsF results in better preservation of video
	// quality when you do deinterlacing and frame rate conversion. If you don't
	// specify, the default value is Auto (AUTO). Auto is the correct setting for
	// all inputs that are not PsF. Don't set this value to PsF when your input
	// is interlaced. Doing so creates horizontal interlacing artifacts.
	InputScanType *string `locationName:"inputScanType" type:"string" enum:"InputScanType"`

	// Use Selection placement (position) to define the video area in your output
	// frame. The area outside of the rectangle that you specify here is black.
	// If you specify a value here, it will override any value that you specify
	// in the output setting Selection placement (position). If you specify a value
	// here, this will override any AFD values in your input, even if you set Respond
	// to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here,
	// this will ignore anything that you specify for the setting Scaling Behavior
	// (scalingBehavior).
	Position *Rectangle `locationName:"position" type:"structure"`

	// Use Program (programNumber) to select a specific program from within a multi-program
	// transport stream. Note that Quad 4K is not currently supported. Default is
	// the first program within the transport stream. If the program you specify
	// doesn't exist, the transcoding service will use this default.
	ProgramNumber *int64 `locationName:"programNumber" min:"1" type:"integer"`

	// Set PSI control (InputPsiControl) for transport stream inputs to specify
	// which data the demux process scans. * Ignore PSI - Scan all PIDs for audio
	// and video. * Use PSI - Scan only PSI data.
	PsiControl *string `locationName:"psiControl" type:"string" enum:"InputPsiControl"`

	// Provide a list of any necessary supplemental IMPs. You need supplemental
	// IMPs if the CPL that you're using for your input is in an incomplete IMP.
	// Specify either the supplemental IMP directories with a trailing slash or
	// the ASSETMAP.xml files. For example ["s3://bucket/ov/", "s3://bucket/vf2/ASSETMAP.xml"].
	// You don't need to specify the IMP that contains your input CPL, because the
	// service automatically detects it.
	SupplementalImps []*string `locationName:"supplementalImps" type:"list"`

	// Use this Timecode source setting, located under the input settings (InputTimecodeSource),
	// to specify how the service counts input video frames. This input frame count
	// affects only the behavior of features that apply to a single input at a time,
	// such as input clipping and synchronizing some captions formats. Choose Embedded
	// (EMBEDDED) to use the timecodes in your input video. Choose Start at zero
	// (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART)
	// to start the first frame at the timecode that you specify in the setting
	// Start timecode (timecodeStart). If you don't specify a value for Timecode
	// source, the service will use Embedded by default. For more information about
	// timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.
	TimecodeSource *string `locationName:"timecodeSource" type:"string" enum:"InputTimecodeSource"`

	// Specify the timecode that you want the service to use for this input's initial
	// frame. To use this setting, you must set the Timecode source setting, located
	// under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART).
	// For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.
	TimecodeStart *string `locationName:"timecodeStart" min:"11" type:"string"`

	// Input video selectors contain the video settings for the input. Each of your
	// inputs can have up to one video selector.
	VideoSelector *VideoSelector `locationName:"videoSelector" type:"structure"`
}
12448
// String returns a human-readable representation of the Input value, built
// with awsutil.Prettify.
func (s Input) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s Input) GoString() string {
	return s.String()
}
12458
// Validate inspects the fields of the type to determine if they are valid.
// It enforces the minimum-value and minimum-length constraints declared in
// the struct tags, and recursively validates nested structures and the
// entries of the selector maps. It returns a request.ErrInvalidParams that
// aggregates every violation found, or nil when all fields are valid.
func (s *Input) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Input"}
	// FilterStrength's documented range is -5 to 5; only the lower bound is
	// enforced here.
	if s.FilterStrength != nil && *s.FilterStrength < -5 {
		invalidParams.Add(request.NewErrParamMinValue("FilterStrength", -5))
	}
	if s.ProgramNumber != nil && *s.ProgramNumber < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ProgramNumber", 1))
	}
	if s.TimecodeStart != nil && len(*s.TimecodeStart) < 11 {
		invalidParams.Add(request.NewErrParamMinLen("TimecodeStart", 11))
	}
	if s.AudioSelectors != nil {
		for i, v := range s.AudioSelectors {
			if v == nil {
				// Nil map values carry no data to validate; skip them.
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AudioSelectors", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.CaptionSelectors != nil {
		for i, v := range s.CaptionSelectors {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionSelectors", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.Crop != nil {
		if err := s.Crop.Validate(); err != nil {
			invalidParams.AddNested("Crop", err.(request.ErrInvalidParams))
		}
	}
	if s.DecryptionSettings != nil {
		if err := s.DecryptionSettings.Validate(); err != nil {
			invalidParams.AddNested("DecryptionSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.ImageInserter != nil {
		if err := s.ImageInserter.Validate(); err != nil {
			invalidParams.AddNested("ImageInserter", err.(request.ErrInvalidParams))
		}
	}
	if s.Position != nil {
		if err := s.Position.Validate(); err != nil {
			invalidParams.AddNested("Position", err.(request.ErrInvalidParams))
		}
	}
	if s.VideoSelector != nil {
		if err := s.VideoSelector.Validate(); err != nil {
			invalidParams.AddNested("VideoSelector", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
12522
// SetAudioSelectorGroups sets the AudioSelectorGroups field's value and
// returns s for call chaining.
func (s *Input) SetAudioSelectorGroups(v map[string]*AudioSelectorGroup) *Input {
	s.AudioSelectorGroups = v
	return s
}

// SetAudioSelectors sets the AudioSelectors field's value and returns s for
// call chaining.
func (s *Input) SetAudioSelectors(v map[string]*AudioSelector) *Input {
	s.AudioSelectors = v
	return s
}

// SetCaptionSelectors sets the CaptionSelectors field's value and returns s
// for call chaining.
func (s *Input) SetCaptionSelectors(v map[string]*CaptionSelector) *Input {
	s.CaptionSelectors = v
	return s
}

// SetCrop sets the Crop field's value and returns s for call chaining.
func (s *Input) SetCrop(v *Rectangle) *Input {
	s.Crop = v
	return s
}

// SetDeblockFilter sets the DeblockFilter field's value and returns s for
// call chaining.
func (s *Input) SetDeblockFilter(v string) *Input {
	s.DeblockFilter = &v
	return s
}

// SetDecryptionSettings sets the DecryptionSettings field's value and returns
// s for call chaining.
func (s *Input) SetDecryptionSettings(v *InputDecryptionSettings) *Input {
	s.DecryptionSettings = v
	return s
}

// SetDenoiseFilter sets the DenoiseFilter field's value and returns s for
// call chaining.
func (s *Input) SetDenoiseFilter(v string) *Input {
	s.DenoiseFilter = &v
	return s
}

// SetFileInput sets the FileInput field's value and returns s for call chaining.
func (s *Input) SetFileInput(v string) *Input {
	s.FileInput = &v
	return s
}

// SetFilterEnable sets the FilterEnable field's value and returns s for call
// chaining.
func (s *Input) SetFilterEnable(v string) *Input {
	s.FilterEnable = &v
	return s
}

// SetFilterStrength sets the FilterStrength field's value and returns s for
// call chaining.
func (s *Input) SetFilterStrength(v int64) *Input {
	s.FilterStrength = &v
	return s
}

// SetImageInserter sets the ImageInserter field's value and returns s for
// call chaining.
func (s *Input) SetImageInserter(v *ImageInserter) *Input {
	s.ImageInserter = v
	return s
}

// SetInputClippings sets the InputClippings field's value and returns s for
// call chaining.
func (s *Input) SetInputClippings(v []*InputClipping) *Input {
	s.InputClippings = v
	return s
}

// SetInputScanType sets the InputScanType field's value and returns s for
// call chaining.
func (s *Input) SetInputScanType(v string) *Input {
	s.InputScanType = &v
	return s
}

// SetPosition sets the Position field's value and returns s for call chaining.
func (s *Input) SetPosition(v *Rectangle) *Input {
	s.Position = v
	return s
}

// SetProgramNumber sets the ProgramNumber field's value and returns s for
// call chaining.
func (s *Input) SetProgramNumber(v int64) *Input {
	s.ProgramNumber = &v
	return s
}

// SetPsiControl sets the PsiControl field's value and returns s for call chaining.
func (s *Input) SetPsiControl(v string) *Input {
	s.PsiControl = &v
	return s
}

// SetSupplementalImps sets the SupplementalImps field's value and returns s
// for call chaining.
func (s *Input) SetSupplementalImps(v []*string) *Input {
	s.SupplementalImps = v
	return s
}

// SetTimecodeSource sets the TimecodeSource field's value and returns s for
// call chaining.
func (s *Input) SetTimecodeSource(v string) *Input {
	s.TimecodeSource = &v
	return s
}

// SetTimecodeStart sets the TimecodeStart field's value and returns s for
// call chaining.
func (s *Input) SetTimecodeStart(v string) *Input {
	s.TimecodeStart = &v
	return s
}

// SetVideoSelector sets the VideoSelector field's value and returns s for
// call chaining.
func (s *Input) SetVideoSelector(v *VideoSelector) *Input {
	s.VideoSelector = v
	return s
}
12642
// To transcode only portions of your input, include one input clip for each
// part of your input that you want in your output. All input clips that you
// specify will be included in every output of the job. For more information,
// see https://docs.aws.amazon.com/mediaconvert/latest/ug/assembling-multiple-inputs-and-input-clips.html.
type InputClipping struct {
	_ struct{} `type:"structure"`

	// Set End timecode (EndTimecode) to the end of the portion of the input you
	// are clipping. The frame corresponding to the End timecode value is included
	// in the clip. Start timecode or End timecode may be left blank, but not both.
	// Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the
	// minute, SS is the second, and FF is the frame number. When choosing this
	// value, take into account your setting for timecode source under input settings
	// (InputTimecodeSource). For example, if you have embedded timecodes that start
	// at 01:00:00:00 and you want your clip to end six minutes into the video,
	// use 01:06:00:00.
	EndTimecode *string `locationName:"endTimecode" type:"string"`

	// Set Start timecode (StartTimecode) to the beginning of the portion of the
	// input you are clipping. The frame corresponding to the Start timecode value
	// is included in the clip. Start timecode or End timecode may be left blank,
	// but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the
	// hour, MM is the minute, SS is the second, and FF is the frame number. When
	// choosing this value, take into account your setting for Input timecode source.
	// For example, if you have embedded timecodes that start at 01:00:00:00 and
	// you want your clip to begin five minutes into the video, use 01:05:00:00.
	StartTimecode *string `locationName:"startTimecode" type:"string"`
}

// String returns a human-readable representation of the InputClipping value,
// built with awsutil.Prettify.
func (s InputClipping) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s InputClipping) GoString() string {
	return s.String()
}

// SetEndTimecode sets the EndTimecode field's value and returns s for call
// chaining.
func (s *InputClipping) SetEndTimecode(v string) *InputClipping {
	s.EndTimecode = &v
	return s
}

// SetStartTimecode sets the StartTimecode field's value and returns s for
// call chaining.
func (s *InputClipping) SetStartTimecode(v string) *InputClipping {
	s.StartTimecode = &v
	return s
}
12693
// Settings for decrypting any input files that you encrypt before you upload
// them to Amazon S3. MediaConvert can decrypt files only when you use AWS Key
// Management Service (KMS) to encrypt the data key that you use to encrypt
// your content.
type InputDecryptionSettings struct {
	_ struct{} `type:"structure"`

	// Specify the encryption mode that you used to encrypt your input files.
	DecryptionMode *string `locationName:"decryptionMode" type:"string" enum:"DecryptionMode"`

	// Warning! Don't provide your encryption key in plaintext. Your job settings
	// could be intercepted, making your encrypted content vulnerable. Specify the
	// encrypted version of the data key that you used to encrypt your content.
	// The data key must be encrypted by AWS Key Management Service (KMS). The key
	// can be 128, 192, or 256 bits.
	EncryptedDecryptionKey *string `locationName:"encryptedDecryptionKey" min:"24" type:"string"`

	// Specify the initialization vector that you used when you encrypted your content
	// before uploading it to Amazon S3. You can use a 16-byte initialization vector
	// with any encryption mode. Or, you can use a 12-byte initialization vector
	// with GCM or CTR. MediaConvert accepts only initialization vectors that are
	// base64-encoded.
	InitializationVector *string `locationName:"initializationVector" min:"16" type:"string"`

	// Specify the AWS Region for AWS Key Management Service (KMS) that you used
	// to encrypt your data key, if that Region is different from the one you are
	// using for AWS Elemental MediaConvert.
	KmsKeyRegion *string `locationName:"kmsKeyRegion" min:"9" type:"string"`
}

// String returns a human-readable representation of the InputDecryptionSettings
// value, built with awsutil.Prettify.
func (s InputDecryptionSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s InputDecryptionSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the minimum-length constraints declared in the struct tags and
// returns a request.ErrInvalidParams aggregating every violation found, or
// nil when all fields are valid.
func (s *InputDecryptionSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "InputDecryptionSettings"}
	if s.EncryptedDecryptionKey != nil && len(*s.EncryptedDecryptionKey) < 24 {
		invalidParams.Add(request.NewErrParamMinLen("EncryptedDecryptionKey", 24))
	}
	if s.InitializationVector != nil && len(*s.InitializationVector) < 16 {
		invalidParams.Add(request.NewErrParamMinLen("InitializationVector", 16))
	}
	if s.KmsKeyRegion != nil && len(*s.KmsKeyRegion) < 9 {
		invalidParams.Add(request.NewErrParamMinLen("KmsKeyRegion", 9))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDecryptionMode sets the DecryptionMode field's value and returns s for
// call chaining.
func (s *InputDecryptionSettings) SetDecryptionMode(v string) *InputDecryptionSettings {
	s.DecryptionMode = &v
	return s
}

// SetEncryptedDecryptionKey sets the EncryptedDecryptionKey field's value and
// returns s for call chaining.
func (s *InputDecryptionSettings) SetEncryptedDecryptionKey(v string) *InputDecryptionSettings {
	s.EncryptedDecryptionKey = &v
	return s
}

// SetInitializationVector sets the InitializationVector field's value and
// returns s for call chaining.
func (s *InputDecryptionSettings) SetInitializationVector(v string) *InputDecryptionSettings {
	s.InitializationVector = &v
	return s
}

// SetKmsKeyRegion sets the KmsKeyRegion field's value and returns s for call
// chaining.
func (s *InputDecryptionSettings) SetKmsKeyRegion(v string) *InputDecryptionSettings {
	s.KmsKeyRegion = &v
	return s
}
12776
// Specified video input in a template. InputTemplate mirrors Input but omits
// job-specific fields such as the source file and decryption settings.
type InputTemplate struct {
	_ struct{} `type:"structure"`

	// Use audio selector groups to combine multiple sidecar audio inputs so that
	// you can assign them to a single output audio tab (AudioDescription). Note
	// that, if you're working with embedded audio, it's simpler to assign multiple
	// input tracks into a single audio selector rather than use an audio selector
	// group.
	AudioSelectorGroups map[string]*AudioSelectorGroup `locationName:"audioSelectorGroups" type:"map"`

	// Use Audio selectors (AudioSelectors) to specify a track or set of tracks
	// from the input that you will use in your outputs. You can use multiple Audio
	// selectors per input.
	AudioSelectors map[string]*AudioSelector `locationName:"audioSelectors" type:"map"`

	// Use captions selectors to specify the captions data from your input that
	// you use in your outputs. You can use up to 20 captions selectors per input.
	CaptionSelectors map[string]*CaptionSelector `locationName:"captionSelectors" type:"map"`

	// Use Cropping selection (crop) to specify the video area that the service
	// will include in the output video frame. If you specify a value here, it will
	// override any value that you specify in the output setting Cropping selection
	// (crop).
	Crop *Rectangle `locationName:"crop" type:"structure"`

	// Enable Deblock (InputDeblockFilter) to produce smoother motion in the output.
	// Default is disabled. Only manually controllable for MPEG2 and uncompressed
	// video inputs.
	DeblockFilter *string `locationName:"deblockFilter" type:"string" enum:"InputDeblockFilter"`

	// Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default
	// is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video
	// inputs.
	DenoiseFilter *string `locationName:"denoiseFilter" type:"string" enum:"InputDenoiseFilter"`

	// Specify how the transcoding service applies the denoise and deblock filters.
	// You must also enable the filters separately, with Denoise (InputDenoiseFilter)
	// and Deblock (InputDeblockFilter). * Auto - The transcoding service determines
	// whether to apply filtering, depending on input type and quality. * Disable
	// - The input is not filtered. This is true even if you use the API to enable
	// them in (InputDeblockFilter) and (InputDenoiseFilter). * Force - The input
	// is filtered regardless of input type.
	FilterEnable *string `locationName:"filterEnable" type:"string" enum:"InputFilterEnable"`

	// Use Filter strength (FilterStrength) to adjust the magnitude of the input
	// filter settings (Deblock and Denoise). The range is -5 to 5. Default is 0.
	FilterStrength *int64 `locationName:"filterStrength" type:"integer"`

	// Enable the image inserter feature to include a graphic overlay on your video.
	// Enable or disable this feature for each input individually. This setting
	// is disabled by default.
	ImageInserter *ImageInserter `locationName:"imageInserter" type:"structure"`

	// (InputClippings) contains sets of start and end times that together specify
	// a portion of the input to be used in the outputs. If you provide only a start
	// time, the clip will be the entire input from that point to the end. If you
	// provide only an end time, it will be the entire input up to that point. When
	// you specify more than one input clip, the transcoding service creates the
	// job outputs by stringing the clips together in the order you specify them.
	InputClippings []*InputClipping `locationName:"inputClippings" type:"list"`

	// When you have a progressive segmented frame (PsF) input, use this setting
	// to flag the input as PsF. MediaConvert doesn't automatically detect PsF.
	// Therefore, flagging your input as PsF results in better preservation of video
	// quality when you do deinterlacing and frame rate conversion. If you don't
	// specify, the default value is Auto (AUTO). Auto is the correct setting for
	// all inputs that are not PsF. Don't set this value to PsF when your input
	// is interlaced. Doing so creates horizontal interlacing artifacts.
	InputScanType *string `locationName:"inputScanType" type:"string" enum:"InputScanType"`

	// Use Selection placement (position) to define the video area in your output
	// frame. The area outside of the rectangle that you specify here is black.
	// If you specify a value here, it will override any value that you specify
	// in the output setting Selection placement (position). If you specify a value
	// here, this will override any AFD values in your input, even if you set Respond
	// to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here,
	// this will ignore anything that you specify for the setting Scaling Behavior
	// (scalingBehavior).
	Position *Rectangle `locationName:"position" type:"structure"`

	// Use Program (programNumber) to select a specific program from within a multi-program
	// transport stream. Note that Quad 4K is not currently supported. Default is
	// the first program within the transport stream. If the program you specify
	// doesn't exist, the transcoding service will use this default.
	ProgramNumber *int64 `locationName:"programNumber" min:"1" type:"integer"`

	// Set PSI control (InputPsiControl) for transport stream inputs to specify
	// which data the demux process scans. * Ignore PSI - Scan all PIDs for audio
	// and video. * Use PSI - Scan only PSI data.
	PsiControl *string `locationName:"psiControl" type:"string" enum:"InputPsiControl"`

	// Use this Timecode source setting, located under the input settings (InputTimecodeSource),
	// to specify how the service counts input video frames. This input frame count
	// affects only the behavior of features that apply to a single input at a time,
	// such as input clipping and synchronizing some captions formats. Choose Embedded
	// (EMBEDDED) to use the timecodes in your input video. Choose Start at zero
	// (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART)
	// to start the first frame at the timecode that you specify in the setting
	// Start timecode (timecodeStart). If you don't specify a value for Timecode
	// source, the service will use Embedded by default. For more information about
	// timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.
	TimecodeSource *string `locationName:"timecodeSource" type:"string" enum:"InputTimecodeSource"`

	// Specify the timecode that you want the service to use for this input's initial
	// frame. To use this setting, you must set the Timecode source setting, located
	// under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART).
	// For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.
	TimecodeStart *string `locationName:"timecodeStart" min:"11" type:"string"`

	// Input video selectors contain the video settings for the input. Each of your
	// inputs can have up to one video selector.
	VideoSelector *VideoSelector `locationName:"videoSelector" type:"structure"`
}
12891
// String returns a human-readable representation of the InputTemplate value,
// built with awsutil.Prettify.
func (s InputTemplate) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s InputTemplate) GoString() string {
	return s.String()
}
12901
// Validate inspects the fields of the type to determine if they are valid.
// It enforces the minimum-value and minimum-length constraints declared in
// the struct tags, and recursively validates nested structures and the
// entries of the selector maps. It returns a request.ErrInvalidParams that
// aggregates every violation found, or nil when all fields are valid.
func (s *InputTemplate) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "InputTemplate"}
	// FilterStrength's documented range is -5 to 5; only the lower bound is
	// enforced here.
	if s.FilterStrength != nil && *s.FilterStrength < -5 {
		invalidParams.Add(request.NewErrParamMinValue("FilterStrength", -5))
	}
	if s.ProgramNumber != nil && *s.ProgramNumber < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ProgramNumber", 1))
	}
	if s.TimecodeStart != nil && len(*s.TimecodeStart) < 11 {
		invalidParams.Add(request.NewErrParamMinLen("TimecodeStart", 11))
	}
	if s.AudioSelectors != nil {
		for i, v := range s.AudioSelectors {
			if v == nil {
				// Nil map values carry no data to validate; skip them.
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AudioSelectors", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.CaptionSelectors != nil {
		for i, v := range s.CaptionSelectors {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionSelectors", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.Crop != nil {
		if err := s.Crop.Validate(); err != nil {
			invalidParams.AddNested("Crop", err.(request.ErrInvalidParams))
		}
	}
	if s.ImageInserter != nil {
		if err := s.ImageInserter.Validate(); err != nil {
			invalidParams.AddNested("ImageInserter", err.(request.ErrInvalidParams))
		}
	}
	if s.Position != nil {
		if err := s.Position.Validate(); err != nil {
			invalidParams.AddNested("Position", err.(request.ErrInvalidParams))
		}
	}
	if s.VideoSelector != nil {
		if err := s.VideoSelector.Validate(); err != nil {
			invalidParams.AddNested("VideoSelector", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
12960
// The Set* methods below each assign a single field and return the receiver
// so that calls can be chained when building an InputTemplate.

// SetAudioSelectorGroups sets the AudioSelectorGroups field's value.
func (s *InputTemplate) SetAudioSelectorGroups(v map[string]*AudioSelectorGroup) *InputTemplate {
	s.AudioSelectorGroups = v
	return s
}

// SetAudioSelectors sets the AudioSelectors field's value.
func (s *InputTemplate) SetAudioSelectors(v map[string]*AudioSelector) *InputTemplate {
	s.AudioSelectors = v
	return s
}

// SetCaptionSelectors sets the CaptionSelectors field's value.
func (s *InputTemplate) SetCaptionSelectors(v map[string]*CaptionSelector) *InputTemplate {
	s.CaptionSelectors = v
	return s
}

// SetCrop sets the Crop field's value.
func (s *InputTemplate) SetCrop(v *Rectangle) *InputTemplate {
	s.Crop = v
	return s
}

// SetDeblockFilter sets the DeblockFilter field's value.
func (s *InputTemplate) SetDeblockFilter(v string) *InputTemplate {
	s.DeblockFilter = &v
	return s
}

// SetDenoiseFilter sets the DenoiseFilter field's value.
func (s *InputTemplate) SetDenoiseFilter(v string) *InputTemplate {
	s.DenoiseFilter = &v
	return s
}

// SetFilterEnable sets the FilterEnable field's value.
func (s *InputTemplate) SetFilterEnable(v string) *InputTemplate {
	s.FilterEnable = &v
	return s
}

// SetFilterStrength sets the FilterStrength field's value.
func (s *InputTemplate) SetFilterStrength(v int64) *InputTemplate {
	s.FilterStrength = &v
	return s
}

// SetImageInserter sets the ImageInserter field's value.
func (s *InputTemplate) SetImageInserter(v *ImageInserter) *InputTemplate {
	s.ImageInserter = v
	return s
}

// SetInputClippings sets the InputClippings field's value.
func (s *InputTemplate) SetInputClippings(v []*InputClipping) *InputTemplate {
	s.InputClippings = v
	return s
}

// SetInputScanType sets the InputScanType field's value.
func (s *InputTemplate) SetInputScanType(v string) *InputTemplate {
	s.InputScanType = &v
	return s
}

// SetPosition sets the Position field's value.
func (s *InputTemplate) SetPosition(v *Rectangle) *InputTemplate {
	s.Position = v
	return s
}

// SetProgramNumber sets the ProgramNumber field's value.
func (s *InputTemplate) SetProgramNumber(v int64) *InputTemplate {
	s.ProgramNumber = &v
	return s
}

// SetPsiControl sets the PsiControl field's value.
func (s *InputTemplate) SetPsiControl(v string) *InputTemplate {
	s.PsiControl = &v
	return s
}

// SetTimecodeSource sets the TimecodeSource field's value.
func (s *InputTemplate) SetTimecodeSource(v string) *InputTemplate {
	s.TimecodeSource = &v
	return s
}

// SetTimecodeStart sets the TimecodeStart field's value.
func (s *InputTemplate) SetTimecodeStart(v string) *InputTemplate {
	s.TimecodeStart = &v
	return s
}

// SetVideoSelector sets the VideoSelector field's value.
func (s *InputTemplate) SetVideoSelector(v *VideoSelector) *InputTemplate {
	s.VideoSelector = v
	return s
}
13062
// These settings apply to a specific graphic overlay. You can include multiple
// overlays in your job.
type InsertableImage struct {
	_ struct{} `type:"structure"`

	// Specify the time, in milliseconds, for the image to remain on the output
	// video. This duration includes fade-in time but not fade-out time.
	Duration *int64 `locationName:"duration" type:"integer"`

	// Specify the length of time, in milliseconds, between the Start time that
	// you specify for the image insertion and the time that the image appears at
	// full opacity. Full opacity is the level that you specify for the opacity
	// setting. If you don't specify a value for Fade-in, the image will appear
	// abruptly at the overlay start time.
	FadeIn *int64 `locationName:"fadeIn" type:"integer"`

	// Specify the length of time, in milliseconds, between the end of the time
	// that you have specified for the image overlay Duration and when the overlaid
	// image has faded to total transparency. If you don't specify a value for Fade-out,
	// the image will disappear abruptly at the end of the inserted image duration.
	FadeOut *int64 `locationName:"fadeOut" type:"integer"`

	// Specify the height of the inserted image in pixels. If you specify a value
	// that's larger than the video resolution height, the service will crop your
	// overlaid image to fit. To use the native height of the image, keep this setting
	// blank.
	Height *int64 `locationName:"height" type:"integer"`

	// Specify the HTTP, HTTPS, or Amazon S3 location of the image that you want
	// to overlay on the video. Use a PNG or TGA file. The value must be at least
	// 14 characters long (enforced by Validate via the min:"14" constraint).
	ImageInserterInput *string `locationName:"imageInserterInput" min:"14" type:"string"`

	// Specify the distance, in pixels, between the inserted image and the left
	// edge of the video frame. Required for any image overlay that you specify.
	ImageX *int64 `locationName:"imageX" type:"integer"`

	// Specify the distance, in pixels, between the overlaid image and the top edge
	// of the video frame. Required for any image overlay that you specify.
	ImageY *int64 `locationName:"imageY" type:"integer"`

	// Specify how overlapping inserted images appear. Images with higher values
	// for Layer appear on top of images with lower values for Layer.
	Layer *int64 `locationName:"layer" type:"integer"`

	// Use Opacity (Opacity) to specify how much of the underlying video shows through
	// the inserted image. 0 is transparent and 100 is fully opaque. Default is
	// 50.
	Opacity *int64 `locationName:"opacity" type:"integer"`

	// Specify the timecode of the frame that you want the overlay to first appear
	// on. This must be in timecode (HH:MM:SS:FF or HH:MM:SS;FF) format. Remember
	// to take into account your timecode source settings.
	StartTime *string `locationName:"startTime" type:"string"`

	// Specify the width of the inserted image in pixels. If you specify a value
	// that's larger than the video resolution width, the service will crop your
	// overlaid image to fit. To use the native width of the image, keep this setting
	// blank.
	Width *int64 `locationName:"width" type:"integer"`
}

// String returns the string representation of s for debugging and logging.
// The output is produced by awsutil.Prettify and is not a stable wire format.
func (s InsertableImage) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it satisfies fmt.GoStringer
// and yields the same output as String.
func (s InsertableImage) GoString() string {
	return s.String()
}
13133
13134// Validate inspects the fields of the type to determine if they are valid.
13135func (s *InsertableImage) Validate() error {
13136	invalidParams := request.ErrInvalidParams{Context: "InsertableImage"}
13137	if s.ImageInserterInput != nil && len(*s.ImageInserterInput) < 14 {
13138		invalidParams.Add(request.NewErrParamMinLen("ImageInserterInput", 14))
13139	}
13140
13141	if invalidParams.Len() > 0 {
13142		return invalidParams
13143	}
13144	return nil
13145}
13146
// The Set* methods below each assign a single field and return the receiver
// so that calls can be chained when building an InsertableImage.

// SetDuration sets the Duration field's value.
func (s *InsertableImage) SetDuration(v int64) *InsertableImage {
	s.Duration = &v
	return s
}

// SetFadeIn sets the FadeIn field's value.
func (s *InsertableImage) SetFadeIn(v int64) *InsertableImage {
	s.FadeIn = &v
	return s
}

// SetFadeOut sets the FadeOut field's value.
func (s *InsertableImage) SetFadeOut(v int64) *InsertableImage {
	s.FadeOut = &v
	return s
}

// SetHeight sets the Height field's value.
func (s *InsertableImage) SetHeight(v int64) *InsertableImage {
	s.Height = &v
	return s
}

// SetImageInserterInput sets the ImageInserterInput field's value.
func (s *InsertableImage) SetImageInserterInput(v string) *InsertableImage {
	s.ImageInserterInput = &v
	return s
}

// SetImageX sets the ImageX field's value.
func (s *InsertableImage) SetImageX(v int64) *InsertableImage {
	s.ImageX = &v
	return s
}

// SetImageY sets the ImageY field's value.
func (s *InsertableImage) SetImageY(v int64) *InsertableImage {
	s.ImageY = &v
	return s
}

// SetLayer sets the Layer field's value.
func (s *InsertableImage) SetLayer(v int64) *InsertableImage {
	s.Layer = &v
	return s
}

// SetOpacity sets the Opacity field's value.
func (s *InsertableImage) SetOpacity(v int64) *InsertableImage {
	s.Opacity = &v
	return s
}

// SetStartTime sets the StartTime field's value.
func (s *InsertableImage) SetStartTime(v string) *InsertableImage {
	s.StartTime = &v
	return s
}

// SetWidth sets the Width field's value.
func (s *InsertableImage) SetWidth(v int64) *InsertableImage {
	s.Width = &v
	return s
}
13212
// InternalServerErrorException is the API error type corresponding to the
// service's InternalServerErrorException error code. RespMetadata carries
// the HTTP response metadata (status code and request ID); Message_ holds
// the message returned by the service, if any.
type InternalServerErrorException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation of s for debugging and logging.
// The output is produced by awsutil.Prettify and is not a stable wire format.
func (s InternalServerErrorException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it satisfies fmt.GoStringer
// and yields the same output as String.
func (s InternalServerErrorException) GoString() string {
	return s.String()
}

// newErrorInternalServerErrorException constructs an InternalServerErrorException
// carrying the given response metadata. Only RespMetadata is populated here.
func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error {
	return &InternalServerErrorException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InternalServerErrorException) Code() string {
	return "InternalServerErrorException"
}

// Message returns the exception's message, or the empty string when the
// service did not provide one.
func (s *InternalServerErrorException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InternalServerErrorException) OrigErr() error {
	return nil
}

// Error formats the exception as "Code: Message", satisfying the built-in
// error interface.
func (s *InternalServerErrorException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InternalServerErrorException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *InternalServerErrorException) RequestID() string {
	return s.RespMetadata.RequestID
}
13267
// Each job converts an input file into an output file or files. For more information,
// see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
type Job struct {
	_ struct{} `type:"structure"`

	// Accelerated transcoding can significantly speed up jobs with long, visually
	// complex content.
	AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"`

	// Describes whether the current job is running with accelerated transcoding.
	// For jobs that have Acceleration (AccelerationMode) set to DISABLED, AccelerationStatus
	// is always NOT_APPLICABLE. For jobs that have Acceleration (AccelerationMode)
	// set to ENABLED or PREFERRED, AccelerationStatus is one of the other states.
	// AccelerationStatus is IN_PROGRESS initially, while the service determines
	// whether the input files and job settings are compatible with accelerated
	// transcoding. If they are, AccelerationStatus is ACCELERATED. If your input
	// files and job settings aren't compatible with accelerated transcoding, the
	// service either fails your job or runs it without accelerated transcoding,
	// depending on how you set Acceleration (AccelerationMode). When the service
	// runs your job without accelerated transcoding, AccelerationStatus is NOT_ACCELERATED.
	AccelerationStatus *string `locationName:"accelerationStatus" type:"string" enum:"AccelerationStatus"`

	// An identifier for this resource that is unique within all of AWS.
	Arn *string `locationName:"arn" type:"string"`

	// The tag type that AWS Billing and Cost Management will use to sort your AWS
	// Elemental MediaConvert costs on any billing report that you set up.
	BillingTagsSource *string `locationName:"billingTagsSource" type:"string" enum:"BillingTagsSource"`

	// The time, in Unix epoch format in seconds, when the job got created.
	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// A job's phase can be PROBING, TRANSCODING OR UPLOADING
	CurrentPhase *string `locationName:"currentPhase" type:"string" enum:"JobPhase"`

	// Error code for the job
	ErrorCode *int64 `locationName:"errorCode" type:"integer"`

	// Error message of Job
	ErrorMessage *string `locationName:"errorMessage" type:"string"`

	// Optional list of hop destinations.
	HopDestinations []*HopDestination `locationName:"hopDestinations" type:"list"`

	// A portion of the job's ARN, unique within your AWS Elemental MediaConvert
	// resources
	Id *string `locationName:"id" type:"string"`

	// An estimate of how far your job has progressed. This estimate is shown as
	// a percentage of the total time from when your job leaves its queue to when
	// your output files appear in your output Amazon S3 bucket. AWS Elemental MediaConvert
	// provides jobPercentComplete in CloudWatch STATUS_UPDATE events and in the
	// response to GetJob and ListJobs requests. The jobPercentComplete estimate
	// is reliable for the following input containers: Quicktime, Transport Stream,
	// MP4, and MXF. For some jobs, the service can't provide information about
	// job progress. In those cases, jobPercentComplete returns a null value.
	JobPercentComplete *int64 `locationName:"jobPercentComplete" type:"integer"`

	// The job template that the job is created from, if it is created from a job
	// template.
	JobTemplate *string `locationName:"jobTemplate" type:"string"`

	// Provides messages from the service about jobs that you have already successfully
	// submitted.
	Messages *JobMessages `locationName:"messages" type:"structure"`

	// List of output group details
	OutputGroupDetails []*OutputGroupDetail `locationName:"outputGroupDetails" type:"list"`

	// Relative priority on the job.
	Priority *int64 `locationName:"priority" type:"integer"`

	// When you create a job, you can specify a queue to send it to. If you don't
	// specify, the job will go to the default queue. For more about queues, see
	// the User Guide topic at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
	Queue *string `locationName:"queue" type:"string"`

	// The job's queue hopping history.
	QueueTransitions []*QueueTransition `locationName:"queueTransitions" type:"list"`

	// The number of times that the service automatically attempted to process your
	// job after encountering an error.
	RetryCount *int64 `locationName:"retryCount" type:"integer"`

	// The IAM role you use for creating this job. For details about permissions,
	// see the User Guide topic at the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html
	//
	// Role is a required field
	Role *string `locationName:"role" type:"string" required:"true"`

	// JobSettings contains all the transcode settings for a job.
	//
	// Settings is a required field
	Settings *JobSettings `locationName:"settings" type:"structure" required:"true"`

	// Enable this setting when you run a test job to estimate how many reserved
	// transcoding slots (RTS) you need. When this is enabled, MediaConvert runs
	// your job from an on-demand queue with similar performance to what you will
	// see with one RTS in a reserved queue. This setting is disabled by default.
	SimulateReservedQueue *string `locationName:"simulateReservedQueue" type:"string" enum:"SimulateReservedQueue"`

	// A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR.
	Status *string `locationName:"status" type:"string" enum:"JobStatus"`

	// Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch
	// Events. Set the interval, in seconds, between status updates. MediaConvert
	// sends an update at this interval from the time the service begins processing
	// your job to the time it completes the transcode or encounters an error.
	StatusUpdateInterval *string `locationName:"statusUpdateInterval" type:"string" enum:"StatusUpdateInterval"`

	// Information about when jobs are submitted, started, and finished is specified
	// in Unix epoch format in seconds.
	Timing *Timing `locationName:"timing" type:"structure"`

	// User-defined metadata that you want to associate with an MediaConvert job.
	// You specify metadata in key/value pairs.
	UserMetadata map[string]*string `locationName:"userMetadata" type:"map"`
}

// String returns the string representation of s for debugging and logging.
// The output is produced by awsutil.Prettify and is not a stable wire format.
func (s Job) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it satisfies fmt.GoStringer
// and yields the same output as String.
func (s Job) GoString() string {
	return s.String()
}
13396
// The Set* methods below each assign a single field and return the receiver
// so that calls can be chained when building a Job.

// SetAccelerationSettings sets the AccelerationSettings field's value.
func (s *Job) SetAccelerationSettings(v *AccelerationSettings) *Job {
	s.AccelerationSettings = v
	return s
}

// SetAccelerationStatus sets the AccelerationStatus field's value.
func (s *Job) SetAccelerationStatus(v string) *Job {
	s.AccelerationStatus = &v
	return s
}

// SetArn sets the Arn field's value.
func (s *Job) SetArn(v string) *Job {
	s.Arn = &v
	return s
}

// SetBillingTagsSource sets the BillingTagsSource field's value.
func (s *Job) SetBillingTagsSource(v string) *Job {
	s.BillingTagsSource = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
func (s *Job) SetCreatedAt(v time.Time) *Job {
	s.CreatedAt = &v
	return s
}

// SetCurrentPhase sets the CurrentPhase field's value.
func (s *Job) SetCurrentPhase(v string) *Job {
	s.CurrentPhase = &v
	return s
}

// SetErrorCode sets the ErrorCode field's value.
func (s *Job) SetErrorCode(v int64) *Job {
	s.ErrorCode = &v
	return s
}

// SetErrorMessage sets the ErrorMessage field's value.
func (s *Job) SetErrorMessage(v string) *Job {
	s.ErrorMessage = &v
	return s
}

// SetHopDestinations sets the HopDestinations field's value.
func (s *Job) SetHopDestinations(v []*HopDestination) *Job {
	s.HopDestinations = v
	return s
}

// SetId sets the Id field's value.
func (s *Job) SetId(v string) *Job {
	s.Id = &v
	return s
}

// SetJobPercentComplete sets the JobPercentComplete field's value.
func (s *Job) SetJobPercentComplete(v int64) *Job {
	s.JobPercentComplete = &v
	return s
}

// SetJobTemplate sets the JobTemplate field's value.
func (s *Job) SetJobTemplate(v string) *Job {
	s.JobTemplate = &v
	return s
}

// SetMessages sets the Messages field's value.
func (s *Job) SetMessages(v *JobMessages) *Job {
	s.Messages = v
	return s
}

// SetOutputGroupDetails sets the OutputGroupDetails field's value.
func (s *Job) SetOutputGroupDetails(v []*OutputGroupDetail) *Job {
	s.OutputGroupDetails = v
	return s
}

// SetPriority sets the Priority field's value.
func (s *Job) SetPriority(v int64) *Job {
	s.Priority = &v
	return s
}

// SetQueue sets the Queue field's value.
func (s *Job) SetQueue(v string) *Job {
	s.Queue = &v
	return s
}

// SetQueueTransitions sets the QueueTransitions field's value.
func (s *Job) SetQueueTransitions(v []*QueueTransition) *Job {
	s.QueueTransitions = v
	return s
}

// SetRetryCount sets the RetryCount field's value.
func (s *Job) SetRetryCount(v int64) *Job {
	s.RetryCount = &v
	return s
}

// SetRole sets the Role field's value.
func (s *Job) SetRole(v string) *Job {
	s.Role = &v
	return s
}

// SetSettings sets the Settings field's value.
func (s *Job) SetSettings(v *JobSettings) *Job {
	s.Settings = v
	return s
}

// SetSimulateReservedQueue sets the SimulateReservedQueue field's value.
func (s *Job) SetSimulateReservedQueue(v string) *Job {
	s.SimulateReservedQueue = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *Job) SetStatus(v string) *Job {
	s.Status = &v
	return s
}

// SetStatusUpdateInterval sets the StatusUpdateInterval field's value.
func (s *Job) SetStatusUpdateInterval(v string) *Job {
	s.StatusUpdateInterval = &v
	return s
}

// SetTiming sets the Timing field's value.
func (s *Job) SetTiming(v *Timing) *Job {
	s.Timing = v
	return s
}

// SetUserMetadata sets the UserMetadata field's value.
func (s *Job) SetUserMetadata(v map[string]*string) *Job {
	s.UserMetadata = v
	return s
}
13546
// Provides messages from the service about jobs that you have already successfully
// submitted.
type JobMessages struct {
	_ struct{} `type:"structure"`

	// List of messages that are informational only and don't indicate a problem
	// with your job.
	Info []*string `locationName:"info" type:"list"`

	// List of messages that warn about conditions that might cause your job not
	// to run or to fail.
	Warning []*string `locationName:"warning" type:"list"`
}

// String returns the string representation of s for debugging and logging.
// The output is produced by awsutil.Prettify and is not a stable wire format.
func (s JobMessages) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it satisfies fmt.GoStringer
// and yields the same output as String.
func (s JobMessages) GoString() string {
	return s.String()
}

// SetInfo sets the Info field's value and returns s to allow chaining.
func (s *JobMessages) SetInfo(v []*string) *JobMessages {
	s.Info = v
	return s
}

// SetWarning sets the Warning field's value and returns s to allow chaining.
func (s *JobMessages) SetWarning(v []*string) *JobMessages {
	s.Warning = v
	return s
}
13582
// JobSettings contains all the transcode settings for a job.
type JobSettings struct {
	_ struct{} `type:"structure"`

	// When specified, this offset (in milliseconds) is added to the input Ad Avail
	// PTS time.
	AdAvailOffset *int64 `locationName:"adAvailOffset" type:"integer"`

	// Settings for ad avail blanking. Video can be blanked or overlaid with an
	// image, and audio muted during SCTE-35 triggered ad avails.
	AvailBlanking *AvailBlanking `locationName:"availBlanking" type:"structure"`

	// Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion,
	// you can ignore these settings.
	Esam *EsamSettings `locationName:"esam" type:"structure"`

	// Hexadecimal value as per EIA-608 Line 21 Data Services, section 9.5.1.5 05h
	// Content Advisory.
	ExtendedDataServices *ExtendedDataServices `locationName:"extendedDataServices" type:"structure"`

	// Use Inputs (inputs) to define the source files used in the transcode job.
	// There can be multiple inputs added in a job. These inputs will be concatenated
	// together to create the output.
	Inputs []*Input `locationName:"inputs" type:"list"`

	// Use these settings only when you use Kantar watermarking. Specify the values
	// that MediaConvert uses to generate and place Kantar watermarks in your output
	// audio. These settings apply to every output in your job. In addition to specifying
	// these values, you also need to store your Kantar credentials in AWS Secrets
	// Manager. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/kantar-watermarking.html.
	KantarWatermark *KantarWatermarkSettings `locationName:"kantarWatermark" type:"structure"`

	// Overlay motion graphics on top of your video. The motion graphics that you
	// specify here appear on all outputs in all output groups. For more information,
	// see https://docs.aws.amazon.com/mediaconvert/latest/ug/motion-graphic-overlay.html.
	MotionImageInserter *MotionImageInserter `locationName:"motionImageInserter" type:"structure"`

	// Settings for your Nielsen configuration. If you don't do Nielsen measurement
	// and analytics, ignore these settings. When you enable Nielsen configuration
	// (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs
	// in the job. To enable Nielsen configuration programmatically, include an
	// instance of nielsenConfiguration in your JSON job specification. Even if
	// you don't include any children of nielsenConfiguration, you still enable
	// the setting.
	NielsenConfiguration *NielsenConfiguration `locationName:"nielsenConfiguration" type:"structure"`

	// Ignore these settings unless you are using Nielsen non-linear watermarking.
	// Specify the values that MediaConvert uses to generate and place Nielsen watermarks
	// in your output audio. In addition to specifying these values, you also need
	// to set up your cloud TIC server. These settings apply to every output in
	// your job. The MediaConvert implementation is currently with the following
	// Nielsen versions: Nielsen Watermark SDK Version 5.2.1 Nielsen NLM Watermark
	// Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0]
	NielsenNonLinearWatermark *NielsenNonLinearWatermarkSettings `locationName:"nielsenNonLinearWatermark" type:"structure"`

	// (OutputGroups) contains one group of settings for each set of outputs that
	// share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime,
	// MXF, and no container) are grouped in a single output group as well. Required
	// in (OutputGroups) is a group of settings that apply to the whole group. This
	// required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings).
	// Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings
	// * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings
	// * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS,
	// CmafGroupSettings
	OutputGroups []*OutputGroup `locationName:"outputGroups" type:"list"`

	// These settings control how the service handles timecodes throughout the job.
	// These settings don't affect input clipping.
	TimecodeConfig *TimecodeConfig `locationName:"timecodeConfig" type:"structure"`

	// Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags
	// in any HLS outputs. To include timed metadata, you must enable it here, enable
	// it in each output container, and specify tags and timecodes in ID3 insertion
	// (Id3Insertion) objects.
	TimedMetadataInsertion *TimedMetadataInsertion `locationName:"timedMetadataInsertion" type:"structure"`
}

// String returns the string representation of s for debugging and logging.
// The output is produced by awsutil.Prettify and is not a stable wire format.
func (s JobSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it satisfies fmt.GoStringer
// and yields the same output as String.
func (s JobSettings) GoString() string {
	return s.String()
}
13669
// Validate inspects the fields of the type to determine if they are valid.
// It enforces the service model's minimum value constraints on scalar fields
// and recursively validates nested structures and list members. It returns
// a request.ErrInvalidParams listing every violation, or nil when all fields
// are valid.
func (s *JobSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "JobSettings"}
	// AdAvailOffset must be >= -1000 (API model minimum).
	if s.AdAvailOffset != nil && *s.AdAvailOffset < -1000 {
		invalidParams.Add(request.NewErrParamMinValue("AdAvailOffset", -1000))
	}
	if s.AvailBlanking != nil {
		if err := s.AvailBlanking.Validate(); err != nil {
			invalidParams.AddNested("AvailBlanking", err.(request.ErrInvalidParams))
		}
	}
	// List members: nil entries are skipped; nested errors are reported
	// under a "Field[index]" context.
	if s.Inputs != nil {
		for i, v := range s.Inputs {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Inputs", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.KantarWatermark != nil {
		if err := s.KantarWatermark.Validate(); err != nil {
			invalidParams.AddNested("KantarWatermark", err.(request.ErrInvalidParams))
		}
	}
	if s.MotionImageInserter != nil {
		if err := s.MotionImageInserter.Validate(); err != nil {
			invalidParams.AddNested("MotionImageInserter", err.(request.ErrInvalidParams))
		}
	}
	if s.NielsenNonLinearWatermark != nil {
		if err := s.NielsenNonLinearWatermark.Validate(); err != nil {
			invalidParams.AddNested("NielsenNonLinearWatermark", err.(request.ErrInvalidParams))
		}
	}
	if s.OutputGroups != nil {
		for i, v := range s.OutputGroups {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputGroups", i), err.(request.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
13722
// SetAdAvailOffset sets the AdAvailOffset field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *JobSettings) SetAdAvailOffset(v int64) *JobSettings {
	s.AdAvailOffset = &v
	return s
}

// SetAvailBlanking sets the AvailBlanking field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobSettings) SetAvailBlanking(v *AvailBlanking) *JobSettings {
	s.AvailBlanking = v
	return s
}

// SetEsam sets the Esam field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobSettings) SetEsam(v *EsamSettings) *JobSettings {
	s.Esam = v
	return s
}

// SetExtendedDataServices sets the ExtendedDataServices field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobSettings) SetExtendedDataServices(v *ExtendedDataServices) *JobSettings {
	s.ExtendedDataServices = v
	return s
}

// SetInputs sets the Inputs field's value.
// The slice is stored as-is (not copied); it returns s to allow call chaining.
func (s *JobSettings) SetInputs(v []*Input) *JobSettings {
	s.Inputs = v
	return s
}

// SetKantarWatermark sets the KantarWatermark field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobSettings) SetKantarWatermark(v *KantarWatermarkSettings) *JobSettings {
	s.KantarWatermark = v
	return s
}

// SetMotionImageInserter sets the MotionImageInserter field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobSettings) SetMotionImageInserter(v *MotionImageInserter) *JobSettings {
	s.MotionImageInserter = v
	return s
}

// SetNielsenConfiguration sets the NielsenConfiguration field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobSettings) SetNielsenConfiguration(v *NielsenConfiguration) *JobSettings {
	s.NielsenConfiguration = v
	return s
}

// SetNielsenNonLinearWatermark sets the NielsenNonLinearWatermark field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobSettings) SetNielsenNonLinearWatermark(v *NielsenNonLinearWatermarkSettings) *JobSettings {
	s.NielsenNonLinearWatermark = v
	return s
}

// SetOutputGroups sets the OutputGroups field's value.
// The slice is stored as-is (not copied); it returns s to allow call chaining.
func (s *JobSettings) SetOutputGroups(v []*OutputGroup) *JobSettings {
	s.OutputGroups = v
	return s
}

// SetTimecodeConfig sets the TimecodeConfig field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobSettings) SetTimecodeConfig(v *TimecodeConfig) *JobSettings {
	s.TimecodeConfig = v
	return s
}

// SetTimedMetadataInsertion sets the TimedMetadataInsertion field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobSettings) SetTimedMetadataInsertion(v *TimedMetadataInsertion) *JobSettings {
	s.TimedMetadataInsertion = v
	return s
}
13794
// A job template is a pre-made set of encoding instructions that you can use
// to quickly create a job.
//
// All fields are pointers or slices so that an unset value can be
// distinguished from a zero value; the locationName tags map each field to
// its name in the service's JSON representation.
type JobTemplate struct {
	// Unexported marker field whose tag carries the shape's protocol metadata.
	_ struct{} `type:"structure"`

	// Accelerated transcoding can significantly speed up jobs with long, visually
	// complex content.
	AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"`

	// An identifier for this resource that is unique within all of AWS.
	Arn *string `locationName:"arn" type:"string"`

	// An optional category you create to organize your job templates.
	Category *string `locationName:"category" type:"string"`

	// The timestamp in epoch seconds for Job template creation.
	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// An optional description you create for each job template.
	Description *string `locationName:"description" type:"string"`

	// Optional list of hop destinations.
	HopDestinations []*HopDestination `locationName:"hopDestinations" type:"list"`

	// The timestamp in epoch seconds when the Job template was last updated.
	LastUpdated *time.Time `locationName:"lastUpdated" type:"timestamp" timestampFormat:"unixTimestamp"`

	// A name you create for each job template. Each name must be unique within
	// your account.
	//
	// Name is a required field
	Name *string `locationName:"name" type:"string" required:"true"`

	// Relative priority on the job.
	Priority *int64 `locationName:"priority" type:"integer"`

	// Optional. The queue that jobs created from this template are assigned to.
	// If you don't specify this, jobs will go to the default queue.
	Queue *string `locationName:"queue" type:"string"`

	// JobTemplateSettings contains all the transcode settings saved in the template
	// that will be applied to jobs created from it.
	//
	// Settings is a required field
	Settings *JobTemplateSettings `locationName:"settings" type:"structure" required:"true"`

	// Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch
	// Events. Set the interval, in seconds, between status updates. MediaConvert
	// sends an update at this interval from the time the service begins processing
	// your job to the time it completes the transcode or encounters an error.
	StatusUpdateInterval *string `locationName:"statusUpdateInterval" type:"string" enum:"StatusUpdateInterval"`

	// A job template can be of two types: system or custom. System or built-in
	// job templates can't be modified or deleted by the user.
	Type *string `locationName:"type" type:"string" enum:"Type"`
}

// String returns the string representation of the struct, rendered by
// awsutil.Prettify.
func (s JobTemplate) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is identical to String.
func (s JobTemplate) GoString() string {
	return s.String()
}

// SetAccelerationSettings sets the AccelerationSettings field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobTemplate) SetAccelerationSettings(v *AccelerationSettings) *JobTemplate {
	s.AccelerationSettings = v
	return s
}

// SetArn sets the Arn field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *JobTemplate) SetArn(v string) *JobTemplate {
	s.Arn = &v
	return s
}

// SetCategory sets the Category field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *JobTemplate) SetCategory(v string) *JobTemplate {
	s.Category = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *JobTemplate) SetCreatedAt(v time.Time) *JobTemplate {
	s.CreatedAt = &v
	return s
}

// SetDescription sets the Description field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *JobTemplate) SetDescription(v string) *JobTemplate {
	s.Description = &v
	return s
}

// SetHopDestinations sets the HopDestinations field's value.
// The slice is stored as-is (not copied); it returns s to allow call chaining.
func (s *JobTemplate) SetHopDestinations(v []*HopDestination) *JobTemplate {
	s.HopDestinations = v
	return s
}

// SetLastUpdated sets the LastUpdated field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *JobTemplate) SetLastUpdated(v time.Time) *JobTemplate {
	s.LastUpdated = &v
	return s
}

// SetName sets the Name field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *JobTemplate) SetName(v string) *JobTemplate {
	s.Name = &v
	return s
}

// SetPriority sets the Priority field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *JobTemplate) SetPriority(v int64) *JobTemplate {
	s.Priority = &v
	return s
}

// SetQueue sets the Queue field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *JobTemplate) SetQueue(v string) *JobTemplate {
	s.Queue = &v
	return s
}

// SetSettings sets the Settings field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobTemplate) SetSettings(v *JobTemplateSettings) *JobTemplate {
	s.Settings = v
	return s
}

// SetStatusUpdateInterval sets the StatusUpdateInterval field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *JobTemplate) SetStatusUpdateInterval(v string) *JobTemplate {
	s.StatusUpdateInterval = &v
	return s
}

// SetType sets the Type field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *JobTemplate) SetType(v string) *JobTemplate {
	s.Type = &v
	return s
}
13939
// JobTemplateSettings contains all the transcode settings saved in the template
// that will be applied to jobs created from it.
//
// All fields are pointers or slices so that an unset value can be
// distinguished from a zero value; the locationName tags map each field to
// its name in the service's JSON representation.
type JobTemplateSettings struct {
	// Unexported marker field whose tag carries the shape's protocol metadata.
	_ struct{} `type:"structure"`

	// When specified, this offset (in milliseconds) is added to the input Ad Avail
	// PTS time.
	AdAvailOffset *int64 `locationName:"adAvailOffset" type:"integer"`

	// Settings for ad avail blanking. Video can be blanked or overlaid with an
	// image, and audio muted during SCTE-35 triggered ad avails.
	AvailBlanking *AvailBlanking `locationName:"availBlanking" type:"structure"`

	// Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion,
	// you can ignore these settings.
	Esam *EsamSettings `locationName:"esam" type:"structure"`

	// Hexadecimal value as per EIA-608 Line 21 Data Services, section 9.5.1.5 05h
	// Content Advisory.
	ExtendedDataServices *ExtendedDataServices `locationName:"extendedDataServices" type:"structure"`

	// Use Inputs (inputs) to define the source file used in the transcode job.
	// There can only be one input in a job template. Using the API, you can include
	// multiple inputs when referencing a job template.
	Inputs []*InputTemplate `locationName:"inputs" type:"list"`

	// Use these settings only when you use Kantar watermarking. Specify the values
	// that MediaConvert uses to generate and place Kantar watermarks in your output
	// audio. These settings apply to every output in your job. In addition to specifying
	// these values, you also need to store your Kantar credentials in AWS Secrets
	// Manager. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/kantar-watermarking.html.
	KantarWatermark *KantarWatermarkSettings `locationName:"kantarWatermark" type:"structure"`

	// Overlay motion graphics on top of your video. The motion graphics that you
	// specify here appear on all outputs in all output groups. For more information,
	// see https://docs.aws.amazon.com/mediaconvert/latest/ug/motion-graphic-overlay.html.
	MotionImageInserter *MotionImageInserter `locationName:"motionImageInserter" type:"structure"`

	// Settings for your Nielsen configuration. If you don't do Nielsen measurement
	// and analytics, ignore these settings. When you enable Nielsen configuration
	// (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs
	// in the job. To enable Nielsen configuration programmatically, include an
	// instance of nielsenConfiguration in your JSON job specification. Even if
	// you don't include any children of nielsenConfiguration, you still enable
	// the setting.
	NielsenConfiguration *NielsenConfiguration `locationName:"nielsenConfiguration" type:"structure"`

	// Ignore these settings unless you are using Nielsen non-linear watermarking.
	// Specify the values that MediaConvert uses to generate and place Nielsen watermarks
	// in your output audio. In addition to specifying these values, you also need
	// to set up your cloud TIC server. These settings apply to every output in
	// your job. The MediaConvert implementation is currently with the following
	// Nielsen versions: Nielsen Watermark SDK Version 5.2.1 Nielsen NLM Watermark
	// Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0]
	NielsenNonLinearWatermark *NielsenNonLinearWatermarkSettings `locationName:"nielsenNonLinearWatermark" type:"structure"`

	// (OutputGroups) contains one group of settings for each set of outputs that
	// share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime,
	// MXF, and no container) are grouped in a single output group as well. Required
	// in (OutputGroups) is a group of settings that apply to the whole group. This
	// required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings).
	// Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings
	// * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings
	// * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS,
	// CmafGroupSettings
	OutputGroups []*OutputGroup `locationName:"outputGroups" type:"list"`

	// These settings control how the service handles timecodes throughout the job.
	// These settings don't affect input clipping.
	TimecodeConfig *TimecodeConfig `locationName:"timecodeConfig" type:"structure"`

	// Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags
	// in any HLS outputs. To include timed metadata, you must enable it here, enable
	// it in each output container, and specify tags and timecodes in ID3 insertion
	// (Id3Insertion) objects.
	TimedMetadataInsertion *TimedMetadataInsertion `locationName:"timedMetadataInsertion" type:"structure"`
}

// String returns the string representation of the struct, rendered by
// awsutil.Prettify.
func (s JobTemplateSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is identical to String.
func (s JobTemplateSettings) GoString() string {
	return s.String()
}
14027
14028// Validate inspects the fields of the type to determine if they are valid.
14029func (s *JobTemplateSettings) Validate() error {
14030	invalidParams := request.ErrInvalidParams{Context: "JobTemplateSettings"}
14031	if s.AdAvailOffset != nil && *s.AdAvailOffset < -1000 {
14032		invalidParams.Add(request.NewErrParamMinValue("AdAvailOffset", -1000))
14033	}
14034	if s.AvailBlanking != nil {
14035		if err := s.AvailBlanking.Validate(); err != nil {
14036			invalidParams.AddNested("AvailBlanking", err.(request.ErrInvalidParams))
14037		}
14038	}
14039	if s.Inputs != nil {
14040		for i, v := range s.Inputs {
14041			if v == nil {
14042				continue
14043			}
14044			if err := v.Validate(); err != nil {
14045				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Inputs", i), err.(request.ErrInvalidParams))
14046			}
14047		}
14048	}
14049	if s.KantarWatermark != nil {
14050		if err := s.KantarWatermark.Validate(); err != nil {
14051			invalidParams.AddNested("KantarWatermark", err.(request.ErrInvalidParams))
14052		}
14053	}
14054	if s.MotionImageInserter != nil {
14055		if err := s.MotionImageInserter.Validate(); err != nil {
14056			invalidParams.AddNested("MotionImageInserter", err.(request.ErrInvalidParams))
14057		}
14058	}
14059	if s.NielsenNonLinearWatermark != nil {
14060		if err := s.NielsenNonLinearWatermark.Validate(); err != nil {
14061			invalidParams.AddNested("NielsenNonLinearWatermark", err.(request.ErrInvalidParams))
14062		}
14063	}
14064	if s.OutputGroups != nil {
14065		for i, v := range s.OutputGroups {
14066			if v == nil {
14067				continue
14068			}
14069			if err := v.Validate(); err != nil {
14070				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputGroups", i), err.(request.ErrInvalidParams))
14071			}
14072		}
14073	}
14074
14075	if invalidParams.Len() > 0 {
14076		return invalidParams
14077	}
14078	return nil
14079}
14080
// SetAdAvailOffset sets the AdAvailOffset field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *JobTemplateSettings) SetAdAvailOffset(v int64) *JobTemplateSettings {
	s.AdAvailOffset = &v
	return s
}

// SetAvailBlanking sets the AvailBlanking field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobTemplateSettings) SetAvailBlanking(v *AvailBlanking) *JobTemplateSettings {
	s.AvailBlanking = v
	return s
}

// SetEsam sets the Esam field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobTemplateSettings) SetEsam(v *EsamSettings) *JobTemplateSettings {
	s.Esam = v
	return s
}

// SetExtendedDataServices sets the ExtendedDataServices field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobTemplateSettings) SetExtendedDataServices(v *ExtendedDataServices) *JobTemplateSettings {
	s.ExtendedDataServices = v
	return s
}

// SetInputs sets the Inputs field's value.
// The slice is stored as-is (not copied); it returns s to allow call chaining.
func (s *JobTemplateSettings) SetInputs(v []*InputTemplate) *JobTemplateSettings {
	s.Inputs = v
	return s
}

// SetKantarWatermark sets the KantarWatermark field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobTemplateSettings) SetKantarWatermark(v *KantarWatermarkSettings) *JobTemplateSettings {
	s.KantarWatermark = v
	return s
}

// SetMotionImageInserter sets the MotionImageInserter field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobTemplateSettings) SetMotionImageInserter(v *MotionImageInserter) *JobTemplateSettings {
	s.MotionImageInserter = v
	return s
}

// SetNielsenConfiguration sets the NielsenConfiguration field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobTemplateSettings) SetNielsenConfiguration(v *NielsenConfiguration) *JobTemplateSettings {
	s.NielsenConfiguration = v
	return s
}

// SetNielsenNonLinearWatermark sets the NielsenNonLinearWatermark field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobTemplateSettings) SetNielsenNonLinearWatermark(v *NielsenNonLinearWatermarkSettings) *JobTemplateSettings {
	s.NielsenNonLinearWatermark = v
	return s
}

// SetOutputGroups sets the OutputGroups field's value.
// The slice is stored as-is (not copied); it returns s to allow call chaining.
func (s *JobTemplateSettings) SetOutputGroups(v []*OutputGroup) *JobTemplateSettings {
	s.OutputGroups = v
	return s
}

// SetTimecodeConfig sets the TimecodeConfig field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobTemplateSettings) SetTimecodeConfig(v *TimecodeConfig) *JobTemplateSettings {
	s.TimecodeConfig = v
	return s
}

// SetTimedMetadataInsertion sets the TimedMetadataInsertion field's value.
// The pointer is stored as-is; it returns s to allow call chaining.
func (s *JobTemplateSettings) SetTimedMetadataInsertion(v *TimedMetadataInsertion) *JobTemplateSettings {
	s.TimedMetadataInsertion = v
	return s
}
14152
// Use these settings only when you use Kantar watermarking. Specify the values
// that MediaConvert uses to generate and place Kantar watermarks in your output
// audio. These settings apply to every output in your job. In addition to specifying
// these values, you also need to store your Kantar credentials in AWS Secrets
// Manager. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/kantar-watermarking.html.
//
// All fields are pointers so that an unset value can be distinguished from a
// zero value; fields tagged min:"1" must be non-empty when set (see Validate).
type KantarWatermarkSettings struct {
	// Unexported marker field whose tag carries the shape's protocol metadata.
	_ struct{} `type:"structure"`

	// Provide an audio channel name from your Kantar audio license.
	ChannelName *string `locationName:"channelName" min:"1" type:"string"`

	// Specify a unique identifier for Kantar to use for this piece of content.
	ContentReference *string `locationName:"contentReference" min:"1" type:"string"`

	// Provide the name of the AWS Secrets Manager secret where your Kantar credentials
	// are stored. Note that your MediaConvert service role must provide access
	// to this secret. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/granting-permissions-for-mediaconvert-to-access-secrets-manager-secret.html.
	// For instructions on creating a secret, see https://docs.aws.amazon.com/secretsmanager/latest/userguide/tutorials_basic.html,
	// in the AWS Secrets Manager User Guide.
	CredentialsSecretName *string `locationName:"credentialsSecretName" min:"1" type:"string"`

	// Optional. Specify an offset, in whole seconds, from the start of your output
	// and the beginning of the watermarking. When you don't specify an offset,
	// Kantar defaults to zero.
	FileOffset *float64 `locationName:"fileOffset" type:"double"`

	// Provide your Kantar license ID number. You should get this number from Kantar.
	KantarLicenseId *int64 `locationName:"kantarLicenseId" type:"integer"`

	// Provide the HTTPS endpoint to the Kantar server. You should get this endpoint
	// from Kantar.
	KantarServerUrl *string `locationName:"kantarServerUrl" type:"string"`

	// Optional. Specify the Amazon S3 bucket where you want MediaConvert to store
	// your Kantar watermark XML logs. When you don't specify a bucket, MediaConvert
	// doesn't save these logs. Note that your MediaConvert service role must provide
	// access to this location. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html
	LogDestination *string `locationName:"logDestination" type:"string"`

	// You can optionally use this field to specify the first timestamp that Kantar
	// embeds during watermarking. Kantar suggests that you be very cautious when
	// using this Kantar feature, and that you use it only on channels that are
	// managed specifically for use with this feature by your Audience Measurement
	// Operator. For more information about this feature, contact Kantar technical
	// support.
	Metadata3 *string `locationName:"metadata3" min:"1" type:"string"`

	// Additional metadata that MediaConvert sends to Kantar. Maximum length is
	// 50 characters.
	Metadata4 *string `locationName:"metadata4" min:"1" type:"string"`

	// Additional metadata that MediaConvert sends to Kantar. Maximum length is
	// 50 characters.
	Metadata5 *string `locationName:"metadata5" min:"1" type:"string"`

	// Additional metadata that MediaConvert sends to Kantar. Maximum length is
	// 50 characters.
	Metadata6 *string `locationName:"metadata6" min:"1" type:"string"`

	// Additional metadata that MediaConvert sends to Kantar. Maximum length is
	// 50 characters.
	Metadata7 *string `locationName:"metadata7" min:"1" type:"string"`

	// Additional metadata that MediaConvert sends to Kantar. Maximum length is
	// 50 characters.
	Metadata8 *string `locationName:"metadata8" min:"1" type:"string"`
}

// String returns the string representation of the struct, rendered by
// awsutil.Prettify.
func (s KantarWatermarkSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is identical to String.
func (s KantarWatermarkSettings) GoString() string {
	return s.String()
}
14230
14231// Validate inspects the fields of the type to determine if they are valid.
14232func (s *KantarWatermarkSettings) Validate() error {
14233	invalidParams := request.ErrInvalidParams{Context: "KantarWatermarkSettings"}
14234	if s.ChannelName != nil && len(*s.ChannelName) < 1 {
14235		invalidParams.Add(request.NewErrParamMinLen("ChannelName", 1))
14236	}
14237	if s.ContentReference != nil && len(*s.ContentReference) < 1 {
14238		invalidParams.Add(request.NewErrParamMinLen("ContentReference", 1))
14239	}
14240	if s.CredentialsSecretName != nil && len(*s.CredentialsSecretName) < 1 {
14241		invalidParams.Add(request.NewErrParamMinLen("CredentialsSecretName", 1))
14242	}
14243	if s.Metadata3 != nil && len(*s.Metadata3) < 1 {
14244		invalidParams.Add(request.NewErrParamMinLen("Metadata3", 1))
14245	}
14246	if s.Metadata4 != nil && len(*s.Metadata4) < 1 {
14247		invalidParams.Add(request.NewErrParamMinLen("Metadata4", 1))
14248	}
14249	if s.Metadata5 != nil && len(*s.Metadata5) < 1 {
14250		invalidParams.Add(request.NewErrParamMinLen("Metadata5", 1))
14251	}
14252	if s.Metadata6 != nil && len(*s.Metadata6) < 1 {
14253		invalidParams.Add(request.NewErrParamMinLen("Metadata6", 1))
14254	}
14255	if s.Metadata7 != nil && len(*s.Metadata7) < 1 {
14256		invalidParams.Add(request.NewErrParamMinLen("Metadata7", 1))
14257	}
14258	if s.Metadata8 != nil && len(*s.Metadata8) < 1 {
14259		invalidParams.Add(request.NewErrParamMinLen("Metadata8", 1))
14260	}
14261
14262	if invalidParams.Len() > 0 {
14263		return invalidParams
14264	}
14265	return nil
14266}
14267
// SetChannelName sets the ChannelName field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *KantarWatermarkSettings) SetChannelName(v string) *KantarWatermarkSettings {
	s.ChannelName = &v
	return s
}

// SetContentReference sets the ContentReference field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *KantarWatermarkSettings) SetContentReference(v string) *KantarWatermarkSettings {
	s.ContentReference = &v
	return s
}

// SetCredentialsSecretName sets the CredentialsSecretName field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *KantarWatermarkSettings) SetCredentialsSecretName(v string) *KantarWatermarkSettings {
	s.CredentialsSecretName = &v
	return s
}

// SetFileOffset sets the FileOffset field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *KantarWatermarkSettings) SetFileOffset(v float64) *KantarWatermarkSettings {
	s.FileOffset = &v
	return s
}

// SetKantarLicenseId sets the KantarLicenseId field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *KantarWatermarkSettings) SetKantarLicenseId(v int64) *KantarWatermarkSettings {
	s.KantarLicenseId = &v
	return s
}

// SetKantarServerUrl sets the KantarServerUrl field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *KantarWatermarkSettings) SetKantarServerUrl(v string) *KantarWatermarkSettings {
	s.KantarServerUrl = &v
	return s
}

// SetLogDestination sets the LogDestination field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *KantarWatermarkSettings) SetLogDestination(v string) *KantarWatermarkSettings {
	s.LogDestination = &v
	return s
}

// SetMetadata3 sets the Metadata3 field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *KantarWatermarkSettings) SetMetadata3(v string) *KantarWatermarkSettings {
	s.Metadata3 = &v
	return s
}

// SetMetadata4 sets the Metadata4 field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *KantarWatermarkSettings) SetMetadata4(v string) *KantarWatermarkSettings {
	s.Metadata4 = &v
	return s
}

// SetMetadata5 sets the Metadata5 field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *KantarWatermarkSettings) SetMetadata5(v string) *KantarWatermarkSettings {
	s.Metadata5 = &v
	return s
}

// SetMetadata6 sets the Metadata6 field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *KantarWatermarkSettings) SetMetadata6(v string) *KantarWatermarkSettings {
	s.Metadata6 = &v
	return s
}

// SetMetadata7 sets the Metadata7 field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *KantarWatermarkSettings) SetMetadata7(v string) *KantarWatermarkSettings {
	s.Metadata7 = &v
	return s
}

// SetMetadata8 sets the Metadata8 field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *KantarWatermarkSettings) SetMetadata8(v string) *KantarWatermarkSettings {
	s.Metadata8 = &v
	return s
}
14345
// You can send list job templates requests with an empty body. Optionally,
// you can filter the response by category by specifying it in your request
// body. You can also optionally specify the maximum number, up to twenty, of
// job templates to be returned.
//
// Every field is optional; the location:"querystring" tags show that all
// parameters are sent in the request's query string, not its body.
type ListJobTemplatesInput struct {
	// Unexported marker field whose tag carries the shape's protocol metadata.
	_ struct{} `type:"structure"`

	// Optionally, specify a job template category to limit responses to only job
	// templates from that category.
	Category *string `location:"querystring" locationName:"category" type:"string"`

	// Optional. When you request a list of job templates, you can choose to list
	// them alphabetically by NAME or chronologically by CREATION_DATE. If you don't
	// specify, the service will list them by name.
	ListBy *string `location:"querystring" locationName:"listBy" type:"string" enum:"JobTemplateListBy"`

	// Optional. Number of job templates, up to twenty, that will be returned at
	// one time.
	MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`

	// Use this string, provided with the response to a previous request, to request
	// the next batch of job templates.
	NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`

	// Optional. When you request lists of resources, you can specify whether they
	// are sorted in ASCENDING or DESCENDING order. Default varies by resource.
	Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"`
}

// String returns the string representation of the struct, rendered by
// awsutil.Prettify.
func (s ListJobTemplatesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is identical to String.
func (s ListJobTemplatesInput) GoString() string {
	return s.String()
}
14384
14385// Validate inspects the fields of the type to determine if they are valid.
14386func (s *ListJobTemplatesInput) Validate() error {
14387	invalidParams := request.ErrInvalidParams{Context: "ListJobTemplatesInput"}
14388	if s.MaxResults != nil && *s.MaxResults < 1 {
14389		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
14390	}
14391
14392	if invalidParams.Len() > 0 {
14393		return invalidParams
14394	}
14395	return nil
14396}
14397
// SetCategory sets the Category field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *ListJobTemplatesInput) SetCategory(v string) *ListJobTemplatesInput {
	s.Category = &v
	return s
}

// SetListBy sets the ListBy field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *ListJobTemplatesInput) SetListBy(v string) *ListJobTemplatesInput {
	s.ListBy = &v
	return s
}

// SetMaxResults sets the MaxResults field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *ListJobTemplatesInput) SetMaxResults(v int64) *ListJobTemplatesInput {
	s.MaxResults = &v
	return s
}

// SetNextToken sets the NextToken field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *ListJobTemplatesInput) SetNextToken(v string) *ListJobTemplatesInput {
	s.NextToken = &v
	return s
}

// SetOrder sets the Order field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *ListJobTemplatesInput) SetOrder(v string) *ListJobTemplatesInput {
	s.Order = &v
	return s
}
14427
// Successful list job templates requests return a JSON array of job templates.
// If you don't specify how they are ordered, you will receive them in alphabetical
// order by name.
type ListJobTemplatesOutput struct {
	// Unexported marker field whose tag carries the shape's protocol metadata.
	_ struct{} `type:"structure"`

	// List of Job templates.
	JobTemplates []*JobTemplate `locationName:"jobTemplates" type:"list"`

	// Use this string to request the next batch of job templates.
	NextToken *string `locationName:"nextToken" type:"string"`
}

// String returns the string representation of the struct, rendered by
// awsutil.Prettify.
func (s ListJobTemplatesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is identical to String.
func (s ListJobTemplatesOutput) GoString() string {
	return s.String()
}

// SetJobTemplates sets the JobTemplates field's value.
// The slice is stored as-is (not copied); it returns s to allow call chaining.
func (s *ListJobTemplatesOutput) SetJobTemplates(v []*JobTemplate) *ListJobTemplatesOutput {
	s.JobTemplates = v
	return s
}

// SetNextToken sets the NextToken field's value.
// A copy of v is stored; it returns s to allow call chaining.
func (s *ListJobTemplatesOutput) SetNextToken(v string) *ListJobTemplatesOutput {
	s.NextToken = &v
	return s
}
14462
// You can send list jobs requests with an empty body. Optionally, you can filter
// the response by queue and/or job status by specifying them in your request
// body. You can also optionally specify the maximum number, up to twenty, of
// jobs to be returned.
//
// Every field is optional; the location:"querystring" tags show that all
// parameters are sent in the request's query string, not its body.
type ListJobsInput struct {
	// Unexported marker field whose tag carries the shape's protocol metadata.
	_ struct{} `type:"structure"`

	// Optional. Number of jobs, up to twenty, that will be returned at one time.
	MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`

	// Optional. Use this string, provided with the response to a previous request,
	// to request the next batch of jobs.
	NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`

	// Optional. When you request lists of resources, you can specify whether they
	// are sorted in ASCENDING or DESCENDING order. Default varies by resource.
	Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"`

	// Optional. Provide a queue name to get back only jobs from that queue.
	Queue *string `location:"querystring" locationName:"queue" type:"string"`

	// Optional. A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED,
	// or ERROR.
	Status *string `location:"querystring" locationName:"status" type:"string" enum:"JobStatus"`
}

// String returns the string representation of the struct, rendered by
// awsutil.Prettify.
func (s ListJobsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is identical to String.
func (s ListJobsInput) GoString() string {
	return s.String()
}
14498
14499// Validate inspects the fields of the type to determine if they are valid.
14500func (s *ListJobsInput) Validate() error {
14501	invalidParams := request.ErrInvalidParams{Context: "ListJobsInput"}
14502	if s.MaxResults != nil && *s.MaxResults < 1 {
14503		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
14504	}
14505
14506	if invalidParams.Len() > 0 {
14507		return invalidParams
14508	}
14509	return nil
14510}
14511
14512// SetMaxResults sets the MaxResults field's value.
14513func (s *ListJobsInput) SetMaxResults(v int64) *ListJobsInput {
14514	s.MaxResults = &v
14515	return s
14516}
14517
14518// SetNextToken sets the NextToken field's value.
14519func (s *ListJobsInput) SetNextToken(v string) *ListJobsInput {
14520	s.NextToken = &v
14521	return s
14522}
14523
14524// SetOrder sets the Order field's value.
14525func (s *ListJobsInput) SetOrder(v string) *ListJobsInput {
14526	s.Order = &v
14527	return s
14528}
14529
14530// SetQueue sets the Queue field's value.
14531func (s *ListJobsInput) SetQueue(v string) *ListJobsInput {
14532	s.Queue = &v
14533	return s
14534}
14535
14536// SetStatus sets the Status field's value.
14537func (s *ListJobsInput) SetStatus(v string) *ListJobsInput {
14538	s.Status = &v
14539	return s
14540}
14541
// Successful list jobs requests return a JSON array of jobs. If you don't specify
// how they are ordered, you will receive the most recently created first.
type ListJobsOutput struct {
	_ struct{} `type:"structure"`

	// List of jobs
	Jobs []*Job `locationName:"jobs" type:"list"`

	// Use this string to request the next batch of jobs.
	NextToken *string `locationName:"nextToken" type:"string"`
}

// String returns a human-readable representation of the value, produced by
// awsutil.Prettify. Useful for logging and debugging.
func (s ListJobsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString implements fmt.GoStringer; it reuses the String representation.
func (s ListJobsOutput) GoString() string {
	return s.String()
}
14563
14564// SetJobs sets the Jobs field's value.
14565func (s *ListJobsOutput) SetJobs(v []*Job) *ListJobsOutput {
14566	s.Jobs = v
14567	return s
14568}
14569
14570// SetNextToken sets the NextToken field's value.
14571func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput {
14572	s.NextToken = &v
14573	return s
14574}
14575
// You can send list presets requests with an empty body. Optionally, you can
// filter the response by category by specifying it in your request body. You
// can also optionally specify the maximum number, up to twenty, of presets
// to be returned.
type ListPresetsInput struct {
	_ struct{} `type:"structure"`

	// Optionally, specify a preset category to limit responses to only presets
	// from that category.
	Category *string `location:"querystring" locationName:"category" type:"string"`

	// Optional. When you request a list of presets, you can choose to list them
	// alphabetically by NAME or chronologically by CREATION_DATE. If you don't
	// specify, the service will list them by name.
	ListBy *string `location:"querystring" locationName:"listBy" type:"string" enum:"PresetListBy"`

	// Optional. Number of presets, up to twenty, that will be returned at one time
	// Values below 1 are rejected by Validate.
	MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`

	// Use this string, provided with the response to a previous request, to request
	// the next batch of presets.
	NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`

	// Optional. When you request lists of resources, you can specify whether they
	// are sorted in ASCENDING or DESCENDING order. Default varies by resource.
	Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"`
}

// String returns a human-readable representation of the value, produced by
// awsutil.Prettify. Useful for logging and debugging.
func (s ListPresetsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString implements fmt.GoStringer; it reuses the String representation.
func (s ListPresetsInput) GoString() string {
	return s.String()
}
14613
14614// Validate inspects the fields of the type to determine if they are valid.
14615func (s *ListPresetsInput) Validate() error {
14616	invalidParams := request.ErrInvalidParams{Context: "ListPresetsInput"}
14617	if s.MaxResults != nil && *s.MaxResults < 1 {
14618		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
14619	}
14620
14621	if invalidParams.Len() > 0 {
14622		return invalidParams
14623	}
14624	return nil
14625}
14626
14627// SetCategory sets the Category field's value.
14628func (s *ListPresetsInput) SetCategory(v string) *ListPresetsInput {
14629	s.Category = &v
14630	return s
14631}
14632
14633// SetListBy sets the ListBy field's value.
14634func (s *ListPresetsInput) SetListBy(v string) *ListPresetsInput {
14635	s.ListBy = &v
14636	return s
14637}
14638
14639// SetMaxResults sets the MaxResults field's value.
14640func (s *ListPresetsInput) SetMaxResults(v int64) *ListPresetsInput {
14641	s.MaxResults = &v
14642	return s
14643}
14644
14645// SetNextToken sets the NextToken field's value.
14646func (s *ListPresetsInput) SetNextToken(v string) *ListPresetsInput {
14647	s.NextToken = &v
14648	return s
14649}
14650
14651// SetOrder sets the Order field's value.
14652func (s *ListPresetsInput) SetOrder(v string) *ListPresetsInput {
14653	s.Order = &v
14654	return s
14655}
14656
// Successful list presets requests return a JSON array of presets. If you don't
// specify how they are ordered, you will receive them alphabetically by name.
type ListPresetsOutput struct {
	_ struct{} `type:"structure"`

	// Use this string to request the next batch of presets.
	NextToken *string `locationName:"nextToken" type:"string"`

	// List of presets
	Presets []*Preset `locationName:"presets" type:"list"`
}

// String returns a human-readable representation of the value, produced by
// awsutil.Prettify. Useful for logging and debugging.
func (s ListPresetsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString implements fmt.GoStringer; it reuses the String representation.
func (s ListPresetsOutput) GoString() string {
	return s.String()
}
14678
14679// SetNextToken sets the NextToken field's value.
14680func (s *ListPresetsOutput) SetNextToken(v string) *ListPresetsOutput {
14681	s.NextToken = &v
14682	return s
14683}
14684
14685// SetPresets sets the Presets field's value.
14686func (s *ListPresetsOutput) SetPresets(v []*Preset) *ListPresetsOutput {
14687	s.Presets = v
14688	return s
14689}
14690
// You can send list queues requests with an empty body. You can optionally
// specify the maximum number, up to twenty, of queues to be returned.
type ListQueuesInput struct {
	_ struct{} `type:"structure"`

	// Optional. When you request a list of queues, you can choose to list them
	// alphabetically by NAME or chronologically by CREATION_DATE. If you don't
	// specify, the service will list them by creation date.
	ListBy *string `location:"querystring" locationName:"listBy" type:"string" enum:"QueueListBy"`

	// Optional. Number of queues, up to twenty, that will be returned at one time.
	// Values below 1 are rejected by Validate.
	MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`

	// Use this string, provided with the response to a previous request, to request
	// the next batch of queues.
	NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`

	// Optional. When you request lists of resources, you can specify whether they
	// are sorted in ASCENDING or DESCENDING order. Default varies by resource.
	Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"`
}

// String returns a human-readable representation of the value, produced by
// awsutil.Prettify. Useful for logging and debugging.
func (s ListQueuesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString implements fmt.GoStringer; it reuses the String representation.
func (s ListQueuesInput) GoString() string {
	return s.String()
}
14722
14723// Validate inspects the fields of the type to determine if they are valid.
14724func (s *ListQueuesInput) Validate() error {
14725	invalidParams := request.ErrInvalidParams{Context: "ListQueuesInput"}
14726	if s.MaxResults != nil && *s.MaxResults < 1 {
14727		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
14728	}
14729
14730	if invalidParams.Len() > 0 {
14731		return invalidParams
14732	}
14733	return nil
14734}
14735
14736// SetListBy sets the ListBy field's value.
14737func (s *ListQueuesInput) SetListBy(v string) *ListQueuesInput {
14738	s.ListBy = &v
14739	return s
14740}
14741
14742// SetMaxResults sets the MaxResults field's value.
14743func (s *ListQueuesInput) SetMaxResults(v int64) *ListQueuesInput {
14744	s.MaxResults = &v
14745	return s
14746}
14747
14748// SetNextToken sets the NextToken field's value.
14749func (s *ListQueuesInput) SetNextToken(v string) *ListQueuesInput {
14750	s.NextToken = &v
14751	return s
14752}
14753
14754// SetOrder sets the Order field's value.
14755func (s *ListQueuesInput) SetOrder(v string) *ListQueuesInput {
14756	s.Order = &v
14757	return s
14758}
14759
// Successful list queues requests return a JSON array of queues. If you don't
// specify how they are ordered, you will receive them alphabetically by name.
type ListQueuesOutput struct {
	_ struct{} `type:"structure"`

	// Use this string to request the next batch of queues.
	NextToken *string `locationName:"nextToken" type:"string"`

	// List of queues.
	Queues []*Queue `locationName:"queues" type:"list"`
}

// String returns a human-readable representation of the value, produced by
// awsutil.Prettify. Useful for logging and debugging.
func (s ListQueuesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString implements fmt.GoStringer; it reuses the String representation.
func (s ListQueuesOutput) GoString() string {
	return s.String()
}
14781
14782// SetNextToken sets the NextToken field's value.
14783func (s *ListQueuesOutput) SetNextToken(v string) *ListQueuesOutput {
14784	s.NextToken = &v
14785	return s
14786}
14787
14788// SetQueues sets the Queues field's value.
14789func (s *ListQueuesOutput) SetQueues(v []*Queue) *ListQueuesOutput {
14790	s.Queues = v
14791	return s
14792}
14793
// List the tags for your AWS Elemental MediaConvert resource by sending a request
// with the Amazon Resource Name (ARN) of the resource. To get the ARN, send
// a GET request with the resource name.
type ListTagsForResourceInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the resource that you want to list tags
	// for. To get the ARN, send a GET request with the resource name.
	//
	// Arn is a required field
	Arn *string `location:"uri" locationName:"arn" type:"string" required:"true"`
}

// String returns a human-readable representation of the value, produced by
// awsutil.Prettify. Useful for logging and debugging.
func (s ListTagsForResourceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString implements fmt.GoStringer; it reuses the String representation.
func (s ListTagsForResourceInput) GoString() string {
	return s.String()
}
14816
14817// Validate inspects the fields of the type to determine if they are valid.
14818func (s *ListTagsForResourceInput) Validate() error {
14819	invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"}
14820	if s.Arn == nil {
14821		invalidParams.Add(request.NewErrParamRequired("Arn"))
14822	}
14823	if s.Arn != nil && len(*s.Arn) < 1 {
14824		invalidParams.Add(request.NewErrParamMinLen("Arn", 1))
14825	}
14826
14827	if invalidParams.Len() > 0 {
14828		return invalidParams
14829	}
14830	return nil
14831}
14832
14833// SetArn sets the Arn field's value.
14834func (s *ListTagsForResourceInput) SetArn(v string) *ListTagsForResourceInput {
14835	s.Arn = &v
14836	return s
14837}
14838
// A successful request to list the tags for a resource returns a JSON map of
// tags.
type ListTagsForResourceOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) and tags for an AWS Elemental MediaConvert
	// resource.
	ResourceTags *ResourceTags `locationName:"resourceTags" type:"structure"`
}

// String returns a human-readable representation of the value, produced by
// awsutil.Prettify. Useful for logging and debugging.
func (s ListTagsForResourceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString implements fmt.GoStringer; it reuses the String representation.
func (s ListTagsForResourceOutput) GoString() string {
	return s.String()
}
14858
// SetResourceTags sets the ResourceTags field's value and returns the receiver
// to allow call chaining.
func (s *ListTagsForResourceOutput) SetResourceTags(v *ResourceTags) *ListTagsForResourceOutput {
	s.ResourceTags = v
	return s
}
14864
// Settings for SCTE-35 signals from ESAM. Include this in your job settings
// to put SCTE-35 markers in your HLS and transport stream outputs at the insertion
// points that you specify in an ESAM XML document. Provide the document in
// the setting SCC XML (sccXml).
type M2tsScte35Esam struct {
	_ struct{} `type:"structure"`

	// Packet Identifier (PID) of the SCTE-35 stream in the transport stream generated
	// by ESAM.
	// Minimum value: 32 (enforced by Validate).
	Scte35EsamPid *int64 `locationName:"scte35EsamPid" min:"32" type:"integer"`
}

// String returns a human-readable representation of the value, produced by
// awsutil.Prettify. Useful for logging and debugging.
func (s M2tsScte35Esam) String() string {
	return awsutil.Prettify(s)
}

// GoString implements fmt.GoStringer; it reuses the String representation.
func (s M2tsScte35Esam) GoString() string {
	return s.String()
}
14886
14887// Validate inspects the fields of the type to determine if they are valid.
14888func (s *M2tsScte35Esam) Validate() error {
14889	invalidParams := request.ErrInvalidParams{Context: "M2tsScte35Esam"}
14890	if s.Scte35EsamPid != nil && *s.Scte35EsamPid < 32 {
14891		invalidParams.Add(request.NewErrParamMinValue("Scte35EsamPid", 32))
14892	}
14893
14894	if invalidParams.Len() > 0 {
14895		return invalidParams
14896	}
14897	return nil
14898}
14899
14900// SetScte35EsamPid sets the Scte35EsamPid field's value.
14901func (s *M2tsScte35Esam) SetScte35EsamPid(v int64) *M2tsScte35Esam {
14902	s.Scte35EsamPid = &v
14903	return s
14904}
14905
// MPEG-2 TS container settings. These apply to outputs in a File output group
// when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS).
// In these assets, data is organized by the program map table (PMT). Each transport
// stream program contains subsets of data, including audio, video, and metadata.
// Each of these subsets of data has a numerical label called a packet identifier
// (PID). Each transport stream program corresponds to one MediaConvert output.
// The PMT lists the types of data in a program along with their PID. Downstream
// systems and players use the program map table to look up the PID for each
// type of data it accesses and then uses the PIDs to locate specific data within
// the asset.
//
// All PID fields in this struct have a minimum value of 32, enforced by
// Validate.
type M2tsSettings struct {
	_ struct{} `type:"structure"`

	// Selects between the DVB and ATSC buffer models for Dolby Digital audio.
	AudioBufferModel *string `locationName:"audioBufferModel" type:"string" enum:"M2tsAudioBufferModel"`

	// Specify this setting only when your output will be consumed by a downstream
	// repackaging workflow that is sensitive to very small duration differences
	// between video and audio. For this situation, choose Match video duration
	// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
	// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
	// MediaConvert pads the output audio streams with silence or trims them to
	// ensure that the total duration of each audio stream is at least as long as
	// the total duration of the video stream. After padding or trimming, the audio
	// stream duration is no more than one frame longer than the video stream. MediaConvert
	// applies audio padding or trimming only to the end of the last segment of
	// the output. For unsegmented outputs, MediaConvert adds padding only to the
	// end of the file. When you keep the default value, any minor discrepancies
	// between audio and video duration will depend on your output audio codec.
	AudioDuration *string `locationName:"audioDuration" type:"string" enum:"M2tsAudioDuration"`

	// The number of audio frames to insert for each PES packet.
	AudioFramesPerPes *int64 `locationName:"audioFramesPerPes" type:"integer"`

	// Specify the packet identifiers (PIDs) for any elementary audio streams you
	// include in this output. Specify multiple PIDs as a JSON array. Default is
	// the range 482-492.
	AudioPids []*int64 `locationName:"audioPids" type:"list"`

	// Specify the output bitrate of the transport stream in bits per second. Setting
	// to 0 lets the muxer automatically determine the appropriate bitrate. Other
	// common values are 3750000, 7500000, and 15000000.
	Bitrate *int64 `locationName:"bitrate" type:"integer"`

	// Controls what buffer model to use for accurate interleaving. If set to MULTIPLEX,
	// use multiplex buffer model. If set to NONE, this can lead to lower latency,
	// but low-memory devices may not be able to play back the stream without interruptions.
	BufferModel *string `locationName:"bufferModel" type:"string" enum:"M2tsBufferModel"`

	// Use these settings to insert a DVB Network Information Table (NIT) in the
	// transport stream of this output. When you work directly in your JSON job
	// specification, include this object only when your job has a transport stream
	// output and the container settings contain the object M2tsSettings.
	DvbNitSettings *DvbNitSettings `locationName:"dvbNitSettings" type:"structure"`

	// Use these settings to insert a DVB Service Description Table (SDT) in the
	// transport stream of this output. When you work directly in your JSON job
	// specification, include this object only when your job has a transport stream
	// output and the container settings contain the object M2tsSettings.
	DvbSdtSettings *DvbSdtSettings `locationName:"dvbSdtSettings" type:"structure"`

	// Specify the packet identifiers (PIDs) for DVB subtitle data included in this
	// output. Specify multiple PIDs as a JSON array. Default is the range 460-479.
	DvbSubPids []*int64 `locationName:"dvbSubPids" type:"list"`

	// Use these settings to insert a DVB Time and Date Table (TDT) in the transport
	// stream of this output. When you work directly in your JSON job specification,
	// include this object only when your job has a transport stream output and
	// the container settings contain the object M2tsSettings.
	DvbTdtSettings *DvbTdtSettings `locationName:"dvbTdtSettings" type:"structure"`

	// Specify the packet identifier (PID) for DVB teletext data you include in
	// this output. Default is 499.
	DvbTeletextPid *int64 `locationName:"dvbTeletextPid" min:"32" type:"integer"`

	// When set to VIDEO_AND_FIXED_INTERVALS, audio EBP markers will be added to
	// partitions 3 and 4. The interval between these additional markers will be
	// fixed, and will be slightly shorter than the video EBP marker interval. When
	// set to VIDEO_INTERVAL, these additional markers will not be inserted. Only
	// applicable when EBP segmentation markers are selected (segmentationMarkers
	// is EBP or EBP_LEGACY).
	EbpAudioInterval *string `locationName:"ebpAudioInterval" type:"string" enum:"M2tsEbpAudioInterval"`

	// Selects which PIDs to place EBP markers on. They can either be placed only
	// on the video PID, or on both the video PID and all audio PIDs. Only applicable
	// when EBP segmentation markers are selected (segmentationMarkers is EBP
	// or EBP_LEGACY).
	EbpPlacement *string `locationName:"ebpPlacement" type:"string" enum:"M2tsEbpPlacement"`

	// Controls whether to include the ES Rate field in the PES header.
	EsRateInPes *string `locationName:"esRateInPes" type:"string" enum:"M2tsEsRateInPes"`

	// Keep the default value (DEFAULT) unless you know that your audio EBP markers
	// are incorrectly appearing before your video EBP markers. To correct this
	// problem, set this value to Force (FORCE).
	ForceTsVideoEbpOrder *string `locationName:"forceTsVideoEbpOrder" type:"string" enum:"M2tsForceTsVideoEbpOrder"`

	// The length, in seconds, of each fragment. Only used with EBP markers.
	FragmentTime *float64 `locationName:"fragmentTime" type:"double"`

	// Specify the maximum time, in milliseconds, between Program Clock References
	// (PCRs) inserted into the transport stream.
	MaxPcrInterval *int64 `locationName:"maxPcrInterval" type:"integer"`

	// When set, enforces that Encoder Boundary Points do not come within the specified
	// time interval of each other by looking ahead at input video. If another EBP
	// is going to come in within the specified time interval, the current EBP is
	// not emitted, and the segment is "stretched" to the next marker. The lookahead
	// value does not add latency to the system. The Live Event must be configured
	// elsewhere to create sufficient latency to make the lookahead accurate.
	MinEbpInterval *int64 `locationName:"minEbpInterval" type:"integer"`

	// If INSERT, Nielsen inaudible tones for media tracking will be detected in
	// the input audio and an equivalent ID3 tag will be inserted in the output.
	NielsenId3 *string `locationName:"nielsenId3" type:"string" enum:"M2tsNielsenId3"`

	// Value in bits per second of extra null packets to insert into the transport
	// stream. This can be used if a downstream encryption system requires periodic
	// null packets.
	NullPacketBitrate *float64 `locationName:"nullPacketBitrate" type:"double"`

	// The number of milliseconds between instances of this table in the output
	// transport stream.
	PatInterval *int64 `locationName:"patInterval" type:"integer"`

	// When set to PCR_EVERY_PES_PACKET, a Program Clock Reference value is inserted
	// for every Packetized Elementary Stream (PES) header. This is effective only
	// when the PCR PID is the same as the video or audio elementary stream.
	PcrControl *string `locationName:"pcrControl" type:"string" enum:"M2tsPcrControl"`

	// Specify the packet identifier (PID) for the program clock reference (PCR)
	// in this output. If you do not specify a value, the service will use the value
	// for Video PID (VideoPid).
	PcrPid *int64 `locationName:"pcrPid" min:"32" type:"integer"`

	// Specify the number of milliseconds between instances of the program map table
	// (PMT) in the output transport stream.
	PmtInterval *int64 `locationName:"pmtInterval" type:"integer"`

	// Specify the packet identifier (PID) for the program map table (PMT) itself.
	// Default is 480.
	PmtPid *int64 `locationName:"pmtPid" min:"32" type:"integer"`

	// Specify the packet identifier (PID) of the private metadata stream. Default
	// is 503.
	PrivateMetadataPid *int64 `locationName:"privateMetadataPid" min:"32" type:"integer"`

	// Use Program number (programNumber) to specify the program number used in
	// the program map table (PMT) for this output. Default is 1. Program numbers
	// and program map tables are parts of MPEG-2 transport stream containers, used
	// for organizing data.
	ProgramNumber *int64 `locationName:"programNumber" type:"integer"`

	// When set to CBR, inserts null packets into transport stream to fill specified
	// bitrate. When set to VBR, the bitrate setting acts as the maximum bitrate,
	// but the output will not be padded up to that bitrate.
	RateMode *string `locationName:"rateMode" type:"string" enum:"M2tsRateMode"`

	// Include this in your job settings to put SCTE-35 markers in your HLS and
	// transport stream outputs at the insertion points that you specify in an ESAM
	// XML document. Provide the document in the setting SCC XML (sccXml).
	Scte35Esam *M2tsScte35Esam `locationName:"scte35Esam" type:"structure"`

	// Specify the packet identifier (PID) of the SCTE-35 stream in the transport
	// stream.
	Scte35Pid *int64 `locationName:"scte35Pid" min:"32" type:"integer"`

	// For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if
	// you want SCTE-35 markers that appear in your input to also appear in this
	// output. Choose None (NONE) if you don't want SCTE-35 markers in this output.
	// For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). Also
	// provide the ESAM XML as a string in the setting Signal processing notification
	// XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam).
	Scte35Source *string `locationName:"scte35Source" type:"string" enum:"M2tsScte35Source"`

	// Inserts segmentation markers at each segmentation_time period. rai_segstart
	// sets the Random Access Indicator bit in the adaptation field. rai_adapt sets
	// the RAI bit and adds the current timecode in the private data bytes. psi_segstart
	// inserts PAT and PMT tables at the start of segments. ebp adds Encoder Boundary
	// Point information to the adaptation field as per OpenCable specification
	// OC-SP-EBP-I01-130118. ebp_legacy adds Encoder Boundary Point information
	// to the adaptation field using a legacy proprietary format.
	SegmentationMarkers *string `locationName:"segmentationMarkers" type:"string" enum:"M2tsSegmentationMarkers"`

	// The segmentation style parameter controls how segmentation markers are inserted
	// into the transport stream. With avails, it is possible that segments may
	// be truncated, which can influence where future segmentation markers are inserted.
	// When a segmentation style of "reset_cadence" is selected and a segment is
	// truncated due to an avail, we will reset the segmentation cadence. This means
	// the subsequent segment will have a duration of $segmentation_time seconds.
	// When a segmentation style of "maintain_cadence" is selected and a segment
	// is truncated due to an avail, we will not reset the segmentation cadence.
	// This means the subsequent segment will likely be truncated as well. However,
	// all segments after that will have a duration of $segmentation_time seconds.
	// Note that EBP lookahead is a slight exception to this rule.
	SegmentationStyle *string `locationName:"segmentationStyle" type:"string" enum:"M2tsSegmentationStyle"`

	// Specify the length, in seconds, of each segment. Required unless markers
	// is set to _none_.
	SegmentationTime *float64 `locationName:"segmentationTime" type:"double"`

	// Specify the packet identifier (PID) for timed metadata in this output. Default
	// is 502.
	TimedMetadataPid *int64 `locationName:"timedMetadataPid" min:"32" type:"integer"`

	// Specify the ID for the transport stream itself in the program map table for
	// this output. Transport stream IDs and program map tables are parts of MPEG-2
	// transport stream containers, used for organizing data.
	TransportStreamId *int64 `locationName:"transportStreamId" type:"integer"`

	// Specify the packet identifier (PID) of the elementary video stream in the
	// transport stream.
	VideoPid *int64 `locationName:"videoPid" min:"32" type:"integer"`
}

// String returns a human-readable representation of the value, produced by
// awsutil.Prettify. Useful for logging and debugging.
func (s M2tsSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString implements fmt.GoStringer; it reuses the String representation.
func (s M2tsSettings) GoString() string {
	return s.String()
}
15130
15131// Validate inspects the fields of the type to determine if they are valid.
15132func (s *M2tsSettings) Validate() error {
15133	invalidParams := request.ErrInvalidParams{Context: "M2tsSettings"}
15134	if s.DvbTeletextPid != nil && *s.DvbTeletextPid < 32 {
15135		invalidParams.Add(request.NewErrParamMinValue("DvbTeletextPid", 32))
15136	}
15137	if s.PcrPid != nil && *s.PcrPid < 32 {
15138		invalidParams.Add(request.NewErrParamMinValue("PcrPid", 32))
15139	}
15140	if s.PmtPid != nil && *s.PmtPid < 32 {
15141		invalidParams.Add(request.NewErrParamMinValue("PmtPid", 32))
15142	}
15143	if s.PrivateMetadataPid != nil && *s.PrivateMetadataPid < 32 {
15144		invalidParams.Add(request.NewErrParamMinValue("PrivateMetadataPid", 32))
15145	}
15146	if s.Scte35Pid != nil && *s.Scte35Pid < 32 {
15147		invalidParams.Add(request.NewErrParamMinValue("Scte35Pid", 32))
15148	}
15149	if s.TimedMetadataPid != nil && *s.TimedMetadataPid < 32 {
15150		invalidParams.Add(request.NewErrParamMinValue("TimedMetadataPid", 32))
15151	}
15152	if s.VideoPid != nil && *s.VideoPid < 32 {
15153		invalidParams.Add(request.NewErrParamMinValue("VideoPid", 32))
15154	}
15155	if s.DvbNitSettings != nil {
15156		if err := s.DvbNitSettings.Validate(); err != nil {
15157			invalidParams.AddNested("DvbNitSettings", err.(request.ErrInvalidParams))
15158		}
15159	}
15160	if s.DvbSdtSettings != nil {
15161		if err := s.DvbSdtSettings.Validate(); err != nil {
15162			invalidParams.AddNested("DvbSdtSettings", err.(request.ErrInvalidParams))
15163		}
15164	}
15165	if s.DvbTdtSettings != nil {
15166		if err := s.DvbTdtSettings.Validate(); err != nil {
15167			invalidParams.AddNested("DvbTdtSettings", err.(request.ErrInvalidParams))
15168		}
15169	}
15170	if s.Scte35Esam != nil {
15171		if err := s.Scte35Esam.Validate(); err != nil {
15172			invalidParams.AddNested("Scte35Esam", err.(request.ErrInvalidParams))
15173		}
15174	}
15175
15176	if invalidParams.Len() > 0 {
15177		return invalidParams
15178	}
15179	return nil
15180}
15181
15182// SetAudioBufferModel sets the AudioBufferModel field's value.
15183func (s *M2tsSettings) SetAudioBufferModel(v string) *M2tsSettings {
15184	s.AudioBufferModel = &v
15185	return s
15186}
15187
15188// SetAudioDuration sets the AudioDuration field's value.
15189func (s *M2tsSettings) SetAudioDuration(v string) *M2tsSettings {
15190	s.AudioDuration = &v
15191	return s
15192}
15193
15194// SetAudioFramesPerPes sets the AudioFramesPerPes field's value.
15195func (s *M2tsSettings) SetAudioFramesPerPes(v int64) *M2tsSettings {
15196	s.AudioFramesPerPes = &v
15197	return s
15198}
15199
15200// SetAudioPids sets the AudioPids field's value.
15201func (s *M2tsSettings) SetAudioPids(v []*int64) *M2tsSettings {
15202	s.AudioPids = v
15203	return s
15204}
15205
15206// SetBitrate sets the Bitrate field's value.
15207func (s *M2tsSettings) SetBitrate(v int64) *M2tsSettings {
15208	s.Bitrate = &v
15209	return s
15210}
15211
15212// SetBufferModel sets the BufferModel field's value.
15213func (s *M2tsSettings) SetBufferModel(v string) *M2tsSettings {
15214	s.BufferModel = &v
15215	return s
15216}
15217
15218// SetDvbNitSettings sets the DvbNitSettings field's value.
15219func (s *M2tsSettings) SetDvbNitSettings(v *DvbNitSettings) *M2tsSettings {
15220	s.DvbNitSettings = v
15221	return s
15222}
15223
15224// SetDvbSdtSettings sets the DvbSdtSettings field's value.
15225func (s *M2tsSettings) SetDvbSdtSettings(v *DvbSdtSettings) *M2tsSettings {
15226	s.DvbSdtSettings = v
15227	return s
15228}
15229
15230// SetDvbSubPids sets the DvbSubPids field's value.
15231func (s *M2tsSettings) SetDvbSubPids(v []*int64) *M2tsSettings {
15232	s.DvbSubPids = v
15233	return s
15234}
15235
15236// SetDvbTdtSettings sets the DvbTdtSettings field's value.
15237func (s *M2tsSettings) SetDvbTdtSettings(v *DvbTdtSettings) *M2tsSettings {
15238	s.DvbTdtSettings = v
15239	return s
15240}
15241
15242// SetDvbTeletextPid sets the DvbTeletextPid field's value.
15243func (s *M2tsSettings) SetDvbTeletextPid(v int64) *M2tsSettings {
15244	s.DvbTeletextPid = &v
15245	return s
15246}
15247
15248// SetEbpAudioInterval sets the EbpAudioInterval field's value.
15249func (s *M2tsSettings) SetEbpAudioInterval(v string) *M2tsSettings {
15250	s.EbpAudioInterval = &v
15251	return s
15252}
15253
15254// SetEbpPlacement sets the EbpPlacement field's value.
15255func (s *M2tsSettings) SetEbpPlacement(v string) *M2tsSettings {
15256	s.EbpPlacement = &v
15257	return s
15258}
15259
15260// SetEsRateInPes sets the EsRateInPes field's value.
15261func (s *M2tsSettings) SetEsRateInPes(v string) *M2tsSettings {
15262	s.EsRateInPes = &v
15263	return s
15264}
15265
15266// SetForceTsVideoEbpOrder sets the ForceTsVideoEbpOrder field's value.
15267func (s *M2tsSettings) SetForceTsVideoEbpOrder(v string) *M2tsSettings {
15268	s.ForceTsVideoEbpOrder = &v
15269	return s
15270}
15271
15272// SetFragmentTime sets the FragmentTime field's value.
15273func (s *M2tsSettings) SetFragmentTime(v float64) *M2tsSettings {
15274	s.FragmentTime = &v
15275	return s
15276}
15277
15278// SetMaxPcrInterval sets the MaxPcrInterval field's value.
15279func (s *M2tsSettings) SetMaxPcrInterval(v int64) *M2tsSettings {
15280	s.MaxPcrInterval = &v
15281	return s
15282}
15283
15284// SetMinEbpInterval sets the MinEbpInterval field's value.
15285func (s *M2tsSettings) SetMinEbpInterval(v int64) *M2tsSettings {
15286	s.MinEbpInterval = &v
15287	return s
15288}
15289
15290// SetNielsenId3 sets the NielsenId3 field's value.
15291func (s *M2tsSettings) SetNielsenId3(v string) *M2tsSettings {
15292	s.NielsenId3 = &v
15293	return s
15294}
15295
15296// SetNullPacketBitrate sets the NullPacketBitrate field's value.
15297func (s *M2tsSettings) SetNullPacketBitrate(v float64) *M2tsSettings {
15298	s.NullPacketBitrate = &v
15299	return s
15300}
15301
15302// SetPatInterval sets the PatInterval field's value.
15303func (s *M2tsSettings) SetPatInterval(v int64) *M2tsSettings {
15304	s.PatInterval = &v
15305	return s
15306}
15307
15308// SetPcrControl sets the PcrControl field's value.
15309func (s *M2tsSettings) SetPcrControl(v string) *M2tsSettings {
15310	s.PcrControl = &v
15311	return s
15312}
15313
15314// SetPcrPid sets the PcrPid field's value.
15315func (s *M2tsSettings) SetPcrPid(v int64) *M2tsSettings {
15316	s.PcrPid = &v
15317	return s
15318}
15319
15320// SetPmtInterval sets the PmtInterval field's value.
15321func (s *M2tsSettings) SetPmtInterval(v int64) *M2tsSettings {
15322	s.PmtInterval = &v
15323	return s
15324}
15325
15326// SetPmtPid sets the PmtPid field's value.
15327func (s *M2tsSettings) SetPmtPid(v int64) *M2tsSettings {
15328	s.PmtPid = &v
15329	return s
15330}
15331
15332// SetPrivateMetadataPid sets the PrivateMetadataPid field's value.
15333func (s *M2tsSettings) SetPrivateMetadataPid(v int64) *M2tsSettings {
15334	s.PrivateMetadataPid = &v
15335	return s
15336}
15337
15338// SetProgramNumber sets the ProgramNumber field's value.
15339func (s *M2tsSettings) SetProgramNumber(v int64) *M2tsSettings {
15340	s.ProgramNumber = &v
15341	return s
15342}
15343
15344// SetRateMode sets the RateMode field's value.
15345func (s *M2tsSettings) SetRateMode(v string) *M2tsSettings {
15346	s.RateMode = &v
15347	return s
15348}
15349
15350// SetScte35Esam sets the Scte35Esam field's value.
15351func (s *M2tsSettings) SetScte35Esam(v *M2tsScte35Esam) *M2tsSettings {
15352	s.Scte35Esam = v
15353	return s
15354}
15355
15356// SetScte35Pid sets the Scte35Pid field's value.
15357func (s *M2tsSettings) SetScte35Pid(v int64) *M2tsSettings {
15358	s.Scte35Pid = &v
15359	return s
15360}
15361
15362// SetScte35Source sets the Scte35Source field's value.
15363func (s *M2tsSettings) SetScte35Source(v string) *M2tsSettings {
15364	s.Scte35Source = &v
15365	return s
15366}
15367
15368// SetSegmentationMarkers sets the SegmentationMarkers field's value.
15369func (s *M2tsSettings) SetSegmentationMarkers(v string) *M2tsSettings {
15370	s.SegmentationMarkers = &v
15371	return s
15372}
15373
15374// SetSegmentationStyle sets the SegmentationStyle field's value.
15375func (s *M2tsSettings) SetSegmentationStyle(v string) *M2tsSettings {
15376	s.SegmentationStyle = &v
15377	return s
15378}
15379
15380// SetSegmentationTime sets the SegmentationTime field's value.
15381func (s *M2tsSettings) SetSegmentationTime(v float64) *M2tsSettings {
15382	s.SegmentationTime = &v
15383	return s
15384}
15385
15386// SetTimedMetadataPid sets the TimedMetadataPid field's value.
15387func (s *M2tsSettings) SetTimedMetadataPid(v int64) *M2tsSettings {
15388	s.TimedMetadataPid = &v
15389	return s
15390}
15391
15392// SetTransportStreamId sets the TransportStreamId field's value.
15393func (s *M2tsSettings) SetTransportStreamId(v int64) *M2tsSettings {
15394	s.TransportStreamId = &v
15395	return s
15396}
15397
15398// SetVideoPid sets the VideoPid field's value.
15399func (s *M2tsSettings) SetVideoPid(v int64) *M2tsSettings {
15400	s.VideoPid = &v
15401	return s
15402}
15403
// These settings relate to the MPEG-2 transport stream (MPEG2-TS) container
// for the MPEG2-TS segments in your HLS outputs.
//
// The PID fields below carry a min:"32" struct tag; this type's Validate
// method enforces those minimums before a request is sent.
type M3u8Settings struct {
	_ struct{} `type:"structure"`

	// Specify this setting only when your output will be consumed by a downstream
	// repackaging workflow that is sensitive to very small duration differences
	// between video and audio. For this situation, choose Match video duration
	// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
	// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
	// MediaConvert pads the output audio streams with silence or trims them to
	// ensure that the total duration of each audio stream is at least as long as
	// the total duration of the video stream. After padding or trimming, the audio
	// stream duration is no more than one frame longer than the video stream. MediaConvert
	// applies audio padding or trimming only to the end of the last segment of
	// the output. For unsegmented outputs, MediaConvert adds padding only to the
	// end of the file. When you keep the default value, any minor discrepancies
	// between audio and video duration will depend on your output audio codec.
	AudioDuration *string `locationName:"audioDuration" type:"string" enum:"M3u8AudioDuration"`

	// The number of audio frames to insert for each PES packet.
	AudioFramesPerPes *int64 `locationName:"audioFramesPerPes" type:"integer"`

	// Packet Identifier (PID) of the elementary audio stream(s) in the transport
	// stream. Multiple values are accepted, and can be entered in ranges and/or
	// by comma separation.
	AudioPids []*int64 `locationName:"audioPids" type:"list"`

	// Specify the maximum time, in milliseconds, between Program Clock References
	// (PCRs) inserted into the transport stream.
	MaxPcrInterval *int64 `locationName:"maxPcrInterval" type:"integer"`

	// If INSERT, Nielsen inaudible tones for media tracking will be detected in
	// the input audio and an equivalent ID3 tag will be inserted in the output.
	NielsenId3 *string `locationName:"nielsenId3" type:"string" enum:"M3u8NielsenId3"`

	// The number of milliseconds between instances of this table in the output
	// transport stream.
	PatInterval *int64 `locationName:"patInterval" type:"integer"`

	// When set to PCR_EVERY_PES_PACKET a Program Clock Reference value is inserted
	// for every Packetized Elementary Stream (PES) header. This parameter is effective
	// only when the PCR PID is the same as the video or audio elementary stream.
	PcrControl *string `locationName:"pcrControl" type:"string" enum:"M3u8PcrControl"`

	// Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport
	// stream. When no value is given, the encoder will assign the same value as
	// the Video PID. Minimum value: 32.
	PcrPid *int64 `locationName:"pcrPid" min:"32" type:"integer"`

	// The number of milliseconds between instances of this table in the output
	// transport stream.
	PmtInterval *int64 `locationName:"pmtInterval" type:"integer"`

	// Packet Identifier (PID) for the Program Map Table (PMT) in the transport
	// stream. Minimum value: 32.
	PmtPid *int64 `locationName:"pmtPid" min:"32" type:"integer"`

	// Packet Identifier (PID) of the private metadata stream in the transport stream.
	// Minimum value: 32.
	PrivateMetadataPid *int64 `locationName:"privateMetadataPid" min:"32" type:"integer"`

	// The value of the program number field in the Program Map Table.
	ProgramNumber *int64 `locationName:"programNumber" type:"integer"`

	// Packet Identifier (PID) of the SCTE-35 stream in the transport stream.
	// Minimum value: 32.
	Scte35Pid *int64 `locationName:"scte35Pid" min:"32" type:"integer"`

	// For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if
	// you want SCTE-35 markers that appear in your input to also appear in this
	// output. Choose None (NONE) if you don't want SCTE-35 markers in this output.
	// For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you
	// don't want manifest conditioning. Choose Passthrough (PASSTHROUGH) and choose
	// Ad markers (adMarkers) if you do want manifest conditioning. In both cases,
	// also provide the ESAM XML as a string in the setting Signal processing notification
	// XML (sccXml).
	Scte35Source *string `locationName:"scte35Source" type:"string" enum:"M3u8Scte35Source"`

	// Applies only to HLS outputs. Use this setting to specify whether the service
	// inserts the ID3 timed metadata from the input in this output.
	TimedMetadata *string `locationName:"timedMetadata" type:"string" enum:"TimedMetadata"`

	// Packet Identifier (PID) of the timed metadata stream in the transport stream.
	// Minimum value: 32.
	TimedMetadataPid *int64 `locationName:"timedMetadataPid" min:"32" type:"integer"`

	// The value of the transport stream ID field in the Program Map Table.
	TransportStreamId *int64 `locationName:"transportStreamId" type:"integer"`

	// Packet Identifier (PID) of the elementary video stream in the transport stream.
	// Minimum value: 32.
	VideoPid *int64 `locationName:"videoPid" min:"32" type:"integer"`
}
15494
// String returns the string representation, produced by awsutil.Prettify.
func (s M3u8Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s M3u8Settings) GoString() string {
	return s.String()
}
15504
15505// Validate inspects the fields of the type to determine if they are valid.
15506func (s *M3u8Settings) Validate() error {
15507	invalidParams := request.ErrInvalidParams{Context: "M3u8Settings"}
15508	if s.PcrPid != nil && *s.PcrPid < 32 {
15509		invalidParams.Add(request.NewErrParamMinValue("PcrPid", 32))
15510	}
15511	if s.PmtPid != nil && *s.PmtPid < 32 {
15512		invalidParams.Add(request.NewErrParamMinValue("PmtPid", 32))
15513	}
15514	if s.PrivateMetadataPid != nil && *s.PrivateMetadataPid < 32 {
15515		invalidParams.Add(request.NewErrParamMinValue("PrivateMetadataPid", 32))
15516	}
15517	if s.Scte35Pid != nil && *s.Scte35Pid < 32 {
15518		invalidParams.Add(request.NewErrParamMinValue("Scte35Pid", 32))
15519	}
15520	if s.TimedMetadataPid != nil && *s.TimedMetadataPid < 32 {
15521		invalidParams.Add(request.NewErrParamMinValue("TimedMetadataPid", 32))
15522	}
15523	if s.VideoPid != nil && *s.VideoPid < 32 {
15524		invalidParams.Add(request.NewErrParamMinValue("VideoPid", 32))
15525	}
15526
15527	if invalidParams.Len() > 0 {
15528		return invalidParams
15529	}
15530	return nil
15531}
15532
15533// SetAudioDuration sets the AudioDuration field's value.
15534func (s *M3u8Settings) SetAudioDuration(v string) *M3u8Settings {
15535	s.AudioDuration = &v
15536	return s
15537}
15538
15539// SetAudioFramesPerPes sets the AudioFramesPerPes field's value.
15540func (s *M3u8Settings) SetAudioFramesPerPes(v int64) *M3u8Settings {
15541	s.AudioFramesPerPes = &v
15542	return s
15543}
15544
15545// SetAudioPids sets the AudioPids field's value.
15546func (s *M3u8Settings) SetAudioPids(v []*int64) *M3u8Settings {
15547	s.AudioPids = v
15548	return s
15549}
15550
15551// SetMaxPcrInterval sets the MaxPcrInterval field's value.
15552func (s *M3u8Settings) SetMaxPcrInterval(v int64) *M3u8Settings {
15553	s.MaxPcrInterval = &v
15554	return s
15555}
15556
15557// SetNielsenId3 sets the NielsenId3 field's value.
15558func (s *M3u8Settings) SetNielsenId3(v string) *M3u8Settings {
15559	s.NielsenId3 = &v
15560	return s
15561}
15562
15563// SetPatInterval sets the PatInterval field's value.
15564func (s *M3u8Settings) SetPatInterval(v int64) *M3u8Settings {
15565	s.PatInterval = &v
15566	return s
15567}
15568
15569// SetPcrControl sets the PcrControl field's value.
15570func (s *M3u8Settings) SetPcrControl(v string) *M3u8Settings {
15571	s.PcrControl = &v
15572	return s
15573}
15574
15575// SetPcrPid sets the PcrPid field's value.
15576func (s *M3u8Settings) SetPcrPid(v int64) *M3u8Settings {
15577	s.PcrPid = &v
15578	return s
15579}
15580
15581// SetPmtInterval sets the PmtInterval field's value.
15582func (s *M3u8Settings) SetPmtInterval(v int64) *M3u8Settings {
15583	s.PmtInterval = &v
15584	return s
15585}
15586
15587// SetPmtPid sets the PmtPid field's value.
15588func (s *M3u8Settings) SetPmtPid(v int64) *M3u8Settings {
15589	s.PmtPid = &v
15590	return s
15591}
15592
15593// SetPrivateMetadataPid sets the PrivateMetadataPid field's value.
15594func (s *M3u8Settings) SetPrivateMetadataPid(v int64) *M3u8Settings {
15595	s.PrivateMetadataPid = &v
15596	return s
15597}
15598
15599// SetProgramNumber sets the ProgramNumber field's value.
15600func (s *M3u8Settings) SetProgramNumber(v int64) *M3u8Settings {
15601	s.ProgramNumber = &v
15602	return s
15603}
15604
15605// SetScte35Pid sets the Scte35Pid field's value.
15606func (s *M3u8Settings) SetScte35Pid(v int64) *M3u8Settings {
15607	s.Scte35Pid = &v
15608	return s
15609}
15610
15611// SetScte35Source sets the Scte35Source field's value.
15612func (s *M3u8Settings) SetScte35Source(v string) *M3u8Settings {
15613	s.Scte35Source = &v
15614	return s
15615}
15616
15617// SetTimedMetadata sets the TimedMetadata field's value.
15618func (s *M3u8Settings) SetTimedMetadata(v string) *M3u8Settings {
15619	s.TimedMetadata = &v
15620	return s
15621}
15622
15623// SetTimedMetadataPid sets the TimedMetadataPid field's value.
15624func (s *M3u8Settings) SetTimedMetadataPid(v int64) *M3u8Settings {
15625	s.TimedMetadataPid = &v
15626	return s
15627}
15628
15629// SetTransportStreamId sets the TransportStreamId field's value.
15630func (s *M3u8Settings) SetTransportStreamId(v int64) *M3u8Settings {
15631	s.TransportStreamId = &v
15632	return s
15633}
15634
15635// SetVideoPid sets the VideoPid field's value.
15636func (s *M3u8Settings) SetVideoPid(v int64) *M3u8Settings {
15637	s.VideoPid = &v
15638	return s
15639}
15640
// Overlay motion graphics on top of your video. The motion graphics that you
// specify here appear on all outputs in all output groups. For more information,
// see https://docs.aws.amazon.com/mediaconvert/latest/ug/motion-graphic-overlay.html.
//
// Input (min length 14) and StartTime (min length 11) carry minimum-length
// struct tags that are enforced by this type's Validate method.
type MotionImageInserter struct {
	_ struct{} `type:"structure"`

	// If your motion graphic asset is a .mov file, keep this setting unspecified.
	// If your motion graphic asset is a series of .png files, specify the frame
	// rate of the overlay in frames per second, as a fraction. For example, specify
	// 24 fps as 24/1. Make sure that the number of images in your series matches
	// the frame rate and your intended overlay duration. For example, if you want
	// a 30-second overlay at 30 fps, you should have 900 .png images. This overlay
	// frame rate doesn't need to match the frame rate of the underlying video.
	Framerate *MotionImageInsertionFramerate `locationName:"framerate" type:"structure"`

	// Specify the .mov file or series of .png files that you want to overlay on
	// your video. For .png files, provide the file name of the first file in the
	// series. Make sure that the names of the .png files end with sequential numbers
	// that specify the order that they are played in. For example, overlay_000.png,
	// overlay_001.png, overlay_002.png, and so on. The sequence must start at zero,
	// and each image file name must have the same number of digits. Pad your initial
	// file names with enough zeros to complete the sequence. For example, if the
	// first image is overlay_0.png, there can be only 10 images in the sequence,
	// with the last image being overlay_9.png. But if the first image is overlay_00.png,
	// there can be 100 images in the sequence.
	Input *string `locationName:"input" min:"14" type:"string"`

	// Choose the type of motion graphic asset that you are providing for your overlay.
	// You can choose either a .mov file or a series of .png files.
	InsertionMode *string `locationName:"insertionMode" type:"string" enum:"MotionImageInsertionMode"`

	// Use Offset to specify the placement of your motion graphic overlay on the
	// video frame. Specify in pixels, from the upper-left corner of the frame.
	// If you don't specify an offset, the service scales your overlay to the full
	// size of the frame. Otherwise, the service inserts the overlay at its native
	// resolution and scales the size up or down with any video scaling.
	Offset *MotionImageInsertionOffset `locationName:"offset" type:"structure"`

	// Specify whether your motion graphic overlay repeats on a loop or plays only
	// once.
	Playback *string `locationName:"playback" type:"string" enum:"MotionImagePlayback"`

	// Specify when the motion overlay begins. Use timecode format (HH:MM:SS:FF
	// or HH:MM:SS;FF). Make sure that the timecode you provide here takes into
	// account how you have set up your timecode configuration under both job settings
	// and input settings. The simplest way to do that is to set both to start at
	// 0. If you need to set up your job to follow timecodes embedded in your source
	// that don't start at zero, make sure that you specify a start time that is
	// after the first embedded timecode. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/setting-up-timecode.html
	// Find job-wide and input timecode configuration settings in your JSON job
	// settings specification at settings>timecodeConfig>source and settings>inputs>timecodeSource.
	StartTime *string `locationName:"startTime" min:"11" type:"string"`
}
15694
// String returns the string representation, produced by awsutil.Prettify.
func (s MotionImageInserter) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s MotionImageInserter) GoString() string {
	return s.String()
}
15704
15705// Validate inspects the fields of the type to determine if they are valid.
15706func (s *MotionImageInserter) Validate() error {
15707	invalidParams := request.ErrInvalidParams{Context: "MotionImageInserter"}
15708	if s.Input != nil && len(*s.Input) < 14 {
15709		invalidParams.Add(request.NewErrParamMinLen("Input", 14))
15710	}
15711	if s.StartTime != nil && len(*s.StartTime) < 11 {
15712		invalidParams.Add(request.NewErrParamMinLen("StartTime", 11))
15713	}
15714	if s.Framerate != nil {
15715		if err := s.Framerate.Validate(); err != nil {
15716			invalidParams.AddNested("Framerate", err.(request.ErrInvalidParams))
15717		}
15718	}
15719
15720	if invalidParams.Len() > 0 {
15721		return invalidParams
15722	}
15723	return nil
15724}
15725
15726// SetFramerate sets the Framerate field's value.
15727func (s *MotionImageInserter) SetFramerate(v *MotionImageInsertionFramerate) *MotionImageInserter {
15728	s.Framerate = v
15729	return s
15730}
15731
15732// SetInput sets the Input field's value.
15733func (s *MotionImageInserter) SetInput(v string) *MotionImageInserter {
15734	s.Input = &v
15735	return s
15736}
15737
15738// SetInsertionMode sets the InsertionMode field's value.
15739func (s *MotionImageInserter) SetInsertionMode(v string) *MotionImageInserter {
15740	s.InsertionMode = &v
15741	return s
15742}
15743
15744// SetOffset sets the Offset field's value.
15745func (s *MotionImageInserter) SetOffset(v *MotionImageInsertionOffset) *MotionImageInserter {
15746	s.Offset = v
15747	return s
15748}
15749
15750// SetPlayback sets the Playback field's value.
15751func (s *MotionImageInserter) SetPlayback(v string) *MotionImageInserter {
15752	s.Playback = &v
15753	return s
15754}
15755
15756// SetStartTime sets the StartTime field's value.
15757func (s *MotionImageInserter) SetStartTime(v string) *MotionImageInserter {
15758	s.StartTime = &v
15759	return s
15760}
15761
// For motion overlays that don't have a built-in frame rate, specify the frame
// rate of the overlay in frames per second, as a fraction. For example, specify
// 24 fps as 24/1. The overlay frame rate doesn't need to match the frame rate
// of the underlying video.
//
// Both fraction components carry a min:"1" struct tag that is enforced by
// this type's Validate method.
type MotionImageInsertionFramerate struct {
	_ struct{} `type:"structure"`

	// The bottom of the fraction that expresses your overlay frame rate. For example,
	// if your frame rate is 24 fps, set this value to 1.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// The top of the fraction that expresses your overlay frame rate. For example,
	// if your frame rate is 24 fps, set this value to 24.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`
}
15777
// String returns the string representation, produced by awsutil.Prettify.
func (s MotionImageInsertionFramerate) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s MotionImageInsertionFramerate) GoString() string {
	return s.String()
}
15787
15788// Validate inspects the fields of the type to determine if they are valid.
15789func (s *MotionImageInsertionFramerate) Validate() error {
15790	invalidParams := request.ErrInvalidParams{Context: "MotionImageInsertionFramerate"}
15791	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
15792		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
15793	}
15794	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
15795		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1))
15796	}
15797
15798	if invalidParams.Len() > 0 {
15799		return invalidParams
15800	}
15801	return nil
15802}
15803
15804// SetFramerateDenominator sets the FramerateDenominator field's value.
15805func (s *MotionImageInsertionFramerate) SetFramerateDenominator(v int64) *MotionImageInsertionFramerate {
15806	s.FramerateDenominator = &v
15807	return s
15808}
15809
15810// SetFramerateNumerator sets the FramerateNumerator field's value.
15811func (s *MotionImageInsertionFramerate) SetFramerateNumerator(v int64) *MotionImageInsertionFramerate {
15812	s.FramerateNumerator = &v
15813	return s
15814}
15815
// Specify the offset between the upper-left corner of the video frame and the
// top left corner of the overlay.
//
// Neither offset field declares a minimum in its struct tags.
type MotionImageInsertionOffset struct {
	_ struct{} `type:"structure"`

	// Set the distance, in pixels, between the overlay and the left edge of the
	// video frame.
	ImageX *int64 `locationName:"imageX" type:"integer"`

	// Set the distance, in pixels, between the overlay and the top edge of the
	// video frame.
	ImageY *int64 `locationName:"imageY" type:"integer"`
}
15829
// String returns the string representation, produced by awsutil.Prettify.
func (s MotionImageInsertionOffset) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s MotionImageInsertionOffset) GoString() string {
	return s.String()
}
15839
15840// SetImageX sets the ImageX field's value.
15841func (s *MotionImageInsertionOffset) SetImageX(v int64) *MotionImageInsertionOffset {
15842	s.ImageX = &v
15843	return s
15844}
15845
15846// SetImageY sets the ImageY field's value.
15847func (s *MotionImageInsertionOffset) SetImageY(v int64) *MotionImageInsertionOffset {
15848	s.ImageY = &v
15849	return s
15850}
15851
// These settings relate to your QuickTime MOV output container.
type MovSettings struct {
	_ struct{} `type:"structure"`

	// When enabled, include 'clap' atom if appropriate for the video output settings.
	ClapAtom *string `locationName:"clapAtom" type:"string" enum:"MovClapAtom"`

	// When enabled, file composition times will start at zero, composition times
	// in the 'ctts' (composition time to sample) box for B-frames will be negative,
	// and a 'cslg' (composition shift least greatest) box will be included per
	// 14496-1 amendment 1. This improves compatibility with Apple players and tools.
	CslgAtom *string `locationName:"cslgAtom" type:"string" enum:"MovCslgAtom"`

	// When set to XDCAM, writes MPEG2 video streams into the QuickTime file using
	// XDCAM fourcc codes. This increases compatibility with Apple editors and players,
	// but may decrease compatibility with other players. Only applicable when the
	// video codec is MPEG2.
	Mpeg2FourCCControl *string `locationName:"mpeg2FourCCControl" type:"string" enum:"MovMpeg2FourCCControl"`

	// To make this output compatible with Omneon, keep the default value, OMNEON.
	// Unless you need Omneon compatibility, set this value to NONE. When you keep
	// the default value, OMNEON, MediaConvert increases the length of the edit
	// list atom. This might cause file rejections when a recipient of the output
	// file doesn't expect this extra padding.
	PaddingControl *string `locationName:"paddingControl" type:"string" enum:"MovPaddingControl"`

	// Always keep the default value (SELF_CONTAINED) for this setting.
	Reference *string `locationName:"reference" type:"string" enum:"MovReference"`
}
15881
// String returns the string representation, produced by awsutil.Prettify.
func (s MovSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s MovSettings) GoString() string {
	return s.String()
}
15891
15892// SetClapAtom sets the ClapAtom field's value.
15893func (s *MovSettings) SetClapAtom(v string) *MovSettings {
15894	s.ClapAtom = &v
15895	return s
15896}
15897
15898// SetCslgAtom sets the CslgAtom field's value.
15899func (s *MovSettings) SetCslgAtom(v string) *MovSettings {
15900	s.CslgAtom = &v
15901	return s
15902}
15903
15904// SetMpeg2FourCCControl sets the Mpeg2FourCCControl field's value.
15905func (s *MovSettings) SetMpeg2FourCCControl(v string) *MovSettings {
15906	s.Mpeg2FourCCControl = &v
15907	return s
15908}
15909
15910// SetPaddingControl sets the PaddingControl field's value.
15911func (s *MovSettings) SetPaddingControl(v string) *MovSettings {
15912	s.PaddingControl = &v
15913	return s
15914}
15915
15916// SetReference sets the Reference field's value.
15917func (s *MovSettings) SetReference(v string) *MovSettings {
15918	s.Reference = &v
15919	return s
15920}
15921
// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value MP2.
//
// The minimums declared in the struct tags below (Bitrate and SampleRate at
// least 32000, Channels at least 1) are enforced by this type's Validate
// method.
type Mp2Settings struct {
	_ struct{} `type:"structure"`

	// Specify the average bitrate in bits per second.
	Bitrate *int64 `locationName:"bitrate" min:"32000" type:"integer"`

	// Set Channels to specify the number of channels in this output audio track.
	// Choosing Mono in the console will give you 1 output channel; choosing Stereo
	// will give you 2. In the API, valid values are 1 and 2.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// Sample rate in Hz.
	SampleRate *int64 `locationName:"sampleRate" min:"32000" type:"integer"`
}
15938
// String returns the string representation, produced by awsutil.Prettify.
func (s Mp2Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s Mp2Settings) GoString() string {
	return s.String()
}
15948
15949// Validate inspects the fields of the type to determine if they are valid.
15950func (s *Mp2Settings) Validate() error {
15951	invalidParams := request.ErrInvalidParams{Context: "Mp2Settings"}
15952	if s.Bitrate != nil && *s.Bitrate < 32000 {
15953		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 32000))
15954	}
15955	if s.Channels != nil && *s.Channels < 1 {
15956		invalidParams.Add(request.NewErrParamMinValue("Channels", 1))
15957	}
15958	if s.SampleRate != nil && *s.SampleRate < 32000 {
15959		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 32000))
15960	}
15961
15962	if invalidParams.Len() > 0 {
15963		return invalidParams
15964	}
15965	return nil
15966}
15967
15968// SetBitrate sets the Bitrate field's value.
15969func (s *Mp2Settings) SetBitrate(v int64) *Mp2Settings {
15970	s.Bitrate = &v
15971	return s
15972}
15973
15974// SetChannels sets the Channels field's value.
15975func (s *Mp2Settings) SetChannels(v int64) *Mp2Settings {
15976	s.Channels = &v
15977	return s
15978}
15979
15980// SetSampleRate sets the SampleRate field's value.
15981func (s *Mp2Settings) SetSampleRate(v int64) *Mp2Settings {
15982	s.SampleRate = &v
15983	return s
15984}
15985
// Required when you set Codec, under AudioDescriptions>CodecSettings, to the
// value MP3.
type Mp3Settings struct {
	_ struct{} `type:"structure"`

	// Specify the average bitrate in bits per second. The minimum of 16000 is
	// enforced client-side by Validate.
	Bitrate *int64 `locationName:"bitrate" min:"16000" type:"integer"`

	// Specify the number of channels in this output audio track. Choosing Mono
	// on the console gives you 1 output channel; choosing Stereo gives you 2. In
	// the API, valid values are 1 and 2.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// Specify whether the service encodes this MP3 audio output with a constant
	// bitrate (CBR) or a variable bitrate (VBR).
	RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Mp3RateControlMode"`

	// Sample rate in hz.
	SampleRate *int64 `locationName:"sampleRate" min:"22050" type:"integer"`

	// Required when you set Bitrate control mode (rateControlMode) to VBR. Specify
	// the audio quality of this MP3 output from 0 (highest quality) to 9 (lowest
	// quality). Note: this value is not range-checked by this type's Validate
	// method.
	VbrQuality *int64 `locationName:"vbrQuality" type:"integer"`
}
16011
16012// String returns the string representation
16013func (s Mp3Settings) String() string {
16014	return awsutil.Prettify(s)
16015}
16016
16017// GoString returns the string representation
16018func (s Mp3Settings) GoString() string {
16019	return s.String()
16020}
16021
16022// Validate inspects the fields of the type to determine if they are valid.
16023func (s *Mp3Settings) Validate() error {
16024	invalidParams := request.ErrInvalidParams{Context: "Mp3Settings"}
16025	if s.Bitrate != nil && *s.Bitrate < 16000 {
16026		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 16000))
16027	}
16028	if s.Channels != nil && *s.Channels < 1 {
16029		invalidParams.Add(request.NewErrParamMinValue("Channels", 1))
16030	}
16031	if s.SampleRate != nil && *s.SampleRate < 22050 {
16032		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 22050))
16033	}
16034
16035	if invalidParams.Len() > 0 {
16036		return invalidParams
16037	}
16038	return nil
16039}
16040
16041// SetBitrate sets the Bitrate field's value.
16042func (s *Mp3Settings) SetBitrate(v int64) *Mp3Settings {
16043	s.Bitrate = &v
16044	return s
16045}
16046
16047// SetChannels sets the Channels field's value.
16048func (s *Mp3Settings) SetChannels(v int64) *Mp3Settings {
16049	s.Channels = &v
16050	return s
16051}
16052
16053// SetRateControlMode sets the RateControlMode field's value.
16054func (s *Mp3Settings) SetRateControlMode(v string) *Mp3Settings {
16055	s.RateControlMode = &v
16056	return s
16057}
16058
16059// SetSampleRate sets the SampleRate field's value.
16060func (s *Mp3Settings) SetSampleRate(v int64) *Mp3Settings {
16061	s.SampleRate = &v
16062	return s
16063}
16064
16065// SetVbrQuality sets the VbrQuality field's value.
16066func (s *Mp3Settings) SetVbrQuality(v int64) *Mp3Settings {
16067	s.VbrQuality = &v
16068	return s
16069}
16070
// These settings relate to your MP4 output container. You can create audio
// only outputs with this container. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/supported-codecs-containers-audio-only.html#output-codecs-and-containers-supported-for-audio-only.
type Mp4Settings struct {
	_ struct{} `type:"structure"`

	// Specify this setting only when your output will be consumed by a downstream
	// repackaging workflow that is sensitive to very small duration differences
	// between video and audio. For this situation, choose Match video duration
	// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
	// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
	// MediaConvert pads the output audio streams with silence or trims them to
	// ensure that the total duration of each audio stream is at least as long as
	// the total duration of the video stream. After padding or trimming, the audio
	// stream duration is no more than one frame longer than the video stream. MediaConvert
	// applies audio padding or trimming only to the end of the last segment of
	// the output. For unsegmented outputs, MediaConvert adds padding only to the
	// end of the file. When you keep the default value, any minor discrepancies
	// between audio and video duration will depend on your output audio codec.
	// Note that this field reuses the CmfcAudioDuration enum values (see the
	// enum tag below).
	AudioDuration *string `locationName:"audioDuration" type:"string" enum:"CmfcAudioDuration"`

	// When enabled, file composition times will start at zero, composition times
	// in the 'ctts' (composition time to sample) box for B-frames will be negative,
	// and a 'cslg' (composition shift least greatest) box will be included per
	// 14496-1 amendment 1. This improves compatibility with Apple players and tools.
	CslgAtom *string `locationName:"cslgAtom" type:"string" enum:"Mp4CslgAtom"`

	// Ignore this setting unless compliance to the CTTS box version specification
	// matters in your workflow. Specify a value of 1 to set your CTTS box version
	// to 1 and make your output compliant with the specification. When you specify
	// a value of 1, you must also set CSLG atom (cslgAtom) to the value INCLUDE.
	// Keep the default value 0 to set your CTTS box version to 0. This can provide
	// backward compatibility for some players and packagers.
	CttsVersion *int64 `locationName:"cttsVersion" type:"integer"`

	// Inserts a free-space box immediately after the moov box.
	FreeSpaceBox *string `locationName:"freeSpaceBox" type:"string" enum:"Mp4FreeSpaceBox"`

	// If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning
	// of the archive as required for progressive downloading. Otherwise it is placed
	// normally at the end.
	MoovPlacement *string `locationName:"moovPlacement" type:"string" enum:"Mp4MoovPlacement"`

	// Overrides the "Major Brand" field in the output file. Usually not necessary
	// to specify.
	Mp4MajorBrand *string `locationName:"mp4MajorBrand" type:"string"`
}
16117
16118// String returns the string representation
16119func (s Mp4Settings) String() string {
16120	return awsutil.Prettify(s)
16121}
16122
16123// GoString returns the string representation
16124func (s Mp4Settings) GoString() string {
16125	return s.String()
16126}
16127
16128// SetAudioDuration sets the AudioDuration field's value.
16129func (s *Mp4Settings) SetAudioDuration(v string) *Mp4Settings {
16130	s.AudioDuration = &v
16131	return s
16132}
16133
16134// SetCslgAtom sets the CslgAtom field's value.
16135func (s *Mp4Settings) SetCslgAtom(v string) *Mp4Settings {
16136	s.CslgAtom = &v
16137	return s
16138}
16139
16140// SetCttsVersion sets the CttsVersion field's value.
16141func (s *Mp4Settings) SetCttsVersion(v int64) *Mp4Settings {
16142	s.CttsVersion = &v
16143	return s
16144}
16145
16146// SetFreeSpaceBox sets the FreeSpaceBox field's value.
16147func (s *Mp4Settings) SetFreeSpaceBox(v string) *Mp4Settings {
16148	s.FreeSpaceBox = &v
16149	return s
16150}
16151
16152// SetMoovPlacement sets the MoovPlacement field's value.
16153func (s *Mp4Settings) SetMoovPlacement(v string) *Mp4Settings {
16154	s.MoovPlacement = &v
16155	return s
16156}
16157
16158// SetMp4MajorBrand sets the Mp4MajorBrand field's value.
16159func (s *Mp4Settings) SetMp4MajorBrand(v string) *Mp4Settings {
16160	s.Mp4MajorBrand = &v
16161	return s
16162}
16163
// These settings relate to the fragmented MP4 container for the segments in
// your DASH outputs.
type MpdSettings struct {
	_ struct{} `type:"structure"`

	// Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH
	// manifest with elements for embedded 608 captions. This markup isn't generally
	// required, but some video players require it to discover and play embedded
	// 608 captions. Keep the default value, Exclude (EXCLUDE), to leave these elements
	// out. When you enable this setting, MediaConvert includes the corresponding
	// accessibility markup in your manifest.
	AccessibilityCaptionHints *string `locationName:"accessibilityCaptionHints" type:"string" enum:"MpdAccessibilityCaptionHints"`

	// Specify this setting only when your output will be consumed by a downstream
	// repackaging workflow that is sensitive to very small duration differences
	// between video and audio. For this situation, choose Match video duration
	// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
	// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
	// MediaConvert pads the output audio streams with silence or trims them to
	// ensure that the total duration of each audio stream is at least as long as
	// the total duration of the video stream. After padding or trimming, the audio
	// stream duration is no more than one frame longer than the video stream. MediaConvert
	// applies audio padding or trimming only to the end of the last segment of
	// the output. For unsegmented outputs, MediaConvert adds padding only to the
	// end of the file. When you keep the default value, any minor discrepancies
	// between audio and video duration will depend on your output audio codec.
	AudioDuration *string `locationName:"audioDuration" type:"string" enum:"MpdAudioDuration"`

	// Use this setting only in DASH output groups that include sidecar TTML or
	// IMSC captions. You specify sidecar captions in a separate output from your
	// audio and video. Choose Raw (RAW) for captions in a single XML file in a
	// raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in
	// XML format contained within fragmented MP4 files. This set of fragmented
	// MP4 files is separate from your video and audio fragmented MP4 files.
	CaptionContainerType *string `locationName:"captionContainerType" type:"string" enum:"MpdCaptionContainerType"`

	// Use this setting only when you specify SCTE-35 markers from ESAM. Choose
	// INSERT to put SCTE-35 markers in this output at the insertion points that
	// you specify in an ESAM XML document. Provide the document in the setting
	// SCC XML (sccXml).
	Scte35Esam *string `locationName:"scte35Esam" type:"string" enum:"MpdScte35Esam"`

	// Ignore this setting unless you have SCTE-35 markers in your input video file.
	// Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear
	// in your input to also appear in this output. Choose None (NONE) if you don't
	// want those SCTE-35 markers in this output.
	Scte35Source *string `locationName:"scte35Source" type:"string" enum:"MpdScte35Source"`
}
16212
16213// String returns the string representation
16214func (s MpdSettings) String() string {
16215	return awsutil.Prettify(s)
16216}
16217
16218// GoString returns the string representation
16219func (s MpdSettings) GoString() string {
16220	return s.String()
16221}
16222
16223// SetAccessibilityCaptionHints sets the AccessibilityCaptionHints field's value.
16224func (s *MpdSettings) SetAccessibilityCaptionHints(v string) *MpdSettings {
16225	s.AccessibilityCaptionHints = &v
16226	return s
16227}
16228
16229// SetAudioDuration sets the AudioDuration field's value.
16230func (s *MpdSettings) SetAudioDuration(v string) *MpdSettings {
16231	s.AudioDuration = &v
16232	return s
16233}
16234
16235// SetCaptionContainerType sets the CaptionContainerType field's value.
16236func (s *MpdSettings) SetCaptionContainerType(v string) *MpdSettings {
16237	s.CaptionContainerType = &v
16238	return s
16239}
16240
16241// SetScte35Esam sets the Scte35Esam field's value.
16242func (s *MpdSettings) SetScte35Esam(v string) *MpdSettings {
16243	s.Scte35Esam = &v
16244	return s
16245}
16246
16247// SetScte35Source sets the Scte35Source field's value.
16248func (s *MpdSettings) SetScte35Source(v string) *MpdSettings {
16249	s.Scte35Source = &v
16250	return s
16251}
16252
// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value MPEG2.
type Mpeg2Settings struct {
	_ struct{} `type:"structure"`

	// Specify the strength of any adaptive quantization filters that you enable.
	// The value that you choose here applies to the following settings: Spatial
	// adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive
	// quantization (temporalAdaptiveQuantization).
	AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"Mpeg2AdaptiveQuantization"`

	// Specify the average bitrate in bits per second. Required for VBR and CBR.
	// For MS Smooth outputs, bitrates must be unique when rounded down to the nearest
	// multiple of 1000.
	Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"`

	// Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output.
	CodecLevel *string `locationName:"codecLevel" type:"string" enum:"Mpeg2CodecLevel"`

	// Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output.
	CodecProfile *string `locationName:"codecProfile" type:"string" enum:"Mpeg2CodecProfile"`

	// Choose Adaptive to improve subjective video quality for high-motion content.
	// This will cause the service to use fewer B-frames (which infer information
	// based on other frames) for high-motion portions of the video and more B-frames
	// for low-motion portions. The maximum number of B-frames is limited by the
	// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).
	DynamicSubGop *string `locationName:"dynamicSubGop" type:"string" enum:"Mpeg2DynamicSubGop"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Mpeg2FramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Mpeg2FramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"24" type:"integer"`

	// Frequency of closed GOPs. In streaming applications, it is recommended that
	// this be set to 1 so a decoder joining mid-stream will receive an IDR frame
	// as quickly as possible. Setting this value to 0 will break output segmenting.
	GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"`

	// GOP Length (keyframe interval) in frames or seconds. Must be greater than
	// zero.
	GopSize *float64 `locationName:"gopSize" type:"double"`

	// Indicates if the GOP Size in MPEG2 is specified in frames or seconds. If
	// seconds the system will convert the GOP Size into a frame count at run time.
	GopSizeUnits *string `locationName:"gopSizeUnits" type:"string" enum:"Mpeg2GopSizeUnits"`

	// Percentage of the buffer that should initially be filled (HRD buffer model).
	HrdBufferInitialFillPercentage *int64 `locationName:"hrdBufferInitialFillPercentage" type:"integer"`

	// Size of buffer (HRD buffer model) in bits. For example, enter five megabits
	// as 5000000.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Choose the scan line type for the output. Keep the default value, Progressive
	// (PROGRESSIVE) to create a progressive output, regardless of the scan type
	// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
	// to create an output that's interlaced with the same field polarity throughout.
	// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
	// to produce outputs with the same field polarity as the source. For jobs that
	// have multiple inputs, the output field polarity might change over the course
	// of the output. Follow behavior depends on the input scan type. If the source
	// is interlaced, the output will be interlaced with the same polarity as the
	// source. If the source is progressive, the output will be interlaced with
	// top field first or bottom field first, depending on which of the Follow options
	// you choose.
	InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"Mpeg2InterlaceMode"`

	// Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision
	// for intra-block DC coefficients. If you choose the value auto, the service
	// will automatically select the precision based on the per-frame compression
	// ratio.
	IntraDcPrecision *string `locationName:"intraDcPrecision" type:"string" enum:"Mpeg2IntraDcPrecision"`

	// Maximum bitrate in bits/second. For example, enter five megabits per second
	// as 5000000.
	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`

	// Enforces separation between repeated (cadence) I-frames and I-frames inserted
	// by Scene Change Detection. If a scene change I-frame is within I-interval
	// frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene
	// change I-frame. GOP stretch requires enabling lookahead as well as setting
	// I-interval. The normal cadence resumes for the next GOP. This setting is
	// only used when Scene Change Detect is enabled. Note: Maximum GOP stretch
	// = GOP size + Min-I-interval - 1
	MinIInterval *int64 `locationName:"minIInterval" type:"integer"`

	// Number of B-frames between reference frames.
	NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"`

	// Optional. Specify how the service determines the pixel aspect ratio (PAR)
	// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
	// uses the PAR from your input video for your output. To specify a different
	// PAR in the console, choose any value other than Follow source. To specify
	// a different PAR by editing the JSON job specification, choose SPECIFIED.
	// When you choose SPECIFIED for this setting, you must also specify values
	// for the parNumerator and parDenominator settings.
	ParControl *string `locationName:"parControl" type:"string" enum:"Mpeg2ParControl"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parDenominator is 33.
	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parNumerator is 40.
	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, single-pass encoding.
	QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Mpeg2QualityTuningLevel"`

	// Use Rate control mode (Mpeg2RateControlMode) to specify whether the bitrate
	// is variable (vbr) or constant (cbr).
	RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Mpeg2RateControlMode"`

	// Use this setting for interlaced outputs, when your output frame rate is half
	// of your input frame rate. In this situation, choose Optimized interlacing
	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
	// case, each progressive frame from the input corresponds to an interlaced
	// field in the output. Keep the default value, Basic interlacing (INTERLACED),
	// for all other output frame rates. With basic interlacing, MediaConvert performs
	// any frame rate conversion first and then interlaces the frames. When you
	// choose Optimized interlacing and you set your output frame rate to a value
	// that isn't suitable for optimized interlacing, MediaConvert automatically
	// falls back to basic interlacing. Required settings: To use optimized interlacing,
	// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
	// use optimized interlacing for hard telecine outputs. You must also set Interlace
	// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
	ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"Mpeg2ScanTypeConversionMode"`

	// Enable this setting to insert I-frames at scene changes that the service
	// automatically detects. This improves video quality and is enabled by default.
	SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"Mpeg2SceneChangeDetect"`

	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
	// second (fps). Enable slow PAL to create a 25 fps output. When you enable
	// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
	// your audio to keep it synchronized with the video. Note that enabling this
	// setting will slightly reduce the duration of your video. Required settings:
	// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
	// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
	// 1.
	SlowPal *string `locationName:"slowPal" type:"string" enum:"Mpeg2SlowPal"`

	// Ignore this setting unless you need to comply with a specification that requires
	// a specific value. If you don't have a specification requirement, we recommend
	// that you adjust the softness of your output by using a lower value for the
	// setting Sharpness (sharpness) or by enabling a noise reducer filter (noiseReducerFilter).
	// The Softness (softness) setting specifies the quantization matrices that
	// the encoder uses. Keep the default value, 0, to use the AWS Elemental default
	// matrices. Choose a value from 17 to 128 to use planar interpolation. Increasing
	// values from 17 to 128 result in increasing reduction of high-frequency data.
	// The value 128 results in the softest video.
	Softness *int64 `locationName:"softness" type:"integer"`

	// Keep the default value, Enabled (ENABLED), to adjust quantization within
	// each frame based on spatial variation of content complexity. When you enable
	// this feature, the encoder uses fewer bits on areas that can sustain more
	// distortion with no noticeable visual degradation and uses more bits on areas
	// where any small distortion will be noticeable. For example, complex textured
	// blocks are encoded with fewer bits and smooth textured blocks are encoded
	// with more bits. Enabling this feature will almost always improve your video
	// quality. Note, though, that this feature doesn't take into account where
	// the viewer's attention is likely to be. If viewers are likely to be focusing
	// their attention on a part of the screen with a lot of complex texture, you
	// might choose to disable this feature. Related setting: When you enable spatial
	// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization)
	// depending on your content. For homogeneous content, such as cartoons and
	// video games, set it to Low. For content with a wider variety of textures,
	// set it to High or Higher.
	SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"Mpeg2SpatialAdaptiveQuantization"`

	// Specify whether this output's video uses the D10 syntax. Keep the default
	// value to not use the syntax. Related settings: When you choose D10 (D_10)
	// for your MXF profile (profile), you must also set this value to D10 (D_10).
	Syntax *string `locationName:"syntax" type:"string" enum:"Mpeg2Syntax"`

	// When you do frame rate conversion from 23.976 frames per second (fps) to
	// 29.97 fps, and your output scan type is interlaced, you can optionally enable
	// hard or soft telecine to create a smoother picture. Hard telecine (HARD)
	// produces a 29.97i output. Soft telecine (SOFT) produces an output with a
	// 23.976 output that signals to the video player device to do the conversion
	// during play back. When you keep the default value, None (NONE), MediaConvert
	// does a standard frame rate conversion to 29.97 without doing anything with
	// the field polarity to create a smoother picture.
	Telecine *string `locationName:"telecine" type:"string" enum:"Mpeg2Telecine"`

	// Keep the default value, Enabled (ENABLED), to adjust quantization within
	// each frame based on temporal variation of content complexity. When you enable
	// this feature, the encoder uses fewer bits on areas of the frame that aren't
	// moving and uses more bits on complex objects with sharp edges that move a
	// lot. For example, this feature improves the readability of text tickers on
	// newscasts and scoreboards on sports matches. Enabling this feature will almost
	// always improve your video quality. Note, though, that this feature doesn't
	// take into account where the viewer's attention is likely to be. If viewers
	// are likely to be focusing their attention on a part of the screen that doesn't
	// have moving objects with sharp edges, such as sports athletes' faces, you
	// might choose to disable this feature. Related setting: When you enable temporal
	// quantization, adjust the strength of the filter with the setting Adaptive
	// quantization (adaptiveQuantization).
	TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"Mpeg2TemporalAdaptiveQuantization"`
}
16500
16501// String returns the string representation
16502func (s Mpeg2Settings) String() string {
16503	return awsutil.Prettify(s)
16504}
16505
16506// GoString returns the string representation
16507func (s Mpeg2Settings) GoString() string {
16508	return s.String()
16509}
16510
16511// Validate inspects the fields of the type to determine if they are valid.
16512func (s *Mpeg2Settings) Validate() error {
16513	invalidParams := request.ErrInvalidParams{Context: "Mpeg2Settings"}
16514	if s.Bitrate != nil && *s.Bitrate < 1000 {
16515		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 1000))
16516	}
16517	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
16518		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
16519	}
16520	if s.FramerateNumerator != nil && *s.FramerateNumerator < 24 {
16521		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 24))
16522	}
16523	if s.MaxBitrate != nil && *s.MaxBitrate < 1000 {
16524		invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000))
16525	}
16526	if s.ParDenominator != nil && *s.ParDenominator < 1 {
16527		invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1))
16528	}
16529	if s.ParNumerator != nil && *s.ParNumerator < 1 {
16530		invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1))
16531	}
16532
16533	if invalidParams.Len() > 0 {
16534		return invalidParams
16535	}
16536	return nil
16537}
16538
16539// SetAdaptiveQuantization sets the AdaptiveQuantization field's value.
16540func (s *Mpeg2Settings) SetAdaptiveQuantization(v string) *Mpeg2Settings {
16541	s.AdaptiveQuantization = &v
16542	return s
16543}
16544
16545// SetBitrate sets the Bitrate field's value.
16546func (s *Mpeg2Settings) SetBitrate(v int64) *Mpeg2Settings {
16547	s.Bitrate = &v
16548	return s
16549}
16550
16551// SetCodecLevel sets the CodecLevel field's value.
16552func (s *Mpeg2Settings) SetCodecLevel(v string) *Mpeg2Settings {
16553	s.CodecLevel = &v
16554	return s
16555}
16556
16557// SetCodecProfile sets the CodecProfile field's value.
16558func (s *Mpeg2Settings) SetCodecProfile(v string) *Mpeg2Settings {
16559	s.CodecProfile = &v
16560	return s
16561}
16562
16563// SetDynamicSubGop sets the DynamicSubGop field's value.
16564func (s *Mpeg2Settings) SetDynamicSubGop(v string) *Mpeg2Settings {
16565	s.DynamicSubGop = &v
16566	return s
16567}
16568
16569// SetFramerateControl sets the FramerateControl field's value.
16570func (s *Mpeg2Settings) SetFramerateControl(v string) *Mpeg2Settings {
16571	s.FramerateControl = &v
16572	return s
16573}
16574
16575// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value.
16576func (s *Mpeg2Settings) SetFramerateConversionAlgorithm(v string) *Mpeg2Settings {
16577	s.FramerateConversionAlgorithm = &v
16578	return s
16579}
16580
16581// SetFramerateDenominator sets the FramerateDenominator field's value.
16582func (s *Mpeg2Settings) SetFramerateDenominator(v int64) *Mpeg2Settings {
16583	s.FramerateDenominator = &v
16584	return s
16585}
16586
16587// SetFramerateNumerator sets the FramerateNumerator field's value.
16588func (s *Mpeg2Settings) SetFramerateNumerator(v int64) *Mpeg2Settings {
16589	s.FramerateNumerator = &v
16590	return s
16591}
16592
16593// SetGopClosedCadence sets the GopClosedCadence field's value.
16594func (s *Mpeg2Settings) SetGopClosedCadence(v int64) *Mpeg2Settings {
16595	s.GopClosedCadence = &v
16596	return s
16597}
16598
16599// SetGopSize sets the GopSize field's value.
16600func (s *Mpeg2Settings) SetGopSize(v float64) *Mpeg2Settings {
16601	s.GopSize = &v
16602	return s
16603}
16604
16605// SetGopSizeUnits sets the GopSizeUnits field's value.
16606func (s *Mpeg2Settings) SetGopSizeUnits(v string) *Mpeg2Settings {
16607	s.GopSizeUnits = &v
16608	return s
16609}
16610
16611// SetHrdBufferInitialFillPercentage sets the HrdBufferInitialFillPercentage field's value.
16612func (s *Mpeg2Settings) SetHrdBufferInitialFillPercentage(v int64) *Mpeg2Settings {
16613	s.HrdBufferInitialFillPercentage = &v
16614	return s
16615}
16616
16617// SetHrdBufferSize sets the HrdBufferSize field's value.
16618func (s *Mpeg2Settings) SetHrdBufferSize(v int64) *Mpeg2Settings {
16619	s.HrdBufferSize = &v
16620	return s
16621}
16622
16623// SetInterlaceMode sets the InterlaceMode field's value.
16624func (s *Mpeg2Settings) SetInterlaceMode(v string) *Mpeg2Settings {
16625	s.InterlaceMode = &v
16626	return s
16627}
16628
16629// SetIntraDcPrecision sets the IntraDcPrecision field's value.
16630func (s *Mpeg2Settings) SetIntraDcPrecision(v string) *Mpeg2Settings {
16631	s.IntraDcPrecision = &v
16632	return s
16633}
16634
16635// SetMaxBitrate sets the MaxBitrate field's value.
16636func (s *Mpeg2Settings) SetMaxBitrate(v int64) *Mpeg2Settings {
16637	s.MaxBitrate = &v
16638	return s
16639}
16640
16641// SetMinIInterval sets the MinIInterval field's value.
16642func (s *Mpeg2Settings) SetMinIInterval(v int64) *Mpeg2Settings {
16643	s.MinIInterval = &v
16644	return s
16645}
16646
16647// SetNumberBFramesBetweenReferenceFrames sets the NumberBFramesBetweenReferenceFrames field's value.
16648func (s *Mpeg2Settings) SetNumberBFramesBetweenReferenceFrames(v int64) *Mpeg2Settings {
16649	s.NumberBFramesBetweenReferenceFrames = &v
16650	return s
16651}
16652
16653// SetParControl sets the ParControl field's value.
16654func (s *Mpeg2Settings) SetParControl(v string) *Mpeg2Settings {
16655	s.ParControl = &v
16656	return s
16657}
16658
16659// SetParDenominator sets the ParDenominator field's value.
16660func (s *Mpeg2Settings) SetParDenominator(v int64) *Mpeg2Settings {
16661	s.ParDenominator = &v
16662	return s
16663}
16664
16665// SetParNumerator sets the ParNumerator field's value.
16666func (s *Mpeg2Settings) SetParNumerator(v int64) *Mpeg2Settings {
16667	s.ParNumerator = &v
16668	return s
16669}
16670
16671// SetQualityTuningLevel sets the QualityTuningLevel field's value.
16672func (s *Mpeg2Settings) SetQualityTuningLevel(v string) *Mpeg2Settings {
16673	s.QualityTuningLevel = &v
16674	return s
16675}
16676
16677// SetRateControlMode sets the RateControlMode field's value.
16678func (s *Mpeg2Settings) SetRateControlMode(v string) *Mpeg2Settings {
16679	s.RateControlMode = &v
16680	return s
16681}
16682
16683// SetScanTypeConversionMode sets the ScanTypeConversionMode field's value.
16684func (s *Mpeg2Settings) SetScanTypeConversionMode(v string) *Mpeg2Settings {
16685	s.ScanTypeConversionMode = &v
16686	return s
16687}
16688
16689// SetSceneChangeDetect sets the SceneChangeDetect field's value.
16690func (s *Mpeg2Settings) SetSceneChangeDetect(v string) *Mpeg2Settings {
16691	s.SceneChangeDetect = &v
16692	return s
16693}
16694
16695// SetSlowPal sets the SlowPal field's value.
16696func (s *Mpeg2Settings) SetSlowPal(v string) *Mpeg2Settings {
16697	s.SlowPal = &v
16698	return s
16699}
16700
16701// SetSoftness sets the Softness field's value.
16702func (s *Mpeg2Settings) SetSoftness(v int64) *Mpeg2Settings {
16703	s.Softness = &v
16704	return s
16705}
16706
16707// SetSpatialAdaptiveQuantization sets the SpatialAdaptiveQuantization field's value.
16708func (s *Mpeg2Settings) SetSpatialAdaptiveQuantization(v string) *Mpeg2Settings {
16709	s.SpatialAdaptiveQuantization = &v
16710	return s
16711}
16712
16713// SetSyntax sets the Syntax field's value.
16714func (s *Mpeg2Settings) SetSyntax(v string) *Mpeg2Settings {
16715	s.Syntax = &v
16716	return s
16717}
16718
16719// SetTelecine sets the Telecine field's value.
16720func (s *Mpeg2Settings) SetTelecine(v string) *Mpeg2Settings {
16721	s.Telecine = &v
16722	return s
16723}
16724
16725// SetTemporalAdaptiveQuantization sets the TemporalAdaptiveQuantization field's value.
16726func (s *Mpeg2Settings) SetTemporalAdaptiveQuantization(v string) *Mpeg2Settings {
16727	s.TemporalAdaptiveQuantization = &v
16728	return s
16729}
16730
// Specify the details for each additional Microsoft Smooth Streaming manifest
// that you want the service to generate for this output group. Each manifest
// can reference a different subset of outputs in the group. Referenced from
// MsSmoothGroupSettings.AdditionalManifests.
type MsSmoothAdditionalManifest struct {
	_ struct{} `type:"structure"`

	// Specify a name modifier that the service adds to the name of this manifest
	// to make it different from the file names of the other main manifests in the
	// output group. For example, say that the default main manifest for your Microsoft
	// Smooth group is film-name.ismv. If you enter "-no-premium" for this setting,
	// then the file name the service generates for this top-level manifest is film-name-no-premium.ismv.
	// Minimum length: 1 (enforced by Validate).
	ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"`

	// Specify the outputs that you want this additional top-level manifest to reference.
	SelectedOutputs []*string `locationName:"selectedOutputs" type:"list"`
}
16747
// String returns a human-readable string representation of the value,
// produced by awsutil.Prettify. For logging and debugging only.
func (s MsSmoothAdditionalManifest) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String, satisfying the
// fmt.GoStringer interface.
func (s MsSmoothAdditionalManifest) GoString() string {
	return s.String()
}
16757
16758// Validate inspects the fields of the type to determine if they are valid.
16759func (s *MsSmoothAdditionalManifest) Validate() error {
16760	invalidParams := request.ErrInvalidParams{Context: "MsSmoothAdditionalManifest"}
16761	if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 {
16762		invalidParams.Add(request.NewErrParamMinLen("ManifestNameModifier", 1))
16763	}
16764
16765	if invalidParams.Len() > 0 {
16766		return invalidParams
16767	}
16768	return nil
16769}
16770
16771// SetManifestNameModifier sets the ManifestNameModifier field's value.
16772func (s *MsSmoothAdditionalManifest) SetManifestNameModifier(v string) *MsSmoothAdditionalManifest {
16773	s.ManifestNameModifier = &v
16774	return s
16775}
16776
16777// SetSelectedOutputs sets the SelectedOutputs field's value.
16778func (s *MsSmoothAdditionalManifest) SetSelectedOutputs(v []*string) *MsSmoothAdditionalManifest {
16779	s.SelectedOutputs = v
16780	return s
16781}
16782
// If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify
// the value SpekeKeyProvider. Referenced from MsSmoothGroupSettings.Encryption.
type MsSmoothEncryptionSettings struct {
	_ struct{} `type:"structure"`

	// If your output group type is HLS, DASH, or Microsoft Smooth, use these settings
	// when doing DRM encryption with a SPEKE-compliant key provider. If your output
	// group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
	SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure"`
}
16793
// String returns a human-readable string representation of the value,
// produced by awsutil.Prettify. For logging and debugging only.
func (s MsSmoothEncryptionSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String, satisfying the
// fmt.GoStringer interface.
func (s MsSmoothEncryptionSettings) GoString() string {
	return s.String()
}

// SetSpekeKeyProvider sets the SpekeKeyProvider field's value.
// The pointer is stored as-is (not copied), so later mutation of v is
// visible through the receiver.
func (s *MsSmoothEncryptionSettings) SetSpekeKeyProvider(v *SpekeKeyProvider) *MsSmoothEncryptionSettings {
	s.SpekeKeyProvider = v
	return s
}
16809
// Settings related to your Microsoft Smooth Streaming output package. For more
// information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set Type, under OutputGroupSettings, to
// MS_SMOOTH_GROUP_SETTINGS.
type MsSmoothGroupSettings struct {
	_ struct{} `type:"structure"`

	// By default, the service creates one .ism Microsoft Smooth Streaming manifest
	// for each Microsoft Smooth Streaming output group in your job. This default
	// manifest references every output in the output group. To create additional
	// manifests that reference a subset of the outputs in the output group, specify
	// a list of them here. Each entry is validated by Validate.
	AdditionalManifests []*MsSmoothAdditionalManifest `locationName:"additionalManifests" type:"list"`

	// COMBINE_DUPLICATE_STREAMS combines identical audio encoding settings across
	// a Microsoft Smooth output group into a single audio stream.
	AudioDeduplication *string `locationName:"audioDeduplication" type:"string" enum:"MsSmoothAudioDeduplication"`

	// Use Destination (Destination) to specify the S3 output location and the output
	// filename base. Destination accepts format identifiers. If you do not specify
	// the base filename in the URI, the service will use the filename of the input
	// file. If your job has multiple inputs, the service uses the filename of the
	// first input file.
	Destination *string `locationName:"destination" type:"string"`

	// Settings associated with the destination. Will vary based on the type of
	// destination
	DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"`

	// If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify
	// the value SpekeKeyProvider.
	Encryption *MsSmoothEncryptionSettings `locationName:"encryption" type:"structure"`

	// Use Fragment length (FragmentLength) to specify the mp4 fragment sizes in
	// seconds. Fragment length must be compatible with GOP size and frame rate.
	// Minimum value: 1 (enforced by Validate).
	FragmentLength *int64 `locationName:"fragmentLength" min:"1" type:"integer"`

	// Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding
	// format for the server and client manifest. Valid options are utf8 and utf16.
	ManifestEncoding *string `locationName:"manifestEncoding" type:"string" enum:"MsSmoothManifestEncoding"`
}
16852
// String returns a human-readable string representation of the value,
// produced by awsutil.Prettify. For logging and debugging only.
func (s MsSmoothGroupSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String, satisfying the
// fmt.GoStringer interface.
func (s MsSmoothGroupSettings) GoString() string {
	return s.String()
}
16862
16863// Validate inspects the fields of the type to determine if they are valid.
16864func (s *MsSmoothGroupSettings) Validate() error {
16865	invalidParams := request.ErrInvalidParams{Context: "MsSmoothGroupSettings"}
16866	if s.FragmentLength != nil && *s.FragmentLength < 1 {
16867		invalidParams.Add(request.NewErrParamMinValue("FragmentLength", 1))
16868	}
16869	if s.AdditionalManifests != nil {
16870		for i, v := range s.AdditionalManifests {
16871			if v == nil {
16872				continue
16873			}
16874			if err := v.Validate(); err != nil {
16875				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(request.ErrInvalidParams))
16876			}
16877		}
16878	}
16879
16880	if invalidParams.Len() > 0 {
16881		return invalidParams
16882	}
16883	return nil
16884}
16885
16886// SetAdditionalManifests sets the AdditionalManifests field's value.
16887func (s *MsSmoothGroupSettings) SetAdditionalManifests(v []*MsSmoothAdditionalManifest) *MsSmoothGroupSettings {
16888	s.AdditionalManifests = v
16889	return s
16890}
16891
16892// SetAudioDeduplication sets the AudioDeduplication field's value.
16893func (s *MsSmoothGroupSettings) SetAudioDeduplication(v string) *MsSmoothGroupSettings {
16894	s.AudioDeduplication = &v
16895	return s
16896}
16897
16898// SetDestination sets the Destination field's value.
16899func (s *MsSmoothGroupSettings) SetDestination(v string) *MsSmoothGroupSettings {
16900	s.Destination = &v
16901	return s
16902}
16903
16904// SetDestinationSettings sets the DestinationSettings field's value.
16905func (s *MsSmoothGroupSettings) SetDestinationSettings(v *DestinationSettings) *MsSmoothGroupSettings {
16906	s.DestinationSettings = v
16907	return s
16908}
16909
16910// SetEncryption sets the Encryption field's value.
16911func (s *MsSmoothGroupSettings) SetEncryption(v *MsSmoothEncryptionSettings) *MsSmoothGroupSettings {
16912	s.Encryption = v
16913	return s
16914}
16915
16916// SetFragmentLength sets the FragmentLength field's value.
16917func (s *MsSmoothGroupSettings) SetFragmentLength(v int64) *MsSmoothGroupSettings {
16918	s.FragmentLength = &v
16919	return s
16920}
16921
16922// SetManifestEncoding sets the ManifestEncoding field's value.
16923func (s *MsSmoothGroupSettings) SetManifestEncoding(v string) *MsSmoothGroupSettings {
16924	s.ManifestEncoding = &v
16925	return s
16926}
16927
// These settings relate to your MXF output container.
type MxfSettings struct {
	_ struct{} `type:"structure"`

	// Optional. When you have AFD signaling set up in your output video stream,
	// use this setting to choose whether to also include it in the MXF wrapper.
	// Choose Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper.
	// Choose Copy from video stream (COPY_FROM_VIDEO) to copy the AFD values from
	// the video stream for this output to the MXF wrapper. Regardless of which
	// option you choose, the AFD values remain in the video stream. Related settings:
	// To set up your output to include or exclude AFD values, see AfdSignaling,
	// under VideoDescription. On the console, find AFD signaling under the output's
	// video encoding settings.
	AfdSignaling *string `locationName:"afdSignaling" type:"string" enum:"MxfAfdSignaling"`

	// Specify the MXF profile, also called shim, for this output. When you choose
	// Auto, MediaConvert chooses a profile based on the video codec and resolution.
	// For a list of codecs supported with each MXF profile, see https://docs.aws.amazon.com/mediaconvert/latest/ug/codecs-supported-with-each-mxf-profile.html.
	// For more information about the automatic selection behavior, see https://docs.aws.amazon.com/mediaconvert/latest/ug/default-automatic-selection-of-mxf-profiles.html.
	Profile *string `locationName:"profile" type:"string" enum:"MxfProfile"`

	// Specify the XAVC profile settings for MXF outputs when you set your MXF profile
	// to XAVC. Ignored for other profiles.
	XavcProfileSettings *MxfXavcProfileSettings `locationName:"xavcProfileSettings" type:"structure"`
}
16953
// String returns a human-readable string representation of the value,
// produced by awsutil.Prettify. For logging and debugging only.
func (s MxfSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String, satisfying the
// fmt.GoStringer interface.
func (s MxfSettings) GoString() string {
	return s.String()
}
16963
16964// SetAfdSignaling sets the AfdSignaling field's value.
16965func (s *MxfSettings) SetAfdSignaling(v string) *MxfSettings {
16966	s.AfdSignaling = &v
16967	return s
16968}
16969
16970// SetProfile sets the Profile field's value.
16971func (s *MxfSettings) SetProfile(v string) *MxfSettings {
16972	s.Profile = &v
16973	return s
16974}
16975
16976// SetXavcProfileSettings sets the XavcProfileSettings field's value.
16977func (s *MxfSettings) SetXavcProfileSettings(v *MxfXavcProfileSettings) *MxfSettings {
16978	s.XavcProfileSettings = v
16979	return s
16980}
16981
// Specify the XAVC profile settings for MXF outputs when you set your MXF profile
// to XAVC.
type MxfXavcProfileSettings struct {
	_ struct{} `type:"structure"`

	// To create an output that complies with the XAVC file format guidelines for
	// interoperability, keep the default value, Drop frames for compliance (DROP_FRAMES_FOR_COMPLIANCE).
	// To include all frames from your input in this output, choose Allow any
	// duration (ALLOW_ANY_DURATION) instead. The number of frames that MediaConvert
	// excludes when you set this to Drop frames for compliance depends on the output
	// frame rate and duration.
	// NOTE(review): the generated text called both values "the default"; per the
	// surrounding sentence, DROP_FRAMES_FOR_COMPLIANCE is the default — confirm
	// against the MediaConvert API reference.
	DurationMode *string `locationName:"durationMode" type:"string" enum:"MxfXavcDurationMode"`

	// Specify a value for this setting only for outputs that you set up with one
	// of these two XAVC profiles: XAVC HD Intra CBG (XAVC_HD_INTRA_CBG) or XAVC
	// 4K Intra CBG (XAVC_4K_INTRA_CBG). Specify the amount of space in each frame
	// that the service reserves for ancillary data, such as teletext captions.
	// The default value for this setting is 1492 bytes per frame. This should be
	// sufficient to prevent overflow unless you have multiple pages of teletext
	// captions data. If you have a large amount of teletext data, specify a larger
	// number.
	MaxAncDataSize *int64 `locationName:"maxAncDataSize" type:"integer"`
}
17005
// String returns a human-readable string representation of the value,
// produced by awsutil.Prettify. For logging and debugging only.
func (s MxfXavcProfileSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String, satisfying the
// fmt.GoStringer interface.
func (s MxfXavcProfileSettings) GoString() string {
	return s.String()
}
17015
17016// SetDurationMode sets the DurationMode field's value.
17017func (s *MxfXavcProfileSettings) SetDurationMode(v string) *MxfXavcProfileSettings {
17018	s.DurationMode = &v
17019	return s
17020}
17021
17022// SetMaxAncDataSize sets the MaxAncDataSize field's value.
17023func (s *MxfXavcProfileSettings) SetMaxAncDataSize(v int64) *MxfXavcProfileSettings {
17024	s.MaxAncDataSize = &v
17025	return s
17026}
17027
// For forensic video watermarking, MediaConvert supports Nagra NexGuard File
// Marker watermarking. MediaConvert supports both PreRelease Content (NGPR/G2)
// and OTT Streaming workflows.
type NexGuardFileMarkerSettings struct {
	_ struct{} `type:"structure"`

	// Use the base64 license string that Nagra provides you. Enter it directly
	// in your JSON job specification or in the console. Required when you include
	// Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in
	// your job. Minimum length: 1 (enforced by Validate).
	License *string `locationName:"license" min:"1" type:"string"`

	// Specify the payload ID that you want associated with this output. Valid values
	// vary depending on your Nagra NexGuard forensic watermarking workflow. Required
	// when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings)
	// in your job. For PreRelease Content (NGPR/G2), specify an integer from 1
	// through 4,194,303. You must generate a unique ID for each asset you watermark,
	// and keep a record of which ID you have assigned to each asset. Neither Nagra
	// nor MediaConvert keep track of the relationship between output files and
	// your IDs. For OTT Streaming, create two adaptive bitrate (ABR) stacks for
	// each asset. Do this by setting up two output groups. For one output group,
	// set the value of Payload ID (payload) to 0 in every output. For the other
	// output group, set Payload ID (payload) to 1 in every output.
	Payload *int64 `locationName:"payload" type:"integer"`

	// Enter one of the watermarking preset strings that Nagra provides you. Required
	// when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings)
	// in your job. Minimum length: 1 (enforced by Validate).
	Preset *string `locationName:"preset" min:"1" type:"string"`

	// Optional. Ignore this setting unless Nagra support directs you to specify
	// a value. When you don't specify a value here, the Nagra NexGuard library
	// uses its default value.
	Strength *string `locationName:"strength" type:"string" enum:"WatermarkingStrength"`
}
17063
// String returns a human-readable string representation of the value,
// produced by awsutil.Prettify. For logging and debugging only.
func (s NexGuardFileMarkerSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String, satisfying the
// fmt.GoStringer interface.
func (s NexGuardFileMarkerSettings) GoString() string {
	return s.String()
}
17073
17074// Validate inspects the fields of the type to determine if they are valid.
17075func (s *NexGuardFileMarkerSettings) Validate() error {
17076	invalidParams := request.ErrInvalidParams{Context: "NexGuardFileMarkerSettings"}
17077	if s.License != nil && len(*s.License) < 1 {
17078		invalidParams.Add(request.NewErrParamMinLen("License", 1))
17079	}
17080	if s.Preset != nil && len(*s.Preset) < 1 {
17081		invalidParams.Add(request.NewErrParamMinLen("Preset", 1))
17082	}
17083
17084	if invalidParams.Len() > 0 {
17085		return invalidParams
17086	}
17087	return nil
17088}
17089
17090// SetLicense sets the License field's value.
17091func (s *NexGuardFileMarkerSettings) SetLicense(v string) *NexGuardFileMarkerSettings {
17092	s.License = &v
17093	return s
17094}
17095
17096// SetPayload sets the Payload field's value.
17097func (s *NexGuardFileMarkerSettings) SetPayload(v int64) *NexGuardFileMarkerSettings {
17098	s.Payload = &v
17099	return s
17100}
17101
17102// SetPreset sets the Preset field's value.
17103func (s *NexGuardFileMarkerSettings) SetPreset(v string) *NexGuardFileMarkerSettings {
17104	s.Preset = &v
17105	return s
17106}
17107
17108// SetStrength sets the Strength field's value.
17109func (s *NexGuardFileMarkerSettings) SetStrength(v string) *NexGuardFileMarkerSettings {
17110	s.Strength = &v
17111	return s
17112}
17113
// Settings for your Nielsen configuration. If you don't do Nielsen measurement
// and analytics, ignore these settings. When you enable Nielsen configuration
// (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs
// in the job. To enable Nielsen configuration programmatically, include an
// instance of nielsenConfiguration in your JSON job specification. Even if
// you don't include any children of nielsenConfiguration, you still enable
// the setting.
type NielsenConfiguration struct {
	_ struct{} `type:"structure"`

	// Nielsen has discontinued the use of breakout code functionality. If you must
	// include this property, set the value to zero.
	BreakoutCode *int64 `locationName:"breakoutCode" type:"integer"`

	// Use Distributor ID (DistributorID) to specify the distributor ID that is
	// assigned to your organization by Nielsen.
	DistributorId *string `locationName:"distributorId" type:"string"`
}
17132
// String returns a human-readable string representation of the value,
// produced by awsutil.Prettify. For logging and debugging only.
func (s NielsenConfiguration) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String, satisfying the
// fmt.GoStringer interface.
func (s NielsenConfiguration) GoString() string {
	return s.String()
}
17142
17143// SetBreakoutCode sets the BreakoutCode field's value.
17144func (s *NielsenConfiguration) SetBreakoutCode(v int64) *NielsenConfiguration {
17145	s.BreakoutCode = &v
17146	return s
17147}
17148
17149// SetDistributorId sets the DistributorId field's value.
17150func (s *NielsenConfiguration) SetDistributorId(v string) *NielsenConfiguration {
17151	s.DistributorId = &v
17152	return s
17153}
17154
// Ignore these settings unless you are using Nielsen non-linear watermarking.
// Specify the values that MediaConvert uses to generate and place Nielsen watermarks
// in your output audio. In addition to specifying these values, you also need
// to set up your cloud TIC server. These settings apply to every output in
// your job. The MediaConvert implementation is currently with the following
// Nielsen versions: Nielsen Watermark SDK Version 5.2.1 Nielsen NLM Watermark
// Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0]
type NielsenNonLinearWatermarkSettings struct {
	_ struct{} `type:"structure"`

	// Choose the type of Nielsen watermarks that you want in your outputs. When
	// you choose NAES 2 and NW (NAES2_AND_NW), you must provide a value for the
	// setting SID (sourceId). When you choose CBET (CBET), you must provide a value
	// for the setting CSID (cbetSourceId). When you choose NAES 2, NW, and CBET
	// (NAES2_AND_NW_AND_CBET), you must provide values for both of these settings.
	ActiveWatermarkProcess *string `locationName:"activeWatermarkProcess" type:"string" enum:"NielsenActiveWatermarkProcessType"`

	// Optional. Use this setting when you want the service to include an ADI file
	// in the Nielsen metadata .zip file. To provide an ADI file, store it in Amazon
	// S3 and provide a URL to it here. The URL should be in the following format:
	// S3://bucket/path/ADI-file. For more information about the metadata .zip file,
	// see the setting Metadata destination (metadataDestination).
	AdiFilename *string `locationName:"adiFilename" type:"string"`

	// Use the asset ID that you provide to Nielsen to uniquely identify this asset.
	// Required for all Nielsen non-linear watermarking. Minimum length: 1 (enforced
	// by Validate).
	AssetId *string `locationName:"assetId" min:"1" type:"string"`

	// Use the asset name that you provide to Nielsen for this asset. Required for
	// all Nielsen non-linear watermarking. Minimum length: 1 (enforced by Validate).
	AssetName *string `locationName:"assetName" min:"1" type:"string"`

	// Use the CSID that Nielsen provides to you. This CBET source ID should be
	// unique to your Nielsen account but common to all of your output assets that
	// have CBET watermarking. Required when you choose a value for the setting
	// Watermark types (ActiveWatermarkProcess) that includes CBET.
	CbetSourceId *string `locationName:"cbetSourceId" type:"string"`

	// Optional. If this asset uses an episode ID with Nielsen, provide it here.
	// Minimum length: 1 (enforced by Validate).
	EpisodeId *string `locationName:"episodeId" min:"1" type:"string"`

	// Specify the Amazon S3 location where you want MediaConvert to save your Nielsen
	// non-linear metadata .zip file. This Amazon S3 bucket must be in the same
	// Region as the one where you do your MediaConvert transcoding. If you want
	// to include an ADI file in this .zip file, use the setting ADI file (adiFilename)
	// to specify it. MediaConvert delivers the Nielsen metadata .zip files only
	// to your metadata destination Amazon S3 bucket. It doesn't deliver the .zip
	// files to Nielsen. You are responsible for delivering the metadata .zip files
	// to Nielsen.
	MetadataDestination *string `locationName:"metadataDestination" type:"string"`

	// Use the SID that Nielsen provides to you. This source ID should be unique
	// to your Nielsen account but common to all of your output assets. Required
	// for all Nielsen non-linear watermarking.
	SourceId *int64 `locationName:"sourceId" type:"integer"`

	// Required. Specify whether your source content already contains Nielsen non-linear
	// watermarks. When you set this value to Watermarked (WATERMARKED), the service
	// fails the job. Nielsen requires that you add non-linear watermarking to only
	// clean content that doesn't already have non-linear Nielsen watermarks.
	SourceWatermarkStatus *string `locationName:"sourceWatermarkStatus" type:"string" enum:"NielsenSourceWatermarkStatusType"`

	// Specify the endpoint for the TIC server that you have deployed and configured
	// in the AWS Cloud. Required for all Nielsen non-linear watermarking. MediaConvert
	// can't connect directly to a TIC server. Instead, you must use API Gateway
	// to provide a RESTful interface between MediaConvert and a TIC server that
	// you deploy in your AWS account. For more information on deploying a TIC server
	// in your AWS account and the required API Gateway, contact Nielsen support.
	TicServerUrl *string `locationName:"ticServerUrl" type:"string"`

	// To create assets that have the same TIC values in each audio track, keep
	// the default value Share TICs (SAME_TICS_PER_TRACK). To create assets that
	// have unique TIC values for each audio track, choose Use unique TICs (RESERVE_UNIQUE_TICS_PER_TRACK).
	UniqueTicPerAudioTrack *string `locationName:"uniqueTicPerAudioTrack" type:"string" enum:"NielsenUniqueTicPerAudioTrackType"`
}
17232
// String returns a human-readable string representation of the value,
// produced by awsutil.Prettify. For logging and debugging only.
func (s NielsenNonLinearWatermarkSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String, satisfying the
// fmt.GoStringer interface.
func (s NielsenNonLinearWatermarkSettings) GoString() string {
	return s.String()
}
17242
17243// Validate inspects the fields of the type to determine if they are valid.
17244func (s *NielsenNonLinearWatermarkSettings) Validate() error {
17245	invalidParams := request.ErrInvalidParams{Context: "NielsenNonLinearWatermarkSettings"}
17246	if s.AssetId != nil && len(*s.AssetId) < 1 {
17247		invalidParams.Add(request.NewErrParamMinLen("AssetId", 1))
17248	}
17249	if s.AssetName != nil && len(*s.AssetName) < 1 {
17250		invalidParams.Add(request.NewErrParamMinLen("AssetName", 1))
17251	}
17252	if s.EpisodeId != nil && len(*s.EpisodeId) < 1 {
17253		invalidParams.Add(request.NewErrParamMinLen("EpisodeId", 1))
17254	}
17255
17256	if invalidParams.Len() > 0 {
17257		return invalidParams
17258	}
17259	return nil
17260}
17261
17262// SetActiveWatermarkProcess sets the ActiveWatermarkProcess field's value.
17263func (s *NielsenNonLinearWatermarkSettings) SetActiveWatermarkProcess(v string) *NielsenNonLinearWatermarkSettings {
17264	s.ActiveWatermarkProcess = &v
17265	return s
17266}
17267
17268// SetAdiFilename sets the AdiFilename field's value.
17269func (s *NielsenNonLinearWatermarkSettings) SetAdiFilename(v string) *NielsenNonLinearWatermarkSettings {
17270	s.AdiFilename = &v
17271	return s
17272}
17273
17274// SetAssetId sets the AssetId field's value.
17275func (s *NielsenNonLinearWatermarkSettings) SetAssetId(v string) *NielsenNonLinearWatermarkSettings {
17276	s.AssetId = &v
17277	return s
17278}
17279
17280// SetAssetName sets the AssetName field's value.
17281func (s *NielsenNonLinearWatermarkSettings) SetAssetName(v string) *NielsenNonLinearWatermarkSettings {
17282	s.AssetName = &v
17283	return s
17284}
17285
17286// SetCbetSourceId sets the CbetSourceId field's value.
17287func (s *NielsenNonLinearWatermarkSettings) SetCbetSourceId(v string) *NielsenNonLinearWatermarkSettings {
17288	s.CbetSourceId = &v
17289	return s
17290}
17291
17292// SetEpisodeId sets the EpisodeId field's value.
17293func (s *NielsenNonLinearWatermarkSettings) SetEpisodeId(v string) *NielsenNonLinearWatermarkSettings {
17294	s.EpisodeId = &v
17295	return s
17296}
17297
17298// SetMetadataDestination sets the MetadataDestination field's value.
17299func (s *NielsenNonLinearWatermarkSettings) SetMetadataDestination(v string) *NielsenNonLinearWatermarkSettings {
17300	s.MetadataDestination = &v
17301	return s
17302}
17303
17304// SetSourceId sets the SourceId field's value.
17305func (s *NielsenNonLinearWatermarkSettings) SetSourceId(v int64) *NielsenNonLinearWatermarkSettings {
17306	s.SourceId = &v
17307	return s
17308}
17309
17310// SetSourceWatermarkStatus sets the SourceWatermarkStatus field's value.
17311func (s *NielsenNonLinearWatermarkSettings) SetSourceWatermarkStatus(v string) *NielsenNonLinearWatermarkSettings {
17312	s.SourceWatermarkStatus = &v
17313	return s
17314}
17315
17316// SetTicServerUrl sets the TicServerUrl field's value.
17317func (s *NielsenNonLinearWatermarkSettings) SetTicServerUrl(v string) *NielsenNonLinearWatermarkSettings {
17318	s.TicServerUrl = &v
17319	return s
17320}
17321
17322// SetUniqueTicPerAudioTrack sets the UniqueTicPerAudioTrack field's value.
17323func (s *NielsenNonLinearWatermarkSettings) SetUniqueTicPerAudioTrack(v string) *NielsenNonLinearWatermarkSettings {
17324	s.UniqueTicPerAudioTrack = &v
17325	return s
17326}
17327
// Enable the Noise reducer (NoiseReducer) feature to remove noise from your
// video output if necessary. Enable or disable this feature for each output
// individually. This setting is disabled by default. When you enable Noise
// reducer (NoiseReducer), you must also select a value for Noise reducer filter
// (NoiseReducerFilter).
//
// NOTE(review): fields are pointers so an unset value can be distinguished
// from the zero value. Presumably only the settings struct matching the chosen
// Filter is consulted by the service — confirm against the MediaConvert API.
type NoiseReducer struct {
	_ struct{} `type:"structure"`

	// Use Noise reducer filter (NoiseReducerFilter) to select one of the following
	// spatial image filtering functions. To use this setting, you must also enable
	// Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing
	// noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution
	// filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain
	// filtering based on JND principles. * Temporal optimizes video quality for
	// complex motion.
	Filter *string `locationName:"filter" type:"string" enum:"NoiseReducerFilter"`

	// Settings for a noise reducer filter
	FilterSettings *NoiseReducerFilterSettings `locationName:"filterSettings" type:"structure"`

	// Noise reducer filter settings for spatial filter.
	SpatialFilterSettings *NoiseReducerSpatialFilterSettings `locationName:"spatialFilterSettings" type:"structure"`

	// Noise reducer filter settings for temporal filter.
	TemporalFilterSettings *NoiseReducerTemporalFilterSettings `locationName:"temporalFilterSettings" type:"structure"`
}
17354
17355// String returns the string representation
17356func (s NoiseReducer) String() string {
17357	return awsutil.Prettify(s)
17358}
17359
17360// GoString returns the string representation
17361func (s NoiseReducer) GoString() string {
17362	return s.String()
17363}
17364
17365// Validate inspects the fields of the type to determine if they are valid.
17366func (s *NoiseReducer) Validate() error {
17367	invalidParams := request.ErrInvalidParams{Context: "NoiseReducer"}
17368	if s.SpatialFilterSettings != nil {
17369		if err := s.SpatialFilterSettings.Validate(); err != nil {
17370			invalidParams.AddNested("SpatialFilterSettings", err.(request.ErrInvalidParams))
17371		}
17372	}
17373	if s.TemporalFilterSettings != nil {
17374		if err := s.TemporalFilterSettings.Validate(); err != nil {
17375			invalidParams.AddNested("TemporalFilterSettings", err.(request.ErrInvalidParams))
17376		}
17377	}
17378
17379	if invalidParams.Len() > 0 {
17380		return invalidParams
17381	}
17382	return nil
17383}
17384
17385// SetFilter sets the Filter field's value.
17386func (s *NoiseReducer) SetFilter(v string) *NoiseReducer {
17387	s.Filter = &v
17388	return s
17389}
17390
17391// SetFilterSettings sets the FilterSettings field's value.
17392func (s *NoiseReducer) SetFilterSettings(v *NoiseReducerFilterSettings) *NoiseReducer {
17393	s.FilterSettings = v
17394	return s
17395}
17396
17397// SetSpatialFilterSettings sets the SpatialFilterSettings field's value.
17398func (s *NoiseReducer) SetSpatialFilterSettings(v *NoiseReducerSpatialFilterSettings) *NoiseReducer {
17399	s.SpatialFilterSettings = v
17400	return s
17401}
17402
17403// SetTemporalFilterSettings sets the TemporalFilterSettings field's value.
17404func (s *NoiseReducer) SetTemporalFilterSettings(v *NoiseReducerTemporalFilterSettings) *NoiseReducer {
17405	s.TemporalFilterSettings = v
17406	return s
17407}
17408
// Settings for a noise reducer filter. Used for the generic (non-spatial,
// non-temporal) filter variants.
type NoiseReducerFilterSettings struct {
	_ struct{} `type:"structure"`

	// Relative strength of noise reducing filter. Higher values produce stronger
	// filtering.
	Strength *int64 `locationName:"strength" type:"integer"`
}
17417
17418// String returns the string representation
17419func (s NoiseReducerFilterSettings) String() string {
17420	return awsutil.Prettify(s)
17421}
17422
17423// GoString returns the string representation
17424func (s NoiseReducerFilterSettings) GoString() string {
17425	return s.String()
17426}
17427
17428// SetStrength sets the Strength field's value.
17429func (s *NoiseReducerFilterSettings) SetStrength(v int64) *NoiseReducerFilterSettings {
17430	s.Strength = &v
17431	return s
17432}
17433
// Noise reducer filter settings for spatial filter.
type NoiseReducerSpatialFilterSettings struct {
	_ struct{} `type:"structure"`

	// Specify strength of post noise reduction sharpening filter, with 0 disabling
	// the filter and 3 enabling it at maximum strength.
	PostFilterSharpenStrength *int64 `locationName:"postFilterSharpenStrength" type:"integer"`

	// The speed of the filter, from -2 (lower speed) to 3 (higher speed), with
	// 0 being the nominal value. Values below -2 are rejected by Validate.
	Speed *int64 `locationName:"speed" type:"integer"`

	// Relative strength of noise reducing filter. Higher values produce stronger
	// filtering.
	Strength *int64 `locationName:"strength" type:"integer"`
}
17450
17451// String returns the string representation
17452func (s NoiseReducerSpatialFilterSettings) String() string {
17453	return awsutil.Prettify(s)
17454}
17455
17456// GoString returns the string representation
17457func (s NoiseReducerSpatialFilterSettings) GoString() string {
17458	return s.String()
17459}
17460
17461// Validate inspects the fields of the type to determine if they are valid.
17462func (s *NoiseReducerSpatialFilterSettings) Validate() error {
17463	invalidParams := request.ErrInvalidParams{Context: "NoiseReducerSpatialFilterSettings"}
17464	if s.Speed != nil && *s.Speed < -2 {
17465		invalidParams.Add(request.NewErrParamMinValue("Speed", -2))
17466	}
17467
17468	if invalidParams.Len() > 0 {
17469		return invalidParams
17470	}
17471	return nil
17472}
17473
17474// SetPostFilterSharpenStrength sets the PostFilterSharpenStrength field's value.
17475func (s *NoiseReducerSpatialFilterSettings) SetPostFilterSharpenStrength(v int64) *NoiseReducerSpatialFilterSettings {
17476	s.PostFilterSharpenStrength = &v
17477	return s
17478}
17479
17480// SetSpeed sets the Speed field's value.
17481func (s *NoiseReducerSpatialFilterSettings) SetSpeed(v int64) *NoiseReducerSpatialFilterSettings {
17482	s.Speed = &v
17483	return s
17484}
17485
17486// SetStrength sets the Strength field's value.
17487func (s *NoiseReducerSpatialFilterSettings) SetStrength(v int64) *NoiseReducerSpatialFilterSettings {
17488	s.Strength = &v
17489	return s
17490}
17491
// Noise reducer filter settings for temporal filter.
type NoiseReducerTemporalFilterSettings struct {
	_ struct{} `type:"structure"`

	// Use Aggressive mode for content that has complex motion. Higher values produce
	// stronger temporal filtering. This filters highly complex scenes more aggressively
	// and creates better VQ for low bitrate outputs.
	AggressiveMode *int64 `locationName:"aggressiveMode" type:"integer"`

	// Optional. When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL),
	// you can use this setting to apply sharpening. The default behavior, Auto
	// (AUTO), allows the transcoder to determine whether to apply filtering, depending
	// on input type and quality. When you set Noise reducer to Temporal, your output
	// bandwidth is reduced. When Post temporal sharpening is also enabled, that
	// bandwidth reduction is smaller.
	PostTemporalSharpening *string `locationName:"postTemporalSharpening" type:"string" enum:"NoiseFilterPostTemporalSharpening"`

	// The speed of the filter (higher number is faster). Low setting reduces bit
	// rate at the cost of transcode time, high setting improves transcode time
	// at the cost of bit rate. Values below -1 are rejected by Validate.
	Speed *int64 `locationName:"speed" type:"integer"`

	// Specify the strength of the noise reducing filter on this output. Higher
	// values produce stronger filtering. We recommend the following value ranges,
	// depending on the result that you want: * 0-2 for complexity reduction with
	// minimal sharpness loss * 2-8 for complexity reduction with image preservation
	// * 8-16 for a high level of complexity reduction
	Strength *int64 `locationName:"strength" type:"integer"`
}
17521
17522// String returns the string representation
17523func (s NoiseReducerTemporalFilterSettings) String() string {
17524	return awsutil.Prettify(s)
17525}
17526
17527// GoString returns the string representation
17528func (s NoiseReducerTemporalFilterSettings) GoString() string {
17529	return s.String()
17530}
17531
17532// Validate inspects the fields of the type to determine if they are valid.
17533func (s *NoiseReducerTemporalFilterSettings) Validate() error {
17534	invalidParams := request.ErrInvalidParams{Context: "NoiseReducerTemporalFilterSettings"}
17535	if s.Speed != nil && *s.Speed < -1 {
17536		invalidParams.Add(request.NewErrParamMinValue("Speed", -1))
17537	}
17538
17539	if invalidParams.Len() > 0 {
17540		return invalidParams
17541	}
17542	return nil
17543}
17544
17545// SetAggressiveMode sets the AggressiveMode field's value.
17546func (s *NoiseReducerTemporalFilterSettings) SetAggressiveMode(v int64) *NoiseReducerTemporalFilterSettings {
17547	s.AggressiveMode = &v
17548	return s
17549}
17550
17551// SetPostTemporalSharpening sets the PostTemporalSharpening field's value.
17552func (s *NoiseReducerTemporalFilterSettings) SetPostTemporalSharpening(v string) *NoiseReducerTemporalFilterSettings {
17553	s.PostTemporalSharpening = &v
17554	return s
17555}
17556
17557// SetSpeed sets the Speed field's value.
17558func (s *NoiseReducerTemporalFilterSettings) SetSpeed(v int64) *NoiseReducerTemporalFilterSettings {
17559	s.Speed = &v
17560	return s
17561}
17562
17563// SetStrength sets the Strength field's value.
17564func (s *NoiseReducerTemporalFilterSettings) SetStrength(v int64) *NoiseReducerTemporalFilterSettings {
17565	s.Strength = &v
17566	return s
17567}
17568
// NotFoundException is the API error type returned for the "NotFoundException"
// error code. RespMetadata carries the HTTP status code and request ID of the
// failed response; Message_ holds the service-supplied error message.
type NotFoundException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}
17575
17576// String returns the string representation
17577func (s NotFoundException) String() string {
17578	return awsutil.Prettify(s)
17579}
17580
17581// GoString returns the string representation
17582func (s NotFoundException) GoString() string {
17583	return s.String()
17584}
17585
17586func newErrorNotFoundException(v protocol.ResponseMetadata) error {
17587	return &NotFoundException{
17588		RespMetadata: v,
17589	}
17590}
17591
17592// Code returns the exception type name.
17593func (s *NotFoundException) Code() string {
17594	return "NotFoundException"
17595}
17596
17597// Message returns the exception's message.
17598func (s *NotFoundException) Message() string {
17599	if s.Message_ != nil {
17600		return *s.Message_
17601	}
17602	return ""
17603}
17604
17605// OrigErr always returns nil, satisfies awserr.Error interface.
17606func (s *NotFoundException) OrigErr() error {
17607	return nil
17608}
17609
17610func (s *NotFoundException) Error() string {
17611	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
17612}
17613
17614// Status code returns the HTTP status code for the request's response error.
17615func (s *NotFoundException) StatusCode() int {
17616	return s.RespMetadata.StatusCode
17617}
17618
17619// RequestID returns the service's response RequestID for request.
17620func (s *NotFoundException) RequestID() string {
17621	return s.RespMetadata.RequestID
17622}
17623
// Required when you set Codec, under AudioDescriptions>CodecSettings, to the
// value OPUS.
type OpusSettings struct {
	_ struct{} `type:"structure"`

	// Optional. Specify the average bitrate in bits per second. Valid values are
	// multiples of 8000, from 32000 through 192000. The default value is 96000,
	// which we recommend for quality and bandwidth. Validate only enforces the
	// 32000 minimum; the multiple-of-8000 rule is checked service-side.
	Bitrate *int64 `locationName:"bitrate" min:"32000" type:"integer"`

	// Specify the number of channels in this output audio track. Choosing Mono
	// on the console gives you 1 output channel; choosing Stereo gives you 2. In
	// the API, valid values are 1 and 2.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// Optional. Sample rate in hz. Valid values are 16000, 24000, and 48000. The
	// default value is 48000.
	SampleRate *int64 `locationName:"sampleRate" min:"16000" type:"integer"`
}
17643
17644// String returns the string representation
17645func (s OpusSettings) String() string {
17646	return awsutil.Prettify(s)
17647}
17648
17649// GoString returns the string representation
17650func (s OpusSettings) GoString() string {
17651	return s.String()
17652}
17653
17654// Validate inspects the fields of the type to determine if they are valid.
17655func (s *OpusSettings) Validate() error {
17656	invalidParams := request.ErrInvalidParams{Context: "OpusSettings"}
17657	if s.Bitrate != nil && *s.Bitrate < 32000 {
17658		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 32000))
17659	}
17660	if s.Channels != nil && *s.Channels < 1 {
17661		invalidParams.Add(request.NewErrParamMinValue("Channels", 1))
17662	}
17663	if s.SampleRate != nil && *s.SampleRate < 16000 {
17664		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 16000))
17665	}
17666
17667	if invalidParams.Len() > 0 {
17668		return invalidParams
17669	}
17670	return nil
17671}
17672
17673// SetBitrate sets the Bitrate field's value.
17674func (s *OpusSettings) SetBitrate(v int64) *OpusSettings {
17675	s.Bitrate = &v
17676	return s
17677}
17678
17679// SetChannels sets the Channels field's value.
17680func (s *OpusSettings) SetChannels(v int64) *OpusSettings {
17681	s.Channels = &v
17682	return s
17683}
17684
17685// SetSampleRate sets the SampleRate field's value.
17686func (s *OpusSettings) SetSampleRate(v int64) *OpusSettings {
17687	s.SampleRate = &v
17688	return s
17689}
17690
// Each output in your job is a collection of settings that describes how you
// want MediaConvert to encode a single output file or stream. For more information,
// see https://docs.aws.amazon.com/mediaconvert/latest/ug/create-outputs.html.
//
// NOTE(review): Preset and ContainerSettings are mutually exclusive per the
// Preset field's doc below; Validate does not enforce that — presumably the
// service rejects jobs that set both.
type Output struct {
	_ struct{} `type:"structure"`

	// (AudioDescriptions) contains groups of audio encoding settings organized
	// by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions)
	// can contain multiple groups of encoding settings.
	AudioDescriptions []*AudioDescription `locationName:"audioDescriptions" type:"list"`

	// (CaptionDescriptions) contains groups of captions settings. For each output
	// that has captions, include one instance of (CaptionDescriptions). (CaptionDescriptions)
	// can contain multiple groups of captions settings.
	CaptionDescriptions []*CaptionDescription `locationName:"captionDescriptions" type:"list"`

	// Container specific settings.
	ContainerSettings *ContainerSettings `locationName:"containerSettings" type:"structure"`

	// Use Extension (Extension) to specify the file extension for outputs in File
	// output groups. If you do not specify a value, the service will use default
	// extensions by container type as follows * MPEG-2 transport stream, m2ts *
	// Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container,
	// webm * No Container, the service will use codec extensions (e.g. AAC, H265,
	// H265, AC3)
	Extension *string `locationName:"extension" type:"string"`

	// Use Name modifier (NameModifier) to have the service add a string to the
	// end of each output filename. You specify the base filename as part of your
	// destination URI. When you create multiple outputs in the same output group,
	// Name modifier (NameModifier) is required. Name modifier also accepts format
	// identifiers. For DASH ISO outputs, if you use the format identifiers $Number$
	// or $Time$ in one output, you must use them in the same way in all outputs
	// of the output group.
	NameModifier *string `locationName:"nameModifier" min:"1" type:"string"`

	// Specific settings for this type of output.
	OutputSettings *OutputSettings `locationName:"outputSettings" type:"structure"`

	// Use Preset (Preset) to specify a preset for your transcoding settings. Provide
	// the system or custom preset name. You can specify either Preset (Preset)
	// or Container settings (ContainerSettings), but not both.
	Preset *string `locationName:"preset" type:"string"`

	// VideoDescription contains a group of video encoding settings. The specific
	// video settings depend on the video codec that you choose for the property
	// codec. Include one instance of VideoDescription per output.
	VideoDescription *VideoDescription `locationName:"videoDescription" type:"structure"`
}
17740
17741// String returns the string representation
17742func (s Output) String() string {
17743	return awsutil.Prettify(s)
17744}
17745
17746// GoString returns the string representation
17747func (s Output) GoString() string {
17748	return s.String()
17749}
17750
17751// Validate inspects the fields of the type to determine if they are valid.
17752func (s *Output) Validate() error {
17753	invalidParams := request.ErrInvalidParams{Context: "Output"}
17754	if s.NameModifier != nil && len(*s.NameModifier) < 1 {
17755		invalidParams.Add(request.NewErrParamMinLen("NameModifier", 1))
17756	}
17757	if s.AudioDescriptions != nil {
17758		for i, v := range s.AudioDescriptions {
17759			if v == nil {
17760				continue
17761			}
17762			if err := v.Validate(); err != nil {
17763				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AudioDescriptions", i), err.(request.ErrInvalidParams))
17764			}
17765		}
17766	}
17767	if s.CaptionDescriptions != nil {
17768		for i, v := range s.CaptionDescriptions {
17769			if v == nil {
17770				continue
17771			}
17772			if err := v.Validate(); err != nil {
17773				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionDescriptions", i), err.(request.ErrInvalidParams))
17774			}
17775		}
17776	}
17777	if s.ContainerSettings != nil {
17778		if err := s.ContainerSettings.Validate(); err != nil {
17779			invalidParams.AddNested("ContainerSettings", err.(request.ErrInvalidParams))
17780		}
17781	}
17782	if s.VideoDescription != nil {
17783		if err := s.VideoDescription.Validate(); err != nil {
17784			invalidParams.AddNested("VideoDescription", err.(request.ErrInvalidParams))
17785		}
17786	}
17787
17788	if invalidParams.Len() > 0 {
17789		return invalidParams
17790	}
17791	return nil
17792}
17793
17794// SetAudioDescriptions sets the AudioDescriptions field's value.
17795func (s *Output) SetAudioDescriptions(v []*AudioDescription) *Output {
17796	s.AudioDescriptions = v
17797	return s
17798}
17799
17800// SetCaptionDescriptions sets the CaptionDescriptions field's value.
17801func (s *Output) SetCaptionDescriptions(v []*CaptionDescription) *Output {
17802	s.CaptionDescriptions = v
17803	return s
17804}
17805
17806// SetContainerSettings sets the ContainerSettings field's value.
17807func (s *Output) SetContainerSettings(v *ContainerSettings) *Output {
17808	s.ContainerSettings = v
17809	return s
17810}
17811
17812// SetExtension sets the Extension field's value.
17813func (s *Output) SetExtension(v string) *Output {
17814	s.Extension = &v
17815	return s
17816}
17817
17818// SetNameModifier sets the NameModifier field's value.
17819func (s *Output) SetNameModifier(v string) *Output {
17820	s.NameModifier = &v
17821	return s
17822}
17823
17824// SetOutputSettings sets the OutputSettings field's value.
17825func (s *Output) SetOutputSettings(v *OutputSettings) *Output {
17826	s.OutputSettings = v
17827	return s
17828}
17829
17830// SetPreset sets the Preset field's value.
17831func (s *Output) SetPreset(v string) *Output {
17832	s.Preset = &v
17833	return s
17834}
17835
17836// SetVideoDescription sets the VideoDescription field's value.
17837func (s *Output) SetVideoDescription(v *VideoDescription) *Output {
17838	s.VideoDescription = v
17839	return s
17840}
17841
// OutputChannel mapping settings. Exactly one of the two lists is expected
// to be used per mapping — integer gains or fine-tuned decimal gains.
type OutputChannelMapping struct {
	_ struct{} `type:"structure"`

	// Use this setting to specify your remix values when they are integers, such
	// as -10, 0, or 4.
	InputChannels []*int64 `locationName:"inputChannels" type:"list"`

	// Use this setting to specify your remix values when they have a decimal component,
	// such as -10.312, 0.08, or 4.9. MediaConvert rounds your remixing values to
	// the nearest thousandth.
	InputChannelsFineTune []*float64 `locationName:"inputChannelsFineTune" type:"list"`
}
17855
17856// String returns the string representation
17857func (s OutputChannelMapping) String() string {
17858	return awsutil.Prettify(s)
17859}
17860
17861// GoString returns the string representation
17862func (s OutputChannelMapping) GoString() string {
17863	return s.String()
17864}
17865
17866// SetInputChannels sets the InputChannels field's value.
17867func (s *OutputChannelMapping) SetInputChannels(v []*int64) *OutputChannelMapping {
17868	s.InputChannels = v
17869	return s
17870}
17871
17872// SetInputChannelsFineTune sets the InputChannelsFineTune field's value.
17873func (s *OutputChannelMapping) SetInputChannelsFineTune(v []*float64) *OutputChannelMapping {
17874	s.InputChannelsFineTune = v
17875	return s
17876}
17877
// Details regarding output, as reported back by the service for a completed
// or in-progress job.
type OutputDetail struct {
	_ struct{} `type:"structure"`

	// Duration in milliseconds
	DurationInMs *int64 `locationName:"durationInMs" type:"integer"`

	// Contains details about the output's video stream
	VideoDetails *VideoDetail `locationName:"videoDetails" type:"structure"`
}
17888
17889// String returns the string representation
17890func (s OutputDetail) String() string {
17891	return awsutil.Prettify(s)
17892}
17893
17894// GoString returns the string representation
17895func (s OutputDetail) GoString() string {
17896	return s.String()
17897}
17898
17899// SetDurationInMs sets the DurationInMs field's value.
17900func (s *OutputDetail) SetDurationInMs(v int64) *OutputDetail {
17901	s.DurationInMs = &v
17902	return s
17903}
17904
17905// SetVideoDetails sets the VideoDetails field's value.
17906func (s *OutputDetail) SetVideoDetails(v *VideoDetail) *OutputDetail {
17907	s.VideoDetails = v
17908	return s
17909}
17910
// Group of outputs that share packaging settings (see OutputGroupSettings).
type OutputGroup struct {
	_ struct{} `type:"structure"`

	// Use automated encoding to have MediaConvert choose your encoding settings
	// for you, based on characteristics of your input video.
	AutomatedEncodingSettings *AutomatedEncodingSettings `locationName:"automatedEncodingSettings" type:"structure"`

	// Use Custom Group Name (CustomName) to specify a name for the output group.
	// This value is displayed on the console and can make your job settings JSON
	// more human-readable. It does not affect your outputs. Use up to twelve characters
	// that are either letters, numbers, spaces, or underscores.
	CustomName *string `locationName:"customName" type:"string"`

	// Name of the output group
	Name *string `locationName:"name" type:"string"`

	// Output Group settings, including type
	OutputGroupSettings *OutputGroupSettings `locationName:"outputGroupSettings" type:"structure"`

	// This object holds groups of encoding settings, one group of settings per
	// output.
	Outputs []*Output `locationName:"outputs" type:"list"`
}
17935
17936// String returns the string representation
17937func (s OutputGroup) String() string {
17938	return awsutil.Prettify(s)
17939}
17940
17941// GoString returns the string representation
17942func (s OutputGroup) GoString() string {
17943	return s.String()
17944}
17945
17946// Validate inspects the fields of the type to determine if they are valid.
17947func (s *OutputGroup) Validate() error {
17948	invalidParams := request.ErrInvalidParams{Context: "OutputGroup"}
17949	if s.AutomatedEncodingSettings != nil {
17950		if err := s.AutomatedEncodingSettings.Validate(); err != nil {
17951			invalidParams.AddNested("AutomatedEncodingSettings", err.(request.ErrInvalidParams))
17952		}
17953	}
17954	if s.OutputGroupSettings != nil {
17955		if err := s.OutputGroupSettings.Validate(); err != nil {
17956			invalidParams.AddNested("OutputGroupSettings", err.(request.ErrInvalidParams))
17957		}
17958	}
17959	if s.Outputs != nil {
17960		for i, v := range s.Outputs {
17961			if v == nil {
17962				continue
17963			}
17964			if err := v.Validate(); err != nil {
17965				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Outputs", i), err.(request.ErrInvalidParams))
17966			}
17967		}
17968	}
17969
17970	if invalidParams.Len() > 0 {
17971		return invalidParams
17972	}
17973	return nil
17974}
17975
17976// SetAutomatedEncodingSettings sets the AutomatedEncodingSettings field's value.
17977func (s *OutputGroup) SetAutomatedEncodingSettings(v *AutomatedEncodingSettings) *OutputGroup {
17978	s.AutomatedEncodingSettings = v
17979	return s
17980}
17981
17982// SetCustomName sets the CustomName field's value.
17983func (s *OutputGroup) SetCustomName(v string) *OutputGroup {
17984	s.CustomName = &v
17985	return s
17986}
17987
17988// SetName sets the Name field's value.
17989func (s *OutputGroup) SetName(v string) *OutputGroup {
17990	s.Name = &v
17991	return s
17992}
17993
17994// SetOutputGroupSettings sets the OutputGroupSettings field's value.
17995func (s *OutputGroup) SetOutputGroupSettings(v *OutputGroupSettings) *OutputGroup {
17996	s.OutputGroupSettings = v
17997	return s
17998}
17999
18000// SetOutputs sets the Outputs field's value.
18001func (s *OutputGroup) SetOutputs(v []*Output) *OutputGroup {
18002	s.Outputs = v
18003	return s
18004}
18005
// Contains details about the output groups specified in the job settings.
type OutputGroupDetail struct {
	_ struct{} `type:"structure"`

	// Details about the output
	OutputDetails []*OutputDetail `locationName:"outputDetails" type:"list"`
}
18013
18014// String returns the string representation
18015func (s OutputGroupDetail) String() string {
18016	return awsutil.Prettify(s)
18017}
18018
18019// GoString returns the string representation
18020func (s OutputGroupDetail) GoString() string {
18021	return s.String()
18022}
18023
18024// SetOutputDetails sets the OutputDetails field's value.
18025func (s *OutputGroupDetail) SetOutputDetails(v []*OutputDetail) *OutputGroupDetail {
18026	s.OutputDetails = v
18027	return s
18028}
18029
// Output Group settings, including type.
//
// NOTE(review): Type selects which of the group-settings objects below applies;
// presumably only the matching object should be populated — confirm against
// the MediaConvert API.
type OutputGroupSettings struct {
	_ struct{} `type:"structure"`

	// Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
	// When you work directly in your JSON job specification, include this object
	// and any required children when you set Type, under OutputGroupSettings, to
	// CMAF_GROUP_SETTINGS.
	CmafGroupSettings *CmafGroupSettings `locationName:"cmafGroupSettings" type:"structure"`

	// Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
	// When you work directly in your JSON job specification, include this object
	// and any required children when you set Type, under OutputGroupSettings, to
	// DASH_ISO_GROUP_SETTINGS.
	DashIsoGroupSettings *DashIsoGroupSettings `locationName:"dashIsoGroupSettings" type:"structure"`

	// Settings related to your File output group. MediaConvert uses this group
	// of settings to generate a single standalone file, rather than a streaming
	// package. When you work directly in your JSON job specification, include this
	// object and any required children when you set Type, under OutputGroupSettings,
	// to FILE_GROUP_SETTINGS.
	FileGroupSettings *FileGroupSettings `locationName:"fileGroupSettings" type:"structure"`

	// Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
	// When you work directly in your JSON job specification, include this object
	// and any required children when you set Type, under OutputGroupSettings, to
	// HLS_GROUP_SETTINGS.
	HlsGroupSettings *HlsGroupSettings `locationName:"hlsGroupSettings" type:"structure"`

	// Settings related to your Microsoft Smooth Streaming output package. For more
	// information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
	// When you work directly in your JSON job specification, include this object
	// and any required children when you set Type, under OutputGroupSettings, to
	// MS_SMOOTH_GROUP_SETTINGS.
	MsSmoothGroupSettings *MsSmoothGroupSettings `locationName:"msSmoothGroupSettings" type:"structure"`

	// Type of output group (File group, Apple HLS, DASH ISO, Microsoft Smooth Streaming,
	// CMAF)
	Type *string `locationName:"type" type:"string" enum:"OutputGroupType"`
}
18070
18071// String returns the string representation
18072func (s OutputGroupSettings) String() string {
18073	return awsutil.Prettify(s)
18074}
18075
// GoString returns the string representation, satisfying fmt.GoStringer so
// that %#v formatting produces the same output as String.
func (s OutputGroupSettings) GoString() string {
	return s.String()
}
18080
// Validate inspects the fields of the type to determine if they are valid.
// Each non-nil nested settings group is validated recursively and its errors
// are aggregated into a single request.ErrInvalidParams; nil is returned when
// all constraints are satisfied.
func (s *OutputGroupSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "OutputGroupSettings"}
	if s.CmafGroupSettings != nil {
		if err := s.CmafGroupSettings.Validate(); err != nil {
			invalidParams.AddNested("CmafGroupSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.DashIsoGroupSettings != nil {
		if err := s.DashIsoGroupSettings.Validate(); err != nil {
			invalidParams.AddNested("DashIsoGroupSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.HlsGroupSettings != nil {
		if err := s.HlsGroupSettings.Validate(); err != nil {
			invalidParams.AddNested("HlsGroupSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.MsSmoothGroupSettings != nil {
		if err := s.MsSmoothGroupSettings.Validate(); err != nil {
			invalidParams.AddNested("MsSmoothGroupSettings", err.(request.ErrInvalidParams))
		}
	}
	// Note: FileGroupSettings and Type carry no validation constraints in the
	// service model, so they are intentionally not checked here.

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
18110
// SetCmafGroupSettings sets the CmafGroupSettings field's value.
// It returns s so that setter calls can be chained.
func (s *OutputGroupSettings) SetCmafGroupSettings(v *CmafGroupSettings) *OutputGroupSettings {
	s.CmafGroupSettings = v
	return s
}
18116
// SetDashIsoGroupSettings sets the DashIsoGroupSettings field's value.
// It returns s so that setter calls can be chained.
func (s *OutputGroupSettings) SetDashIsoGroupSettings(v *DashIsoGroupSettings) *OutputGroupSettings {
	s.DashIsoGroupSettings = v
	return s
}
18122
// SetFileGroupSettings sets the FileGroupSettings field's value.
// It returns s so that setter calls can be chained.
func (s *OutputGroupSettings) SetFileGroupSettings(v *FileGroupSettings) *OutputGroupSettings {
	s.FileGroupSettings = v
	return s
}
18128
// SetHlsGroupSettings sets the HlsGroupSettings field's value.
// It returns s so that setter calls can be chained.
func (s *OutputGroupSettings) SetHlsGroupSettings(v *HlsGroupSettings) *OutputGroupSettings {
	s.HlsGroupSettings = v
	return s
}
18134
// SetMsSmoothGroupSettings sets the MsSmoothGroupSettings field's value.
// It returns s so that setter calls can be chained.
func (s *OutputGroupSettings) SetMsSmoothGroupSettings(v *MsSmoothGroupSettings) *OutputGroupSettings {
	s.MsSmoothGroupSettings = v
	return s
}
18140
// SetType sets the Type field's value. v should be one of the
// OutputGroupType enum values; the address of the local copy is stored, so
// callers may pass literals. Returns s so that setter calls can be chained.
func (s *OutputGroupSettings) SetType(v string) *OutputGroupSettings {
	s.Type = &v
	return s
}
18146
// Specific settings for this type of output.
type OutputSettings struct {
	// Unexported marker; its struct tag carries protocol serialization metadata.
	_ struct{} `type:"structure"`

	// Settings for HLS output groups
	HlsSettings *HlsSettings `locationName:"hlsSettings" type:"structure"`
}

// String returns the string representation of the value, rendered by
// awsutil.Prettify. Implements fmt.Stringer.
func (s OutputSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation, satisfying fmt.GoStringer so
// that %#v formatting produces the same output as String.
func (s OutputSettings) GoString() string {
	return s.String()
}

// SetHlsSettings sets the HlsSettings field's value.
// It returns s so that setter calls can be chained.
func (s *OutputSettings) SetHlsSettings(v *HlsSettings) *OutputSettings {
	s.HlsSettings = v
	return s
}
18170
// If you work with a third party video watermarking partner, use the group
// of settings that correspond with your watermarking partner to include watermarks
// in your output.
type PartnerWatermarking struct {
	// Unexported marker; its struct tag carries protocol serialization metadata.
	_ struct{} `type:"structure"`

	// For forensic video watermarking, MediaConvert supports Nagra NexGuard File
	// Marker watermarking. MediaConvert supports both PreRelease Content (NGPR/G2)
	// and OTT Streaming workflows.
	NexguardFileMarkerSettings *NexGuardFileMarkerSettings `locationName:"nexguardFileMarkerSettings" type:"structure"`
}

// String returns the string representation of the value, rendered by
// awsutil.Prettify. Implements fmt.Stringer.
func (s PartnerWatermarking) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation, satisfying fmt.GoStringer so
// that %#v formatting produces the same output as String.
func (s PartnerWatermarking) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// The nested NexGuard settings are validated when present; nil is returned
// when all constraints are satisfied.
func (s *PartnerWatermarking) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "PartnerWatermarking"}
	if s.NexguardFileMarkerSettings != nil {
		if err := s.NexguardFileMarkerSettings.Validate(); err != nil {
			invalidParams.AddNested("NexguardFileMarkerSettings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetNexguardFileMarkerSettings sets the NexguardFileMarkerSettings field's value.
// It returns s so that setter calls can be chained.
func (s *PartnerWatermarking) SetNexguardFileMarkerSettings(v *NexGuardFileMarkerSettings) *PartnerWatermarking {
	s.NexguardFileMarkerSettings = v
	return s
}
18213
// A preset is a collection of preconfigured media conversion settings that
// you want MediaConvert to apply to the output during the conversion process.
type Preset struct {
	// Unexported marker; its struct tag carries protocol serialization metadata.
	_ struct{} `type:"structure"`

	// An identifier for this resource that is unique within all of AWS.
	Arn *string `locationName:"arn" type:"string"`

	// An optional category you create to organize your presets.
	Category *string `locationName:"category" type:"string"`

	// The timestamp in epoch seconds for preset creation.
	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// An optional description you create for each preset.
	Description *string `locationName:"description" type:"string"`

	// The timestamp in epoch seconds when the preset was last updated.
	LastUpdated *time.Time `locationName:"lastUpdated" type:"timestamp" timestampFormat:"unixTimestamp"`

	// A name you create for each preset. Each name must be unique within your account.
	//
	// Name is a required field
	Name *string `locationName:"name" type:"string" required:"true"`

	// Settings for preset
	//
	// Settings is a required field
	Settings *PresetSettings `locationName:"settings" type:"structure" required:"true"`

	// A preset can be of two types: system or custom. System or built-in preset
	// can't be modified or deleted by the user.
	Type *string `locationName:"type" type:"string" enum:"Type"`
}

// String returns the string representation of the value, rendered by
// awsutil.Prettify. Implements fmt.Stringer.
func (s Preset) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation, satisfying fmt.GoStringer so
// that %#v formatting produces the same output as String.
func (s Preset) GoString() string {
	return s.String()
}

// SetArn sets the Arn field's value.
// It returns s so that setter calls can be chained.
func (s *Preset) SetArn(v string) *Preset {
	s.Arn = &v
	return s
}

// SetCategory sets the Category field's value.
// It returns s so that setter calls can be chained.
func (s *Preset) SetCategory(v string) *Preset {
	s.Category = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
// It returns s so that setter calls can be chained.
func (s *Preset) SetCreatedAt(v time.Time) *Preset {
	s.CreatedAt = &v
	return s
}

// SetDescription sets the Description field's value.
// It returns s so that setter calls can be chained.
func (s *Preset) SetDescription(v string) *Preset {
	s.Description = &v
	return s
}

// SetLastUpdated sets the LastUpdated field's value.
// It returns s so that setter calls can be chained.
func (s *Preset) SetLastUpdated(v time.Time) *Preset {
	s.LastUpdated = &v
	return s
}

// SetName sets the Name field's value.
// It returns s so that setter calls can be chained.
func (s *Preset) SetName(v string) *Preset {
	s.Name = &v
	return s
}

// SetSettings sets the Settings field's value.
// It returns s so that setter calls can be chained.
func (s *Preset) SetSettings(v *PresetSettings) *Preset {
	s.Settings = v
	return s
}

// SetType sets the Type field's value.
// It returns s so that setter calls can be chained.
func (s *Preset) SetType(v string) *Preset {
	s.Type = &v
	return s
}
18306
// Settings for preset
type PresetSettings struct {
	// Unexported marker; its struct tag carries protocol serialization metadata.
	_ struct{} `type:"structure"`

	// (AudioDescriptions) contains groups of audio encoding settings organized
	// by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions)
	// can contain multiple groups of encoding settings.
	AudioDescriptions []*AudioDescription `locationName:"audioDescriptions" type:"list"`

	// This object holds groups of settings related to captions for one output.
	// For each output that has captions, include one instance of CaptionDescriptions.
	CaptionDescriptions []*CaptionDescriptionPreset `locationName:"captionDescriptions" type:"list"`

	// Container specific settings.
	ContainerSettings *ContainerSettings `locationName:"containerSettings" type:"structure"`

	// VideoDescription contains a group of video encoding settings. The specific
	// video settings depend on the video codec that you choose for the property
	// codec. Include one instance of VideoDescription per output.
	VideoDescription *VideoDescription `locationName:"videoDescription" type:"structure"`
}

// String returns the string representation of the value, rendered by
// awsutil.Prettify. Implements fmt.Stringer.
func (s PresetSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation, satisfying fmt.GoStringer so
// that %#v formatting produces the same output as String.
func (s PresetSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// List elements are validated individually (nil entries are skipped) and each
// failure is recorded with its index, e.g. "AudioDescriptions[2]". Nested
// structures are validated when present. Returns nil when everything is valid.
func (s *PresetSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "PresetSettings"}
	if s.AudioDescriptions != nil {
		for i, v := range s.AudioDescriptions {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AudioDescriptions", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.CaptionDescriptions != nil {
		for i, v := range s.CaptionDescriptions {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionDescriptions", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.ContainerSettings != nil {
		if err := s.ContainerSettings.Validate(); err != nil {
			invalidParams.AddNested("ContainerSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.VideoDescription != nil {
		if err := s.VideoDescription.Validate(); err != nil {
			invalidParams.AddNested("VideoDescription", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAudioDescriptions sets the AudioDescriptions field's value.
// It returns s so that setter calls can be chained.
func (s *PresetSettings) SetAudioDescriptions(v []*AudioDescription) *PresetSettings {
	s.AudioDescriptions = v
	return s
}

// SetCaptionDescriptions sets the CaptionDescriptions field's value.
// It returns s so that setter calls can be chained.
func (s *PresetSettings) SetCaptionDescriptions(v []*CaptionDescriptionPreset) *PresetSettings {
	s.CaptionDescriptions = v
	return s
}

// SetContainerSettings sets the ContainerSettings field's value.
// It returns s so that setter calls can be chained.
func (s *PresetSettings) SetContainerSettings(v *ContainerSettings) *PresetSettings {
	s.ContainerSettings = v
	return s
}

// SetVideoDescription sets the VideoDescription field's value.
// It returns s so that setter calls can be chained.
func (s *PresetSettings) SetVideoDescription(v *VideoDescription) *PresetSettings {
	s.VideoDescription = v
	return s
}
18402
// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value PRORES.
type ProresSettings struct {
	// Unexported marker; its struct tag carries protocol serialization metadata.
	_ struct{} `type:"structure"`

	// This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that
	// you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4
	// sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma
	// sampling. You must specify a value for this setting when your output codec
	// profile supports 4:4:4 chroma sampling. Related Settings: When you set Chroma
	// sampling to Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING), you must choose
	// an output codec profile that supports 4:4:4 chroma sampling. These values
	// for Profile (CodecProfile) support 4:4:4 chroma sampling: Apple ProRes 4444
	// (APPLE_PRORES_4444) or Apple ProRes 4444 XQ (APPLE_PRORES_4444_XQ). When
	// you set Chroma sampling to Preserve 4:4:4 sampling, you must disable all
	// video preprocessors except for Nexguard file marker (PartnerWatermarking).
	// When you set Chroma sampling to Preserve 4:4:4 sampling and use framerate
	// conversion, you must set Frame rate conversion algorithm (FramerateConversionAlgorithm)
	// to Drop duplicate (DUPLICATE_DROP).
	ChromaSampling *string `locationName:"chromaSampling" type:"string" enum:"ProresChromaSampling"`

	// Use Profile (ProResCodecProfile) to specify the type of Apple ProRes codec
	// to use for this output.
	CodecProfile *string `locationName:"codecProfile" type:"string" enum:"ProresCodecProfile"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"ProresFramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"ProresFramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// Choose the scan line type for the output. Keep the default value, Progressive
	// (PROGRESSIVE) to create a progressive output, regardless of the scan type
	// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
	// to create an output that's interlaced with the same field polarity throughout.
	// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
	// to produce outputs with the same field polarity as the source. For jobs that
	// have multiple inputs, the output field polarity might change over the course
	// of the output. Follow behavior depends on the input scan type. If the source
	// is interlaced, the output will be interlaced with the same polarity as the
	// source. If the source is progressive, the output will be interlaced with
	// top field bottom field first, depending on which of the Follow options you
	// choose.
	InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"ProresInterlaceMode"`

	// Optional. Specify how the service determines the pixel aspect ratio (PAR)
	// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
	// uses the PAR from your input video for your output. To specify a different
	// PAR in the console, choose any value other than Follow source. To specify
	// a different PAR by editing the JSON job specification, choose SPECIFIED.
	// When you choose SPECIFIED for this setting, you must also specify values
	// for the parNumerator and parDenominator settings.
	ParControl *string `locationName:"parControl" type:"string" enum:"ProresParControl"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parDenominator is 33.
	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parNumerator is 40.
	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`

	// Use this setting for interlaced outputs, when your output frame rate is half
	// of your input frame rate. In this situation, choose Optimized interlacing
	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
	// case, each progressive frame from the input corresponds to an interlaced
	// field in the output. Keep the default value, Basic interlacing (INTERLACED),
	// for all other output frame rates. With basic interlacing, MediaConvert performs
	// any frame rate conversion first and then interlaces the frames. When you
	// choose Optimized interlacing and you set your output frame rate to a value
	// that isn't suitable for optimized interlacing, MediaConvert automatically
	// falls back to basic interlacing. Required settings: To use optimized interlacing,
	// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
	// use optimized interlacing for hard telecine outputs. You must also set Interlace
	// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
	ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"ProresScanTypeConversionMode"`

	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
	// second (fps). Enable slow PAL to create a 25 fps output. When you enable
	// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
	// your audio to keep it synchronized with the video. Note that enabling this
	// setting will slightly reduce the duration of your video. Required settings:
	// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
	// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
	// 1.
	SlowPal *string `locationName:"slowPal" type:"string" enum:"ProresSlowPal"`

	// When you do frame rate conversion from 23.976 frames per second (fps) to
	// 29.97 fps, and your output scan type is interlaced, you can optionally enable
	// hard telecine (HARD) to create a smoother picture. When you keep the default
	// value, None (NONE), MediaConvert does a standard frame rate conversion to
	// 29.97 without doing anything with the field polarity to create a smoother
	// picture.
	Telecine *string `locationName:"telecine" type:"string" enum:"ProresTelecine"`
}

// String returns the string representation of the value, rendered by
// awsutil.Prettify. Implements fmt.Stringer.
func (s ProresSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation, satisfying fmt.GoStringer so
// that %#v formatting produces the same output as String.
func (s ProresSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the min:"1" constraints on the frame rate and PAR fraction
// components; nil fields are treated as "not set" and skipped. Returns nil
// when all constraints are satisfied.
func (s *ProresSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ProresSettings"}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1))
	}
	if s.ParDenominator != nil && *s.ParDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1))
	}
	if s.ParNumerator != nil && *s.ParNumerator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetChromaSampling sets the ChromaSampling field's value.
// It returns s so that setter calls can be chained.
func (s *ProresSettings) SetChromaSampling(v string) *ProresSettings {
	s.ChromaSampling = &v
	return s
}

// SetCodecProfile sets the CodecProfile field's value.
// It returns s so that setter calls can be chained.
func (s *ProresSettings) SetCodecProfile(v string) *ProresSettings {
	s.CodecProfile = &v
	return s
}

// SetFramerateControl sets the FramerateControl field's value.
// It returns s so that setter calls can be chained.
func (s *ProresSettings) SetFramerateControl(v string) *ProresSettings {
	s.FramerateControl = &v
	return s
}

// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value.
// It returns s so that setter calls can be chained.
func (s *ProresSettings) SetFramerateConversionAlgorithm(v string) *ProresSettings {
	s.FramerateConversionAlgorithm = &v
	return s
}

// SetFramerateDenominator sets the FramerateDenominator field's value.
// It returns s so that setter calls can be chained.
func (s *ProresSettings) SetFramerateDenominator(v int64) *ProresSettings {
	s.FramerateDenominator = &v
	return s
}

// SetFramerateNumerator sets the FramerateNumerator field's value.
// It returns s so that setter calls can be chained.
func (s *ProresSettings) SetFramerateNumerator(v int64) *ProresSettings {
	s.FramerateNumerator = &v
	return s
}

// SetInterlaceMode sets the InterlaceMode field's value.
// It returns s so that setter calls can be chained.
func (s *ProresSettings) SetInterlaceMode(v string) *ProresSettings {
	s.InterlaceMode = &v
	return s
}

// SetParControl sets the ParControl field's value.
// It returns s so that setter calls can be chained.
func (s *ProresSettings) SetParControl(v string) *ProresSettings {
	s.ParControl = &v
	return s
}

// SetParDenominator sets the ParDenominator field's value.
// It returns s so that setter calls can be chained.
func (s *ProresSettings) SetParDenominator(v int64) *ProresSettings {
	s.ParDenominator = &v
	return s
}

// SetParNumerator sets the ParNumerator field's value.
// It returns s so that setter calls can be chained.
func (s *ProresSettings) SetParNumerator(v int64) *ProresSettings {
	s.ParNumerator = &v
	return s
}

// SetScanTypeConversionMode sets the ScanTypeConversionMode field's value.
// It returns s so that setter calls can be chained.
func (s *ProresSettings) SetScanTypeConversionMode(v string) *ProresSettings {
	s.ScanTypeConversionMode = &v
	return s
}

// SetSlowPal sets the SlowPal field's value.
// It returns s so that setter calls can be chained.
func (s *ProresSettings) SetSlowPal(v string) *ProresSettings {
	s.SlowPal = &v
	return s
}

// SetTelecine sets the Telecine field's value.
// It returns s so that setter calls can be chained.
func (s *ProresSettings) SetTelecine(v string) *ProresSettings {
	s.Telecine = &v
	return s
}
18651
// You can use queues to manage the resources that are available to your AWS
// account for running multiple transcoding jobs at the same time. If you don't
// specify a queue, the service sends all jobs through the default queue. For
// more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html.
type Queue struct {
	// Unexported marker; its struct tag carries protocol serialization metadata.
	_ struct{} `type:"structure"`

	// An identifier for this resource that is unique within all of AWS.
	Arn *string `locationName:"arn" type:"string"`

	// The timestamp in epoch seconds for when you created the queue.
	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// An optional description that you create for each queue.
	Description *string `locationName:"description" type:"string"`

	// The timestamp in epoch seconds for when you most recently updated the queue.
	LastUpdated *time.Time `locationName:"lastUpdated" type:"timestamp" timestampFormat:"unixTimestamp"`

	// A name that you create for each queue. Each name must be unique within your
	// account.
	//
	// Name is a required field
	Name *string `locationName:"name" type:"string" required:"true"`

	// Specifies whether the pricing plan for the queue is on-demand or reserved.
	// For on-demand, you pay per minute, billed in increments of .01 minute. For
	// reserved, you pay for the transcoding capacity of the entire queue, regardless
	// of how much or how little you use it. Reserved pricing requires a 12-month
	// commitment.
	PricingPlan *string `locationName:"pricingPlan" type:"string" enum:"PricingPlan"`

	// The estimated number of jobs with a PROGRESSING status.
	ProgressingJobsCount *int64 `locationName:"progressingJobsCount" type:"integer"`

	// Details about the pricing plan for your reserved queue. Required for reserved
	// queues and not applicable to on-demand queues.
	ReservationPlan *ReservationPlan `locationName:"reservationPlan" type:"structure"`

	// Queues can be ACTIVE or PAUSED. If you pause a queue, the service won't begin
	// processing jobs in that queue. Jobs that are running when you pause the queue
	// continue to run until they finish or result in an error.
	Status *string `locationName:"status" type:"string" enum:"QueueStatus"`

	// The estimated number of jobs with a SUBMITTED status.
	SubmittedJobsCount *int64 `locationName:"submittedJobsCount" type:"integer"`

	// Specifies whether this on-demand queue is system or custom. System queues
	// are built in. You can't modify or delete system queues. You can create and
	// modify custom queues.
	Type *string `locationName:"type" type:"string" enum:"Type"`
}

// String returns the string representation of the value, rendered by
// awsutil.Prettify. Implements fmt.Stringer.
func (s Queue) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation, satisfying fmt.GoStringer so
// that %#v formatting produces the same output as String.
func (s Queue) GoString() string {
	return s.String()
}

// SetArn sets the Arn field's value.
// It returns s so that setter calls can be chained.
func (s *Queue) SetArn(v string) *Queue {
	s.Arn = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
// It returns s so that setter calls can be chained.
func (s *Queue) SetCreatedAt(v time.Time) *Queue {
	s.CreatedAt = &v
	return s
}

// SetDescription sets the Description field's value.
// It returns s so that setter calls can be chained.
func (s *Queue) SetDescription(v string) *Queue {
	s.Description = &v
	return s
}

// SetLastUpdated sets the LastUpdated field's value.
// It returns s so that setter calls can be chained.
func (s *Queue) SetLastUpdated(v time.Time) *Queue {
	s.LastUpdated = &v
	return s
}

// SetName sets the Name field's value.
// It returns s so that setter calls can be chained.
func (s *Queue) SetName(v string) *Queue {
	s.Name = &v
	return s
}

// SetPricingPlan sets the PricingPlan field's value.
// It returns s so that setter calls can be chained.
func (s *Queue) SetPricingPlan(v string) *Queue {
	s.PricingPlan = &v
	return s
}

// SetProgressingJobsCount sets the ProgressingJobsCount field's value.
// It returns s so that setter calls can be chained.
func (s *Queue) SetProgressingJobsCount(v int64) *Queue {
	s.ProgressingJobsCount = &v
	return s
}

// SetReservationPlan sets the ReservationPlan field's value.
// It returns s so that setter calls can be chained.
func (s *Queue) SetReservationPlan(v *ReservationPlan) *Queue {
	s.ReservationPlan = v
	return s
}

// SetStatus sets the Status field's value.
// It returns s so that setter calls can be chained.
func (s *Queue) SetStatus(v string) *Queue {
	s.Status = &v
	return s
}

// SetSubmittedJobsCount sets the SubmittedJobsCount field's value.
// It returns s so that setter calls can be chained.
func (s *Queue) SetSubmittedJobsCount(v int64) *Queue {
	s.SubmittedJobsCount = &v
	return s
}

// SetType sets the Type field's value.
// It returns s so that setter calls can be chained.
func (s *Queue) SetType(v string) *Queue {
	s.Type = &v
	return s
}
18780
// Description of the source and destination queues between which the job has
// moved, along with the timestamp of the move
type QueueTransition struct {
	// Unexported marker; its struct tag carries protocol serialization metadata.
	_ struct{} `type:"structure"`

	// The queue that the job was on after the transition.
	DestinationQueue *string `locationName:"destinationQueue" type:"string"`

	// The queue that the job was on before the transition.
	SourceQueue *string `locationName:"sourceQueue" type:"string"`

	// The time, in Unix epoch format, that the job moved from the source queue
	// to the destination queue.
	Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"unixTimestamp"`
}

// String returns the string representation of the value, rendered by
// awsutil.Prettify. Implements fmt.Stringer.
func (s QueueTransition) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation, satisfying fmt.GoStringer so
// that %#v formatting produces the same output as String.
func (s QueueTransition) GoString() string {
	return s.String()
}

// SetDestinationQueue sets the DestinationQueue field's value.
// It returns s so that setter calls can be chained.
func (s *QueueTransition) SetDestinationQueue(v string) *QueueTransition {
	s.DestinationQueue = &v
	return s
}

// SetSourceQueue sets the SourceQueue field's value.
// It returns s so that setter calls can be chained.
func (s *QueueTransition) SetSourceQueue(v string) *QueueTransition {
	s.SourceQueue = &v
	return s
}

// SetTimestamp sets the Timestamp field's value.
// It returns s so that setter calls can be chained.
func (s *QueueTransition) SetTimestamp(v time.Time) *QueueTransition {
	s.Timestamp = &v
	return s
}
18824
// Use Rectangle to identify a specific area of the video frame.
type Rectangle struct {
	_ struct{} `type:"structure"`

	// Height of rectangle in pixels. Specify only even numbers.
	Height *int64 `locationName:"height" min:"2" type:"integer"`

	// Width of rectangle in pixels. Specify only even numbers.
	Width *int64 `locationName:"width" min:"2" type:"integer"`

	// The distance, in pixels, between the rectangle and the left edge of the video
	// frame. Specify only even numbers.
	X *int64 `locationName:"x" type:"integer"`

	// The distance, in pixels, between the rectangle and the top edge of the video
	// frame. Specify only even numbers.
	Y *int64 `locationName:"y" type:"integer"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s Rectangle) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s Rectangle) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// The checks mirror the struct tags (min:"2" on Height and Width); the
// even-number requirement stated in the field docs is enforced server-side,
// not here. Returns an ErrInvalidParams listing every violation, or nil.
func (s *Rectangle) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Rectangle"}
	if s.Height != nil && *s.Height < 2 {
		invalidParams.Add(request.NewErrParamMinValue("Height", 2))
	}
	if s.Width != nil && *s.Width < 2 {
		invalidParams.Add(request.NewErrParamMinValue("Width", 2))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetHeight sets the Height field's value.
// It returns s so that setter calls can be chained.
func (s *Rectangle) SetHeight(v int64) *Rectangle {
	s.Height = &v
	return s
}

// SetWidth sets the Width field's value.
func (s *Rectangle) SetWidth(v int64) *Rectangle {
	s.Width = &v
	return s
}

// SetX sets the X field's value.
func (s *Rectangle) SetX(v int64) *Rectangle {
	s.X = &v
	return s
}

// SetY sets the Y field's value.
func (s *Rectangle) SetY(v int64) *Rectangle {
	s.Y = &v
	return s
}
18893
// Use Manual audio remixing (RemixSettings) to adjust audio levels for each
// audio channel in each output of your job. With audio remixing, you can output
// more or fewer audio channels than your input audio source provides.
type RemixSettings struct {
	_ struct{} `type:"structure"`

	// Channel mapping (ChannelMapping) contains the group of fields that hold the
	// remixing value for each channel, in dB. Specify remix values to indicate
	// how much of the content from your input audio channel you want in your output
	// audio channels. Each instance of the InputChannels or InputChannelsFineTune
	// array specifies these values for one output channel. Use one instance of
	// this array for each output channel. In the console, each array corresponds
	// to a column in the graphical depiction of the mapping matrix. The rows of
	// the graphical matrix correspond to input channels. Valid values are within
	// the range from -60 (mute) through 6. A setting of 0 passes the input channel
	// unchanged to the output channel (no attenuation or amplification). Use InputChannels
	// or InputChannelsFineTune to specify your remix values. Don't use both.
	ChannelMapping *ChannelMapping `locationName:"channelMapping" type:"structure"`

	// Specify the number of audio channels from your input that you want to use
	// in your output. With remixing, you might combine or split the data in these
	// channels, so the number of channels in your final output might be different.
	// If you are doing both input channel mapping and output channel mapping, the
	// number of output channels in your input mapping must be the same as the number
	// of input channels in your output mapping.
	ChannelsIn *int64 `locationName:"channelsIn" min:"1" type:"integer"`

	// Specify the number of channels in this output after remixing. Valid values:
	// 1, 2, 4, 6, 8... 64. (1 and even numbers to 64.) If you are doing both input
	// channel mapping and output channel mapping, the number of output channels
	// in your input mapping must be the same as the number of input channels in
	// your output mapping.
	ChannelsOut *int64 `locationName:"channelsOut" min:"1" type:"integer"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s RemixSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s RemixSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// The checks mirror the min:"1" tags on ChannelsIn and ChannelsOut; the
// allowed-values list for ChannelsOut (1 and even numbers to 64) is enforced
// server-side. Returns an ErrInvalidParams listing every violation, or nil.
func (s *RemixSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RemixSettings"}
	if s.ChannelsIn != nil && *s.ChannelsIn < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ChannelsIn", 1))
	}
	if s.ChannelsOut != nil && *s.ChannelsOut < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ChannelsOut", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetChannelMapping sets the ChannelMapping field's value.
// It returns s so that setter calls can be chained.
func (s *RemixSettings) SetChannelMapping(v *ChannelMapping) *RemixSettings {
	s.ChannelMapping = v
	return s
}

// SetChannelsIn sets the ChannelsIn field's value.
func (s *RemixSettings) SetChannelsIn(v int64) *RemixSettings {
	s.ChannelsIn = &v
	return s
}

// SetChannelsOut sets the ChannelsOut field's value.
func (s *RemixSettings) SetChannelsOut(v int64) *RemixSettings {
	s.ChannelsOut = &v
	return s
}
18972
// Details about the pricing plan for your reserved queue. Required for reserved
// queues and not applicable to on-demand queues.
//
// This is the read-side shape returned by the service; the corresponding
// request-side shape is ReservationPlanSettings.
type ReservationPlan struct {
	_ struct{} `type:"structure"`

	// The length of the term of your reserved queue pricing plan commitment.
	Commitment *string `locationName:"commitment" type:"string" enum:"Commitment"`

	// The timestamp in epoch seconds for when the current pricing plan term for
	// this reserved queue expires.
	ExpiresAt *time.Time `locationName:"expiresAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// The timestamp in epoch seconds for when you set up the current pricing plan
	// for this reserved queue.
	PurchasedAt *time.Time `locationName:"purchasedAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// Specifies whether the term of your reserved queue pricing plan is automatically
	// extended (AUTO_RENEW) or expires (EXPIRE) at the end of the term.
	RenewalType *string `locationName:"renewalType" type:"string" enum:"RenewalType"`

	// Specifies the number of reserved transcode slots (RTS) for this queue. The
	// number of RTS determines how many jobs the queue can process in parallel;
	// each RTS can process one job at a time. When you increase this number, you
	// extend your existing commitment with a new 12-month commitment for a larger
	// number of RTS. The new commitment begins when you purchase the additional
	// capacity. You can't decrease the number of RTS in your reserved queue.
	ReservedSlots *int64 `locationName:"reservedSlots" type:"integer"`

	// Specifies whether the pricing plan for your reserved queue is ACTIVE or EXPIRED.
	Status *string `locationName:"status" type:"string" enum:"ReservationPlanStatus"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s ReservationPlan) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s ReservationPlan) GoString() string {
	return s.String()
}

// SetCommitment sets the Commitment field's value.
// It returns s so that setter calls can be chained.
func (s *ReservationPlan) SetCommitment(v string) *ReservationPlan {
	s.Commitment = &v
	return s
}

// SetExpiresAt sets the ExpiresAt field's value.
func (s *ReservationPlan) SetExpiresAt(v time.Time) *ReservationPlan {
	s.ExpiresAt = &v
	return s
}

// SetPurchasedAt sets the PurchasedAt field's value.
func (s *ReservationPlan) SetPurchasedAt(v time.Time) *ReservationPlan {
	s.PurchasedAt = &v
	return s
}

// SetRenewalType sets the RenewalType field's value.
func (s *ReservationPlan) SetRenewalType(v string) *ReservationPlan {
	s.RenewalType = &v
	return s
}

// SetReservedSlots sets the ReservedSlots field's value.
func (s *ReservationPlan) SetReservedSlots(v int64) *ReservationPlan {
	s.ReservedSlots = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *ReservationPlan) SetStatus(v string) *ReservationPlan {
	s.Status = &v
	return s
}
19050
// Details about the pricing plan for your reserved queue. Required for reserved
// queues and not applicable to on-demand queues.
//
// This is the request-side shape; all three fields are required (see Validate).
type ReservationPlanSettings struct {
	_ struct{} `type:"structure"`

	// The length of the term of your reserved queue pricing plan commitment.
	//
	// Commitment is a required field
	Commitment *string `locationName:"commitment" type:"string" required:"true" enum:"Commitment"`

	// Specifies whether the term of your reserved queue pricing plan is automatically
	// extended (AUTO_RENEW) or expires (EXPIRE) at the end of the term. When your
	// term is auto renewed, you extend your commitment by 12 months from the auto
	// renew date. You can cancel this commitment.
	//
	// RenewalType is a required field
	RenewalType *string `locationName:"renewalType" type:"string" required:"true" enum:"RenewalType"`

	// Specifies the number of reserved transcode slots (RTS) for this queue. The
	// number of RTS determines how many jobs the queue can process in parallel;
	// each RTS can process one job at a time. You can't decrease the number of
	// RTS in your reserved queue. You can increase the number of RTS by extending
	// your existing commitment with a new 12-month commitment for the larger number.
	// The new commitment begins when you purchase the additional capacity. You
	// can't cancel your commitment or revert to your original commitment after
	// you increase the capacity.
	//
	// ReservedSlots is a required field
	ReservedSlots *int64 `locationName:"reservedSlots" type:"integer" required:"true"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s ReservationPlanSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s ReservationPlanSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the required:"true" tags on all three fields and returns an
// ErrInvalidParams listing every missing field, or nil if the struct is valid.
func (s *ReservationPlanSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ReservationPlanSettings"}
	if s.Commitment == nil {
		invalidParams.Add(request.NewErrParamRequired("Commitment"))
	}
	if s.RenewalType == nil {
		invalidParams.Add(request.NewErrParamRequired("RenewalType"))
	}
	if s.ReservedSlots == nil {
		invalidParams.Add(request.NewErrParamRequired("ReservedSlots"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCommitment sets the Commitment field's value.
// It returns s so that setter calls can be chained.
func (s *ReservationPlanSettings) SetCommitment(v string) *ReservationPlanSettings {
	s.Commitment = &v
	return s
}

// SetRenewalType sets the RenewalType field's value.
func (s *ReservationPlanSettings) SetRenewalType(v string) *ReservationPlanSettings {
	s.RenewalType = &v
	return s
}

// SetReservedSlots sets the ReservedSlots field's value.
func (s *ReservationPlanSettings) SetReservedSlots(v int64) *ReservationPlanSettings {
	s.ReservedSlots = &v
	return s
}
19128
// The Amazon Resource Name (ARN) and tags for an AWS Elemental MediaConvert
// resource.
type ResourceTags struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the resource.
	Arn *string `locationName:"arn" type:"string"`

	// The tags for the resource.
	Tags map[string]*string `locationName:"tags" type:"map"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s ResourceTags) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s ResourceTags) GoString() string {
	return s.String()
}

// SetArn sets the Arn field's value.
// It returns s so that setter calls can be chained.
func (s *ResourceTags) SetArn(v string) *ResourceTags {
	s.Arn = &v
	return s
}

// SetTags sets the Tags field's value.
// The map is stored directly (not copied), so the caller shares it with s.
func (s *ResourceTags) SetTags(v map[string]*string) *ResourceTags {
	s.Tags = v
	return s
}
19162
// Optional. Have MediaConvert automatically apply Amazon S3 access control
// for the outputs in this output group. When you don't use this setting, S3
// automatically applies the default access control list PRIVATE.
type S3DestinationAccessControl struct {
	_ struct{} `type:"structure"`

	// Choose an Amazon S3 canned ACL for MediaConvert to apply to this output.
	CannedAcl *string `locationName:"cannedAcl" type:"string" enum:"S3ObjectCannedAcl"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s S3DestinationAccessControl) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s S3DestinationAccessControl) GoString() string {
	return s.String()
}

// SetCannedAcl sets the CannedAcl field's value.
// It returns s so that setter calls can be chained.
func (s *S3DestinationAccessControl) SetCannedAcl(v string) *S3DestinationAccessControl {
	s.CannedAcl = &v
	return s
}
19188
// Settings associated with S3 destination
type S3DestinationSettings struct {
	_ struct{} `type:"structure"`

	// Optional. Have MediaConvert automatically apply Amazon S3 access control
	// for the outputs in this output group. When you don't use this setting, S3
	// automatically applies the default access control list PRIVATE.
	AccessControl *S3DestinationAccessControl `locationName:"accessControl" type:"structure"`

	// Settings for how your job outputs are encrypted as they are uploaded to Amazon
	// S3.
	Encryption *S3EncryptionSettings `locationName:"encryption" type:"structure"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s S3DestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s S3DestinationSettings) GoString() string {
	return s.String()
}

// SetAccessControl sets the AccessControl field's value.
// It returns s so that setter calls can be chained.
func (s *S3DestinationSettings) SetAccessControl(v *S3DestinationAccessControl) *S3DestinationSettings {
	s.AccessControl = v
	return s
}

// SetEncryption sets the Encryption field's value.
func (s *S3DestinationSettings) SetEncryption(v *S3EncryptionSettings) *S3DestinationSettings {
	s.Encryption = v
	return s
}
19224
// Settings for how your job outputs are encrypted as they are uploaded to Amazon
// S3.
type S3EncryptionSettings struct {
	_ struct{} `type:"structure"`

	// Specify how you want your data keys managed. AWS uses data keys to encrypt
	// your content. AWS also encrypts the data keys themselves, using a customer
	// master key (CMK), and then stores the encrypted data keys alongside your
	// encrypted content. Use this setting to specify which AWS service manages
	// the CMK. For simplest set up, choose Amazon S3 (SERVER_SIDE_ENCRYPTION_S3).
	// If you want your master key to be managed by AWS Key Management Service (KMS),
	// choose AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). By default, when you choose
	// AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with
	// Amazon S3 to encrypt your data keys. You can optionally choose to specify
	// a different, customer managed CMK. Do so by specifying the Amazon Resource
	// Name (ARN) of the key for the setting KMS ARN (kmsKeyArn).
	EncryptionType *string `locationName:"encryptionType" type:"string" enum:"S3ServerSideEncryptionType"`

	// Optionally, specify the customer master key (CMK) that you want to use to
	// encrypt the data key that AWS uses to encrypt your output content. Enter
	// the Amazon Resource Name (ARN) of the CMK. To use this setting, you must
	// also set Server-side encryption (S3ServerSideEncryptionType) to AWS KMS (SERVER_SIDE_ENCRYPTION_KMS).
	// If you set Server-side encryption to AWS KMS but don't specify a CMK here,
	// AWS uses the AWS managed CMK associated with Amazon S3.
	KmsKeyArn *string `locationName:"kmsKeyArn" type:"string"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s S3EncryptionSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s S3EncryptionSettings) GoString() string {
	return s.String()
}

// SetEncryptionType sets the EncryptionType field's value.
// It returns s so that setter calls can be chained.
func (s *S3EncryptionSettings) SetEncryptionType(v string) *S3EncryptionSettings {
	s.EncryptionType = &v
	return s
}

// SetKmsKeyArn sets the KmsKeyArn field's value.
func (s *S3EncryptionSettings) SetKmsKeyArn(v string) *S3EncryptionSettings {
	s.KmsKeyArn = &v
	return s
}
19273
// Settings related to SCC captions. SCC is a sidecar format that holds captions
// in a file that is separate from the video container. Set up sidecar captions
// in the same output group, but different output from your video. For more
// information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set destinationType to SCC.
type SccDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Set Framerate (SccDestinationFramerate) to make sure that the captions and
	// the video are synchronized in the output. Specify a frame rate that matches
	// the frame rate of the associated video. If the video frame rate is 29.97,
	// choose 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has
	// video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97
	// non-dropframe (FRAMERATE_29_97_NON_DROPFRAME).
	Framerate *string `locationName:"framerate" type:"string" enum:"SccDestinationFramerate"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s SccDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s SccDestinationSettings) GoString() string {
	return s.String()
}

// SetFramerate sets the Framerate field's value.
// It returns s so that setter calls can be chained.
func (s *SccDestinationSettings) SetFramerate(v string) *SccDestinationSettings {
	s.Framerate = &v
	return s
}
19307
// If your output group type is HLS, DASH, or Microsoft Smooth, use these settings
// when doing DRM encryption with a SPEKE-compliant key provider. If your output
// group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
type SpekeKeyProvider struct {
	_ struct{} `type:"structure"`

	// If you want your key provider to encrypt the content keys that it provides
	// to MediaConvert, set up a certificate with a master key using AWS Certificate
	// Manager. Specify the certificate's Amazon Resource Name (ARN) here.
	CertificateArn *string `locationName:"certificateArn" type:"string"`

	// Specify the resource ID that your SPEKE-compliant key provider uses to identify
	// this content.
	ResourceId *string `locationName:"resourceId" type:"string"`

	// Relates to SPEKE implementation. DRM system identifiers. DASH output groups
	// support a max of two system ids. Other group types support one system id.
	// See https://dashif.org/identifiers/content_protection/ for more details.
	SystemIds []*string `locationName:"systemIds" type:"list"`

	// Specify the URL to the key server that your SPEKE-compliant DRM key provider
	// uses to provide keys for encrypting your content.
	Url *string `locationName:"url" type:"string"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s SpekeKeyProvider) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s SpekeKeyProvider) GoString() string {
	return s.String()
}

// SetCertificateArn sets the CertificateArn field's value.
// It returns s so that setter calls can be chained.
func (s *SpekeKeyProvider) SetCertificateArn(v string) *SpekeKeyProvider {
	s.CertificateArn = &v
	return s
}

// SetResourceId sets the ResourceId field's value.
func (s *SpekeKeyProvider) SetResourceId(v string) *SpekeKeyProvider {
	s.ResourceId = &v
	return s
}

// SetSystemIds sets the SystemIds field's value.
// The slice is stored directly (not copied), so the caller shares it with s.
func (s *SpekeKeyProvider) SetSystemIds(v []*string) *SpekeKeyProvider {
	s.SystemIds = v
	return s
}

// SetUrl sets the Url field's value.
func (s *SpekeKeyProvider) SetUrl(v string) *SpekeKeyProvider {
	s.Url = &v
	return s
}
19366
// If your output group type is CMAF, use these settings when doing DRM encryption
// with a SPEKE-compliant key provider. If your output group type is HLS, DASH,
// or Microsoft Smooth, use the SpekeKeyProvider settings instead.
type SpekeKeyProviderCmaf struct {
	_ struct{} `type:"structure"`

	// If you want your key provider to encrypt the content keys that it provides
	// to MediaConvert, set up a certificate with a master key using AWS Certificate
	// Manager. Specify the certificate's Amazon Resource Name (ARN) here.
	CertificateArn *string `locationName:"certificateArn" type:"string"`

	// Specify the DRM system IDs that you want signaled in the DASH manifest that
	// MediaConvert creates as part of this CMAF package. The DASH manifest can
	// currently signal up to three system IDs. For more information, see https://dashif.org/identifiers/content_protection/.
	DashSignaledSystemIds []*string `locationName:"dashSignaledSystemIds" type:"list"`

	// Specify the DRM system ID that you want signaled in the HLS manifest that
	// MediaConvert creates as part of this CMAF package. The HLS manifest can currently
	// signal only one system ID. For more information, see https://dashif.org/identifiers/content_protection/.
	HlsSignaledSystemIds []*string `locationName:"hlsSignaledSystemIds" type:"list"`

	// Specify the resource ID that your SPEKE-compliant key provider uses to identify
	// this content.
	ResourceId *string `locationName:"resourceId" type:"string"`

	// Specify the URL to the key server that your SPEKE-compliant DRM key provider
	// uses to provide keys for encrypting your content.
	Url *string `locationName:"url" type:"string"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s SpekeKeyProviderCmaf) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s SpekeKeyProviderCmaf) GoString() string {
	return s.String()
}

// SetCertificateArn sets the CertificateArn field's value.
// It returns s so that setter calls can be chained.
func (s *SpekeKeyProviderCmaf) SetCertificateArn(v string) *SpekeKeyProviderCmaf {
	s.CertificateArn = &v
	return s
}

// SetDashSignaledSystemIds sets the DashSignaledSystemIds field's value.
// The slice is stored directly (not copied), so the caller shares it with s.
func (s *SpekeKeyProviderCmaf) SetDashSignaledSystemIds(v []*string) *SpekeKeyProviderCmaf {
	s.DashSignaledSystemIds = v
	return s
}

// SetHlsSignaledSystemIds sets the HlsSignaledSystemIds field's value.
func (s *SpekeKeyProviderCmaf) SetHlsSignaledSystemIds(v []*string) *SpekeKeyProviderCmaf {
	s.HlsSignaledSystemIds = v
	return s
}

// SetResourceId sets the ResourceId field's value.
func (s *SpekeKeyProviderCmaf) SetResourceId(v string) *SpekeKeyProviderCmaf {
	s.ResourceId = &v
	return s
}

// SetUrl sets the Url field's value.
func (s *SpekeKeyProviderCmaf) SetUrl(v string) *SpekeKeyProviderCmaf {
	s.Url = &v
	return s
}
19436
// SRT Destination Settings
type SrtDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Choose Enabled (ENABLED) to have MediaConvert use the font style, color,
	// and position information from the captions source in the input. Keep the
	// default value, Disabled (DISABLED), for simplified output captions.
	StylePassthrough *string `locationName:"stylePassthrough" type:"string" enum:"SrtStylePassthrough"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s SrtDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s SrtDestinationSettings) GoString() string {
	return s.String()
}

// SetStylePassthrough sets the StylePassthrough field's value.
// It returns s so that setter calls can be chained.
func (s *SrtDestinationSettings) SetStylePassthrough(v string) *SrtDestinationSettings {
	s.StylePassthrough = &v
	return s
}
19462
// Use these settings to set up encryption with a static key provider.
type StaticKeyProvider struct {
	_ struct{} `type:"structure"`

	// Relates to DRM implementation. Sets the value of the KEYFORMAT attribute.
	// Must be 'identity' or a reverse DNS string. May be omitted to indicate an
	// implicit value of 'identity'.
	KeyFormat *string `locationName:"keyFormat" type:"string"`

	// Relates to DRM implementation. Either a single positive integer version value
	// or a slash delimited list of version values (1/2/3).
	KeyFormatVersions *string `locationName:"keyFormatVersions" type:"string"`

	// Relates to DRM implementation. Use a 32-character hexadecimal string to specify
	// Key Value (StaticKeyValue).
	StaticKeyValue *string `locationName:"staticKeyValue" type:"string"`

	// Relates to DRM implementation. The location of the license server used for
	// protecting content.
	Url *string `locationName:"url" type:"string"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s StaticKeyProvider) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s StaticKeyProvider) GoString() string {
	return s.String()
}

// SetKeyFormat sets the KeyFormat field's value.
// It returns s so that setter calls can be chained.
func (s *StaticKeyProvider) SetKeyFormat(v string) *StaticKeyProvider {
	s.KeyFormat = &v
	return s
}

// SetKeyFormatVersions sets the KeyFormatVersions field's value.
func (s *StaticKeyProvider) SetKeyFormatVersions(v string) *StaticKeyProvider {
	s.KeyFormatVersions = &v
	return s
}

// SetStaticKeyValue sets the StaticKeyValue field's value.
func (s *StaticKeyProvider) SetStaticKeyValue(v string) *StaticKeyProvider {
	s.StaticKeyValue = &v
	return s
}

// SetUrl sets the Url field's value.
func (s *StaticKeyProvider) SetUrl(v string) *StaticKeyProvider {
	s.Url = &v
	return s
}
19518
// To add tags to a queue, preset, or job template, send a request with the
// Amazon Resource Name (ARN) of the resource and the tags that you want to
// add.
type TagResourceInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the resource that you want to tag. To get
	// the ARN, send a GET request with the resource name.
	//
	// Arn is a required field
	Arn *string `locationName:"arn" type:"string" required:"true"`

	// The tags that you want to add to the resource. You can tag resources with
	// a key-value pair or with only a key.
	//
	// Tags is a required field
	Tags map[string]*string `locationName:"tags" type:"map" required:"true"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s TagResourceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s TagResourceInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the required:"true" tags on Arn and Tags and returns an
// ErrInvalidParams listing every missing field, or nil if the struct is valid.
func (s *TagResourceInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"}
	if s.Arn == nil {
		invalidParams.Add(request.NewErrParamRequired("Arn"))
	}
	if s.Tags == nil {
		invalidParams.Add(request.NewErrParamRequired("Tags"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetArn sets the Arn field's value.
// It returns s so that setter calls can be chained.
func (s *TagResourceInput) SetArn(v string) *TagResourceInput {
	s.Arn = &v
	return s
}

// SetTags sets the Tags field's value.
// The map is stored directly (not copied), so the caller shares it with s.
func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput {
	s.Tags = v
	return s
}
19575
// A successful request to add tags to a resource returns an OK message.
//
// This shape intentionally has no fields: the TagResource response carries
// no payload beyond the success status.
type TagResourceOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s TagResourceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s TagResourceOutput) GoString() string {
	return s.String()
}
19590
// Settings related to teletext captions. Set up teletext captions in the same
// output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set destinationType to TELETEXT.
type TeletextDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Set pageNumber to the Teletext page number for the destination captions for
	// this output. This value must be a three-digit hexadecimal string; strings
	// ending in -FF are invalid. If you are passing through the entire set of Teletext
	// data, do not use this field.
	PageNumber *string `locationName:"pageNumber" min:"3" type:"string"`

	// Specify the page types for this Teletext page. If you don't specify a value
	// here, the service sets the page type to the default value Subtitle (PAGE_TYPE_SUBTITLE).
	// If you pass through the entire set of Teletext data, don't use this field.
	// When you pass through a set of Teletext pages, your output has the same page
	// types as your input.
	PageTypes []*string `locationName:"pageTypes" type:"list"`
}

// String returns the string representation (rendered by awsutil.Prettify).
func (s TeletextDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is an alias for String.
func (s TeletextDestinationSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Only the min:"3" length on PageNumber is checked client-side; the
// hexadecimal-format and "-FF" restrictions are enforced server-side.
// Returns an ErrInvalidParams describing the violation, or nil.
func (s *TeletextDestinationSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "TeletextDestinationSettings"}
	if s.PageNumber != nil && len(*s.PageNumber) < 3 {
		invalidParams.Add(request.NewErrParamMinLen("PageNumber", 3))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetPageNumber sets the PageNumber field's value.
// It returns s so that setter calls can be chained.
func (s *TeletextDestinationSettings) SetPageNumber(v string) *TeletextDestinationSettings {
	s.PageNumber = &v
	return s
}

// SetPageTypes sets the PageTypes field's value.
// The slice is stored directly (not copied), so the caller shares it with s.
func (s *TeletextDestinationSettings) SetPageTypes(v []*string) *TeletextDestinationSettings {
	s.PageTypes = v
	return s
}
19646
// Settings specific to Teletext caption sources, including Page number.
type TeletextSourceSettings struct {
	_ struct{} `type:"structure"`

	// Use Page Number (PageNumber) to specify the three-digit hexadecimal page
	// number that will be used for Teletext captions. Do not use this setting if
	// you are passing through teletext from the input source to output.
	PageNumber *string `locationName:"pageNumber" min:"3" type:"string"`
}

// String returns the string representation
func (s TeletextSourceSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TeletextSourceSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns a request.ErrInvalidParams describing any violations, or nil.
func (s *TeletextSourceSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "TeletextSourceSettings"}
	// Enforce the min:"3" tag: a page number must be at least three characters.
	if s.PageNumber != nil && len(*s.PageNumber) < 3 {
		invalidParams.Add(request.NewErrParamMinLen("PageNumber", 3))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetPageNumber sets the PageNumber field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *TeletextSourceSettings) SetPageNumber(v string) *TeletextSourceSettings {
	s.PageNumber = &v
	return s
}
19685
// Settings for burning the output timecode and specified prefix into the output.
type TimecodeBurnin struct {
	_ struct{} `type:"structure"`

	// Use Font Size (FontSize) to set the font size of any burned-in timecode.
	// Valid values are 10, 16, 32, 48.
	FontSize *int64 `locationName:"fontSize" min:"10" type:"integer"`

	// Use Position (Position) under Timecode burn-in (TimecodeBurnIn) to
	// specify the location of the burned-in timecode on output video.
	Position *string `locationName:"position" type:"string" enum:"TimecodeBurninPosition"`

	// Use Prefix (Prefix) to place ASCII characters before any burned-in timecode.
	// For example, a prefix of "EZ-" will result in the timecode "EZ-00:00:00:00".
	// Provide either the characters themselves or the ASCII code equivalents. The
	// supported range of characters is 0x20 through 0x7e. This includes letters,
	// numbers, and all special characters represented on a standard English keyboard.
	Prefix *string `locationName:"prefix" type:"string"`
}

// String returns the string representation
func (s TimecodeBurnin) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TimecodeBurnin) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns a request.ErrInvalidParams describing any violations, or nil.
func (s *TimecodeBurnin) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "TimecodeBurnin"}
	// Enforce the min:"10" tag; only the lower bound is checked client-side.
	if s.FontSize != nil && *s.FontSize < 10 {
		invalidParams.Add(request.NewErrParamMinValue("FontSize", 10))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetFontSize sets the FontSize field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *TimecodeBurnin) SetFontSize(v int64) *TimecodeBurnin {
	s.FontSize = &v
	return s
}

// SetPosition sets the Position field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *TimecodeBurnin) SetPosition(v string) *TimecodeBurnin {
	s.Position = &v
	return s
}

// SetPrefix sets the Prefix field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *TimecodeBurnin) SetPrefix(v string) *TimecodeBurnin {
	s.Prefix = &v
	return s
}
19746
// These settings control how the service handles timecodes throughout the job.
// These settings don't affect input clipping.
type TimecodeConfig struct {
	_ struct{} `type:"structure"`

	// If you use an editing platform that relies on an anchor timecode, use Anchor
	// Timecode (Anchor) to specify a timecode that will match the input video frame
	// to the output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF)
	// or (HH:MM:SS;FF). This setting ignores frame rate conversion. System behavior
	// for Anchor Timecode varies depending on your setting for Source (TimecodeSource).
	// * If Source (TimecodeSource) is set to Specified Start (SPECIFIEDSTART),
	// the first input frame is the specified value in Start Timecode (Start). Anchor
	// Timecode (Anchor) and Start Timecode (Start) are used to calculate output
	// timecode. * If Source (TimecodeSource) is set to Start at 0 (ZEROBASED) the
	// first frame is 00:00:00:00. * If Source (TimecodeSource) is set to Embedded
	// (EMBEDDED), the first frame is the timecode value on the first input frame
	// of the input.
	Anchor *string `locationName:"anchor" type:"string"`

	// Use Source (TimecodeSource) to set how timecodes are handled within this
	// job. To make sure that your video, audio, captions, and markers are synchronized
	// and that time-based features, such as image inserter, work correctly, choose
	// the Timecode source option that matches your assets. All timecodes are in
	// a 24-hour format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) -
	// Use the timecode that is in the input video. If no embedded timecode is in
	// the source, the service will use Start at 0 (ZEROBASED) instead. * Start
	// at 0 (ZEROBASED) - Set the timecode of the initial frame to 00:00:00:00.
	// * Specified Start (SPECIFIEDSTART) - Set the timecode of the initial frame
	// to a value other than zero. You use Start timecode (Start) to provide this
	// value.
	Source *string `locationName:"source" type:"string" enum:"TimecodeSource"`

	// Only use when you set Source (TimecodeSource) to Specified start (SPECIFIEDSTART).
	// Use Start timecode (Start) to specify the timecode for the initial frame.
	// Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF).
	Start *string `locationName:"start" type:"string"`

	// Only applies to outputs that support program-date-time stamp. Use Timestamp
	// offset (TimestampOffset) to overwrite the timecode date without affecting
	// the time and frame number. Provide the new date as a string in the format
	// "yyyy-mm-dd". To use Timestamp offset, you must also enable Insert program-date-time
	// (InsertProgramDateTime) in the output settings. For example, if the date
	// part of your timecodes is 2002-1-25 and you want to change it to one year
	// later, set Timestamp offset (TimestampOffset) to 2003-1-25.
	TimestampOffset *string `locationName:"timestampOffset" type:"string"`
}

// String returns the string representation
func (s TimecodeConfig) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TimecodeConfig) GoString() string {
	return s.String()
}

// SetAnchor sets the Anchor field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *TimecodeConfig) SetAnchor(v string) *TimecodeConfig {
	s.Anchor = &v
	return s
}

// SetSource sets the Source field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *TimecodeConfig) SetSource(v string) *TimecodeConfig {
	s.Source = &v
	return s
}

// SetStart sets the Start field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *TimecodeConfig) SetStart(v string) *TimecodeConfig {
	s.Start = &v
	return s
}

// SetTimestampOffset sets the TimestampOffset field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *TimecodeConfig) SetTimestampOffset(v string) *TimecodeConfig {
	s.TimestampOffset = &v
	return s
}
19826
// Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags
// in any HLS outputs. To include timed metadata, you must enable it here, enable
// it in each output container, and specify tags and timecodes in ID3 insertion
// (Id3Insertion) objects.
type TimedMetadataInsertion struct {
	_ struct{} `type:"structure"`

	// Id3Insertions contains the array of Id3Insertion instances.
	Id3Insertions []*Id3Insertion `locationName:"id3Insertions" type:"list"`
}

// String returns the string representation
func (s TimedMetadataInsertion) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TimedMetadataInsertion) GoString() string {
	return s.String()
}

// SetId3Insertions sets the Id3Insertions field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *TimedMetadataInsertion) SetId3Insertions(v []*Id3Insertion) *TimedMetadataInsertion {
	s.Id3Insertions = v
	return s
}
19853
// Information about when jobs are submitted, started, and finished is specified
// in Unix epoch format in seconds.
type Timing struct {
	_ struct{} `type:"structure"`

	// The time, in Unix epoch format, that the transcoding job finished.
	FinishTime *time.Time `locationName:"finishTime" type:"timestamp" timestampFormat:"unixTimestamp"`

	// The time, in Unix epoch format, that transcoding for the job began.
	StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"unixTimestamp"`

	// The time, in Unix epoch format, that you submitted the job.
	SubmitTime *time.Time `locationName:"submitTime" type:"timestamp" timestampFormat:"unixTimestamp"`
}

// String returns the string representation
func (s Timing) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Timing) GoString() string {
	return s.String()
}

// SetFinishTime sets the FinishTime field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *Timing) SetFinishTime(v time.Time) *Timing {
	s.FinishTime = &v
	return s
}

// SetStartTime sets the StartTime field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *Timing) SetStartTime(v time.Time) *Timing {
	s.StartTime = &v
	return s
}

// SetSubmitTime sets the SubmitTime field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *Timing) SetSubmitTime(v time.Time) *Timing {
	s.SubmitTime = &v
	return s
}
19896
// TooManyRequestsException represents a TooManyRequestsException error
// response from the service. It implements the error interface and carries
// the response metadata (HTTP status code and request ID).
type TooManyRequestsException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s TooManyRequestsException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TooManyRequestsException) GoString() string {
	return s.String()
}

// newErrorTooManyRequestsException wraps the response metadata in a new
// TooManyRequestsException error value.
func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error {
	return &TooManyRequestsException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *TooManyRequestsException) Code() string {
	return "TooManyRequestsException"
}

// Message returns the exception's message.
func (s *TooManyRequestsException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *TooManyRequestsException) OrigErr() error {
	return nil
}

// Error returns the error as a string, combining the exception code and
// message.
func (s *TooManyRequestsException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *TooManyRequestsException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *TooManyRequestsException) RequestID() string {
	return s.RespMetadata.RequestID
}
19951
// Settings specific to caption sources that are specified by track number.
// Currently, this is only IMSC captions in an IMF package. If your caption
// source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead
// of TrackSourceSettings.
type TrackSourceSettings struct {
	_ struct{} `type:"structure"`

	// Use this setting to select a single captions track from a source. Track numbers
	// correspond to the order in the captions source file. For IMF sources, track
	// numbering is based on the order that the captions appear in the CPL. For
	// example, use 1 to select the captions asset that is listed first in the CPL.
	// To include more than one captions track in your job outputs, create multiple
	// input captions selectors. Specify one track per selector.
	TrackNumber *int64 `locationName:"trackNumber" min:"1" type:"integer"`
}

// String returns the string representation
func (s TrackSourceSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TrackSourceSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns a request.ErrInvalidParams describing any violations, or nil.
func (s *TrackSourceSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "TrackSourceSettings"}
	// Enforce the min:"1" tag: track numbers are 1-based.
	if s.TrackNumber != nil && *s.TrackNumber < 1 {
		invalidParams.Add(request.NewErrParamMinValue("TrackNumber", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetTrackNumber sets the TrackNumber field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *TrackSourceSettings) SetTrackNumber(v int64) *TrackSourceSettings {
	s.TrackNumber = &v
	return s
}
19996
// Settings related to TTML captions. TTML is a sidecar format that holds captions
// in a file that is separate from the video container. Set up sidecar captions
// in the same output group, but different output from your video. For more
// information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set destinationType to TTML.
type TtmlDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Pass through style and position information from a TTML-like input source
	// (TTML, IMSC, SMPTE-TT) to the TTML output.
	StylePassthrough *string `locationName:"stylePassthrough" type:"string" enum:"TtmlStylePassthrough"`
}

// String returns the string representation
func (s TtmlDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TtmlDestinationSettings) GoString() string {
	return s.String()
}

// SetStylePassthrough sets the StylePassthrough field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *TtmlDestinationSettings) SetStylePassthrough(v string) *TtmlDestinationSettings {
	s.StylePassthrough = &v
	return s
}
20026
// To remove tags from a resource, send a request with the Amazon Resource Name
// (ARN) of the resource and the keys of the tags that you want to remove.
type UntagResourceInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the resource that you want to remove tags
	// from. To get the ARN, send a GET request with the resource name.
	//
	// Arn is a required field
	Arn *string `location:"uri" locationName:"arn" type:"string" required:"true"`

	// The keys of the tags that you want to remove from the resource.
	TagKeys []*string `locationName:"tagKeys" type:"list"`
}

// String returns the string representation
func (s UntagResourceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UntagResourceInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns a request.ErrInvalidParams aggregating every violation found,
// or nil when the input is valid.
func (s *UntagResourceInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"}
	// Arn is required and, because it is bound to the request URI, must be
	// non-empty.
	if s.Arn == nil {
		invalidParams.Add(request.NewErrParamRequired("Arn"))
	}
	if s.Arn != nil && len(*s.Arn) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Arn", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetArn sets the Arn field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UntagResourceInput) SetArn(v string) *UntagResourceInput {
	s.Arn = &v
	return s
}

// SetTagKeys sets the TagKeys field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput {
	s.TagKeys = v
	return s
}
20079
// A successful request to remove tags from a resource returns an OK message.
// The response body carries no fields beyond the HTTP status.
type UntagResourceOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s UntagResourceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UntagResourceOutput) GoString() string {
	return s.String()
}
20094
// Modify a job template by sending a request with the job template name and
// any of the following that you wish to change: description, category, and
// queue.
type UpdateJobTemplateInput struct {
	_ struct{} `type:"structure"`

	// Accelerated transcoding can significantly speed up jobs with long, visually
	// complex content. Outputs that use this feature incur pro-tier pricing. For
	// information about feature limitations, see the AWS Elemental MediaConvert
	// User Guide.
	AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"`

	// The new category for the job template, if you are changing it.
	Category *string `locationName:"category" type:"string"`

	// The new description for the job template, if you are changing it.
	Description *string `locationName:"description" type:"string"`

	// Optional list of hop destinations.
	HopDestinations []*HopDestination `locationName:"hopDestinations" type:"list"`

	// The name of the job template you are modifying.
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`

	// Specify the relative priority for this job. In any given queue, the service
	// begins processing the job with the highest value first. When more than one
	// job has the same priority, the service begins processing the job that you
	// submitted first. If you don't specify a priority, the service uses the default
	// value 0.
	Priority *int64 `locationName:"priority" type:"integer"`

	// The new queue for the job template, if you are changing it.
	Queue *string `locationName:"queue" type:"string"`

	// JobTemplateSettings contains all the transcode settings saved in the template
	// that will be applied to jobs created from it.
	Settings *JobTemplateSettings `locationName:"settings" type:"structure"`

	// Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch
	// Events. Set the interval, in seconds, between status updates. MediaConvert
	// sends an update at this interval from the time the service begins processing
	// your job to the time it completes the transcode or encounters an error.
	StatusUpdateInterval *string `locationName:"statusUpdateInterval" type:"string" enum:"StatusUpdateInterval"`
}

// String returns the string representation
func (s UpdateJobTemplateInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateJobTemplateInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns a request.ErrInvalidParams aggregating every violation found,
// including violations from nested structures, or nil when the input is valid.
func (s *UpdateJobTemplateInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateJobTemplateInput"}
	// Name is required and, because it is bound to the request URI, must be
	// non-empty.
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}
	// Only the minimum priority (-50) is validated client-side.
	if s.Priority != nil && *s.Priority < -50 {
		invalidParams.Add(request.NewErrParamMinValue("Priority", -50))
	}
	// Recursively validate nested structures, attributing any nested errors
	// to their field path.
	if s.AccelerationSettings != nil {
		if err := s.AccelerationSettings.Validate(); err != nil {
			invalidParams.AddNested("AccelerationSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.HopDestinations != nil {
		for i, v := range s.HopDestinations {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "HopDestinations", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.Settings != nil {
		if err := s.Settings.Validate(); err != nil {
			invalidParams.AddNested("Settings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAccelerationSettings sets the AccelerationSettings field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdateJobTemplateInput) SetAccelerationSettings(v *AccelerationSettings) *UpdateJobTemplateInput {
	s.AccelerationSettings = v
	return s
}

// SetCategory sets the Category field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdateJobTemplateInput) SetCategory(v string) *UpdateJobTemplateInput {
	s.Category = &v
	return s
}

// SetDescription sets the Description field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdateJobTemplateInput) SetDescription(v string) *UpdateJobTemplateInput {
	s.Description = &v
	return s
}

// SetHopDestinations sets the HopDestinations field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdateJobTemplateInput) SetHopDestinations(v []*HopDestination) *UpdateJobTemplateInput {
	s.HopDestinations = v
	return s
}

// SetName sets the Name field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdateJobTemplateInput) SetName(v string) *UpdateJobTemplateInput {
	s.Name = &v
	return s
}

// SetPriority sets the Priority field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdateJobTemplateInput) SetPriority(v int64) *UpdateJobTemplateInput {
	s.Priority = &v
	return s
}

// SetQueue sets the Queue field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdateJobTemplateInput) SetQueue(v string) *UpdateJobTemplateInput {
	s.Queue = &v
	return s
}

// SetSettings sets the Settings field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdateJobTemplateInput) SetSettings(v *JobTemplateSettings) *UpdateJobTemplateInput {
	s.Settings = v
	return s
}

// SetStatusUpdateInterval sets the StatusUpdateInterval field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdateJobTemplateInput) SetStatusUpdateInterval(v string) *UpdateJobTemplateInput {
	s.StatusUpdateInterval = &v
	return s
}
20244
// Successful update job template requests will return the new job template
// JSON.
type UpdateJobTemplateOutput struct {
	_ struct{} `type:"structure"`

	// A job template is a pre-made set of encoding instructions that you can use
	// to quickly create a job.
	JobTemplate *JobTemplate `locationName:"jobTemplate" type:"structure"`
}

// String returns the string representation
func (s UpdateJobTemplateOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateJobTemplateOutput) GoString() string {
	return s.String()
}

// SetJobTemplate sets the JobTemplate field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdateJobTemplateOutput) SetJobTemplate(v *JobTemplate) *UpdateJobTemplateOutput {
	s.JobTemplate = v
	return s
}
20270
// Modify a preset by sending a request with the preset name and any of the
// following that you wish to change: description, category, and transcoding
// settings.
type UpdatePresetInput struct {
	_ struct{} `type:"structure"`

	// The new category for the preset, if you are changing it.
	Category *string `locationName:"category" type:"string"`

	// The new description for the preset, if you are changing it.
	Description *string `locationName:"description" type:"string"`

	// The name of the preset you are modifying.
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`

	// Settings for preset
	Settings *PresetSettings `locationName:"settings" type:"structure"`
}

// String returns the string representation
func (s UpdatePresetInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdatePresetInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns a request.ErrInvalidParams aggregating every violation found,
// including violations from nested structures, or nil when the input is valid.
func (s *UpdatePresetInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdatePresetInput"}
	// Name is required and, because it is bound to the request URI, must be
	// non-empty.
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}
	// Recursively validate the nested settings, attributing any nested errors
	// to their field path.
	if s.Settings != nil {
		if err := s.Settings.Validate(); err != nil {
			invalidParams.AddNested("Settings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCategory sets the Category field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdatePresetInput) SetCategory(v string) *UpdatePresetInput {
	s.Category = &v
	return s
}

// SetDescription sets the Description field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdatePresetInput) SetDescription(v string) *UpdatePresetInput {
	s.Description = &v
	return s
}

// SetName sets the Name field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdatePresetInput) SetName(v string) *UpdatePresetInput {
	s.Name = &v
	return s
}

// SetSettings sets the Settings field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdatePresetInput) SetSettings(v *PresetSettings) *UpdatePresetInput {
	s.Settings = v
	return s
}
20346
// Successful update preset requests will return the new preset JSON.
type UpdatePresetOutput struct {
	_ struct{} `type:"structure"`

	// A preset is a collection of preconfigured media conversion settings that
	// you want MediaConvert to apply to the output during the conversion process.
	Preset *Preset `locationName:"preset" type:"structure"`
}

// String returns the string representation
func (s UpdatePresetOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdatePresetOutput) GoString() string {
	return s.String()
}

// SetPreset sets the Preset field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdatePresetOutput) SetPreset(v *Preset) *UpdatePresetOutput {
	s.Preset = v
	return s
}
20371
// Modify a queue by sending a request with the queue name and any changes to
// the queue.
type UpdateQueueInput struct {
	_ struct{} `type:"structure"`

	// The new description for the queue, if you are changing it.
	Description *string `locationName:"description" type:"string"`

	// The name of the queue that you are modifying.
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`

	// The new details of your pricing plan for your reserved queue. When you set
	// up a new pricing plan to replace an expired one, you enter into another 12-month
	// commitment. When you add capacity to your queue by increasing the number
	// of RTS, you extend the term of your commitment to 12 months from when you
	// add capacity. After you make these commitments, you can't cancel them.
	ReservationPlanSettings *ReservationPlanSettings `locationName:"reservationPlanSettings" type:"structure"`

	// Pause or activate a queue by changing its status between ACTIVE and PAUSED.
	// If you pause a queue, jobs in that queue won't begin. Jobs that are running
	// when you pause the queue continue to run until they finish or result in an
	// error.
	Status *string `locationName:"status" type:"string" enum:"QueueStatus"`
}

// String returns the string representation
func (s UpdateQueueInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateQueueInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns a request.ErrInvalidParams aggregating every violation found,
// including violations from nested structures, or nil when the input is valid.
func (s *UpdateQueueInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateQueueInput"}
	// Name is required and, because it is bound to the request URI, must be
	// non-empty.
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}
	// Recursively validate the nested reservation plan settings, attributing
	// any nested errors to their field path.
	if s.ReservationPlanSettings != nil {
		if err := s.ReservationPlanSettings.Validate(); err != nil {
			invalidParams.AddNested("ReservationPlanSettings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDescription sets the Description field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdateQueueInput) SetDescription(v string) *UpdateQueueInput {
	s.Description = &v
	return s
}

// SetName sets the Name field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdateQueueInput) SetName(v string) *UpdateQueueInput {
	s.Name = &v
	return s
}

// SetReservationPlanSettings sets the ReservationPlanSettings field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdateQueueInput) SetReservationPlanSettings(v *ReservationPlanSettings) *UpdateQueueInput {
	s.ReservationPlanSettings = v
	return s
}

// SetStatus sets the Status field's value.
// It returns the receiver to allow setter calls to be chained.
func (s *UpdateQueueInput) SetStatus(v string) *UpdateQueueInput {
	s.Status = &v
	return s
}
20453
// Successful update queue requests return the new queue information in JSON
// format.
type UpdateQueueOutput struct {
	_ struct{} `type:"structure"`

	// The updated queue, as returned by the service. You can use queues to manage
	// the resources that are available to your AWS account for running multiple
	// transcoding jobs at the same time. If you don't specify a queue, the service
	// sends all jobs through the default queue. For more information, see
	// https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html.
	Queue *Queue `locationName:"queue" type:"structure"`
}
20465
20466// String returns the string representation
20467func (s UpdateQueueOutput) String() string {
20468	return awsutil.Prettify(s)
20469}
20470
20471// GoString returns the string representation
20472func (s UpdateQueueOutput) GoString() string {
20473	return s.String()
20474}
20475
20476// SetQueue sets the Queue field's value.
20477func (s *UpdateQueueOutput) SetQueue(v *Queue) *UpdateQueueOutput {
20478	s.Queue = v
20479	return s
20480}
20481
// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value VC3.
type Vc3Settings struct {
	_ struct{} `type:"structure"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Vc3FramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Vc3FramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"24" type:"integer"`

	// Optional. Choose the scan line type for this output. If you don't specify
	// a value, MediaConvert will create a progressive output.
	InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"Vc3InterlaceMode"`

	// Use this setting for interlaced outputs, when your output frame rate is half
	// of your input frame rate. In this situation, choose Optimized interlacing
	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
	// case, each progressive frame from the input corresponds to an interlaced
	// field in the output. Keep the default value, Basic interlacing (INTERLACED),
	// for all other output frame rates. With basic interlacing, MediaConvert performs
	// any frame rate conversion first and then interlaces the frames. When you
	// choose Optimized interlacing and you set your output frame rate to a value
	// that isn't suitable for optimized interlacing, MediaConvert automatically
	// falls back to basic interlacing. Required settings: To use optimized interlacing,
	// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
	// use optimized interlacing for hard telecine outputs. You must also set Interlace
	// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
	ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"Vc3ScanTypeConversionMode"`

	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
	// second (fps). Enable slow PAL to create a 25 fps output by relabeling the
	// video frames and resampling your audio. Note that enabling this setting will
	// slightly reduce the duration of your video. Related settings: You must also
	// set Framerate to 25. In your JSON job specification, set (framerateControl)
	// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
	// 1.
	SlowPal *string `locationName:"slowPal" type:"string" enum:"Vc3SlowPal"`

	// When you do frame rate conversion from 23.976 frames per second (fps) to
	// 29.97 fps, and your output scan type is interlaced, you can optionally enable
	// hard telecine (HARD) to create a smoother picture. When you keep the default
	// value, None (NONE), MediaConvert does a standard frame rate conversion to
	// 29.97 without doing anything with the field polarity to create a smoother
	// picture.
	Telecine *string `locationName:"telecine" type:"string" enum:"Vc3Telecine"`

	// Specify the VC3 class to choose the quality characteristics for this output.
	// VC3 class, together with the settings Framerate (framerateNumerator and framerateDenominator)
	// and Resolution (height and width), determine your output bitrate. For example,
	// say that your video resolution is 1920x1080 and your framerate is 29.97.
	// Then Class 145 (CLASS_145) gives you an output with a bitrate of approximately
	// 145 Mbps and Class 220 (CLASS_220) gives you an output with a bitrate of
	// approximately 220 Mbps. VC3 class also specifies the color bit depth of your
	// output.
	Vc3Class *string `locationName:"vc3Class" type:"string" enum:"Vc3Class"`
}
20574
20575// String returns the string representation
20576func (s Vc3Settings) String() string {
20577	return awsutil.Prettify(s)
20578}
20579
20580// GoString returns the string representation
20581func (s Vc3Settings) GoString() string {
20582	return s.String()
20583}
20584
20585// Validate inspects the fields of the type to determine if they are valid.
20586func (s *Vc3Settings) Validate() error {
20587	invalidParams := request.ErrInvalidParams{Context: "Vc3Settings"}
20588	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
20589		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
20590	}
20591	if s.FramerateNumerator != nil && *s.FramerateNumerator < 24 {
20592		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 24))
20593	}
20594
20595	if invalidParams.Len() > 0 {
20596		return invalidParams
20597	}
20598	return nil
20599}
20600
20601// SetFramerateControl sets the FramerateControl field's value.
20602func (s *Vc3Settings) SetFramerateControl(v string) *Vc3Settings {
20603	s.FramerateControl = &v
20604	return s
20605}
20606
20607// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value.
20608func (s *Vc3Settings) SetFramerateConversionAlgorithm(v string) *Vc3Settings {
20609	s.FramerateConversionAlgorithm = &v
20610	return s
20611}
20612
20613// SetFramerateDenominator sets the FramerateDenominator field's value.
20614func (s *Vc3Settings) SetFramerateDenominator(v int64) *Vc3Settings {
20615	s.FramerateDenominator = &v
20616	return s
20617}
20618
20619// SetFramerateNumerator sets the FramerateNumerator field's value.
20620func (s *Vc3Settings) SetFramerateNumerator(v int64) *Vc3Settings {
20621	s.FramerateNumerator = &v
20622	return s
20623}
20624
20625// SetInterlaceMode sets the InterlaceMode field's value.
20626func (s *Vc3Settings) SetInterlaceMode(v string) *Vc3Settings {
20627	s.InterlaceMode = &v
20628	return s
20629}
20630
20631// SetScanTypeConversionMode sets the ScanTypeConversionMode field's value.
20632func (s *Vc3Settings) SetScanTypeConversionMode(v string) *Vc3Settings {
20633	s.ScanTypeConversionMode = &v
20634	return s
20635}
20636
20637// SetSlowPal sets the SlowPal field's value.
20638func (s *Vc3Settings) SetSlowPal(v string) *Vc3Settings {
20639	s.SlowPal = &v
20640	return s
20641}
20642
20643// SetTelecine sets the Telecine field's value.
20644func (s *Vc3Settings) SetTelecine(v string) *Vc3Settings {
20645	s.Telecine = &v
20646	return s
20647}
20648
20649// SetVc3Class sets the Vc3Class field's value.
20650func (s *Vc3Settings) SetVc3Class(v string) *Vc3Settings {
20651	s.Vc3Class = &v
20652	return s
20653}
20654
// Video codec settings, (CodecSettings) under (VideoDescription), contains
// the group of settings related to video encoding. The settings in this group
// vary depending on the value that you choose for Video codec (Codec). For
// each codec enum that you choose, define the corresponding settings object.
// The following lists the codec enum, settings object pairs. * AV1, Av1Settings
// * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264,
// H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings
// * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings
type VideoCodecSettings struct {
	_ struct{} `type:"structure"`

	// Required when you set Codec, under VideoDescription>CodecSettings to the
	// value AV1.
	Av1Settings *Av1Settings `locationName:"av1Settings" type:"structure"`

	// Required when you choose AVC-Intra for your output video codec. For more
	// information about the AVC-Intra settings, see the relevant specification.
	// For detailed information about SD and HD in AVC-Intra, see https://ieeexplore.ieee.org/document/7290936.
	// For information about 4K/2K in AVC-Intra, see https://pro-av.panasonic.net/en/avc-ultra/AVC-ULTRAoverview.pdf.
	AvcIntraSettings *AvcIntraSettings `locationName:"avcIntraSettings" type:"structure"`

	// Specifies the video codec. This must be equal to one of the enum values defined
	// by the object VideoCodec.
	Codec *string `locationName:"codec" type:"string" enum:"VideoCodec"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value FRAME_CAPTURE.
	FrameCaptureSettings *FrameCaptureSettings `locationName:"frameCaptureSettings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value H_264.
	H264Settings *H264Settings `locationName:"h264Settings" type:"structure"`

	// Settings for the H265 codec. Per the codec enum, settings object pairs listed
	// above, this corresponds to the value H_265 for (Codec).
	H265Settings *H265Settings `locationName:"h265Settings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value MPEG2.
	Mpeg2Settings *Mpeg2Settings `locationName:"mpeg2Settings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value PRORES.
	ProresSettings *ProresSettings `locationName:"proresSettings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value VC3.
	Vc3Settings *Vc3Settings `locationName:"vc3Settings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value VP8.
	Vp8Settings *Vp8Settings `locationName:"vp8Settings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value VP9.
	Vp9Settings *Vp9Settings `locationName:"vp9Settings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value XAVC.
	XavcSettings *XavcSettings `locationName:"xavcSettings" type:"structure"`
}
20715
20716// String returns the string representation
20717func (s VideoCodecSettings) String() string {
20718	return awsutil.Prettify(s)
20719}
20720
20721// GoString returns the string representation
20722func (s VideoCodecSettings) GoString() string {
20723	return s.String()
20724}
20725
20726// Validate inspects the fields of the type to determine if they are valid.
20727func (s *VideoCodecSettings) Validate() error {
20728	invalidParams := request.ErrInvalidParams{Context: "VideoCodecSettings"}
20729	if s.Av1Settings != nil {
20730		if err := s.Av1Settings.Validate(); err != nil {
20731			invalidParams.AddNested("Av1Settings", err.(request.ErrInvalidParams))
20732		}
20733	}
20734	if s.AvcIntraSettings != nil {
20735		if err := s.AvcIntraSettings.Validate(); err != nil {
20736			invalidParams.AddNested("AvcIntraSettings", err.(request.ErrInvalidParams))
20737		}
20738	}
20739	if s.FrameCaptureSettings != nil {
20740		if err := s.FrameCaptureSettings.Validate(); err != nil {
20741			invalidParams.AddNested("FrameCaptureSettings", err.(request.ErrInvalidParams))
20742		}
20743	}
20744	if s.H264Settings != nil {
20745		if err := s.H264Settings.Validate(); err != nil {
20746			invalidParams.AddNested("H264Settings", err.(request.ErrInvalidParams))
20747		}
20748	}
20749	if s.H265Settings != nil {
20750		if err := s.H265Settings.Validate(); err != nil {
20751			invalidParams.AddNested("H265Settings", err.(request.ErrInvalidParams))
20752		}
20753	}
20754	if s.Mpeg2Settings != nil {
20755		if err := s.Mpeg2Settings.Validate(); err != nil {
20756			invalidParams.AddNested("Mpeg2Settings", err.(request.ErrInvalidParams))
20757		}
20758	}
20759	if s.ProresSettings != nil {
20760		if err := s.ProresSettings.Validate(); err != nil {
20761			invalidParams.AddNested("ProresSettings", err.(request.ErrInvalidParams))
20762		}
20763	}
20764	if s.Vc3Settings != nil {
20765		if err := s.Vc3Settings.Validate(); err != nil {
20766			invalidParams.AddNested("Vc3Settings", err.(request.ErrInvalidParams))
20767		}
20768	}
20769	if s.Vp8Settings != nil {
20770		if err := s.Vp8Settings.Validate(); err != nil {
20771			invalidParams.AddNested("Vp8Settings", err.(request.ErrInvalidParams))
20772		}
20773	}
20774	if s.Vp9Settings != nil {
20775		if err := s.Vp9Settings.Validate(); err != nil {
20776			invalidParams.AddNested("Vp9Settings", err.(request.ErrInvalidParams))
20777		}
20778	}
20779	if s.XavcSettings != nil {
20780		if err := s.XavcSettings.Validate(); err != nil {
20781			invalidParams.AddNested("XavcSettings", err.(request.ErrInvalidParams))
20782		}
20783	}
20784
20785	if invalidParams.Len() > 0 {
20786		return invalidParams
20787	}
20788	return nil
20789}
20790
20791// SetAv1Settings sets the Av1Settings field's value.
20792func (s *VideoCodecSettings) SetAv1Settings(v *Av1Settings) *VideoCodecSettings {
20793	s.Av1Settings = v
20794	return s
20795}
20796
20797// SetAvcIntraSettings sets the AvcIntraSettings field's value.
20798func (s *VideoCodecSettings) SetAvcIntraSettings(v *AvcIntraSettings) *VideoCodecSettings {
20799	s.AvcIntraSettings = v
20800	return s
20801}
20802
20803// SetCodec sets the Codec field's value.
20804func (s *VideoCodecSettings) SetCodec(v string) *VideoCodecSettings {
20805	s.Codec = &v
20806	return s
20807}
20808
20809// SetFrameCaptureSettings sets the FrameCaptureSettings field's value.
20810func (s *VideoCodecSettings) SetFrameCaptureSettings(v *FrameCaptureSettings) *VideoCodecSettings {
20811	s.FrameCaptureSettings = v
20812	return s
20813}
20814
20815// SetH264Settings sets the H264Settings field's value.
20816func (s *VideoCodecSettings) SetH264Settings(v *H264Settings) *VideoCodecSettings {
20817	s.H264Settings = v
20818	return s
20819}
20820
20821// SetH265Settings sets the H265Settings field's value.
20822func (s *VideoCodecSettings) SetH265Settings(v *H265Settings) *VideoCodecSettings {
20823	s.H265Settings = v
20824	return s
20825}
20826
20827// SetMpeg2Settings sets the Mpeg2Settings field's value.
20828func (s *VideoCodecSettings) SetMpeg2Settings(v *Mpeg2Settings) *VideoCodecSettings {
20829	s.Mpeg2Settings = v
20830	return s
20831}
20832
20833// SetProresSettings sets the ProresSettings field's value.
20834func (s *VideoCodecSettings) SetProresSettings(v *ProresSettings) *VideoCodecSettings {
20835	s.ProresSettings = v
20836	return s
20837}
20838
20839// SetVc3Settings sets the Vc3Settings field's value.
20840func (s *VideoCodecSettings) SetVc3Settings(v *Vc3Settings) *VideoCodecSettings {
20841	s.Vc3Settings = v
20842	return s
20843}
20844
20845// SetVp8Settings sets the Vp8Settings field's value.
20846func (s *VideoCodecSettings) SetVp8Settings(v *Vp8Settings) *VideoCodecSettings {
20847	s.Vp8Settings = v
20848	return s
20849}
20850
20851// SetVp9Settings sets the Vp9Settings field's value.
20852func (s *VideoCodecSettings) SetVp9Settings(v *Vp9Settings) *VideoCodecSettings {
20853	s.Vp9Settings = v
20854	return s
20855}
20856
20857// SetXavcSettings sets the XavcSettings field's value.
20858func (s *VideoCodecSettings) SetXavcSettings(v *XavcSettings) *VideoCodecSettings {
20859	s.XavcSettings = v
20860	return s
20861}
20862
// Settings related to video encoding of your output. The specific video settings
// depend on the video codec that you choose. When you work directly in your
// JSON job specification, include one instance of Video description (VideoDescription)
// per output.
type VideoDescription struct {
	_ struct{} `type:"structure"`

	// This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert
	// AFD signaling (AfdSignaling) to specify whether the service includes AFD
	// values in the output video data and what those values are. * Choose None
	// to remove all AFD values from this output. * Choose Fixed to ignore input
	// AFD values and instead encode the value specified in the job. * Choose Auto
	// to calculate output AFD values based on the input AFD scaler data.
	AfdSignaling *string `locationName:"afdSignaling" type:"string" enum:"AfdSignaling"`

	// The anti-alias filter is automatically applied to all outputs. The service
	// no longer accepts the value DISABLED for AntiAlias. If you specify that in
	// your job, the service will ignore the setting.
	AntiAlias *string `locationName:"antiAlias" type:"string" enum:"AntiAlias"`

	// Video codec settings, (CodecSettings) under (VideoDescription), contains
	// the group of settings related to video encoding. The settings in this group
	// vary depending on the value that you choose for Video codec (Codec). For
	// each codec enum that you choose, define the corresponding settings object.
	// The following lists the codec enum, settings object pairs. * AV1, Av1Settings
	// * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264,
	// H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings
	// * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings
	CodecSettings *VideoCodecSettings `locationName:"codecSettings" type:"structure"`

	// Choose Insert (INSERT) for this setting to include color metadata in this
	// output. Choose Ignore (IGNORE) to exclude color metadata from this output.
	// If you don't specify a value, the service sets this to Insert by default.
	ColorMetadata *string `locationName:"colorMetadata" type:"string" enum:"ColorMetadata"`

	// Use Cropping selection (crop) to specify the video area that the service
	// will include in the output video frame.
	Crop *Rectangle `locationName:"crop" type:"structure"`

	// Applies only to 29.97 fps outputs. When this feature is enabled, the service
	// will use drop-frame timecode on outputs. If it is not possible to use drop-frame
	// timecode, the system will fall back to non-drop-frame. This setting is enabled
	// by default when Timecode insertion (TimecodeInsertion) is enabled.
	DropFrameTimecode *string `locationName:"dropFrameTimecode" type:"string" enum:"DropFrameTimecode"`

	// Applies only if you set AFD Signaling (AfdSignaling) to Fixed (FIXED). Use
	// Fixed (FixedAfd) to specify a four-bit AFD value which the service will write
	// on all frames of this video output.
	FixedAfd *int64 `locationName:"fixedAfd" type:"integer"`

	// Use the Height (Height) setting to define the video resolution height for
	// this output. Specify in pixels. If you don't provide a value here, the service
	// will use the input height.
	Height *int64 `locationName:"height" min:"32" type:"integer"`

	// Use Selection placement (position) to define the video area in your output
	// frame. The area outside of the rectangle that you specify here is black.
	Position *Rectangle `locationName:"position" type:"structure"`

	// Use Respond to AFD (RespondToAfd) to specify how the service changes the
	// video itself in response to AFD values in the input. * Choose Respond to
	// clip the input video frame according to the AFD value, input display aspect
	// ratio, and output display aspect ratio. * Choose Passthrough to include the
	// input AFD values. Do not choose this when AfdSignaling is set to (NONE).
	// A preferred implementation of this workflow is to set RespondToAfd to (NONE)
	// and set AfdSignaling to (AUTO). * Choose None to remove all input AFD values
	// from this output.
	RespondToAfd *string `locationName:"respondToAfd" type:"string" enum:"RespondToAfd"`

	// Specify how the service handles outputs that have a different aspect ratio
	// from the input aspect ratio. Choose Stretch to output (STRETCH_TO_OUTPUT)
	// to have the service stretch your video image to fit. Keep the setting Default
	// (DEFAULT) to have the service letterbox your video instead. This setting
	// overrides any value that you specify for the setting Selection placement
	// (position) in this output.
	ScalingBehavior *string `locationName:"scalingBehavior" type:"string" enum:"ScalingBehavior"`

	// Use Sharpness (Sharpness) setting to specify the strength of anti-aliasing.
	// This setting changes the width of the anti-alias filter kernel used for scaling.
	// Sharpness only applies if your output resolution is different from your input
	// resolution. 0 is the softest setting, 100 the sharpest, and 50 recommended
	// for most content.
	Sharpness *int64 `locationName:"sharpness" type:"integer"`

	// Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode
	// insertion when the input frame rate is identical to the output frame rate.
	// To include timecodes in this output, set Timecode insertion (VideoTimecodeInsertion)
	// to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED.
	// When the service inserts timecodes in an output, by default, it uses any
	// embedded timecodes from the input. If none are present, the service will
	// set the timecode for the first output frame to zero. To change this default
	// behavior, adjust the settings under Timecode configuration (TimecodeConfig).
	// In the console, these settings are located under Job > Job settings > Timecode
	// configuration. Note - Timecode source under input settings (InputTimecodeSource)
	// does not affect the timecodes that are inserted in the output. Source under
	// Job settings > Timecode configuration (TimecodeSource) does.
	TimecodeInsertion *string `locationName:"timecodeInsertion" type:"string" enum:"VideoTimecodeInsertion"`

	// Find additional transcoding features under Preprocessors (VideoPreprocessors).
	// Enable the features at each output individually. These features are disabled
	// by default.
	VideoPreprocessors *VideoPreprocessor `locationName:"videoPreprocessors" type:"structure"`

	// Use Width (Width) to define the video resolution width, in pixels, for this
	// output. If you don't provide a value here, the service will use the input
	// width.
	Width *int64 `locationName:"width" min:"32" type:"integer"`
}
20971
20972// String returns the string representation
20973func (s VideoDescription) String() string {
20974	return awsutil.Prettify(s)
20975}
20976
20977// GoString returns the string representation
20978func (s VideoDescription) GoString() string {
20979	return s.String()
20980}
20981
20982// Validate inspects the fields of the type to determine if they are valid.
20983func (s *VideoDescription) Validate() error {
20984	invalidParams := request.ErrInvalidParams{Context: "VideoDescription"}
20985	if s.Height != nil && *s.Height < 32 {
20986		invalidParams.Add(request.NewErrParamMinValue("Height", 32))
20987	}
20988	if s.Width != nil && *s.Width < 32 {
20989		invalidParams.Add(request.NewErrParamMinValue("Width", 32))
20990	}
20991	if s.CodecSettings != nil {
20992		if err := s.CodecSettings.Validate(); err != nil {
20993			invalidParams.AddNested("CodecSettings", err.(request.ErrInvalidParams))
20994		}
20995	}
20996	if s.Crop != nil {
20997		if err := s.Crop.Validate(); err != nil {
20998			invalidParams.AddNested("Crop", err.(request.ErrInvalidParams))
20999		}
21000	}
21001	if s.Position != nil {
21002		if err := s.Position.Validate(); err != nil {
21003			invalidParams.AddNested("Position", err.(request.ErrInvalidParams))
21004		}
21005	}
21006	if s.VideoPreprocessors != nil {
21007		if err := s.VideoPreprocessors.Validate(); err != nil {
21008			invalidParams.AddNested("VideoPreprocessors", err.(request.ErrInvalidParams))
21009		}
21010	}
21011
21012	if invalidParams.Len() > 0 {
21013		return invalidParams
21014	}
21015	return nil
21016}
21017
21018// SetAfdSignaling sets the AfdSignaling field's value.
21019func (s *VideoDescription) SetAfdSignaling(v string) *VideoDescription {
21020	s.AfdSignaling = &v
21021	return s
21022}
21023
21024// SetAntiAlias sets the AntiAlias field's value.
21025func (s *VideoDescription) SetAntiAlias(v string) *VideoDescription {
21026	s.AntiAlias = &v
21027	return s
21028}
21029
21030// SetCodecSettings sets the CodecSettings field's value.
21031func (s *VideoDescription) SetCodecSettings(v *VideoCodecSettings) *VideoDescription {
21032	s.CodecSettings = v
21033	return s
21034}
21035
21036// SetColorMetadata sets the ColorMetadata field's value.
21037func (s *VideoDescription) SetColorMetadata(v string) *VideoDescription {
21038	s.ColorMetadata = &v
21039	return s
21040}
21041
21042// SetCrop sets the Crop field's value.
21043func (s *VideoDescription) SetCrop(v *Rectangle) *VideoDescription {
21044	s.Crop = v
21045	return s
21046}
21047
21048// SetDropFrameTimecode sets the DropFrameTimecode field's value.
21049func (s *VideoDescription) SetDropFrameTimecode(v string) *VideoDescription {
21050	s.DropFrameTimecode = &v
21051	return s
21052}
21053
21054// SetFixedAfd sets the FixedAfd field's value.
21055func (s *VideoDescription) SetFixedAfd(v int64) *VideoDescription {
21056	s.FixedAfd = &v
21057	return s
21058}
21059
21060// SetHeight sets the Height field's value.
21061func (s *VideoDescription) SetHeight(v int64) *VideoDescription {
21062	s.Height = &v
21063	return s
21064}
21065
21066// SetPosition sets the Position field's value.
21067func (s *VideoDescription) SetPosition(v *Rectangle) *VideoDescription {
21068	s.Position = v
21069	return s
21070}
21071
21072// SetRespondToAfd sets the RespondToAfd field's value.
21073func (s *VideoDescription) SetRespondToAfd(v string) *VideoDescription {
21074	s.RespondToAfd = &v
21075	return s
21076}
21077
21078// SetScalingBehavior sets the ScalingBehavior field's value.
21079func (s *VideoDescription) SetScalingBehavior(v string) *VideoDescription {
21080	s.ScalingBehavior = &v
21081	return s
21082}
21083
21084// SetSharpness sets the Sharpness field's value.
21085func (s *VideoDescription) SetSharpness(v int64) *VideoDescription {
21086	s.Sharpness = &v
21087	return s
21088}
21089
21090// SetTimecodeInsertion sets the TimecodeInsertion field's value.
21091func (s *VideoDescription) SetTimecodeInsertion(v string) *VideoDescription {
21092	s.TimecodeInsertion = &v
21093	return s
21094}
21095
21096// SetVideoPreprocessors sets the VideoPreprocessors field's value.
21097func (s *VideoDescription) SetVideoPreprocessors(v *VideoPreprocessor) *VideoDescription {
21098	s.VideoPreprocessors = v
21099	return s
21100}
21101
21102// SetWidth sets the Width field's value.
21103func (s *VideoDescription) SetWidth(v int64) *VideoDescription {
21104	s.Width = &v
21105	return s
21106}
21107
// Contains details about the output's video stream.
type VideoDetail struct {
	_ struct{} `type:"structure"`

	// Height in pixels for the output.
	HeightInPx *int64 `locationName:"heightInPx" type:"integer"`

	// Width in pixels for the output.
	WidthInPx *int64 `locationName:"widthInPx" type:"integer"`
}
21118
// String returns the string representation of the value, generated by
// awsutil.Prettify.
func (s VideoDetail) String() string {
	return awsutil.Prettify(s)
}
21123
// GoString returns the string representation, delegating to String.
func (s VideoDetail) GoString() string {
	return s.String()
}
21128
21129// SetHeightInPx sets the HeightInPx field's value.
21130func (s *VideoDetail) SetHeightInPx(v int64) *VideoDetail {
21131	s.HeightInPx = &v
21132	return s
21133}
21134
21135// SetWidthInPx sets the WidthInPx field's value.
21136func (s *VideoDetail) SetWidthInPx(v int64) *VideoDetail {
21137	s.WidthInPx = &v
21138	return s
21139}
21140
// Find additional transcoding features under Preprocessors (VideoPreprocessors).
// Enable the features at each output individually. These features are disabled
// by default.
type VideoPreprocessor struct {
	_ struct{} `type:"structure"`

	// Use these settings to convert the color space or to modify properties such
	// as hue and contrast for this output. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/converting-the-color-space.html.
	ColorCorrector *ColorCorrector `locationName:"colorCorrector" type:"structure"`

	// Use the deinterlacer to produce smoother motion and a clearer picture. For
	// more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-scan-type.html.
	Deinterlacer *Deinterlacer `locationName:"deinterlacer" type:"structure"`

	// Enable Dolby Vision feature to produce Dolby Vision compatible video output.
	DolbyVision *DolbyVision `locationName:"dolbyVision" type:"structure"`

	// Enable HDR10+ analysis and metadata injection. Compatible with HEVC only.
	Hdr10Plus *Hdr10Plus `locationName:"hdr10Plus" type:"structure"`

	// Enable the Image inserter (ImageInserter) feature to include a graphic overlay
	// on your video. Enable or disable this feature for each output individually.
	// This setting is disabled by default.
	ImageInserter *ImageInserter `locationName:"imageInserter" type:"structure"`

	// Enable the Noise reducer (NoiseReducer) feature to remove noise from your
	// video output if necessary. Enable or disable this feature for each output
	// individually. This setting is disabled by default.
	NoiseReducer *NoiseReducer `locationName:"noiseReducer" type:"structure"`

	// If you work with a third party video watermarking partner, use the group
	// of settings that correspond with your watermarking partner to include watermarks
	// in your output.
	PartnerWatermarking *PartnerWatermarking `locationName:"partnerWatermarking" type:"structure"`

	// Settings for burning the output timecode and specified prefix into the output.
	TimecodeBurnin *TimecodeBurnin `locationName:"timecodeBurnin" type:"structure"`
}
21179
// String returns the string representation of the value, generated by
// awsutil.Prettify.
func (s VideoPreprocessor) String() string {
	return awsutil.Prettify(s)
}
21184
// GoString returns the string representation, delegating to String.
func (s VideoPreprocessor) GoString() string {
	return s.String()
}
21189
21190// Validate inspects the fields of the type to determine if they are valid.
21191func (s *VideoPreprocessor) Validate() error {
21192	invalidParams := request.ErrInvalidParams{Context: "VideoPreprocessor"}
21193	if s.ColorCorrector != nil {
21194		if err := s.ColorCorrector.Validate(); err != nil {
21195			invalidParams.AddNested("ColorCorrector", err.(request.ErrInvalidParams))
21196		}
21197	}
21198	if s.ImageInserter != nil {
21199		if err := s.ImageInserter.Validate(); err != nil {
21200			invalidParams.AddNested("ImageInserter", err.(request.ErrInvalidParams))
21201		}
21202	}
21203	if s.NoiseReducer != nil {
21204		if err := s.NoiseReducer.Validate(); err != nil {
21205			invalidParams.AddNested("NoiseReducer", err.(request.ErrInvalidParams))
21206		}
21207	}
21208	if s.PartnerWatermarking != nil {
21209		if err := s.PartnerWatermarking.Validate(); err != nil {
21210			invalidParams.AddNested("PartnerWatermarking", err.(request.ErrInvalidParams))
21211		}
21212	}
21213	if s.TimecodeBurnin != nil {
21214		if err := s.TimecodeBurnin.Validate(); err != nil {
21215			invalidParams.AddNested("TimecodeBurnin", err.(request.ErrInvalidParams))
21216		}
21217	}
21218
21219	if invalidParams.Len() > 0 {
21220		return invalidParams
21221	}
21222	return nil
21223}
21224
// SetColorCorrector sets the ColorCorrector field's value.
// The pointer v is stored as-is (no copy); it returns s to allow call chaining.
func (s *VideoPreprocessor) SetColorCorrector(v *ColorCorrector) *VideoPreprocessor {
	s.ColorCorrector = v
	return s
}
21230
// SetDeinterlacer sets the Deinterlacer field's value.
// The pointer v is stored as-is (no copy); it returns s to allow call chaining.
func (s *VideoPreprocessor) SetDeinterlacer(v *Deinterlacer) *VideoPreprocessor {
	s.Deinterlacer = v
	return s
}
21236
// SetDolbyVision sets the DolbyVision field's value.
// The pointer v is stored as-is (no copy); it returns s to allow call chaining.
func (s *VideoPreprocessor) SetDolbyVision(v *DolbyVision) *VideoPreprocessor {
	s.DolbyVision = v
	return s
}
21242
// SetHdr10Plus sets the Hdr10Plus field's value.
// The pointer v is stored as-is (no copy); it returns s to allow call chaining.
func (s *VideoPreprocessor) SetHdr10Plus(v *Hdr10Plus) *VideoPreprocessor {
	s.Hdr10Plus = v
	return s
}
21248
// SetImageInserter sets the ImageInserter field's value.
// The pointer v is stored as-is (no copy); it returns s to allow call chaining.
func (s *VideoPreprocessor) SetImageInserter(v *ImageInserter) *VideoPreprocessor {
	s.ImageInserter = v
	return s
}
21254
// SetNoiseReducer sets the NoiseReducer field's value.
// The pointer v is stored as-is (no copy); it returns s to allow call chaining.
func (s *VideoPreprocessor) SetNoiseReducer(v *NoiseReducer) *VideoPreprocessor {
	s.NoiseReducer = v
	return s
}
21260
// SetPartnerWatermarking sets the PartnerWatermarking field's value.
// The pointer v is stored as-is (no copy); it returns s to allow call chaining.
func (s *VideoPreprocessor) SetPartnerWatermarking(v *PartnerWatermarking) *VideoPreprocessor {
	s.PartnerWatermarking = v
	return s
}
21266
// SetTimecodeBurnin sets the TimecodeBurnin field's value.
// The pointer v is stored as-is (no copy); it returns s to allow call chaining.
func (s *VideoPreprocessor) SetTimecodeBurnin(v *TimecodeBurnin) *VideoPreprocessor {
	s.TimecodeBurnin = v
	return s
}
21272
// Input video selectors contain the video settings for the input. Each of your
// inputs can have up to one video selector.
type VideoSelector struct {
	_ struct{} `type:"structure"`

	// Ignore this setting unless this input is a QuickTime animation with an alpha
	// channel. Use this setting to create separate Key and Fill outputs. In each
	// output, specify which part of the input MediaConvert uses. Leave this setting
	// at the default value DISCARD to delete the alpha channel and preserve the
	// video. Set it to REMAP_TO_LUMA to delete the video and map the alpha channel
	// to the luma channel of your outputs.
	AlphaBehavior *string `locationName:"alphaBehavior" type:"string" enum:"AlphaBehavior"`

	// If your input video has accurate color space metadata, or if you don't know
	// about color space, leave this set to the default value Follow (FOLLOW). The
	// service will automatically detect your input color space. If your input video
	// has metadata indicating the wrong color space, specify the accurate color
	// space here. If your input video is HDR 10 and the SMPTE ST 2086 Mastering
	// Display Color Volume static metadata isn't present in your video stream,
	// or if that metadata is present but not accurate, choose Force HDR 10 (FORCE_HDR10)
	// here and specify correct values in the input HDR 10 metadata (Hdr10Metadata)
	// settings. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.
	ColorSpace *string `locationName:"colorSpace" type:"string" enum:"ColorSpace"`

	// There are two sources for color metadata, the input file and the job input
	// settings Color space (ColorSpace) and HDR master display information settings(Hdr10Metadata).
	// The Color space usage setting determines which takes precedence. Choose Force
	// (FORCE) to use color metadata from the input job settings. If you don't specify
	// values for those settings, the service defaults to using metadata from your
	// input. FALLBACK - Choose Fallback (FALLBACK) to use color metadata from the
	// source when it is present. If there's no color metadata in your input file,
	// the service defaults to using values you specify in the input settings.
	ColorSpaceUsage *string `locationName:"colorSpaceUsage" type:"string" enum:"ColorSpaceUsage"`

	// Use these settings to provide HDR 10 metadata that is missing or inaccurate
	// in your input video. Appropriate values vary depending on the input video
	// and must be provided by a color grader. The color grader generates these
	// values during the HDR 10 mastering process. The valid range for each of these
	// settings is 0 to 50,000. Each increment represents 0.00002 in CIE1931 color
	// coordinate. Related settings - When you specify these values, you must also
	// set Color space (ColorSpace) to HDR 10 (HDR10). To specify whether the
	// values you specify here take precedence over the values in the metadata of
	// your input file, set Color space usage (ColorSpaceUsage). To specify whether
	// color metadata is included in an output, set Color metadata (ColorMetadata).
	// For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.
	Hdr10Metadata *Hdr10Metadata `locationName:"hdr10Metadata" type:"structure"`

	// Use PID (Pid) to select specific video data from an input file. Specify this
	// value as an integer; the system automatically converts it to the hexadecimal
	// value. For example, 257 selects PID 0x101. A PID, or packet identifier, is
	// an identifier for a set of data in an MPEG-2 transport stream container.
	Pid *int64 `locationName:"pid" min:"1" type:"integer"`

	// Selects a specific program from within a multi-program transport stream.
	// Note that Quad 4K is not currently supported.
	ProgramNumber *int64 `locationName:"programNumber" type:"integer"`

	// Use Rotate (InputRotate) to specify how the service rotates your video. You
	// can choose automatic rotation or specify a rotation. You can specify a clockwise
	// rotation of 0, 90, 180, or 270 degrees. If your input video container is
	// .mov or .mp4 and your input has rotation metadata, you can choose Automatic
	// to have the service rotate your video according to the rotation specified
	// in the metadata. The rotation must be within one degree of 90, 180, or 270
	// degrees. If the rotation metadata specifies any other rotation, the service
	// will default to no rotation. By default, the service does no rotation, even
	// if your input video has rotation metadata. The service doesn't pass through
	// rotation metadata.
	Rotate *string `locationName:"rotate" type:"string" enum:"InputRotate"`

	// Use this setting when your input video codec is AVC-Intra. Ignore this setting
	// for all other inputs. If the sample range metadata in your input video is
	// accurate, or if you don't know about sample range, keep the default value,
	// Follow (FOLLOW), for this setting. When you do, the service automatically
	// detects your input sample range. If your input video has metadata indicating
	// the wrong sample range, specify the accurate sample range here. When you
	// do, MediaConvert ignores any sample range information in the input metadata.
	// Regardless of whether MediaConvert uses the input sample range or the sample
	// range that you specify, MediaConvert uses the sample range for transcoding
	// and also writes it to the output metadata.
	SampleRange *string `locationName:"sampleRange" type:"string" enum:"InputSampleRange"`
}
21354
// String returns the string representation of the value, generated by
// awsutil.Prettify.
func (s VideoSelector) String() string {
	return awsutil.Prettify(s)
}
21359
// GoString returns the string representation, delegating to String.
func (s VideoSelector) GoString() string {
	return s.String()
}
21364
21365// Validate inspects the fields of the type to determine if they are valid.
21366func (s *VideoSelector) Validate() error {
21367	invalidParams := request.ErrInvalidParams{Context: "VideoSelector"}
21368	if s.Pid != nil && *s.Pid < 1 {
21369		invalidParams.Add(request.NewErrParamMinValue("Pid", 1))
21370	}
21371	if s.ProgramNumber != nil && *s.ProgramNumber < -2.147483648e+09 {
21372		invalidParams.Add(request.NewErrParamMinValue("ProgramNumber", -2.147483648e+09))
21373	}
21374
21375	if invalidParams.Len() > 0 {
21376		return invalidParams
21377	}
21378	return nil
21379}
21380
21381// SetAlphaBehavior sets the AlphaBehavior field's value.
21382func (s *VideoSelector) SetAlphaBehavior(v string) *VideoSelector {
21383	s.AlphaBehavior = &v
21384	return s
21385}
21386
21387// SetColorSpace sets the ColorSpace field's value.
21388func (s *VideoSelector) SetColorSpace(v string) *VideoSelector {
21389	s.ColorSpace = &v
21390	return s
21391}
21392
21393// SetColorSpaceUsage sets the ColorSpaceUsage field's value.
21394func (s *VideoSelector) SetColorSpaceUsage(v string) *VideoSelector {
21395	s.ColorSpaceUsage = &v
21396	return s
21397}
21398
// SetHdr10Metadata sets the Hdr10Metadata field's value.
// The pointer v is stored as-is (no copy); it returns s to allow call chaining.
func (s *VideoSelector) SetHdr10Metadata(v *Hdr10Metadata) *VideoSelector {
	s.Hdr10Metadata = v
	return s
}
21404
21405// SetPid sets the Pid field's value.
21406func (s *VideoSelector) SetPid(v int64) *VideoSelector {
21407	s.Pid = &v
21408	return s
21409}
21410
21411// SetProgramNumber sets the ProgramNumber field's value.
21412func (s *VideoSelector) SetProgramNumber(v int64) *VideoSelector {
21413	s.ProgramNumber = &v
21414	return s
21415}
21416
21417// SetRotate sets the Rotate field's value.
21418func (s *VideoSelector) SetRotate(v string) *VideoSelector {
21419	s.Rotate = &v
21420	return s
21421}
21422
21423// SetSampleRange sets the SampleRange field's value.
21424func (s *VideoSelector) SetSampleRange(v string) *VideoSelector {
21425	s.SampleRange = &v
21426	return s
21427}
21428
// Required when you set Codec, under AudioDescriptions>CodecSettings, to the
// value Vorbis. The minimum values declared in the field tags are enforced
// by this type's Validate method.
type VorbisSettings struct {
	_ struct{} `type:"structure"`

	// Optional. Specify the number of channels in this output audio track. Choosing
	// Mono on the console gives you 1 output channel; choosing Stereo gives you
	// 2. In the API, valid values are 1 and 2. The default value is 2.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// Optional. Specify the audio sample rate in Hz. Valid values are 22050, 32000,
	// 44100, and 48000. The default value is 48000.
	SampleRate *int64 `locationName:"sampleRate" min:"22050" type:"integer"`

	// Optional. Specify the variable audio quality of this Vorbis output from -1
	// (lowest quality, ~45 kbit/s) to 10 (highest quality, ~500 kbit/s). The default
	// value is 4 (~128 kbit/s). Values 5 and 6 are approximately 160 and 192 kbit/s,
	// respectively.
	VbrQuality *int64 `locationName:"vbrQuality" type:"integer"`
}
21449
// String returns the string representation of the value, generated by
// awsutil.Prettify.
func (s VorbisSettings) String() string {
	return awsutil.Prettify(s)
}
21454
// GoString returns the string representation, delegating to String.
func (s VorbisSettings) GoString() string {
	return s.String()
}
21459
21460// Validate inspects the fields of the type to determine if they are valid.
21461func (s *VorbisSettings) Validate() error {
21462	invalidParams := request.ErrInvalidParams{Context: "VorbisSettings"}
21463	if s.Channels != nil && *s.Channels < 1 {
21464		invalidParams.Add(request.NewErrParamMinValue("Channels", 1))
21465	}
21466	if s.SampleRate != nil && *s.SampleRate < 22050 {
21467		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 22050))
21468	}
21469	if s.VbrQuality != nil && *s.VbrQuality < -1 {
21470		invalidParams.Add(request.NewErrParamMinValue("VbrQuality", -1))
21471	}
21472
21473	if invalidParams.Len() > 0 {
21474		return invalidParams
21475	}
21476	return nil
21477}
21478
21479// SetChannels sets the Channels field's value.
21480func (s *VorbisSettings) SetChannels(v int64) *VorbisSettings {
21481	s.Channels = &v
21482	return s
21483}
21484
21485// SetSampleRate sets the SampleRate field's value.
21486func (s *VorbisSettings) SetSampleRate(v int64) *VorbisSettings {
21487	s.SampleRate = &v
21488	return s
21489}
21490
21491// SetVbrQuality sets the VbrQuality field's value.
21492func (s *VorbisSettings) SetVbrQuality(v int64) *VorbisSettings {
21493	s.VbrQuality = &v
21494	return s
21495}
21496
// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value VP8. The minimum values declared in the field tags are enforced
// by this type's Validate method.
type Vp8Settings struct {
	_ struct{} `type:"structure"`

	// Target bitrate in bits/second. For example, enter five megabits per second
	// as 5000000.
	Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Vp8FramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Vp8FramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// GOP Length (keyframe interval) in frames. Must be greater than zero.
	GopSize *float64 `locationName:"gopSize" type:"double"`

	// Optional. Size of buffer (HRD buffer model) in bits. For example, enter five
	// megabits as 5000000.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Ignore this setting unless you set qualityTuningLevel to MULTI_PASS. Optional.
	// Specify the maximum bitrate in bits/second. For example, enter five megabits
	// per second as 5000000. The default behavior uses twice the target bitrate
	// as the maximum bitrate.
	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`

	// Optional. Specify how the service determines the pixel aspect ratio (PAR)
	// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
	// uses the PAR from your input video for your output. To specify a different
	// PAR in the console, choose any value other than Follow source. To specify
	// a different PAR by editing the JSON job specification, choose SPECIFIED.
	// When you choose SPECIFIED for this setting, you must also specify values
	// for the parNumerator and parDenominator settings.
	ParControl *string `locationName:"parControl" type:"string" enum:"Vp8ParControl"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parDenominator is 33.
	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parNumerator is 40.
	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, multi-pass encoding.
	QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Vp8QualityTuningLevel"`

	// With the VP8 codec, you can use only the variable bitrate (VBR) rate control
	// mode.
	RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Vp8RateControlMode"`
}
21594
// String returns the string representation of the value, generated by
// awsutil.Prettify.
func (s Vp8Settings) String() string {
	return awsutil.Prettify(s)
}
21599
// GoString returns the string representation, delegating to String.
func (s Vp8Settings) GoString() string {
	return s.String()
}
21604
21605// Validate inspects the fields of the type to determine if they are valid.
21606func (s *Vp8Settings) Validate() error {
21607	invalidParams := request.ErrInvalidParams{Context: "Vp8Settings"}
21608	if s.Bitrate != nil && *s.Bitrate < 1000 {
21609		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 1000))
21610	}
21611	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
21612		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
21613	}
21614	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
21615		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1))
21616	}
21617	if s.MaxBitrate != nil && *s.MaxBitrate < 1000 {
21618		invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000))
21619	}
21620	if s.ParDenominator != nil && *s.ParDenominator < 1 {
21621		invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1))
21622	}
21623	if s.ParNumerator != nil && *s.ParNumerator < 1 {
21624		invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1))
21625	}
21626
21627	if invalidParams.Len() > 0 {
21628		return invalidParams
21629	}
21630	return nil
21631}
21632
21633// SetBitrate sets the Bitrate field's value.
21634func (s *Vp8Settings) SetBitrate(v int64) *Vp8Settings {
21635	s.Bitrate = &v
21636	return s
21637}
21638
21639// SetFramerateControl sets the FramerateControl field's value.
21640func (s *Vp8Settings) SetFramerateControl(v string) *Vp8Settings {
21641	s.FramerateControl = &v
21642	return s
21643}
21644
21645// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value.
21646func (s *Vp8Settings) SetFramerateConversionAlgorithm(v string) *Vp8Settings {
21647	s.FramerateConversionAlgorithm = &v
21648	return s
21649}
21650
21651// SetFramerateDenominator sets the FramerateDenominator field's value.
21652func (s *Vp8Settings) SetFramerateDenominator(v int64) *Vp8Settings {
21653	s.FramerateDenominator = &v
21654	return s
21655}
21656
21657// SetFramerateNumerator sets the FramerateNumerator field's value.
21658func (s *Vp8Settings) SetFramerateNumerator(v int64) *Vp8Settings {
21659	s.FramerateNumerator = &v
21660	return s
21661}
21662
21663// SetGopSize sets the GopSize field's value.
21664func (s *Vp8Settings) SetGopSize(v float64) *Vp8Settings {
21665	s.GopSize = &v
21666	return s
21667}
21668
21669// SetHrdBufferSize sets the HrdBufferSize field's value.
21670func (s *Vp8Settings) SetHrdBufferSize(v int64) *Vp8Settings {
21671	s.HrdBufferSize = &v
21672	return s
21673}
21674
21675// SetMaxBitrate sets the MaxBitrate field's value.
21676func (s *Vp8Settings) SetMaxBitrate(v int64) *Vp8Settings {
21677	s.MaxBitrate = &v
21678	return s
21679}
21680
21681// SetParControl sets the ParControl field's value.
21682func (s *Vp8Settings) SetParControl(v string) *Vp8Settings {
21683	s.ParControl = &v
21684	return s
21685}
21686
21687// SetParDenominator sets the ParDenominator field's value.
21688func (s *Vp8Settings) SetParDenominator(v int64) *Vp8Settings {
21689	s.ParDenominator = &v
21690	return s
21691}
21692
21693// SetParNumerator sets the ParNumerator field's value.
21694func (s *Vp8Settings) SetParNumerator(v int64) *Vp8Settings {
21695	s.ParNumerator = &v
21696	return s
21697}
21698
21699// SetQualityTuningLevel sets the QualityTuningLevel field's value.
21700func (s *Vp8Settings) SetQualityTuningLevel(v string) *Vp8Settings {
21701	s.QualityTuningLevel = &v
21702	return s
21703}
21704
21705// SetRateControlMode sets the RateControlMode field's value.
21706func (s *Vp8Settings) SetRateControlMode(v string) *Vp8Settings {
21707	s.RateControlMode = &v
21708	return s
21709}
21710
// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value VP9. Minimum values for numeric fields are declared in the struct
// tags.
type Vp9Settings struct {
	_ struct{} `type:"structure"`

	// Target bitrate in bits/second. For example, enter five megabits per second
	// as 5000000.
	Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Vp9FramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Vp9FramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// GOP Length (keyframe interval) in frames. Must be greater than zero.
	GopSize *float64 `locationName:"gopSize" type:"double"`

	// Size of buffer (HRD buffer model) in bits. For example, enter five megabits
	// as 5000000.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Ignore this setting unless you set qualityTuningLevel to MULTI_PASS. Optional.
	// Specify the maximum bitrate in bits/second. For example, enter five megabits
	// per second as 5000000. The default behavior uses twice the target bitrate
	// as the maximum bitrate.
	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`

	// Optional. Specify how the service determines the pixel aspect ratio for this
	// output. The default behavior is to use the same pixel aspect ratio as your
	// input video.
	ParControl *string `locationName:"parControl" type:"string" enum:"Vp9ParControl"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parDenominator is 33.
	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parNumerator is 40.
	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, multi-pass encoding.
	QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Vp9QualityTuningLevel"`

	// With the VP9 codec, you can use only the variable bitrate (VBR) rate control
	// mode.
	RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Vp9RateControlMode"`
}
21804
21805// String returns the string representation
21806func (s Vp9Settings) String() string {
21807	return awsutil.Prettify(s)
21808}
21809
21810// GoString returns the string representation
21811func (s Vp9Settings) GoString() string {
21812	return s.String()
21813}
21814
21815// Validate inspects the fields of the type to determine if they are valid.
21816func (s *Vp9Settings) Validate() error {
21817	invalidParams := request.ErrInvalidParams{Context: "Vp9Settings"}
21818	if s.Bitrate != nil && *s.Bitrate < 1000 {
21819		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 1000))
21820	}
21821	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
21822		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
21823	}
21824	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
21825		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1))
21826	}
21827	if s.MaxBitrate != nil && *s.MaxBitrate < 1000 {
21828		invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000))
21829	}
21830	if s.ParDenominator != nil && *s.ParDenominator < 1 {
21831		invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1))
21832	}
21833	if s.ParNumerator != nil && *s.ParNumerator < 1 {
21834		invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1))
21835	}
21836
21837	if invalidParams.Len() > 0 {
21838		return invalidParams
21839	}
21840	return nil
21841}
21842
21843// SetBitrate sets the Bitrate field's value.
21844func (s *Vp9Settings) SetBitrate(v int64) *Vp9Settings {
21845	s.Bitrate = &v
21846	return s
21847}
21848
21849// SetFramerateControl sets the FramerateControl field's value.
21850func (s *Vp9Settings) SetFramerateControl(v string) *Vp9Settings {
21851	s.FramerateControl = &v
21852	return s
21853}
21854
21855// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value.
21856func (s *Vp9Settings) SetFramerateConversionAlgorithm(v string) *Vp9Settings {
21857	s.FramerateConversionAlgorithm = &v
21858	return s
21859}
21860
21861// SetFramerateDenominator sets the FramerateDenominator field's value.
21862func (s *Vp9Settings) SetFramerateDenominator(v int64) *Vp9Settings {
21863	s.FramerateDenominator = &v
21864	return s
21865}
21866
21867// SetFramerateNumerator sets the FramerateNumerator field's value.
21868func (s *Vp9Settings) SetFramerateNumerator(v int64) *Vp9Settings {
21869	s.FramerateNumerator = &v
21870	return s
21871}
21872
21873// SetGopSize sets the GopSize field's value.
21874func (s *Vp9Settings) SetGopSize(v float64) *Vp9Settings {
21875	s.GopSize = &v
21876	return s
21877}
21878
21879// SetHrdBufferSize sets the HrdBufferSize field's value.
21880func (s *Vp9Settings) SetHrdBufferSize(v int64) *Vp9Settings {
21881	s.HrdBufferSize = &v
21882	return s
21883}
21884
21885// SetMaxBitrate sets the MaxBitrate field's value.
21886func (s *Vp9Settings) SetMaxBitrate(v int64) *Vp9Settings {
21887	s.MaxBitrate = &v
21888	return s
21889}
21890
21891// SetParControl sets the ParControl field's value.
21892func (s *Vp9Settings) SetParControl(v string) *Vp9Settings {
21893	s.ParControl = &v
21894	return s
21895}
21896
21897// SetParDenominator sets the ParDenominator field's value.
21898func (s *Vp9Settings) SetParDenominator(v int64) *Vp9Settings {
21899	s.ParDenominator = &v
21900	return s
21901}
21902
21903// SetParNumerator sets the ParNumerator field's value.
21904func (s *Vp9Settings) SetParNumerator(v int64) *Vp9Settings {
21905	s.ParNumerator = &v
21906	return s
21907}
21908
21909// SetQualityTuningLevel sets the QualityTuningLevel field's value.
21910func (s *Vp9Settings) SetQualityTuningLevel(v string) *Vp9Settings {
21911	s.QualityTuningLevel = &v
21912	return s
21913}
21914
21915// SetRateControlMode sets the RateControlMode field's value.
21916func (s *Vp9Settings) SetRateControlMode(v string) *Vp9Settings {
21917	s.RateControlMode = &v
21918	return s
21919}
21920
// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value WAV.
type WavSettings struct {
	// Sentinel field; the type:"structure" tag identifies this shape to the
	// SDK's protocol marshalers.
	_ struct{} `type:"structure"`

	// Specify Bit depth (BitDepth), in bits per sample, to choose the encoding
	// quality for this audio track. Minimum accepted value is 16 (enforced by
	// Validate).
	BitDepth *int64 `locationName:"bitDepth" min:"16" type:"integer"`

	// Specify the number of channels in this output audio track. Valid values are
	// 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// The service defaults to using RIFF for WAV outputs. If your output audio
	// is likely to exceed 4 GB in file size, or if you otherwise need the extended
	// support of the RF64 format, set your output WAV file format to RF64.
	Format *string `locationName:"format" type:"string" enum:"WavFormat"`

	// Sample rate in Hz. Minimum accepted value is 8000 (enforced by Validate).
	SampleRate *int64 `locationName:"sampleRate" min:"8000" type:"integer"`
}
21942
21943// String returns the string representation
21944func (s WavSettings) String() string {
21945	return awsutil.Prettify(s)
21946}
21947
21948// GoString returns the string representation
21949func (s WavSettings) GoString() string {
21950	return s.String()
21951}
21952
21953// Validate inspects the fields of the type to determine if they are valid.
21954func (s *WavSettings) Validate() error {
21955	invalidParams := request.ErrInvalidParams{Context: "WavSettings"}
21956	if s.BitDepth != nil && *s.BitDepth < 16 {
21957		invalidParams.Add(request.NewErrParamMinValue("BitDepth", 16))
21958	}
21959	if s.Channels != nil && *s.Channels < 1 {
21960		invalidParams.Add(request.NewErrParamMinValue("Channels", 1))
21961	}
21962	if s.SampleRate != nil && *s.SampleRate < 8000 {
21963		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 8000))
21964	}
21965
21966	if invalidParams.Len() > 0 {
21967		return invalidParams
21968	}
21969	return nil
21970}
21971
21972// SetBitDepth sets the BitDepth field's value.
21973func (s *WavSettings) SetBitDepth(v int64) *WavSettings {
21974	s.BitDepth = &v
21975	return s
21976}
21977
21978// SetChannels sets the Channels field's value.
21979func (s *WavSettings) SetChannels(v int64) *WavSettings {
21980	s.Channels = &v
21981	return s
21982}
21983
21984// SetFormat sets the Format field's value.
21985func (s *WavSettings) SetFormat(v string) *WavSettings {
21986	s.Format = &v
21987	return s
21988}
21989
21990// SetSampleRate sets the SampleRate field's value.
21991func (s *WavSettings) SetSampleRate(v int64) *WavSettings {
21992	s.SampleRate = &v
21993	return s
21994}
21995
// WEBVTT Destination Settings. Controls how WebVTT caption outputs are styled.
type WebvttDestinationSettings struct {
	// Sentinel field; the type:"structure" tag identifies this shape to the
	// SDK's protocol marshalers.
	_ struct{} `type:"structure"`

	// Choose Enabled (ENABLED) to have MediaConvert use the font style, color,
	// and position information from the captions source in the input. Keep the
	// default value, Disabled (DISABLED), for simplified output captions.
	StylePassthrough *string `locationName:"stylePassthrough" type:"string" enum:"WebvttStylePassthrough"`
}
22005
22006// String returns the string representation
22007func (s WebvttDestinationSettings) String() string {
22008	return awsutil.Prettify(s)
22009}
22010
22011// GoString returns the string representation
22012func (s WebvttDestinationSettings) GoString() string {
22013	return s.String()
22014}
22015
22016// SetStylePassthrough sets the StylePassthrough field's value.
22017func (s *WebvttDestinationSettings) SetStylePassthrough(v string) *WebvttDestinationSettings {
22018	s.StylePassthrough = &v
22019	return s
22020}
22021
// Settings specific to WebVTT sources in HLS alternative rendition group. Specify
// the properties (renditionGroupId, renditionName or renditionLanguageCode)
// to identify the unique subtitle track among the alternative rendition groups
// present in the HLS manifest. If no unique track is found, or multiple tracks
// match the specified properties, the job fails. If there is only one subtitle
// track in the rendition group, the settings can be left empty and the default
// subtitle track will be chosen. If your caption source is a sidecar file,
// use FileSourceSettings instead of WebvttHlsSourceSettings.
type WebvttHlsSourceSettings struct {
	// Sentinel field; the type:"structure" tag identifies this shape to the
	// SDK's protocol marshalers.
	_ struct{} `type:"structure"`

	// Optional. Specify the alternative rendition group ID.
	RenditionGroupId *string `locationName:"renditionGroupId" type:"string"`

	// Optional. Specify the ISO 639-2 or ISO 639-3 code in the language property.
	RenditionLanguageCode *string `locationName:"renditionLanguageCode" type:"string" enum:"LanguageCode"`

	// Optional. Specify the media (rendition) name.
	RenditionName *string `locationName:"renditionName" type:"string"`
}
22042
22043// String returns the string representation
22044func (s WebvttHlsSourceSettings) String() string {
22045	return awsutil.Prettify(s)
22046}
22047
22048// GoString returns the string representation
22049func (s WebvttHlsSourceSettings) GoString() string {
22050	return s.String()
22051}
22052
22053// SetRenditionGroupId sets the RenditionGroupId field's value.
22054func (s *WebvttHlsSourceSettings) SetRenditionGroupId(v string) *WebvttHlsSourceSettings {
22055	s.RenditionGroupId = &v
22056	return s
22057}
22058
22059// SetRenditionLanguageCode sets the RenditionLanguageCode field's value.
22060func (s *WebvttHlsSourceSettings) SetRenditionLanguageCode(v string) *WebvttHlsSourceSettings {
22061	s.RenditionLanguageCode = &v
22062	return s
22063}
22064
22065// SetRenditionName sets the RenditionName field's value.
22066func (s *WebvttHlsSourceSettings) SetRenditionName(v string) *WebvttHlsSourceSettings {
22067	s.RenditionName = &v
22068	return s
22069}
22070
// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
// to the value XAVC_4K_INTRA_CBG.
type Xavc4kIntraCbgProfileSettings struct {
	// Sentinel field; the type:"structure" tag identifies this shape to the
	// SDK's protocol marshalers.
	_ struct{} `type:"structure"`

	// Specify the XAVC Intra 4k (CBG) Class to set the bitrate of your output.
	// Outputs of the same class have similar image quality over the operating points
	// that are valid for that class.
	XavcClass *string `locationName:"xavcClass" type:"string" enum:"Xavc4kIntraCbgProfileClass"`
}
22081
22082// String returns the string representation
22083func (s Xavc4kIntraCbgProfileSettings) String() string {
22084	return awsutil.Prettify(s)
22085}
22086
22087// GoString returns the string representation
22088func (s Xavc4kIntraCbgProfileSettings) GoString() string {
22089	return s.String()
22090}
22091
22092// SetXavcClass sets the XavcClass field's value.
22093func (s *Xavc4kIntraCbgProfileSettings) SetXavcClass(v string) *Xavc4kIntraCbgProfileSettings {
22094	s.XavcClass = &v
22095	return s
22096}
22097
// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
// to the value XAVC_4K_INTRA_VBR.
type Xavc4kIntraVbrProfileSettings struct {
	// Sentinel field; the type:"structure" tag identifies this shape to the
	// SDK's protocol marshalers.
	_ struct{} `type:"structure"`

	// Specify the XAVC Intra 4k (VBR) Class to set the bitrate of your output.
	// Outputs of the same class have similar image quality over the operating points
	// that are valid for that class.
	XavcClass *string `locationName:"xavcClass" type:"string" enum:"Xavc4kIntraVbrProfileClass"`
}
22108
22109// String returns the string representation
22110func (s Xavc4kIntraVbrProfileSettings) String() string {
22111	return awsutil.Prettify(s)
22112}
22113
22114// GoString returns the string representation
22115func (s Xavc4kIntraVbrProfileSettings) GoString() string {
22116	return s.String()
22117}
22118
22119// SetXavcClass sets the XavcClass field's value.
22120func (s *Xavc4kIntraVbrProfileSettings) SetXavcClass(v string) *Xavc4kIntraVbrProfileSettings {
22121	s.XavcClass = &v
22122	return s
22123}
22124
// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
// to the value XAVC_4K.
type Xavc4kProfileSettings struct {
	// Sentinel field; the type:"structure" tag identifies this shape to the
	// SDK's protocol marshalers.
	_ struct{} `type:"structure"`

	// Specify the XAVC 4k (Long GOP) Bitrate Class to set the bitrate of your output.
	// Outputs of the same class have similar image quality over the operating points
	// that are valid for that class.
	BitrateClass *string `locationName:"bitrateClass" type:"string" enum:"Xavc4kProfileBitrateClass"`

	// Specify the codec profile for this output. Choose High, 8-bit, 4:2:0 (HIGH)
	// or High, 10-bit, 4:2:2 (HIGH_422). These profiles are specified in ITU-T
	// H.264.
	CodecProfile *string `locationName:"codecProfile" type:"string" enum:"Xavc4kProfileCodecProfile"`

	// The best way to set up adaptive quantization is to keep the default value,
	// Auto (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization).
	// When you do so, MediaConvert automatically applies the best types of quantization
	// for your video content. Include this setting in your JSON job specification
	// only when you choose to change the default value for Adaptive quantization.
	// Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears
	// as a visual flicker that can arise when the encoder saves bits by copying
	// some macroblocks many times from frame to frame, and then refreshes them
	// at the I-frame. When you enable this setting, the encoder updates these macroblocks
	// slightly more often to smooth out the flicker. This setting is disabled by
	// default. Related setting: In addition to enabling this setting, you must
	// also set Adaptive quantization (adaptiveQuantization) to a value other than
	// Off (OFF) or Auto (AUTO). Use Adaptive quantization to adjust the degree
	// of smoothing that Flicker adaptive quantization provides.
	FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"XavcFlickerAdaptiveQuantization"`

	// Specify whether the encoder uses B-frames as reference frames for other pictures
	// in the same GOP. Choose Allow (ENABLED) to allow the encoder to use B-frames
	// as reference frames. Choose Don't allow (DISABLED) to prevent the encoder
	// from using B-frames as reference frames.
	GopBReference *string `locationName:"gopBReference" type:"string" enum:"XavcGopBReference"`

	// Frequency of closed GOPs. In streaming applications, it is recommended that
	// this be set to 1 so a decoder joining mid-stream will receive an IDR frame
	// as quickly as possible. Setting this value to 0 will break output segmenting.
	GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"`

	// Specify the size of the buffer that MediaConvert uses in the HRD buffer model
	// for this output. Specify this value in bits; for example, enter five megabits
	// as 5000000. When you don't set this value, or you set it to zero, MediaConvert
	// calculates the default by doubling the bitrate of this output point.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, single-pass encoding.
	QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Xavc4kProfileQualityTuningLevel"`

	// Number of slices per picture. Must be less than or equal to the number of
	// macroblock rows for progressive pictures, and less than or equal to half
	// the number of macroblock rows for interlaced pictures. Minimum accepted
	// value is 8 (enforced by Validate).
	Slices *int64 `locationName:"slices" min:"8" type:"integer"`
}
22183
22184// String returns the string representation
22185func (s Xavc4kProfileSettings) String() string {
22186	return awsutil.Prettify(s)
22187}
22188
22189// GoString returns the string representation
22190func (s Xavc4kProfileSettings) GoString() string {
22191	return s.String()
22192}
22193
22194// Validate inspects the fields of the type to determine if they are valid.
22195func (s *Xavc4kProfileSettings) Validate() error {
22196	invalidParams := request.ErrInvalidParams{Context: "Xavc4kProfileSettings"}
22197	if s.Slices != nil && *s.Slices < 8 {
22198		invalidParams.Add(request.NewErrParamMinValue("Slices", 8))
22199	}
22200
22201	if invalidParams.Len() > 0 {
22202		return invalidParams
22203	}
22204	return nil
22205}
22206
22207// SetBitrateClass sets the BitrateClass field's value.
22208func (s *Xavc4kProfileSettings) SetBitrateClass(v string) *Xavc4kProfileSettings {
22209	s.BitrateClass = &v
22210	return s
22211}
22212
22213// SetCodecProfile sets the CodecProfile field's value.
22214func (s *Xavc4kProfileSettings) SetCodecProfile(v string) *Xavc4kProfileSettings {
22215	s.CodecProfile = &v
22216	return s
22217}
22218
22219// SetFlickerAdaptiveQuantization sets the FlickerAdaptiveQuantization field's value.
22220func (s *Xavc4kProfileSettings) SetFlickerAdaptiveQuantization(v string) *Xavc4kProfileSettings {
22221	s.FlickerAdaptiveQuantization = &v
22222	return s
22223}
22224
22225// SetGopBReference sets the GopBReference field's value.
22226func (s *Xavc4kProfileSettings) SetGopBReference(v string) *Xavc4kProfileSettings {
22227	s.GopBReference = &v
22228	return s
22229}
22230
22231// SetGopClosedCadence sets the GopClosedCadence field's value.
22232func (s *Xavc4kProfileSettings) SetGopClosedCadence(v int64) *Xavc4kProfileSettings {
22233	s.GopClosedCadence = &v
22234	return s
22235}
22236
22237// SetHrdBufferSize sets the HrdBufferSize field's value.
22238func (s *Xavc4kProfileSettings) SetHrdBufferSize(v int64) *Xavc4kProfileSettings {
22239	s.HrdBufferSize = &v
22240	return s
22241}
22242
22243// SetQualityTuningLevel sets the QualityTuningLevel field's value.
22244func (s *Xavc4kProfileSettings) SetQualityTuningLevel(v string) *Xavc4kProfileSettings {
22245	s.QualityTuningLevel = &v
22246	return s
22247}
22248
22249// SetSlices sets the Slices field's value.
22250func (s *Xavc4kProfileSettings) SetSlices(v int64) *Xavc4kProfileSettings {
22251	s.Slices = &v
22252	return s
22253}
22254
// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
// to the value XAVC_HD_INTRA_CBG.
type XavcHdIntraCbgProfileSettings struct {
	// Sentinel field; the type:"structure" tag identifies this shape to the
	// SDK's protocol marshalers.
	_ struct{} `type:"structure"`

	// Specify the XAVC Intra HD (CBG) Class to set the bitrate of your output.
	// Outputs of the same class have similar image quality over the operating points
	// that are valid for that class.
	XavcClass *string `locationName:"xavcClass" type:"string" enum:"XavcHdIntraCbgProfileClass"`
}
22265
22266// String returns the string representation
22267func (s XavcHdIntraCbgProfileSettings) String() string {
22268	return awsutil.Prettify(s)
22269}
22270
22271// GoString returns the string representation
22272func (s XavcHdIntraCbgProfileSettings) GoString() string {
22273	return s.String()
22274}
22275
22276// SetXavcClass sets the XavcClass field's value.
22277func (s *XavcHdIntraCbgProfileSettings) SetXavcClass(v string) *XavcHdIntraCbgProfileSettings {
22278	s.XavcClass = &v
22279	return s
22280}
22281
// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
// to the value XAVC_HD.
type XavcHdProfileSettings struct {
	// Sentinel field; the type:"structure" tag identifies this shape to the
	// SDK's protocol marshalers.
	_ struct{} `type:"structure"`

	// Specify the XAVC HD (Long GOP) Bitrate Class to set the bitrate of your output.
	// Outputs of the same class have similar image quality over the operating points
	// that are valid for that class.
	BitrateClass *string `locationName:"bitrateClass" type:"string" enum:"XavcHdProfileBitrateClass"`

	// The best way to set up adaptive quantization is to keep the default value,
	// Auto (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization).
	// When you do so, MediaConvert automatically applies the best types of quantization
	// for your video content. Include this setting in your JSON job specification
	// only when you choose to change the default value for Adaptive quantization.
	// Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears
	// as a visual flicker that can arise when the encoder saves bits by copying
	// some macroblocks many times from frame to frame, and then refreshes them
	// at the I-frame. When you enable this setting, the encoder updates these macroblocks
	// slightly more often to smooth out the flicker. This setting is disabled by
	// default. Related setting: In addition to enabling this setting, you must
	// also set Adaptive quantization (adaptiveQuantization) to a value other than
	// Off (OFF) or Auto (AUTO). Use Adaptive quantization to adjust the degree
	// of smoothing that Flicker adaptive quantization provides.
	FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"XavcFlickerAdaptiveQuantization"`

	// Specify whether the encoder uses B-frames as reference frames for other pictures
	// in the same GOP. Choose Allow (ENABLED) to allow the encoder to use B-frames
	// as reference frames. Choose Don't allow (DISABLED) to prevent the encoder
	// from using B-frames as reference frames.
	GopBReference *string `locationName:"gopBReference" type:"string" enum:"XavcGopBReference"`

	// Frequency of closed GOPs. In streaming applications, it is recommended that
	// this be set to 1 so a decoder joining mid-stream will receive an IDR frame
	// as quickly as possible. Setting this value to 0 will break output segmenting.
	GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"`

	// Specify the size of the buffer that MediaConvert uses in the HRD buffer model
	// for this output. Specify this value in bits; for example, enter five megabits
	// as 5000000. When you don't set this value, or you set it to zero, MediaConvert
	// calculates the default by doubling the bitrate of this output point.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Choose the scan line type for the output. Keep the default value, Progressive
	// (PROGRESSIVE) to create a progressive output, regardless of the scan type
	// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
	// to create an output that's interlaced with the same field polarity throughout.
	// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
	// to produce outputs with the same field polarity as the source. For jobs that
	// have multiple inputs, the output field polarity might change over the course
	// of the output. Follow behavior depends on the input scan type. If the source
	// is interlaced, the output will be interlaced with the same polarity as the
	// source. If the source is progressive, the output will be interlaced with
	// top field or bottom field first, depending on which of the Follow options
	// you choose.
	InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"XavcInterlaceMode"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, single-pass encoding.
	QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"XavcHdProfileQualityTuningLevel"`

	// Number of slices per picture. Must be less than or equal to the number of
	// macroblock rows for progressive pictures, and less than or equal to half
	// the number of macroblock rows for interlaced pictures. Minimum accepted
	// value is 4 (enforced by Validate).
	Slices *int64 `locationName:"slices" min:"4" type:"integer"`

	// Ignore this setting unless you set Frame rate (framerateNumerator divided
	// by framerateDenominator) to 29.970. If your input framerate is 23.976, choose
	// Hard (HARD). Otherwise, keep the default value None (NONE). For more information,
	// see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html.
	Telecine *string `locationName:"telecine" type:"string" enum:"XavcHdProfileTelecine"`
}
22355
22356// String returns the string representation
22357func (s XavcHdProfileSettings) String() string {
22358	return awsutil.Prettify(s)
22359}
22360
22361// GoString returns the string representation
22362func (s XavcHdProfileSettings) GoString() string {
22363	return s.String()
22364}
22365
22366// Validate inspects the fields of the type to determine if they are valid.
22367func (s *XavcHdProfileSettings) Validate() error {
22368	invalidParams := request.ErrInvalidParams{Context: "XavcHdProfileSettings"}
22369	if s.Slices != nil && *s.Slices < 4 {
22370		invalidParams.Add(request.NewErrParamMinValue("Slices", 4))
22371	}
22372
22373	if invalidParams.Len() > 0 {
22374		return invalidParams
22375	}
22376	return nil
22377}
22378
22379// SetBitrateClass sets the BitrateClass field's value.
22380func (s *XavcHdProfileSettings) SetBitrateClass(v string) *XavcHdProfileSettings {
22381	s.BitrateClass = &v
22382	return s
22383}
22384
22385// SetFlickerAdaptiveQuantization sets the FlickerAdaptiveQuantization field's value.
22386func (s *XavcHdProfileSettings) SetFlickerAdaptiveQuantization(v string) *XavcHdProfileSettings {
22387	s.FlickerAdaptiveQuantization = &v
22388	return s
22389}
22390
22391// SetGopBReference sets the GopBReference field's value.
22392func (s *XavcHdProfileSettings) SetGopBReference(v string) *XavcHdProfileSettings {
22393	s.GopBReference = &v
22394	return s
22395}
22396
22397// SetGopClosedCadence sets the GopClosedCadence field's value.
22398func (s *XavcHdProfileSettings) SetGopClosedCadence(v int64) *XavcHdProfileSettings {
22399	s.GopClosedCadence = &v
22400	return s
22401}
22402
22403// SetHrdBufferSize sets the HrdBufferSize field's value.
22404func (s *XavcHdProfileSettings) SetHrdBufferSize(v int64) *XavcHdProfileSettings {
22405	s.HrdBufferSize = &v
22406	return s
22407}
22408
22409// SetInterlaceMode sets the InterlaceMode field's value.
22410func (s *XavcHdProfileSettings) SetInterlaceMode(v string) *XavcHdProfileSettings {
22411	s.InterlaceMode = &v
22412	return s
22413}
22414
22415// SetQualityTuningLevel sets the QualityTuningLevel field's value.
22416func (s *XavcHdProfileSettings) SetQualityTuningLevel(v string) *XavcHdProfileSettings {
22417	s.QualityTuningLevel = &v
22418	return s
22419}
22420
22421// SetSlices sets the Slices field's value.
22422func (s *XavcHdProfileSettings) SetSlices(v int64) *XavcHdProfileSettings {
22423	s.Slices = &v
22424	return s
22425}
22426
22427// SetTelecine sets the Telecine field's value.
22428func (s *XavcHdProfileSettings) SetTelecine(v string) *XavcHdProfileSettings {
22429	s.Telecine = &v
22430	return s
22431}
22432
22433// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
22434// the value XAVC.
22435type XavcSettings struct {
22436	_ struct{} `type:"structure"`
22437
22438	// Keep the default value, Auto (AUTO), for this setting to have MediaConvert
22439	// automatically apply the best types of quantization for your video content.
22440	// When you want to apply your quantization settings manually, you must set
22441	// Adaptive quantization (adaptiveQuantization) to a value other than Auto (AUTO).
22442	// Use this setting to specify the strength of any adaptive quantization filters
22443	// that you enable. If you don't want MediaConvert to do any adaptive quantization
22444	// in this transcode, set Adaptive quantization to Off (OFF). Related settings:
22445	// The value that you choose here applies to the following settings: Flicker
22446	// adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization
22447	// (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization).
22448	AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"XavcAdaptiveQuantization"`
22449
22450	// Optional. Choose a specific entropy encoding mode only when you want to override
22451	// XAVC recommendations. If you choose the value auto, MediaConvert uses the
22452	// mode that the XAVC file format specifies given this output's operating point.
22453	EntropyEncoding *string `locationName:"entropyEncoding" type:"string" enum:"XavcEntropyEncoding"`
22454
22455	// If you are using the console, use the Frame rate setting to specify the frame
22456	// rate for this output. If you want to keep the same frame rate as the input
22457	// video, choose Follow source. If you want to do frame rate conversion, choose
22458	// a frame rate from the dropdown list. The framerates shown in the dropdown
22459	// list are decimal approximations of fractions. If you are creating your transcoding
22460	// job specification as a JSON file without the console, use FramerateControl
22461	// to specify which value the service uses for the frame rate for this output.
22462	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
22463	// from the input. Choose SPECIFIED if you want the service to use the frame
22464	// rate that you specify in the settings FramerateNumerator and FramerateDenominator.
22465	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"XavcFramerateControl"`
22466
22467	// Choose the method that you want MediaConvert to use when increasing or decreasing
22468	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
22469	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
22470	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
22471	// smooth picture, but might introduce undesirable video artifacts. For complex
22472	// frame rate conversions, especially if your source video has already been
22473	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
22474	// motion-compensated interpolation. FrameFormer chooses the best conversion
22475	// method frame by frame. Note that using FrameFormer increases the transcoding
22476	// time and incurs a significant add-on cost.
22477	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"XavcFramerateConversionAlgorithm"`
22478
22479	// When you use the API for transcode jobs that use frame rate conversion, specify
22480	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
22481	// FramerateDenominator to specify the denominator of this fraction. In this
22482	// example, use 1001 for the value of FramerateDenominator. When you use the
22483	// console for transcode jobs that use frame rate conversion, provide the value
22484	// as a decimal number for Frame rate. In this example, specify 23.976.
22485	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`
22486
22487	// When you use the API for transcode jobs that use frame rate conversion, specify
22488	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
22489	// FramerateNumerator to specify the numerator of this fraction. In this example,
22490	// use 24000 for the value of FramerateNumerator. When you use the console for
22491	// transcode jobs that use frame rate conversion, provide the value as a decimal
22492	// number for Framerate. In this example, specify 23.976.
22493	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"24" type:"integer"`
22494
22495	// Specify the XAVC profile for this output. For more information, see the Sony
22496	// documentation at https://www.xavc-info.org/. Note that MediaConvert doesn't
22497	// support the interlaced video XAVC operating points for XAVC_HD_INTRA_CBG.
22498	// To create an interlaced XAVC output, choose the profile XAVC_HD.
22499	Profile *string `locationName:"profile" type:"string" enum:"XavcProfile"`
22500
22501	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
22502	// second (fps). Enable slow PAL to create a 25 fps output by relabeling the
22503	// video frames and resampling your audio. Note that enabling this setting will
22504	// slightly reduce the duration of your video. Related settings: You must also
22505	// set Frame rate to 25. In your JSON job specification, set (framerateControl)
22506	// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
22507	// 1.
22508	SlowPal *string `locationName:"slowPal" type:"string" enum:"XavcSlowPal"`
22509
22510	// Ignore this setting unless your downstream workflow requires that you specify
22511	// it explicitly. Otherwise, we recommend that you adjust the softness of your
22512	// output by using a lower value for the setting Sharpness (sharpness) or by
22513	// enabling a noise reducer filter (noiseReducerFilter). The Softness (softness)
22514	// setting specifies the quantization matrices that the encoder uses. Keep the
22515	// default value, 0, for flat quantization. Choose the value 1 or 16 to use
22516	// the default JVT softening quantization matricies from the H.264 specification.
22517	// Choose a value from 17 to 128 to use planar interpolation. Increasing values
22518	// from 17 to 128 result in increasing reduction of high-frequency data. The
22519	// value 128 results in the softest video.
22520	Softness *int64 `locationName:"softness" type:"integer"`
22521
22522	// The best way to set up adaptive quantization is to keep the default value,
22523	// Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization).
22524	// When you do so, MediaConvert automatically applies the best types of quantization
22525	// for your video content. Include this setting in your JSON job specification
22526	// only when you choose to change the default value for Adaptive quantization.
22527	// For this setting, keep the default value, Enabled (ENABLED), to adjust quantization
22528	// within each frame based on spatial variation of content complexity. When
22529	// you enable this feature, the encoder uses fewer bits on areas that can sustain
22530	// more distortion with no noticeable visual degradation and uses more bits
22531	// on areas where any small distortion will be noticeable. For example, complex
22532	// textured blocks are encoded with fewer bits and smooth textured blocks are
22533	// encoded with more bits. Enabling this feature will almost always improve
22534	// your video quality. Note, though, that this feature doesn't take into account
22535	// where the viewer's attention is likely to be. If viewers are likely to be
22536	// focusing their attention on a part of the screen with a lot of complex texture,
22537	// you might choose to disable this feature. Related setting: When you enable
22538	// spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization)
22539	// depending on your content. For homogeneous content, such as cartoons and
22540	// video games, set it to Low. For content with a wider variety of textures,
22541	// set it to High or Higher.
22542	SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"XavcSpatialAdaptiveQuantization"`
22543
22544	// The best way to set up adaptive quantization is to keep the default value,
22545	// Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization).
22546	// When you do so, MediaConvert automatically applies the best types of quantization
22547	// for your video content. Include this setting in your JSON job specification
22548	// only when you choose to change the default value for Adaptive quantization.
22549	// For this setting, keep the default value, Enabled (ENABLED), to adjust quantization
22550	// within each frame based on temporal variation of content complexity. When
22551	// you enable this feature, the encoder uses fewer bits on areas of the frame
22552	// that aren't moving and uses more bits on complex objects with sharp edges
22553	// that move a lot. For example, this feature improves the readability of text
22554	// tickers on newscasts and scoreboards on sports matches. Enabling this feature
22555	// will almost always improve your video quality. Note, though, that this feature
22556	// doesn't take into account where the viewer's attention is likely to be. If
22557	// viewers are likely to be focusing their attention on a part of the screen
22558	// that doesn't have moving objects with sharp edges, such as sports athletes'
22559	// faces, you might choose to disable this feature. Related setting: When you
22560	// enable temporal adaptive quantization, adjust the strength of the filter
22561	// with the setting Adaptive quantization (adaptiveQuantization).
22562	TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"XavcTemporalAdaptiveQuantization"`
22563
22564	// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
22565	// to the value XAVC_4K_INTRA_CBG.
22566	Xavc4kIntraCbgProfileSettings *Xavc4kIntraCbgProfileSettings `locationName:"xavc4kIntraCbgProfileSettings" type:"structure"`
22567
22568	// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
22569	// to the value XAVC_4K_INTRA_VBR.
22570	Xavc4kIntraVbrProfileSettings *Xavc4kIntraVbrProfileSettings `locationName:"xavc4kIntraVbrProfileSettings" type:"structure"`
22571
22572	// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
22573	// to the value XAVC_4K.
22574	Xavc4kProfileSettings *Xavc4kProfileSettings `locationName:"xavc4kProfileSettings" type:"structure"`
22575
22576	// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
22577	// to the value XAVC_HD_INTRA_CBG.
22578	XavcHdIntraCbgProfileSettings *XavcHdIntraCbgProfileSettings `locationName:"xavcHdIntraCbgProfileSettings" type:"structure"`
22579
22580	// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
22581	// to the value XAVC_HD.
22582	XavcHdProfileSettings *XavcHdProfileSettings `locationName:"xavcHdProfileSettings" type:"structure"`
22583}
22584
22585// String returns the string representation
22586func (s XavcSettings) String() string {
22587	return awsutil.Prettify(s)
22588}
22589
22590// GoString returns the string representation
22591func (s XavcSettings) GoString() string {
22592	return s.String()
22593}
22594
22595// Validate inspects the fields of the type to determine if they are valid.
22596func (s *XavcSettings) Validate() error {
22597	invalidParams := request.ErrInvalidParams{Context: "XavcSettings"}
22598	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
22599		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
22600	}
22601	if s.FramerateNumerator != nil && *s.FramerateNumerator < 24 {
22602		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 24))
22603	}
22604	if s.Xavc4kProfileSettings != nil {
22605		if err := s.Xavc4kProfileSettings.Validate(); err != nil {
22606			invalidParams.AddNested("Xavc4kProfileSettings", err.(request.ErrInvalidParams))
22607		}
22608	}
22609	if s.XavcHdProfileSettings != nil {
22610		if err := s.XavcHdProfileSettings.Validate(); err != nil {
22611			invalidParams.AddNested("XavcHdProfileSettings", err.(request.ErrInvalidParams))
22612		}
22613	}
22614
22615	if invalidParams.Len() > 0 {
22616		return invalidParams
22617	}
22618	return nil
22619}
22620
22621// SetAdaptiveQuantization sets the AdaptiveQuantization field's value.
22622func (s *XavcSettings) SetAdaptiveQuantization(v string) *XavcSettings {
22623	s.AdaptiveQuantization = &v
22624	return s
22625}
22626
22627// SetEntropyEncoding sets the EntropyEncoding field's value.
22628func (s *XavcSettings) SetEntropyEncoding(v string) *XavcSettings {
22629	s.EntropyEncoding = &v
22630	return s
22631}
22632
22633// SetFramerateControl sets the FramerateControl field's value.
22634func (s *XavcSettings) SetFramerateControl(v string) *XavcSettings {
22635	s.FramerateControl = &v
22636	return s
22637}
22638
22639// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value.
22640func (s *XavcSettings) SetFramerateConversionAlgorithm(v string) *XavcSettings {
22641	s.FramerateConversionAlgorithm = &v
22642	return s
22643}
22644
22645// SetFramerateDenominator sets the FramerateDenominator field's value.
22646func (s *XavcSettings) SetFramerateDenominator(v int64) *XavcSettings {
22647	s.FramerateDenominator = &v
22648	return s
22649}
22650
22651// SetFramerateNumerator sets the FramerateNumerator field's value.
22652func (s *XavcSettings) SetFramerateNumerator(v int64) *XavcSettings {
22653	s.FramerateNumerator = &v
22654	return s
22655}
22656
22657// SetProfile sets the Profile field's value.
22658func (s *XavcSettings) SetProfile(v string) *XavcSettings {
22659	s.Profile = &v
22660	return s
22661}
22662
22663// SetSlowPal sets the SlowPal field's value.
22664func (s *XavcSettings) SetSlowPal(v string) *XavcSettings {
22665	s.SlowPal = &v
22666	return s
22667}
22668
22669// SetSoftness sets the Softness field's value.
22670func (s *XavcSettings) SetSoftness(v int64) *XavcSettings {
22671	s.Softness = &v
22672	return s
22673}
22674
22675// SetSpatialAdaptiveQuantization sets the SpatialAdaptiveQuantization field's value.
22676func (s *XavcSettings) SetSpatialAdaptiveQuantization(v string) *XavcSettings {
22677	s.SpatialAdaptiveQuantization = &v
22678	return s
22679}
22680
22681// SetTemporalAdaptiveQuantization sets the TemporalAdaptiveQuantization field's value.
22682func (s *XavcSettings) SetTemporalAdaptiveQuantization(v string) *XavcSettings {
22683	s.TemporalAdaptiveQuantization = &v
22684	return s
22685}
22686
22687// SetXavc4kIntraCbgProfileSettings sets the Xavc4kIntraCbgProfileSettings field's value.
22688func (s *XavcSettings) SetXavc4kIntraCbgProfileSettings(v *Xavc4kIntraCbgProfileSettings) *XavcSettings {
22689	s.Xavc4kIntraCbgProfileSettings = v
22690	return s
22691}
22692
22693// SetXavc4kIntraVbrProfileSettings sets the Xavc4kIntraVbrProfileSettings field's value.
22694func (s *XavcSettings) SetXavc4kIntraVbrProfileSettings(v *Xavc4kIntraVbrProfileSettings) *XavcSettings {
22695	s.Xavc4kIntraVbrProfileSettings = v
22696	return s
22697}
22698
22699// SetXavc4kProfileSettings sets the Xavc4kProfileSettings field's value.
22700func (s *XavcSettings) SetXavc4kProfileSettings(v *Xavc4kProfileSettings) *XavcSettings {
22701	s.Xavc4kProfileSettings = v
22702	return s
22703}
22704
22705// SetXavcHdIntraCbgProfileSettings sets the XavcHdIntraCbgProfileSettings field's value.
22706func (s *XavcSettings) SetXavcHdIntraCbgProfileSettings(v *XavcHdIntraCbgProfileSettings) *XavcSettings {
22707	s.XavcHdIntraCbgProfileSettings = v
22708	return s
22709}
22710
22711// SetXavcHdProfileSettings sets the XavcHdProfileSettings field's value.
22712func (s *XavcSettings) SetXavcHdProfileSettings(v *XavcHdProfileSettings) *XavcSettings {
22713	s.XavcHdProfileSettings = v
22714	return s
22715}
22716
// Choose BROADCASTER_MIXED_AD when the input contains pre-mixed main audio
// + audio description (AD) as a stereo pair. The value for AudioType will be
// set to 3, which signals to downstream systems that this stream contains "broadcaster
// mixed AD". Note that the input received by the encoder must contain pre-mixed
// audio; the encoder does not perform the mixing. When you choose BROADCASTER_MIXED_AD,
// the encoder ignores any values you provide in AudioType and FollowInputAudioType.
// Choose NORMAL when the input does not contain pre-mixed audio + audio description
// (AD). In this case, the encoder will use any values you provide for AudioType
// and FollowInputAudioType.
const (
	// AacAudioDescriptionBroadcasterMixBroadcasterMixedAd is an AacAudioDescriptionBroadcasterMix enum value
	AacAudioDescriptionBroadcasterMixBroadcasterMixedAd = "BROADCASTER_MIXED_AD"

	// AacAudioDescriptionBroadcasterMixNormal is an AacAudioDescriptionBroadcasterMix enum value
	AacAudioDescriptionBroadcasterMixNormal = "NORMAL"
)

// AacAudioDescriptionBroadcasterMix_Values returns all elements of the AacAudioDescriptionBroadcasterMix enum
func AacAudioDescriptionBroadcasterMix_Values() []string {
	return []string{AacAudioDescriptionBroadcasterMixBroadcasterMixedAd, AacAudioDescriptionBroadcasterMixNormal}
}
22741
// AAC Profile.
const (
	// AacCodecProfileLc is an AacCodecProfile enum value
	AacCodecProfileLc = "LC"

	// AacCodecProfileHev1 is an AacCodecProfile enum value
	AacCodecProfileHev1 = "HEV1"

	// AacCodecProfileHev2 is an AacCodecProfile enum value
	AacCodecProfileHev2 = "HEV2"
)

// AacCodecProfile_Values returns all elements of the AacCodecProfile enum
func AacCodecProfile_Values() []string {
	return []string{AacCodecProfileLc, AacCodecProfileHev1, AacCodecProfileHev2}
}
22762
// Mono (Audio Description), Mono, Stereo, or 5.1 channel layout. Valid values
// depend on rate control mode and profile. "1.0 - Audio Description (Receiver
// Mix)" setting receives a stereo description plus control track and emits
// a mono AAC encode of the description track, with control data emitted in
// the PES header as per ETSI TS 101 154 Annex E.
const (
	// AacCodingModeAdReceiverMix is an AacCodingMode enum value
	AacCodingModeAdReceiverMix = "AD_RECEIVER_MIX"

	// AacCodingModeCodingMode10 is an AacCodingMode enum value
	AacCodingModeCodingMode10 = "CODING_MODE_1_0"

	// AacCodingModeCodingMode11 is an AacCodingMode enum value
	AacCodingModeCodingMode11 = "CODING_MODE_1_1"

	// AacCodingModeCodingMode20 is an AacCodingMode enum value
	AacCodingModeCodingMode20 = "CODING_MODE_2_0"

	// AacCodingModeCodingMode51 is an AacCodingMode enum value
	AacCodingModeCodingMode51 = "CODING_MODE_5_1"
)

// AacCodingMode_Values returns all elements of the AacCodingMode enum
func AacCodingMode_Values() []string {
	return []string{AacCodingModeAdReceiverMix, AacCodingModeCodingMode10, AacCodingModeCodingMode11, AacCodingModeCodingMode20, AacCodingModeCodingMode51}
}
22795
// Rate Control Mode.
const (
	// AacRateControlModeCbr is an AacRateControlMode enum value
	AacRateControlModeCbr = "CBR"

	// AacRateControlModeVbr is an AacRateControlMode enum value
	AacRateControlModeVbr = "VBR"
)

// AacRateControlMode_Values returns all elements of the AacRateControlMode enum
func AacRateControlMode_Values() []string {
	return []string{AacRateControlModeCbr, AacRateControlModeVbr}
}
22812
// Enables LATM/LOAS AAC output. Note that if you use LATM/LOAS AAC in an output,
// you must choose "No container" for the output container.
const (
	// AacRawFormatLatmLoas is an AacRawFormat enum value
	AacRawFormatLatmLoas = "LATM_LOAS"

	// AacRawFormatNone is an AacRawFormat enum value
	AacRawFormatNone = "NONE"
)

// AacRawFormat_Values returns all elements of the AacRawFormat enum
func AacRawFormat_Values() []string {
	return []string{AacRawFormatLatmLoas, AacRawFormatNone}
}
22830
// Use MPEG-2 AAC instead of MPEG-4 AAC audio for raw or MPEG-2 Transport Stream
// containers.
const (
	// AacSpecificationMpeg2 is an AacSpecification enum value
	AacSpecificationMpeg2 = "MPEG2"

	// AacSpecificationMpeg4 is an AacSpecification enum value
	AacSpecificationMpeg4 = "MPEG4"
)

// AacSpecification_Values returns all elements of the AacSpecification enum
func AacSpecification_Values() []string {
	return []string{AacSpecificationMpeg2, AacSpecificationMpeg4}
}
22848
// VBR Quality Level - Only used if rate_control_mode is VBR.
const (
	// AacVbrQualityLow is an AacVbrQuality enum value
	AacVbrQualityLow = "LOW"

	// AacVbrQualityMediumLow is an AacVbrQuality enum value
	AacVbrQualityMediumLow = "MEDIUM_LOW"

	// AacVbrQualityMediumHigh is an AacVbrQuality enum value
	AacVbrQualityMediumHigh = "MEDIUM_HIGH"

	// AacVbrQualityHigh is an AacVbrQuality enum value
	AacVbrQualityHigh = "HIGH"
)

// AacVbrQuality_Values returns all elements of the AacVbrQuality enum
func AacVbrQuality_Values() []string {
	return []string{AacVbrQualityLow, AacVbrQualityMediumLow, AacVbrQualityMediumHigh, AacVbrQualityHigh}
}
22873
// Specify the bitstream mode for the AC-3 stream that the encoder emits. For
// more information about the AC3 bitstream mode, see ATSC A/52-2012 (Annex
// E).
const (
	// Ac3BitstreamModeCompleteMain is an Ac3BitstreamMode enum value
	Ac3BitstreamModeCompleteMain = "COMPLETE_MAIN"

	// Ac3BitstreamModeCommentary is an Ac3BitstreamMode enum value
	Ac3BitstreamModeCommentary = "COMMENTARY"

	// Ac3BitstreamModeDialogue is an Ac3BitstreamMode enum value
	Ac3BitstreamModeDialogue = "DIALOGUE"

	// Ac3BitstreamModeEmergency is an Ac3BitstreamMode enum value
	Ac3BitstreamModeEmergency = "EMERGENCY"

	// Ac3BitstreamModeHearingImpaired is an Ac3BitstreamMode enum value
	Ac3BitstreamModeHearingImpaired = "HEARING_IMPAIRED"

	// Ac3BitstreamModeMusicAndEffects is an Ac3BitstreamMode enum value
	Ac3BitstreamModeMusicAndEffects = "MUSIC_AND_EFFECTS"

	// Ac3BitstreamModeVisuallyImpaired is an Ac3BitstreamMode enum value
	Ac3BitstreamModeVisuallyImpaired = "VISUALLY_IMPAIRED"

	// Ac3BitstreamModeVoiceOver is an Ac3BitstreamMode enum value
	Ac3BitstreamModeVoiceOver = "VOICE_OVER"
)

// Ac3BitstreamMode_Values returns all elements of the Ac3BitstreamMode enum
func Ac3BitstreamMode_Values() []string {
	return []string{
		Ac3BitstreamModeCompleteMain, Ac3BitstreamModeCommentary, Ac3BitstreamModeDialogue, Ac3BitstreamModeEmergency,
		Ac3BitstreamModeHearingImpaired, Ac3BitstreamModeMusicAndEffects, Ac3BitstreamModeVisuallyImpaired, Ac3BitstreamModeVoiceOver,
	}
}
22916
// Dolby Digital coding mode. Determines number of channels.
const (
	// Ac3CodingModeCodingMode10 is an Ac3CodingMode enum value
	Ac3CodingModeCodingMode10 = "CODING_MODE_1_0"

	// Ac3CodingModeCodingMode11 is an Ac3CodingMode enum value
	Ac3CodingModeCodingMode11 = "CODING_MODE_1_1"

	// Ac3CodingModeCodingMode20 is an Ac3CodingMode enum value
	Ac3CodingModeCodingMode20 = "CODING_MODE_2_0"

	// Ac3CodingModeCodingMode32Lfe is an Ac3CodingMode enum value
	Ac3CodingModeCodingMode32Lfe = "CODING_MODE_3_2_LFE"
)

// Ac3CodingMode_Values returns all elements of the Ac3CodingMode enum
func Ac3CodingMode_Values() []string {
	return []string{Ac3CodingModeCodingMode10, Ac3CodingModeCodingMode11, Ac3CodingModeCodingMode20, Ac3CodingModeCodingMode32Lfe}
}
22941
// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
// uses when encoding the metadata in the Dolby Digital stream for the line
// operating mode. Related setting: When you use this setting, MediaConvert
// ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile).
// For information about the Dolby Digital DRC operating modes and profiles,
// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
const (
	// Ac3DynamicRangeCompressionLineFilmStandard is an Ac3DynamicRangeCompressionLine enum value
	Ac3DynamicRangeCompressionLineFilmStandard = "FILM_STANDARD"

	// Ac3DynamicRangeCompressionLineFilmLight is an Ac3DynamicRangeCompressionLine enum value
	Ac3DynamicRangeCompressionLineFilmLight = "FILM_LIGHT"

	// Ac3DynamicRangeCompressionLineMusicStandard is an Ac3DynamicRangeCompressionLine enum value
	Ac3DynamicRangeCompressionLineMusicStandard = "MUSIC_STANDARD"

	// Ac3DynamicRangeCompressionLineMusicLight is an Ac3DynamicRangeCompressionLine enum value
	Ac3DynamicRangeCompressionLineMusicLight = "MUSIC_LIGHT"

	// Ac3DynamicRangeCompressionLineSpeech is an Ac3DynamicRangeCompressionLine enum value
	Ac3DynamicRangeCompressionLineSpeech = "SPEECH"

	// Ac3DynamicRangeCompressionLineNone is an Ac3DynamicRangeCompressionLine enum value
	Ac3DynamicRangeCompressionLineNone = "NONE"
)

// Ac3DynamicRangeCompressionLine_Values returns all elements of the Ac3DynamicRangeCompressionLine enum
func Ac3DynamicRangeCompressionLine_Values() []string {
	return []string{
		Ac3DynamicRangeCompressionLineFilmStandard, Ac3DynamicRangeCompressionLineFilmLight,
		Ac3DynamicRangeCompressionLineMusicStandard, Ac3DynamicRangeCompressionLineMusicLight,
		Ac3DynamicRangeCompressionLineSpeech, Ac3DynamicRangeCompressionLineNone,
	}
}
22979
// When you want to add Dolby dynamic range compression (DRC) signaling to your
// output stream, we recommend that you use the mode-specific settings instead
// of Dynamic range compression profile (DynamicRangeCompressionProfile). The
// mode-specific settings are Dynamic range compression profile, line mode (dynamicRangeCompressionLine)
// and Dynamic range compression profile, RF mode (dynamicRangeCompressionRf).
// Note that when you specify values for all three settings, MediaConvert ignores
// the value of this setting in favor of the mode-specific settings. If you
// do use this setting instead of the mode-specific settings, choose None (NONE)
// to leave out DRC signaling. Keep the default Film standard (FILM_STANDARD)
// to set the profile to Dolby's film standard profile for all operating modes.
const (
	// Ac3DynamicRangeCompressionProfileFilmStandard is an Ac3DynamicRangeCompressionProfile enum value
	Ac3DynamicRangeCompressionProfileFilmStandard = "FILM_STANDARD"

	// Ac3DynamicRangeCompressionProfileNone is an Ac3DynamicRangeCompressionProfile enum value
	Ac3DynamicRangeCompressionProfileNone = "NONE"
)

// Ac3DynamicRangeCompressionProfile_Values returns all elements of the Ac3DynamicRangeCompressionProfile enum
func Ac3DynamicRangeCompressionProfile_Values() []string {
	return []string{Ac3DynamicRangeCompressionProfileFilmStandard, Ac3DynamicRangeCompressionProfileNone}
}
23005
// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
// uses when encoding the metadata in the Dolby Digital stream for the RF operating
// mode. Related setting: When you use this setting, MediaConvert ignores any
// value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile).
// For information about the Dolby Digital DRC operating modes and profiles,
// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
const (
	// Ac3DynamicRangeCompressionRfFilmStandard is an Ac3DynamicRangeCompressionRf enum value
	Ac3DynamicRangeCompressionRfFilmStandard = "FILM_STANDARD"

	// Ac3DynamicRangeCompressionRfFilmLight is an Ac3DynamicRangeCompressionRf enum value
	Ac3DynamicRangeCompressionRfFilmLight = "FILM_LIGHT"

	// Ac3DynamicRangeCompressionRfMusicStandard is an Ac3DynamicRangeCompressionRf enum value
	Ac3DynamicRangeCompressionRfMusicStandard = "MUSIC_STANDARD"

	// Ac3DynamicRangeCompressionRfMusicLight is an Ac3DynamicRangeCompressionRf enum value
	Ac3DynamicRangeCompressionRfMusicLight = "MUSIC_LIGHT"

	// Ac3DynamicRangeCompressionRfSpeech is an Ac3DynamicRangeCompressionRf enum value
	Ac3DynamicRangeCompressionRfSpeech = "SPEECH"

	// Ac3DynamicRangeCompressionRfNone is an Ac3DynamicRangeCompressionRf enum value
	Ac3DynamicRangeCompressionRfNone = "NONE"
)

// Ac3DynamicRangeCompressionRf_Values returns all elements of the Ac3DynamicRangeCompressionRf enum
func Ac3DynamicRangeCompressionRf_Values() []string {
	return []string{
		Ac3DynamicRangeCompressionRfFilmStandard, Ac3DynamicRangeCompressionRfFilmLight,
		Ac3DynamicRangeCompressionRfMusicStandard, Ac3DynamicRangeCompressionRfMusicLight,
		Ac3DynamicRangeCompressionRfSpeech, Ac3DynamicRangeCompressionRfNone,
	}
}
23043
// Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only
// valid with 3_2_LFE coding mode.
const (
	// Ac3LfeFilterEnabled is an Ac3LfeFilter enum value
	Ac3LfeFilterEnabled = "ENABLED"

	// Ac3LfeFilterDisabled is an Ac3LfeFilter enum value
	Ac3LfeFilterDisabled = "DISABLED"
)

// Ac3LfeFilter_Values returns all elements of the Ac3LfeFilter enum
func Ac3LfeFilter_Values() []string {
	return []string{Ac3LfeFilterEnabled, Ac3LfeFilterDisabled}
}
23061
// When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+,
// or DolbyE decoder that supplied this audio data. If audio was not supplied
// from one of these streams, then the static metadata settings will be used.
const (
	// Ac3MetadataControlFollowInput is an Ac3MetadataControl enum value
	Ac3MetadataControlFollowInput = "FOLLOW_INPUT"

	// Ac3MetadataControlUseConfigured is an Ac3MetadataControl enum value
	Ac3MetadataControlUseConfigured = "USE_CONFIGURED"
)

// Ac3MetadataControl_Values returns all elements of the Ac3MetadataControl enum
func Ac3MetadataControl_Values() []string {
	return []string{Ac3MetadataControlFollowInput, Ac3MetadataControlUseConfigured}
}
23080
// Specify whether the service runs your job with accelerated transcoding. Choose
// DISABLED if you don't want accelerated transcoding. Choose ENABLED if you
// want your job to run with accelerated transcoding and to fail if your input
// files or your job settings aren't compatible with accelerated transcoding.
// Choose PREFERRED if you want your job to run with accelerated transcoding
// if the job is compatible with the feature and to run at standard speed if
// it's not.
const (
	// AccelerationModeDisabled is an AccelerationMode enum value
	AccelerationModeDisabled = "DISABLED"

	// AccelerationModeEnabled is an AccelerationMode enum value
	AccelerationModeEnabled = "ENABLED"

	// AccelerationModePreferred is an AccelerationMode enum value
	AccelerationModePreferred = "PREFERRED"
)

// AccelerationMode_Values returns all elements of the AccelerationMode enum
func AccelerationMode_Values() []string {
	return []string{AccelerationModeDisabled, AccelerationModeEnabled, AccelerationModePreferred}
}
23107
// Describes whether the current job is running with accelerated transcoding.
// For jobs that have Acceleration (AccelerationMode) set to DISABLED, AccelerationStatus
// is always NOT_APPLICABLE. For jobs that have Acceleration (AccelerationMode)
// set to ENABLED or PREFERRED, AccelerationStatus is one of the other states.
// AccelerationStatus is IN_PROGRESS initially, while the service determines
// whether the input files and job settings are compatible with accelerated
// transcoding. If they are, AccelerationStatus is ACCELERATED. If your input
// files and job settings aren't compatible with accelerated transcoding, the
// service either fails your job or runs it without accelerated transcoding,
// depending on how you set Acceleration (AccelerationMode). When the service
// runs your job without accelerated transcoding, AccelerationStatus is NOT_ACCELERATED.
const (
	// AccelerationStatusNotApplicable is an AccelerationStatus enum value
	AccelerationStatusNotApplicable = "NOT_APPLICABLE"

	// AccelerationStatusInProgress is an AccelerationStatus enum value
	AccelerationStatusInProgress = "IN_PROGRESS"

	// AccelerationStatusAccelerated is an AccelerationStatus enum value
	AccelerationStatusAccelerated = "ACCELERATED"

	// AccelerationStatusNotAccelerated is an AccelerationStatus enum value
	AccelerationStatusNotAccelerated = "NOT_ACCELERATED"
)

// AccelerationStatus_Values returns all elements of the AccelerationStatus enum
func AccelerationStatus_Values() []string {
	return []string{AccelerationStatusNotApplicable, AccelerationStatusInProgress, AccelerationStatusAccelerated, AccelerationStatusNotAccelerated}
}
23142
// This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert
// AFD signaling (AfdSignaling) to specify whether the service includes AFD
// values in the output video data and what those values are. * Choose None
// to remove all AFD values from this output. * Choose Fixed to ignore input
// AFD values and instead encode the value specified in the job. * Choose Auto
// to calculate output AFD values based on the input AFD scaler data.
const (
	// AfdSignalingNone is an AfdSignaling enum value
	AfdSignalingNone = "NONE"

	// AfdSignalingAuto is an AfdSignaling enum value
	AfdSignalingAuto = "AUTO"

	// AfdSignalingFixed is an AfdSignaling enum value
	AfdSignalingFixed = "FIXED"
)

// AfdSignaling_Values returns all elements of the AfdSignaling enum
func AfdSignaling_Values() []string {
	return []string{AfdSignalingNone, AfdSignalingAuto, AfdSignalingFixed}
}
23168
// Ignore this setting unless your input is a QuickTime animation that carries
// an alpha channel. It lets you create separate Key and Fill outputs, each
// specifying which part of the input MediaConvert uses. Keep the default
// DISCARD to drop the alpha channel and preserve the video. Choose REMAP_TO_LUMA
// to drop the video and map the alpha channel onto the luma channel of your
// outputs.
const (
	// AlphaBehaviorDiscard is an AlphaBehavior enum value.
	AlphaBehaviorDiscard = "DISCARD"

	// AlphaBehaviorRemapToLuma is an AlphaBehavior enum value.
	AlphaBehaviorRemapToLuma = "REMAP_TO_LUMA"
)

// AlphaBehavior_Values returns every element of the AlphaBehavior enum.
func AlphaBehavior_Values() []string {
	vals := make([]string, 0, 2)
	vals = append(vals, AlphaBehaviorDiscard, AlphaBehaviorRemapToLuma)
	return vals
}
23190
// Specify whether this set of input captions appears in your outputs in both
// 608 and 708 format. With Upconvert (UPCONVERT), MediaConvert includes the
// captions data in two ways: it passes the 608 data through in the 608
// compatibility bytes fields of the 708 wrapper, and it also translates the
// 608 data into 708.
const (
	// AncillaryConvert608To708Upconvert is an AncillaryConvert608To708 enum value.
	AncillaryConvert608To708Upconvert = "UPCONVERT"

	// AncillaryConvert608To708Disabled is an AncillaryConvert608To708 enum value.
	AncillaryConvert608To708Disabled = "DISABLED"
)

// AncillaryConvert608To708_Values returns every element of the AncillaryConvert608To708 enum.
func AncillaryConvert608To708_Values() []string {
	vals := make([]string, 0, 2)
	vals = append(vals, AncillaryConvert608To708Upconvert, AncillaryConvert608To708Disabled)
	return vals
}
23211
// The service, by default, terminates any unterminated captions at the end
// of each input. Disable this setting if you want a caption to carry over
// onto your next input.
const (
	// AncillaryTerminateCaptionsEndOfInput is an AncillaryTerminateCaptions enum value.
	AncillaryTerminateCaptionsEndOfInput = "END_OF_INPUT"

	// AncillaryTerminateCaptionsDisabled is an AncillaryTerminateCaptions enum value.
	AncillaryTerminateCaptionsDisabled = "DISABLED"
)

// AncillaryTerminateCaptions_Values returns every element of the AncillaryTerminateCaptions enum.
func AncillaryTerminateCaptions_Values() []string {
	vals := make([]string, 0, 2)
	vals = append(vals, AncillaryTerminateCaptionsEndOfInput, AncillaryTerminateCaptionsDisabled)
	return vals
}
23230
// MediaConvert automatically applies the anti-alias filter to all outputs.
// The value DISABLED is no longer accepted for AntiAlias; if your job
// specifies it, the service ignores the setting.
const (
	// AntiAliasDisabled is an AntiAlias enum value.
	AntiAliasDisabled = "DISABLED"

	// AntiAliasEnabled is an AntiAlias enum value.
	AntiAliasEnabled = "ENABLED"
)

// AntiAlias_Values returns every element of the AntiAlias enum.
func AntiAlias_Values() []string {
	vals := make([]string, 0, 2)
	vals = append(vals, AntiAliasDisabled, AntiAliasEnabled)
	return vals
}
23249
// Add a tag to this mono-channel audio track to mimic its placement in a
// multi-channel layout. For example, choose Left surround (LS) if this track
// is the left surround channel.
const (
	// AudioChannelTagL is an AudioChannelTag enum value.
	AudioChannelTagL = "L"

	// AudioChannelTagR is an AudioChannelTag enum value.
	AudioChannelTagR = "R"

	// AudioChannelTagC is an AudioChannelTag enum value.
	AudioChannelTagC = "C"

	// AudioChannelTagLfe is an AudioChannelTag enum value.
	AudioChannelTagLfe = "LFE"

	// AudioChannelTagLs is an AudioChannelTag enum value.
	AudioChannelTagLs = "LS"

	// AudioChannelTagRs is an AudioChannelTag enum value.
	AudioChannelTagRs = "RS"

	// AudioChannelTagLc is an AudioChannelTag enum value.
	AudioChannelTagLc = "LC"

	// AudioChannelTagRc is an AudioChannelTag enum value.
	AudioChannelTagRc = "RC"

	// AudioChannelTagCs is an AudioChannelTag enum value.
	AudioChannelTagCs = "CS"

	// AudioChannelTagLsd is an AudioChannelTag enum value.
	AudioChannelTagLsd = "LSD"

	// AudioChannelTagRsd is an AudioChannelTag enum value.
	AudioChannelTagRsd = "RSD"

	// AudioChannelTagTcs is an AudioChannelTag enum value.
	AudioChannelTagTcs = "TCS"

	// AudioChannelTagVhl is an AudioChannelTag enum value.
	AudioChannelTagVhl = "VHL"

	// AudioChannelTagVhc is an AudioChannelTag enum value.
	AudioChannelTagVhc = "VHC"

	// AudioChannelTagVhr is an AudioChannelTag enum value.
	AudioChannelTagVhr = "VHR"
)

// AudioChannelTag_Values returns every element of the AudioChannelTag enum.
func AudioChannelTag_Values() []string {
	return append([]string(nil),
		AudioChannelTagL,
		AudioChannelTagR,
		AudioChannelTagC,
		AudioChannelTagLfe,
		AudioChannelTagLs,
		AudioChannelTagRs,
		AudioChannelTagLc,
		AudioChannelTagRc,
		AudioChannelTagCs,
		AudioChannelTagLsd,
		AudioChannelTagRsd,
		AudioChannelTagTcs,
		AudioChannelTagVhl,
		AudioChannelTagVhc,
		AudioChannelTagVhr,
	)
}
23320
// Select the audio codec for this output. The Dolby Digital passthrough
// (PASSTHROUGH) option applies only to Dolby Digital and Dolby Digital Plus
// audio inputs. Be sure to pick a codec that your output container supports:
// https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio
// For audio-only outputs, both your input audio codec and your output audio
// codec must be supported for audio-only workflows. For more information,
// see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only
// and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output
const (
	// AudioCodecAac is an AudioCodec enum value.
	AudioCodecAac = "AAC"

	// AudioCodecMp2 is an AudioCodec enum value.
	AudioCodecMp2 = "MP2"

	// AudioCodecMp3 is an AudioCodec enum value.
	AudioCodecMp3 = "MP3"

	// AudioCodecWav is an AudioCodec enum value.
	AudioCodecWav = "WAV"

	// AudioCodecAiff is an AudioCodec enum value.
	AudioCodecAiff = "AIFF"

	// AudioCodecAc3 is an AudioCodec enum value.
	AudioCodecAc3 = "AC3"

	// AudioCodecEac3 is an AudioCodec enum value.
	AudioCodecEac3 = "EAC3"

	// AudioCodecEac3Atmos is an AudioCodec enum value.
	AudioCodecEac3Atmos = "EAC3_ATMOS"

	// AudioCodecVorbis is an AudioCodec enum value.
	AudioCodecVorbis = "VORBIS"

	// AudioCodecOpus is an AudioCodec enum value.
	AudioCodecOpus = "OPUS"

	// AudioCodecPassthrough is an AudioCodec enum value.
	AudioCodecPassthrough = "PASSTHROUGH"
)

// AudioCodec_Values returns every element of the AudioCodec enum.
func AudioCodec_Values() []string {
	return append([]string(nil),
		AudioCodecAac,
		AudioCodecMp2,
		AudioCodecMp3,
		AudioCodecWav,
		AudioCodecAiff,
		AudioCodecAc3,
		AudioCodecEac3,
		AudioCodecEac3Atmos,
		AudioCodecVorbis,
		AudioCodecOpus,
		AudioCodecPassthrough,
	)
}
23380
// Enable this setting on exactly one audio selector to make it the default
// for the job. The service applies this default to outputs where it can't
// find the specified input audio. If no default is set, those outputs carry
// no audio.
const (
	// AudioDefaultSelectionDefault is an AudioDefaultSelection enum value.
	AudioDefaultSelectionDefault = "DEFAULT"

	// AudioDefaultSelectionNotDefault is an AudioDefaultSelection enum value.
	AudioDefaultSelectionNotDefault = "NOT_DEFAULT"
)

// AudioDefaultSelection_Values returns every element of the AudioDefaultSelection enum.
func AudioDefaultSelection_Values() []string {
	vals := make([]string, 0, 2)
	vals = append(vals, AudioDefaultSelectionDefault, AudioDefaultSelectionNotDefault)
	return vals
}
23399
// Specify which source for language code takes precedence for this audio track.
// When you choose Follow input (FOLLOW_INPUT), the service uses the language
// code from the input track if it's present. If there's no language code on
// the input track, the service uses the code that you specify in the setting
// Language code (languageCode or customLanguageCode). When you choose Use configured
// (USE_CONFIGURED), the service uses the language code that you specify.
const (
	// AudioLanguageCodeControlFollowInput is a AudioLanguageCodeControl enum value
	AudioLanguageCodeControlFollowInput = "FOLLOW_INPUT"

	// AudioLanguageCodeControlUseConfigured is a AudioLanguageCodeControl enum value
	AudioLanguageCodeControlUseConfigured = "USE_CONFIGURED"
)

// AudioLanguageCodeControl_Values returns all elements of the AudioLanguageCodeControl enum
func AudioLanguageCodeControl_Values() []string {
	return []string{
		AudioLanguageCodeControlFollowInput,
		AudioLanguageCodeControlUseConfigured,
	}
}
23421
// Pick one of these audio normalization algorithms: ITU-R BS.1770-1: Ungated
// loudness. Measures ungated average loudness for an entire piece of content,
// suitable for short-form content under ATSC recommendation A/85. Supports up
// to 5.1 audio channels. ITU-R BS.1770-2: Gated loudness. Measures gated
// average loudness compliant with the requirements of EBU-R128. Supports up
// to 5.1 audio channels. ITU-R BS.1770-3: Modified peak. The same loudness
// measurement algorithm as 1770-2, but with an updated true peak measurement.
// ITU-R BS.1770-4: Higher channel count. Allows more audio channels than the
// other algorithms, including configurations such as 7.1.
const (
	// AudioNormalizationAlgorithmItuBs17701 is an AudioNormalizationAlgorithm enum value.
	AudioNormalizationAlgorithmItuBs17701 = "ITU_BS_1770_1"

	// AudioNormalizationAlgorithmItuBs17702 is an AudioNormalizationAlgorithm enum value.
	AudioNormalizationAlgorithmItuBs17702 = "ITU_BS_1770_2"

	// AudioNormalizationAlgorithmItuBs17703 is an AudioNormalizationAlgorithm enum value.
	AudioNormalizationAlgorithmItuBs17703 = "ITU_BS_1770_3"

	// AudioNormalizationAlgorithmItuBs17704 is an AudioNormalizationAlgorithm enum value.
	AudioNormalizationAlgorithmItuBs17704 = "ITU_BS_1770_4"
)

// AudioNormalizationAlgorithm_Values returns every element of the AudioNormalizationAlgorithm enum.
func AudioNormalizationAlgorithm_Values() []string {
	vals := make([]string, 0, 4)
	vals = append(vals,
		AudioNormalizationAlgorithmItuBs17701,
		AudioNormalizationAlgorithmItuBs17702,
		AudioNormalizationAlgorithmItuBs17703,
		AudioNormalizationAlgorithmItuBs17704,
	)
	return vals
}
23455
// When enabled, the chosen algorithm corrects the output audio. When disabled,
// the audio is measured but left unadjusted.
const (
	// AudioNormalizationAlgorithmControlCorrectAudio is an AudioNormalizationAlgorithmControl enum value.
	AudioNormalizationAlgorithmControlCorrectAudio = "CORRECT_AUDIO"

	// AudioNormalizationAlgorithmControlMeasureOnly is an AudioNormalizationAlgorithmControl enum value.
	AudioNormalizationAlgorithmControlMeasureOnly = "MEASURE_ONLY"
)

// AudioNormalizationAlgorithmControl_Values returns every element of the AudioNormalizationAlgorithmControl enum.
func AudioNormalizationAlgorithmControl_Values() []string {
	vals := make([]string, 0, 2)
	vals = append(vals, AudioNormalizationAlgorithmControlCorrectAudio, AudioNormalizationAlgorithmControlMeasureOnly)
	return vals
}
23473
// Set to LOG to write each output's audio track loudness to a CSV file.
const (
	// AudioNormalizationLoudnessLoggingLog is an AudioNormalizationLoudnessLogging enum value.
	AudioNormalizationLoudnessLoggingLog = "LOG"

	// AudioNormalizationLoudnessLoggingDontLog is an AudioNormalizationLoudnessLogging enum value.
	AudioNormalizationLoudnessLoggingDontLog = "DONT_LOG"
)

// AudioNormalizationLoudnessLogging_Values returns every element of the AudioNormalizationLoudnessLogging enum.
func AudioNormalizationLoudnessLogging_Values() []string {
	vals := make([]string, 0, 2)
	vals = append(vals, AudioNormalizationLoudnessLoggingLog, AudioNormalizationLoudnessLoggingDontLog)
	return vals
}
23490
// Set to TRUE_PEAK to calculate and log the TruePeak of each output's audio
// track loudness.
const (
	// AudioNormalizationPeakCalculationTruePeak is an AudioNormalizationPeakCalculation enum value.
	AudioNormalizationPeakCalculationTruePeak = "TRUE_PEAK"

	// AudioNormalizationPeakCalculationNone is an AudioNormalizationPeakCalculation enum value.
	AudioNormalizationPeakCalculationNone = "NONE"
)

// AudioNormalizationPeakCalculation_Values returns every element of the AudioNormalizationPeakCalculation enum.
func AudioNormalizationPeakCalculation_Values() []string {
	vals := make([]string, 0, 2)
	vals = append(vals, AudioNormalizationPeakCalculationTruePeak, AudioNormalizationPeakCalculationNone)
	return vals
}
23508
// Specifies the audio selector's type.
const (
	// AudioSelectorTypePid is an AudioSelectorType enum value.
	AudioSelectorTypePid = "PID"

	// AudioSelectorTypeTrack is an AudioSelectorType enum value.
	AudioSelectorTypeTrack = "TRACK"

	// AudioSelectorTypeLanguageCode is an AudioSelectorType enum value.
	AudioSelectorTypeLanguageCode = "LANGUAGE_CODE"

	// AudioSelectorTypeHlsRenditionGroup is an AudioSelectorType enum value.
	AudioSelectorTypeHlsRenditionGroup = "HLS_RENDITION_GROUP"
)

// AudioSelectorType_Values returns every element of the AudioSelectorType enum.
func AudioSelectorType_Values() []string {
	vals := make([]string, 0, 4)
	vals = append(vals,
		AudioSelectorTypePid,
		AudioSelectorTypeTrack,
		AudioSelectorTypeLanguageCode,
		AudioSelectorTypeHlsRenditionGroup,
	)
	return vals
}
23533
// With FOLLOW_INPUT, if the input contains an ISO 639 audio_type, that value
// passes through to the output. If the input contains no ISO 639 audio_type,
// the value in Audio Type is included in the output. Otherwise the value in
// Audio Type is included in the output. Note that both this field and audioType
// are ignored when audioDescriptionBroadcasterMix is set to BROADCASTER_MIXED_AD.
const (
	// AudioTypeControlFollowInput is an AudioTypeControl enum value.
	AudioTypeControlFollowInput = "FOLLOW_INPUT"

	// AudioTypeControlUseConfigured is an AudioTypeControl enum value.
	AudioTypeControlUseConfigured = "USE_CONFIGURED"
)

// AudioTypeControl_Values returns every element of the AudioTypeControl enum.
func AudioTypeControl_Values() []string {
	vals := make([]string, 0, 2)
	vals = append(vals, AudioTypeControlFollowInput, AudioTypeControlUseConfigured)
	return vals
}
23554
// Specify how strong any adaptive quantization filters that you enable should
// be. The value you pick here applies to Spatial adaptive quantization
// (spatialAdaptiveQuantization).
const (
	// Av1AdaptiveQuantizationOff is an Av1AdaptiveQuantization enum value.
	Av1AdaptiveQuantizationOff = "OFF"

	// Av1AdaptiveQuantizationLow is an Av1AdaptiveQuantization enum value.
	Av1AdaptiveQuantizationLow = "LOW"

	// Av1AdaptiveQuantizationMedium is an Av1AdaptiveQuantization enum value.
	Av1AdaptiveQuantizationMedium = "MEDIUM"

	// Av1AdaptiveQuantizationHigh is an Av1AdaptiveQuantization enum value.
	Av1AdaptiveQuantizationHigh = "HIGH"

	// Av1AdaptiveQuantizationHigher is an Av1AdaptiveQuantization enum value.
	Av1AdaptiveQuantizationHigher = "HIGHER"

	// Av1AdaptiveQuantizationMax is an Av1AdaptiveQuantization enum value.
	Av1AdaptiveQuantizationMax = "MAX"
)

// Av1AdaptiveQuantization_Values returns every element of the Av1AdaptiveQuantization enum.
func Av1AdaptiveQuantization_Values() []string {
	return append([]string(nil),
		Av1AdaptiveQuantizationOff,
		Av1AdaptiveQuantizationLow,
		Av1AdaptiveQuantizationMedium,
		Av1AdaptiveQuantizationHigh,
		Av1AdaptiveQuantizationHigher,
		Av1AdaptiveQuantizationMax,
	)
}
23588
// In the console, use the Framerate setting to specify this output's frame
// rate. To keep the input video's frame rate, choose Follow source. To do
// frame rate conversion, pick a frame rate from the dropdown list or choose
// Custom; the listed framerates are decimal approximations of fractions, and
// with Custom you specify your frame rate as a fraction. When writing your
// transcoding job specification as a JSON file without the console, use
// FramerateControl to tell the service which value to use for this output's
// frame rate: INITIALIZE_FROM_SOURCE uses the input frame rate, while SPECIFIED
// uses the rate you give in FramerateNumerator and FramerateDenominator.
const (
	// Av1FramerateControlInitializeFromSource is an Av1FramerateControl enum value.
	Av1FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Av1FramerateControlSpecified is an Av1FramerateControl enum value.
	Av1FramerateControlSpecified = "SPECIFIED"
)

// Av1FramerateControl_Values returns every element of the Av1FramerateControl enum.
func Av1FramerateControl_Values() []string {
	vals := make([]string, 0, 2)
	vals = append(vals, Av1FramerateControlInitializeFromSource, Av1FramerateControlSpecified)
	return vals
}
23615
// Pick the method MediaConvert should use when increasing or decreasing the
// frame rate. Drop duplicate (DUPLICATE_DROP) is recommended for numerically
// simple conversions such as 60 fps to 30 fps. For numerically complex
// conversions, interpolate (INTERPOLATE) avoids stutter and yields a smooth
// picture, though it may introduce undesirable video artifacts. For complex
// frame rate conversions — especially when the source video has already been
// converted from its original cadence — FrameFormer (FRAMEFORMER) performs
// motion-compensated interpolation and picks the best conversion method frame
// by frame. Note that FrameFormer increases transcoding time and incurs a
// significant add-on cost.
const (
	// Av1FramerateConversionAlgorithmDuplicateDrop is an Av1FramerateConversionAlgorithm enum value.
	Av1FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// Av1FramerateConversionAlgorithmInterpolate is an Av1FramerateConversionAlgorithm enum value.
	Av1FramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// Av1FramerateConversionAlgorithmFrameformer is an Av1FramerateConversionAlgorithm enum value.
	Av1FramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// Av1FramerateConversionAlgorithm_Values returns every element of the Av1FramerateConversionAlgorithm enum.
func Av1FramerateConversionAlgorithm_Values() []string {
	vals := make([]string, 0, 3)
	vals = append(vals,
		Av1FramerateConversionAlgorithmDuplicateDrop,
		Av1FramerateConversionAlgorithmInterpolate,
		Av1FramerateConversionAlgorithmFrameformer,
	)
	return vals
}
23645
// For rate control mode with AV1 outputs, MediaConvert supports only
// quality-defined variable bitrate (QVBR); CBR and VBR are not available.
const (
	// Av1RateControlModeQvbr is an Av1RateControlMode enum value.
	Av1RateControlModeQvbr = "QVBR"
)

// Av1RateControlMode_Values returns every element of the Av1RateControlMode enum.
func Av1RateControlMode_Values() []string {
	vals := make([]string, 0, 1)
	vals = append(vals, Av1RateControlModeQvbr)
	return vals
}
23659
// Keep the default, Enabled (ENABLED), to adjust quantization within each
// frame based on spatial variation of content complexity. With this feature
// on, the encoder spends fewer bits on areas that tolerate more distortion
// without noticeable visual degradation and more bits on areas where even
// small distortion is noticeable — for example, complex textured blocks get
// fewer bits while smooth textured blocks get more. Enabling this feature
// almost always improves video quality. Note, however, that it doesn't account
// for where the viewer's attention is likely to be; if viewers will probably
// focus on a part of the screen with a lot of complex texture, you might
// choose to disable it. Related setting: when spatial adaptive quantization
// is enabled, set Adaptive quantization (adaptiveQuantization) according to
// your content — Low for homogeneous content such as cartoons and video games,
// High or Higher for content with a wider variety of textures.
const (
	// Av1SpatialAdaptiveQuantizationDisabled is an Av1SpatialAdaptiveQuantization enum value.
	Av1SpatialAdaptiveQuantizationDisabled = "DISABLED"

	// Av1SpatialAdaptiveQuantizationEnabled is an Av1SpatialAdaptiveQuantization enum value.
	Av1SpatialAdaptiveQuantizationEnabled = "ENABLED"
)

// Av1SpatialAdaptiveQuantization_Values returns every element of the Av1SpatialAdaptiveQuantization enum.
func Av1SpatialAdaptiveQuantization_Values() []string {
	vals := make([]string, 0, 2)
	vals = append(vals, Av1SpatialAdaptiveQuantizationDisabled, Av1SpatialAdaptiveQuantizationEnabled)
	return vals
}
23690
// Specify your output's AVC-Intra class. The class selection determines the
// output video bit rate as a function of the output's frame rate. Higher
// class values give higher bitrates and improved image quality. Note that for
// Class 4K/2K, MediaConvert supports only 4:2:2 chroma subsampling.
const (
	// AvcIntraClassClass50 is an AvcIntraClass enum value.
	AvcIntraClassClass50 = "CLASS_50"

	// AvcIntraClassClass100 is an AvcIntraClass enum value.
	AvcIntraClassClass100 = "CLASS_100"

	// AvcIntraClassClass200 is an AvcIntraClass enum value.
	AvcIntraClassClass200 = "CLASS_200"

	// AvcIntraClassClass4k2k is an AvcIntraClass enum value.
	AvcIntraClassClass4k2k = "CLASS_4K_2K"
)

// AvcIntraClass_Values returns every element of the AvcIntraClass enum.
func AvcIntraClass_Values() []string {
	vals := make([]string, 0, 4)
	vals = append(vals,
		AvcIntraClassClass50,
		AvcIntraClassClass100,
		AvcIntraClassClass200,
		AvcIntraClassClass4k2k,
	)
	return vals
}
23719
// In the console, use the Framerate setting to specify this output's frame
// rate. To keep the input video's frame rate, choose Follow source. To do
// frame rate conversion, pick a frame rate from the dropdown list or choose
// Custom; the listed framerates are decimal approximations of fractions, and
// with Custom you specify your frame rate as a fraction. When writing your
// transcoding job specification as a JSON file without the console, use
// FramerateControl to tell the service which value to use for this output's
// frame rate: INITIALIZE_FROM_SOURCE uses the input frame rate, while SPECIFIED
// uses the rate you give in FramerateNumerator and FramerateDenominator.
const (
	// AvcIntraFramerateControlInitializeFromSource is an AvcIntraFramerateControl enum value.
	AvcIntraFramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// AvcIntraFramerateControlSpecified is an AvcIntraFramerateControl enum value.
	AvcIntraFramerateControlSpecified = "SPECIFIED"
)

// AvcIntraFramerateControl_Values returns every element of the AvcIntraFramerateControl enum.
func AvcIntraFramerateControl_Values() []string {
	vals := make([]string, 0, 2)
	vals = append(vals, AvcIntraFramerateControlInitializeFromSource, AvcIntraFramerateControlSpecified)
	return vals
}
23746
// Pick the method MediaConvert should use when increasing or decreasing the
// frame rate. Drop duplicate (DUPLICATE_DROP) is recommended for numerically
// simple conversions such as 60 fps to 30 fps. For numerically complex
// conversions, interpolate (INTERPOLATE) avoids stutter and yields a smooth
// picture, though it may introduce undesirable video artifacts. For complex
// frame rate conversions — especially when the source video has already been
// converted from its original cadence — FrameFormer (FRAMEFORMER) performs
// motion-compensated interpolation and picks the best conversion method frame
// by frame. Note that FrameFormer increases transcoding time and incurs a
// significant add-on cost.
const (
	// AvcIntraFramerateConversionAlgorithmDuplicateDrop is an AvcIntraFramerateConversionAlgorithm enum value.
	AvcIntraFramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// AvcIntraFramerateConversionAlgorithmInterpolate is an AvcIntraFramerateConversionAlgorithm enum value.
	AvcIntraFramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// AvcIntraFramerateConversionAlgorithmFrameformer is an AvcIntraFramerateConversionAlgorithm enum value.
	AvcIntraFramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// AvcIntraFramerateConversionAlgorithm_Values returns every element of the AvcIntraFramerateConversionAlgorithm enum.
func AvcIntraFramerateConversionAlgorithm_Values() []string {
	vals := make([]string, 0, 3)
	vals = append(vals,
		AvcIntraFramerateConversionAlgorithmDuplicateDrop,
		AvcIntraFramerateConversionAlgorithmInterpolate,
		AvcIntraFramerateConversionAlgorithmFrameformer,
	)
	return vals
}
23776
// Pick the output's scan line type. Keep the default, Progressive (PROGRESSIVE),
// to create a progressive output regardless of the input's scan type. Use Top
// field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) for an output
// that's interlaced with the same field polarity throughout. Use Follow,
// default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
// for outputs with the same field polarity as the source; for jobs with
// multiple inputs, the output field polarity might change over the course of
// the output. Follow behavior depends on the input scan type: an interlaced
// source produces an interlaced output with the source's polarity, while a
// progressive source produces an interlaced output with top field bottom field
// first, depending on which of the Follow options you choose.
const (
	// AvcIntraInterlaceModeProgressive is an AvcIntraInterlaceMode enum value.
	AvcIntraInterlaceModeProgressive = "PROGRESSIVE"

	// AvcIntraInterlaceModeTopField is an AvcIntraInterlaceMode enum value.
	AvcIntraInterlaceModeTopField = "TOP_FIELD"

	// AvcIntraInterlaceModeBottomField is an AvcIntraInterlaceMode enum value.
	AvcIntraInterlaceModeBottomField = "BOTTOM_FIELD"

	// AvcIntraInterlaceModeFollowTopField is an AvcIntraInterlaceMode enum value.
	AvcIntraInterlaceModeFollowTopField = "FOLLOW_TOP_FIELD"

	// AvcIntraInterlaceModeFollowBottomField is an AvcIntraInterlaceMode enum value.
	AvcIntraInterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD"
)

// AvcIntraInterlaceMode_Values returns every element of the AvcIntraInterlaceMode enum.
func AvcIntraInterlaceMode_Values() []string {
	return append([]string(nil),
		AvcIntraInterlaceModeProgressive,
		AvcIntraInterlaceModeTopField,
		AvcIntraInterlaceModeBottomField,
		AvcIntraInterlaceModeFollowTopField,
		AvcIntraInterlaceModeFollowBottomField,
	)
}
23816
// Use this setting for interlaced outputs whose output frame rate is half the
// input frame rate. In that situation, Optimized interlacing (INTERLACED_OPTIMIZE)
// creates a better quality interlaced output: each progressive frame of the
// input corresponds to an interlaced field in the output. For all other output
// frame rates, keep the default, Basic interlacing (INTERLACED), where
// MediaConvert performs any frame rate conversion first and then interlaces
// the frames. If you choose Optimized interlacing with an output frame rate
// that isn't suitable for it, MediaConvert automatically falls back to basic
// interlacing. Required settings: optimized interlacing requires Telecine
// (telecine) set to None (NONE) or Soft (SOFT) — it can't be used for hard
// telecine outputs — and Interlace mode (interlaceMode) set to a value other
// than Progressive (PROGRESSIVE).
const (
	// AvcIntraScanTypeConversionModeInterlaced is an AvcIntraScanTypeConversionMode enum value.
	AvcIntraScanTypeConversionModeInterlaced = "INTERLACED"

	// AvcIntraScanTypeConversionModeInterlacedOptimize is an AvcIntraScanTypeConversionMode enum value.
	AvcIntraScanTypeConversionModeInterlacedOptimize = "INTERLACED_OPTIMIZE"
)

// AvcIntraScanTypeConversionMode_Values returns every element of the AvcIntraScanTypeConversionMode enum.
func AvcIntraScanTypeConversionMode_Values() []string {
	vals := make([]string, 0, 2)
	vals = append(vals, AvcIntraScanTypeConversionModeInterlaced, AvcIntraScanTypeConversionModeInterlacedOptimize)
	return vals
}
23845
// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
// second (fps). Enable slow PAL to create a 25 fps output. When you enable
// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
// your audio to keep it synchronized with the video. Note that enabling this
// setting will slightly reduce the duration of your video. Required settings:
// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
// 1.
const (
	// AvcIntraSlowPalDisabled is a AvcIntraSlowPal enum value
	AvcIntraSlowPalDisabled = "DISABLED"

	// AvcIntraSlowPalEnabled is a AvcIntraSlowPal enum value
	AvcIntraSlowPalEnabled = "ENABLED"
)

// AvcIntraSlowPal_Values returns every valid value of the AvcIntraSlowPal enum.
func AvcIntraSlowPal_Values() []string {
	values := []string{
		AvcIntraSlowPalDisabled,
		AvcIntraSlowPalEnabled,
	}
	return values
}
23869
// When you do frame rate conversion from 23.976 frames per second (fps) to
// 29.97 fps, and your output scan type is interlaced, you can optionally enable
// hard telecine (HARD) to create a smoother picture. When you keep the default
// value, None (NONE), MediaConvert does a standard frame rate conversion to
// 29.97 without doing anything with the field polarity to create a smoother
// picture.
const (
	// AvcIntraTelecineNone is a AvcIntraTelecine enum value
	AvcIntraTelecineNone = "NONE"

	// AvcIntraTelecineHard is a AvcIntraTelecine enum value
	AvcIntraTelecineHard = "HARD"
)

// AvcIntraTelecine_Values returns every valid value of the AvcIntraTelecine enum.
func AvcIntraTelecine_Values() []string {
	values := []string{
		AvcIntraTelecineNone,
		AvcIntraTelecineHard,
	}
	return values
}
23891
// Optional. Use Quality tuning level (qualityTuningLevel) to choose how many
// transcoding passes MediaConvert does with your video. When you choose Multi-pass
// (MULTI_PASS), your video quality is better and your output bitrate is more
// accurate. That is, the actual bitrate of your output is closer to the target
// bitrate defined in the specification. When you choose Single-pass (SINGLE_PASS),
// your encoding time is faster. The default behavior is Single-pass (SINGLE_PASS).
const (
	// AvcIntraUhdQualityTuningLevelSinglePass is a AvcIntraUhdQualityTuningLevel enum value
	AvcIntraUhdQualityTuningLevelSinglePass = "SINGLE_PASS"

	// AvcIntraUhdQualityTuningLevelMultiPass is a AvcIntraUhdQualityTuningLevel enum value
	AvcIntraUhdQualityTuningLevelMultiPass = "MULTI_PASS"
)

// AvcIntraUhdQualityTuningLevel_Values returns every valid value of the
// AvcIntraUhdQualityTuningLevel enum.
func AvcIntraUhdQualityTuningLevel_Values() []string {
	values := []string{
		AvcIntraUhdQualityTuningLevelSinglePass,
		AvcIntraUhdQualityTuningLevelMultiPass,
	}
	return values
}
23913
// The tag type that AWS Billing and Cost Management will use to sort your AWS
// Elemental MediaConvert costs on any billing report that you set up.
const (
	// BillingTagsSourceQueue is a BillingTagsSource enum value
	BillingTagsSourceQueue = "QUEUE"

	// BillingTagsSourcePreset is a BillingTagsSource enum value
	BillingTagsSourcePreset = "PRESET"

	// BillingTagsSourceJobTemplate is a BillingTagsSource enum value
	BillingTagsSourceJobTemplate = "JOB_TEMPLATE"

	// BillingTagsSourceJob is a BillingTagsSource enum value
	BillingTagsSourceJob = "JOB"
)

// BillingTagsSource_Values returns every valid value of the BillingTagsSource enum.
func BillingTagsSource_Values() []string {
	values := []string{
		BillingTagsSourceQueue,
		BillingTagsSourcePreset,
		BillingTagsSourceJobTemplate,
		BillingTagsSourceJob,
	}
	return values
}
23939
// If no explicit x_position or y_position is provided, setting alignment to
// centered will place the captions at the bottom center of the output. Similarly,
// setting a left alignment will align captions to the bottom left of the output.
// If x and y positions are given in conjunction with the alignment parameter,
// the font will be justified (either left or centered) relative to those coordinates.
// This option is not valid for source captions that are STL, 608/embedded or
// teletext. These source settings are already pre-defined by the caption stream.
// All burn-in and DVB-Sub font settings must match.
const (
	// BurninSubtitleAlignmentCentered is a BurninSubtitleAlignment enum value
	BurninSubtitleAlignmentCentered = "CENTERED"

	// BurninSubtitleAlignmentLeft is a BurninSubtitleAlignment enum value
	BurninSubtitleAlignmentLeft = "LEFT"
)

// BurninSubtitleAlignment_Values returns every valid value of the
// BurninSubtitleAlignment enum.
func BurninSubtitleAlignment_Values() []string {
	values := []string{
		BurninSubtitleAlignmentCentered,
		BurninSubtitleAlignmentLeft,
	}
	return values
}
23963
// Specifies the color of the rectangle behind the captions.All burn-in and
// DVB-Sub font settings must match.
const (
	// BurninSubtitleBackgroundColorNone is a BurninSubtitleBackgroundColor enum value
	BurninSubtitleBackgroundColorNone = "NONE"

	// BurninSubtitleBackgroundColorBlack is a BurninSubtitleBackgroundColor enum value
	BurninSubtitleBackgroundColorBlack = "BLACK"

	// BurninSubtitleBackgroundColorWhite is a BurninSubtitleBackgroundColor enum value
	BurninSubtitleBackgroundColorWhite = "WHITE"
)

// BurninSubtitleBackgroundColor_Values returns every valid value of the
// BurninSubtitleBackgroundColor enum.
func BurninSubtitleBackgroundColor_Values() []string {
	values := []string{
		BurninSubtitleBackgroundColorNone,
		BurninSubtitleBackgroundColorBlack,
		BurninSubtitleBackgroundColorWhite,
	}
	return values
}
23985
// Specifies the color of the burned-in captions. This option is not valid for
// source captions that are STL, 608/embedded or teletext. These source settings
// are already pre-defined by the caption stream. All burn-in and DVB-Sub font
// settings must match.
const (
	// BurninSubtitleFontColorWhite is a BurninSubtitleFontColor enum value
	BurninSubtitleFontColorWhite = "WHITE"

	// BurninSubtitleFontColorBlack is a BurninSubtitleFontColor enum value
	BurninSubtitleFontColorBlack = "BLACK"

	// BurninSubtitleFontColorYellow is a BurninSubtitleFontColor enum value
	BurninSubtitleFontColorYellow = "YELLOW"

	// BurninSubtitleFontColorRed is a BurninSubtitleFontColor enum value
	BurninSubtitleFontColorRed = "RED"

	// BurninSubtitleFontColorGreen is a BurninSubtitleFontColor enum value
	BurninSubtitleFontColorGreen = "GREEN"

	// BurninSubtitleFontColorBlue is a BurninSubtitleFontColor enum value
	BurninSubtitleFontColorBlue = "BLUE"
)

// BurninSubtitleFontColor_Values returns every valid value of the
// BurninSubtitleFontColor enum.
func BurninSubtitleFontColor_Values() []string {
	values := []string{
		BurninSubtitleFontColorWhite,
		BurninSubtitleFontColorBlack,
		BurninSubtitleFontColorYellow,
		BurninSubtitleFontColorRed,
		BurninSubtitleFontColorGreen,
		BurninSubtitleFontColorBlue,
	}
	return values
}
24021
// Specifies font outline color. This option is not valid for source captions
// that are either 608/embedded or teletext. These source settings are already
// pre-defined by the caption stream. All burn-in and DVB-Sub font settings
// must match.
const (
	// BurninSubtitleOutlineColorBlack is a BurninSubtitleOutlineColor enum value
	BurninSubtitleOutlineColorBlack = "BLACK"

	// BurninSubtitleOutlineColorWhite is a BurninSubtitleOutlineColor enum value
	BurninSubtitleOutlineColorWhite = "WHITE"

	// BurninSubtitleOutlineColorYellow is a BurninSubtitleOutlineColor enum value
	BurninSubtitleOutlineColorYellow = "YELLOW"

	// BurninSubtitleOutlineColorRed is a BurninSubtitleOutlineColor enum value
	BurninSubtitleOutlineColorRed = "RED"

	// BurninSubtitleOutlineColorGreen is a BurninSubtitleOutlineColor enum value
	BurninSubtitleOutlineColorGreen = "GREEN"

	// BurninSubtitleOutlineColorBlue is a BurninSubtitleOutlineColor enum value
	BurninSubtitleOutlineColorBlue = "BLUE"
)

// BurninSubtitleOutlineColor_Values returns every valid value of the
// BurninSubtitleOutlineColor enum.
func BurninSubtitleOutlineColor_Values() []string {
	values := []string{
		BurninSubtitleOutlineColorBlack,
		BurninSubtitleOutlineColorWhite,
		BurninSubtitleOutlineColorYellow,
		BurninSubtitleOutlineColorRed,
		BurninSubtitleOutlineColorGreen,
		BurninSubtitleOutlineColorBlue,
	}
	return values
}
24057
// Specifies the color of the shadow cast by the captions.All burn-in and DVB-Sub
// font settings must match.
const (
	// BurninSubtitleShadowColorNone is a BurninSubtitleShadowColor enum value
	BurninSubtitleShadowColorNone = "NONE"

	// BurninSubtitleShadowColorBlack is a BurninSubtitleShadowColor enum value
	BurninSubtitleShadowColorBlack = "BLACK"

	// BurninSubtitleShadowColorWhite is a BurninSubtitleShadowColor enum value
	BurninSubtitleShadowColorWhite = "WHITE"
)

// BurninSubtitleShadowColor_Values returns every valid value of the
// BurninSubtitleShadowColor enum.
func BurninSubtitleShadowColor_Values() []string {
	values := []string{
		BurninSubtitleShadowColorNone,
		BurninSubtitleShadowColorBlack,
		BurninSubtitleShadowColorWhite,
	}
	return values
}
24079
// Only applies to jobs with input captions in Teletext or STL formats. Specify
// whether the spacing between letters in your captions is set by the captions
// grid or varies depending on letter width. Choose fixed grid to conform to
// the spacing specified in the captions file more accurately. Choose proportional
// to make the text easier to read if the captions are closed caption.
const (
	// BurninSubtitleTeletextSpacingFixedGrid is a BurninSubtitleTeletextSpacing enum value
	BurninSubtitleTeletextSpacingFixedGrid = "FIXED_GRID"

	// BurninSubtitleTeletextSpacingProportional is a BurninSubtitleTeletextSpacing enum value
	BurninSubtitleTeletextSpacingProportional = "PROPORTIONAL"
)

// BurninSubtitleTeletextSpacing_Values returns every valid value of the
// BurninSubtitleTeletextSpacing enum.
func BurninSubtitleTeletextSpacing_Values() []string {
	values := []string{
		BurninSubtitleTeletextSpacingFixedGrid,
		BurninSubtitleTeletextSpacingProportional,
	}
	return values
}
24100
// Specify the format for this set of captions on this output. The default format
// is embedded without SCTE-20. Note that your choice of video output container
// constrains your choice of output captions format. For more information, see
// https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html.
// If you are using SCTE-20 and you want to create an output that complies with
// the SCTE-43 spec, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED). To
// create a non-compliant output where the embedded captions come first, choose
// Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20).
const (
	// CaptionDestinationTypeBurnIn is a CaptionDestinationType enum value
	CaptionDestinationTypeBurnIn = "BURN_IN"

	// CaptionDestinationTypeDvbSub is a CaptionDestinationType enum value
	CaptionDestinationTypeDvbSub = "DVB_SUB"

	// CaptionDestinationTypeEmbedded is a CaptionDestinationType enum value
	CaptionDestinationTypeEmbedded = "EMBEDDED"

	// CaptionDestinationTypeEmbeddedPlusScte20 is a CaptionDestinationType enum value
	CaptionDestinationTypeEmbeddedPlusScte20 = "EMBEDDED_PLUS_SCTE20"

	// CaptionDestinationTypeImsc is a CaptionDestinationType enum value
	CaptionDestinationTypeImsc = "IMSC"

	// CaptionDestinationTypeScte20PlusEmbedded is a CaptionDestinationType enum value
	CaptionDestinationTypeScte20PlusEmbedded = "SCTE20_PLUS_EMBEDDED"

	// CaptionDestinationTypeScc is a CaptionDestinationType enum value
	CaptionDestinationTypeScc = "SCC"

	// CaptionDestinationTypeSrt is a CaptionDestinationType enum value
	CaptionDestinationTypeSrt = "SRT"

	// CaptionDestinationTypeSmi is a CaptionDestinationType enum value
	CaptionDestinationTypeSmi = "SMI"

	// CaptionDestinationTypeTeletext is a CaptionDestinationType enum value
	CaptionDestinationTypeTeletext = "TELETEXT"

	// CaptionDestinationTypeTtml is a CaptionDestinationType enum value
	CaptionDestinationTypeTtml = "TTML"

	// CaptionDestinationTypeWebvtt is a CaptionDestinationType enum value
	CaptionDestinationTypeWebvtt = "WEBVTT"
)

// CaptionDestinationType_Values returns every valid value of the
// CaptionDestinationType enum.
func CaptionDestinationType_Values() []string {
	values := []string{
		CaptionDestinationTypeBurnIn,
		CaptionDestinationTypeDvbSub,
		CaptionDestinationTypeEmbedded,
		CaptionDestinationTypeEmbeddedPlusScte20,
		CaptionDestinationTypeImsc,
		CaptionDestinationTypeScte20PlusEmbedded,
		CaptionDestinationTypeScc,
		CaptionDestinationTypeSrt,
		CaptionDestinationTypeSmi,
		CaptionDestinationTypeTeletext,
		CaptionDestinationTypeTtml,
		CaptionDestinationTypeWebvtt,
	}
	return values
}
24164
// Use Source (SourceType) to identify the format of your input captions. The
// service cannot auto-detect caption format.
const (
	// CaptionSourceTypeAncillary is a CaptionSourceType enum value
	CaptionSourceTypeAncillary = "ANCILLARY"

	// CaptionSourceTypeDvbSub is a CaptionSourceType enum value
	CaptionSourceTypeDvbSub = "DVB_SUB"

	// CaptionSourceTypeEmbedded is a CaptionSourceType enum value
	CaptionSourceTypeEmbedded = "EMBEDDED"

	// CaptionSourceTypeScte20 is a CaptionSourceType enum value
	CaptionSourceTypeScte20 = "SCTE20"

	// CaptionSourceTypeScc is a CaptionSourceType enum value
	CaptionSourceTypeScc = "SCC"

	// CaptionSourceTypeTtml is a CaptionSourceType enum value
	CaptionSourceTypeTtml = "TTML"

	// CaptionSourceTypeStl is a CaptionSourceType enum value
	CaptionSourceTypeStl = "STL"

	// CaptionSourceTypeSrt is a CaptionSourceType enum value
	CaptionSourceTypeSrt = "SRT"

	// CaptionSourceTypeSmi is a CaptionSourceType enum value
	CaptionSourceTypeSmi = "SMI"

	// CaptionSourceTypeSmpteTt is a CaptionSourceType enum value
	CaptionSourceTypeSmpteTt = "SMPTE_TT"

	// CaptionSourceTypeTeletext is a CaptionSourceType enum value
	CaptionSourceTypeTeletext = "TELETEXT"

	// CaptionSourceTypeNullSource is a CaptionSourceType enum value
	CaptionSourceTypeNullSource = "NULL_SOURCE"

	// CaptionSourceTypeImsc is a CaptionSourceType enum value
	CaptionSourceTypeImsc = "IMSC"

	// CaptionSourceTypeWebvtt is a CaptionSourceType enum value
	CaptionSourceTypeWebvtt = "WEBVTT"
)

// CaptionSourceType_Values returns every valid value of the CaptionSourceType enum.
func CaptionSourceType_Values() []string {
	values := []string{
		CaptionSourceTypeAncillary,
		CaptionSourceTypeDvbSub,
		CaptionSourceTypeEmbedded,
		CaptionSourceTypeScte20,
		CaptionSourceTypeScc,
		CaptionSourceTypeTtml,
		CaptionSourceTypeStl,
		CaptionSourceTypeSrt,
		CaptionSourceTypeSmi,
		CaptionSourceTypeSmpteTt,
		CaptionSourceTypeTeletext,
		CaptionSourceTypeNullSource,
		CaptionSourceTypeImsc,
		CaptionSourceTypeWebvtt,
	}
	return values
}
24230
// Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no
// tag. Otherwise, keep the default value Enabled (ENABLED) and control caching
// in your video distribution set up. For example, use the Cache-Control http
// header.
const (
	// CmafClientCacheDisabled is a CmafClientCache enum value
	CmafClientCacheDisabled = "DISABLED"

	// CmafClientCacheEnabled is a CmafClientCache enum value
	CmafClientCacheEnabled = "ENABLED"
)

// CmafClientCache_Values returns every valid value of the CmafClientCache enum.
func CmafClientCache_Values() []string {
	values := []string{
		CmafClientCacheDisabled,
		CmafClientCacheEnabled,
	}
	return values
}
24250
// Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist
// generation.
const (
	// CmafCodecSpecificationRfc6381 is a CmafCodecSpecification enum value
	CmafCodecSpecificationRfc6381 = "RFC_6381"

	// CmafCodecSpecificationRfc4281 is a CmafCodecSpecification enum value
	CmafCodecSpecificationRfc4281 = "RFC_4281"
)

// CmafCodecSpecification_Values returns every valid value of the
// CmafCodecSpecification enum.
func CmafCodecSpecification_Values() []string {
	values := []string{
		CmafCodecSpecificationRfc6381,
		CmafCodecSpecificationRfc4281,
	}
	return values
}
24268
// Specify the encryption scheme that you want the service to use when encrypting
// your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR).
const (
	// CmafEncryptionTypeSampleAes is a CmafEncryptionType enum value
	CmafEncryptionTypeSampleAes = "SAMPLE_AES"

	// CmafEncryptionTypeAesCtr is a CmafEncryptionType enum value
	CmafEncryptionTypeAesCtr = "AES_CTR"
)

// CmafEncryptionType_Values returns every valid value of the CmafEncryptionType enum.
func CmafEncryptionType_Values() []string {
	values := []string{
		CmafEncryptionTypeSampleAes,
		CmafEncryptionTypeAesCtr,
	}
	return values
}
24286
// Specify whether MediaConvert generates images for trick play. Keep the default
// value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL)
// to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME)
// to generate tiled thumbnails and full-resolution images of single frames.
// When you enable Write HLS manifest (WriteHlsManifest), MediaConvert creates
// a child manifest for each set of images that you generate and adds corresponding
// entries to the parent manifest. When you enable Write DASH manifest (WriteDashManifest),
// MediaConvert adds an entry in the .mpd manifest for each set of images that
// you generate. A common application for these images is Roku trick mode. The
// thumbnails and full-frame images that MediaConvert creates with this feature
// are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md
const (
	// CmafImageBasedTrickPlayNone is a CmafImageBasedTrickPlay enum value
	CmafImageBasedTrickPlayNone = "NONE"

	// CmafImageBasedTrickPlayThumbnail is a CmafImageBasedTrickPlay enum value
	CmafImageBasedTrickPlayThumbnail = "THUMBNAIL"

	// CmafImageBasedTrickPlayThumbnailAndFullframe is a CmafImageBasedTrickPlay enum value
	CmafImageBasedTrickPlayThumbnailAndFullframe = "THUMBNAIL_AND_FULLFRAME"
)

// CmafImageBasedTrickPlay_Values returns every valid value of the
// CmafImageBasedTrickPlay enum.
func CmafImageBasedTrickPlay_Values() []string {
	values := []string{
		CmafImageBasedTrickPlayNone,
		CmafImageBasedTrickPlayThumbnail,
		CmafImageBasedTrickPlayThumbnailAndFullframe,
	}
	return values
}
24317
// When you use DRM with CMAF outputs, choose whether the service writes the
// 128-bit encryption initialization vector in the HLS and DASH manifests.
const (
	// CmafInitializationVectorInManifestInclude is a CmafInitializationVectorInManifest enum value
	CmafInitializationVectorInManifestInclude = "INCLUDE"

	// CmafInitializationVectorInManifestExclude is a CmafInitializationVectorInManifest enum value
	CmafInitializationVectorInManifestExclude = "EXCLUDE"
)

// CmafInitializationVectorInManifest_Values returns every valid value of the
// CmafInitializationVectorInManifest enum.
func CmafInitializationVectorInManifest_Values() []string {
	values := []string{
		CmafInitializationVectorInManifestInclude,
		CmafInitializationVectorInManifestExclude,
	}
	return values
}
24335
// Specify whether your DRM encryption key is static or from a key provider
// that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.
const (
	// CmafKeyProviderTypeSpeke is a CmafKeyProviderType enum value
	CmafKeyProviderTypeSpeke = "SPEKE"

	// CmafKeyProviderTypeStaticKey is a CmafKeyProviderType enum value
	CmafKeyProviderTypeStaticKey = "STATIC_KEY"
)

// CmafKeyProviderType_Values returns every valid value of the CmafKeyProviderType enum.
func CmafKeyProviderType_Values() []string {
	values := []string{
		CmafKeyProviderTypeSpeke,
		CmafKeyProviderTypeStaticKey,
	}
	return values
}
24353
// When set to GZIP, compresses HLS playlist.
const (
	// CmafManifestCompressionGzip is a CmafManifestCompression enum value
	CmafManifestCompressionGzip = "GZIP"

	// CmafManifestCompressionNone is a CmafManifestCompression enum value
	CmafManifestCompressionNone = "NONE"
)

// CmafManifestCompression_Values returns every valid value of the
// CmafManifestCompression enum.
func CmafManifestCompression_Values() []string {
	values := []string{
		CmafManifestCompressionGzip,
		CmafManifestCompressionNone,
	}
	return values
}
24370
// Indicates whether the output manifest should use floating point values for
// segment duration.
const (
	// CmafManifestDurationFormatFloatingPoint is a CmafManifestDurationFormat enum value
	CmafManifestDurationFormatFloatingPoint = "FLOATING_POINT"

	// CmafManifestDurationFormatInteger is a CmafManifestDurationFormat enum value
	CmafManifestDurationFormatInteger = "INTEGER"
)

// CmafManifestDurationFormat_Values returns every valid value of the
// CmafManifestDurationFormat enum.
func CmafManifestDurationFormat_Values() []string {
	values := []string{
		CmafManifestDurationFormatFloatingPoint,
		CmafManifestDurationFormatInteger,
	}
	return values
}
24388
// Specify whether your DASH profile is on-demand or main. When you choose Main
// profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011
// in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE),
// the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd.
// When you choose On-demand, you must also set the output group setting Segment
// control (SegmentControl) to Single file (SINGLE_FILE).
const (
	// CmafMpdProfileMainProfile is a CmafMpdProfile enum value
	CmafMpdProfileMainProfile = "MAIN_PROFILE"

	// CmafMpdProfileOnDemandProfile is a CmafMpdProfile enum value
	CmafMpdProfileOnDemandProfile = "ON_DEMAND_PROFILE"
)

// CmafMpdProfile_Values returns every valid value of the CmafMpdProfile enum.
func CmafMpdProfile_Values() []string {
	values := []string{
		CmafMpdProfileMainProfile,
		CmafMpdProfileOnDemandProfile,
	}
	return values
}
24410
// Use this setting only when your output video stream has B-frames, which causes
// the initial presentation time stamp (PTS) to be offset from the initial decode
// time stamp (DTS). Specify how MediaConvert handles PTS when writing time
// stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS)
// when you want MediaConvert to use the initial PTS as the first time stamp
// in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore
// the initial PTS in the video stream and instead write the initial time stamp
// as zero in the manifest. For outputs that don't have B-frames, the time stamps
// in your DASH manifests start at zero regardless of your choice here.
const (
	// CmafPtsOffsetHandlingForBFramesZeroBased is a CmafPtsOffsetHandlingForBFrames enum value
	CmafPtsOffsetHandlingForBFramesZeroBased = "ZERO_BASED"

	// CmafPtsOffsetHandlingForBFramesMatchInitialPts is a CmafPtsOffsetHandlingForBFrames enum value
	CmafPtsOffsetHandlingForBFramesMatchInitialPts = "MATCH_INITIAL_PTS"
)

// CmafPtsOffsetHandlingForBFrames_Values returns every valid value of the
// CmafPtsOffsetHandlingForBFrames enum.
func CmafPtsOffsetHandlingForBFrames_Values() []string {
	values := []string{
		CmafPtsOffsetHandlingForBFramesZeroBased,
		CmafPtsOffsetHandlingForBFramesMatchInitialPts,
	}
	return values
}
24435
// When set to SINGLE_FILE, a single output file is generated, which is internally
// segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES,
// separate segment files will be created.
const (
	// CmafSegmentControlSingleFile is a CmafSegmentControl enum value
	CmafSegmentControlSingleFile = "SINGLE_FILE"

	// CmafSegmentControlSegmentedFiles is a CmafSegmentControl enum value
	CmafSegmentControlSegmentedFiles = "SEGMENTED_FILES"
)

// CmafSegmentControl_Values returns every valid value of the CmafSegmentControl enum.
func CmafSegmentControl_Values() []string {
	values := []string{
		CmafSegmentControlSingleFile,
		CmafSegmentControlSegmentedFiles,
	}
	return values
}
24454
// Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag
// of variant manifest.
const (
	// CmafStreamInfResolutionInclude is a CmafStreamInfResolution enum value
	CmafStreamInfResolutionInclude = "INCLUDE"

	// CmafStreamInfResolutionExclude is a CmafStreamInfResolution enum value
	CmafStreamInfResolutionExclude = "EXCLUDE"
)

// CmafStreamInfResolution_Values returns every valid value of the
// CmafStreamInfResolution enum.
func CmafStreamInfResolution_Values() []string {
	values := []string{
		CmafStreamInfResolutionInclude,
		CmafStreamInfResolutionExclude,
	}
	return values
}
24472
// When set to LEGACY, the segment target duration is always rounded up to the
// nearest integer value above its current value in seconds. When set to SPEC\\_COMPLIANT,
// the segment target duration is rounded up to the nearest integer value if
// fraction seconds are greater than or equal to 0.5 (>= 0.5) and rounded down
// if less than 0.5 (< 0.5). You may need to use LEGACY if your client needs
// to ensure that the target duration is always longer than the actual duration
// of the segment. Some older players may experience interrupted playback when
// the actual duration of a track in a segment is longer than the target duration.
const (
	// CmafTargetDurationCompatibilityModeLegacy is a CmafTargetDurationCompatibilityMode enum value
	CmafTargetDurationCompatibilityModeLegacy = "LEGACY"

	// CmafTargetDurationCompatibilityModeSpecCompliant is a CmafTargetDurationCompatibilityMode enum value
	CmafTargetDurationCompatibilityModeSpecCompliant = "SPEC_COMPLIANT"
)

// CmafTargetDurationCompatibilityMode_Values returns every valid value of the
// CmafTargetDurationCompatibilityMode enum.
func CmafTargetDurationCompatibilityMode_Values() []string {
	values := []string{
		CmafTargetDurationCompatibilityModeLegacy,
		CmafTargetDurationCompatibilityModeSpecCompliant,
	}
	return values
}
24496
// When set to ENABLED, a DASH MPD manifest will be generated for this output.
const (
	// CmafWriteDASHManifestDisabled is a CmafWriteDASHManifest enum value
	CmafWriteDASHManifestDisabled = "DISABLED"

	// CmafWriteDASHManifestEnabled is a CmafWriteDASHManifest enum value
	CmafWriteDASHManifestEnabled = "ENABLED"
)

// CmafWriteDASHManifest_Values returns every valid value of the
// CmafWriteDASHManifest enum.
func CmafWriteDASHManifest_Values() []string {
	values := []string{
		CmafWriteDASHManifestDisabled,
		CmafWriteDASHManifestEnabled,
	}
	return values
}
24513
// When set to ENABLED, an Apple HLS manifest will be generated for this output.
const (
	// CmafWriteHLSManifestDisabled is a CmafWriteHLSManifest enum value
	CmafWriteHLSManifestDisabled = "DISABLED"

	// CmafWriteHLSManifestEnabled is a CmafWriteHLSManifest enum value
	CmafWriteHLSManifestEnabled = "ENABLED"
)

// CmafWriteHLSManifest_Values returns every valid value of the
// CmafWriteHLSManifest enum.
func CmafWriteHLSManifest_Values() []string {
	values := []string{
		CmafWriteHLSManifestDisabled,
		CmafWriteHLSManifestEnabled,
	}
	return values
}
24530
// When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation),
// your DASH manifest shows precise segment durations. The segment duration
// information appears inside the SegmentTimeline element, inside SegmentTemplate
// at the Representation level. When this feature isn't enabled, the segment
// durations in your DASH manifest are approximate. The segment duration information
// appears in the duration attribute of the SegmentTemplate element.
const (
	// CmafWriteSegmentTimelineInRepresentationEnabled is a CmafWriteSegmentTimelineInRepresentation enum value
	CmafWriteSegmentTimelineInRepresentationEnabled = "ENABLED"

	// CmafWriteSegmentTimelineInRepresentationDisabled is a CmafWriteSegmentTimelineInRepresentation enum value
	CmafWriteSegmentTimelineInRepresentationDisabled = "DISABLED"
)

// CmafWriteSegmentTimelineInRepresentation_Values returns every valid value of
// the CmafWriteSegmentTimelineInRepresentation enum.
func CmafWriteSegmentTimelineInRepresentation_Values() []string {
	values := []string{
		CmafWriteSegmentTimelineInRepresentationEnabled,
		CmafWriteSegmentTimelineInRepresentationDisabled,
	}
	return values
}
24552
// Specify this setting only when your output will be consumed by a downstream
// repackaging workflow that is sensitive to very small duration differences
// between video and audio. For this situation, choose Match video duration
// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
// MediaConvert pads the output audio streams with silence or trims them to
// ensure that the total duration of each audio stream is at least as long as
// the total duration of the video stream. After padding or trimming, the audio
// stream duration is no more than one frame longer than the video stream. MediaConvert
// applies audio padding or trimming only to the end of the last segment of
// the output. For unsegmented outputs, MediaConvert adds padding only to the
// end of the file. When you keep the default value, any minor discrepancies
// between audio and video duration will depend on your output audio codec.
const (
	// CmfcAudioDurationDefaultCodecDuration is a CmfcAudioDuration enum value
	CmfcAudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION"

	// CmfcAudioDurationMatchVideoDuration is a CmfcAudioDuration enum value
	CmfcAudioDurationMatchVideoDuration = "MATCH_VIDEO_DURATION"
)

// CmfcAudioDuration_Values returns every valid value of the CmfcAudioDuration enum.
func CmfcAudioDuration_Values() []string {
	values := []string{
		CmfcAudioDurationDefaultCodecDuration,
		CmfcAudioDurationMatchVideoDuration,
	}
	return values
}
24581
// Use this setting to control the values that MediaConvert puts in your HLS
// parent playlist to control how the client player selects which audio track
// to play. The other options for this setting determine the values that MediaConvert
// writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry
// for the audio variant. For more information about these attributes, see the
// Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist.
// Choose Alternate audio, auto select, default (ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT)
// to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant
// in your output group. Choose Alternate audio, auto select, not default (ALTERNATE_AUDIO_AUTO_SELECT)
// to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select
// to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this
// setting, MediaConvert defaults to Alternate audio, auto select, default.
// When there is more than one variant in your output group, you must explicitly
// choose a value for this setting.
const (
	// CmfcAudioTrackTypeAlternateAudioAutoSelectDefault is a CmfcAudioTrackType enum value
	CmfcAudioTrackTypeAlternateAudioAutoSelectDefault = "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT"

	// CmfcAudioTrackTypeAlternateAudioAutoSelect is a CmfcAudioTrackType enum value
	CmfcAudioTrackTypeAlternateAudioAutoSelect = "ALTERNATE_AUDIO_AUTO_SELECT"

	// CmfcAudioTrackTypeAlternateAudioNotAutoSelect is a CmfcAudioTrackType enum value
	CmfcAudioTrackTypeAlternateAudioNotAutoSelect = "ALTERNATE_AUDIO_NOT_AUTO_SELECT"
)

// CmfcAudioTrackType_Values returns all elements of the CmfcAudioTrackType enum.
func CmfcAudioTrackType_Values() []string {
	return []string{
		CmfcAudioTrackTypeAlternateAudioAutoSelectDefault, CmfcAudioTrackTypeAlternateAudioAutoSelect,
		CmfcAudioTrackTypeAlternateAudioNotAutoSelect,
	}
}
24615
// Specify whether to flag this audio track as descriptive video service (DVS)
// in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes
// the parameter CHARACTERISTICS="public.accessibility.describes-video" in the
// EXT-X-MEDIA entry for this track. When you keep the default choice, Don't
// flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can
// help with accessibility on Apple devices. For more information, see the Apple
// documentation.
const (
	// CmfcDescriptiveVideoServiceFlagDontFlag is a CmfcDescriptiveVideoServiceFlag enum value
	CmfcDescriptiveVideoServiceFlagDontFlag = "DONT_FLAG"

	// CmfcDescriptiveVideoServiceFlagFlag is a CmfcDescriptiveVideoServiceFlag enum value
	CmfcDescriptiveVideoServiceFlagFlag = "FLAG"
)

// CmfcDescriptiveVideoServiceFlag_Values returns all elements of the CmfcDescriptiveVideoServiceFlag enum.
func CmfcDescriptiveVideoServiceFlag_Values() []string {
	return []string{CmfcDescriptiveVideoServiceFlagDontFlag, CmfcDescriptiveVideoServiceFlagFlag}
}
24638
// Choose Include (INCLUDE) to have MediaConvert generate an HLS child manifest
// that lists only the I-frames for this rendition, in addition to your regular
// manifest for this rendition. You might use this manifest as part of a workflow
// that creates preview functions for your video. MediaConvert adds both the
// I-frame only child manifest and the regular child manifest to the parent
// manifest. When you don't need the I-frame only child manifest, keep the default
// value Exclude (EXCLUDE).
const (
	// CmfcIFrameOnlyManifestInclude is a CmfcIFrameOnlyManifest enum value
	CmfcIFrameOnlyManifestInclude = "INCLUDE"

	// CmfcIFrameOnlyManifestExclude is a CmfcIFrameOnlyManifest enum value
	CmfcIFrameOnlyManifestExclude = "EXCLUDE"
)

// CmfcIFrameOnlyManifest_Values returns all elements of the CmfcIFrameOnlyManifest enum.
func CmfcIFrameOnlyManifest_Values() []string {
	return []string{CmfcIFrameOnlyManifestInclude, CmfcIFrameOnlyManifestExclude}
}
24661
// Use this setting only when you specify SCTE-35 markers from ESAM. Choose
// INSERT to put SCTE-35 markers in this output at the insertion points that
// you specify in an ESAM XML document. Provide the document in the setting
// SCC XML (sccXml).
const (
	// CmfcScte35EsamInsert is a CmfcScte35Esam enum value
	CmfcScte35EsamInsert = "INSERT"

	// CmfcScte35EsamNone is a CmfcScte35Esam enum value
	CmfcScte35EsamNone = "NONE"
)

// CmfcScte35Esam_Values returns all elements of the CmfcScte35Esam enum.
func CmfcScte35Esam_Values() []string {
	return []string{CmfcScte35EsamInsert, CmfcScte35EsamNone}
}
24681
// Ignore this setting unless you have SCTE-35 markers in your input video file.
// Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear
// in your input to also appear in this output. Choose None (NONE) if you don't
// want those SCTE-35 markers in this output.
const (
	// CmfcScte35SourcePassthrough is a CmfcScte35Source enum value
	CmfcScte35SourcePassthrough = "PASSTHROUGH"

	// CmfcScte35SourceNone is a CmfcScte35Source enum value
	CmfcScte35SourceNone = "NONE"
)

// CmfcScte35Source_Values returns all elements of the CmfcScte35Source enum.
func CmfcScte35Source_Values() []string {
	return []string{CmfcScte35SourcePassthrough, CmfcScte35SourceNone}
}
24701
// Choose Insert (INSERT) for this setting to include color metadata in this
// output. Choose Ignore (IGNORE) to exclude color metadata from this output.
// If you don't specify a value, the service sets this to Insert by default.
const (
	// ColorMetadataIgnore is a ColorMetadata enum value
	ColorMetadataIgnore = "IGNORE"

	// ColorMetadataInsert is a ColorMetadata enum value
	ColorMetadataInsert = "INSERT"
)

// ColorMetadata_Values returns all elements of the ColorMetadata enum.
func ColorMetadata_Values() []string {
	return []string{ColorMetadataIgnore, ColorMetadataInsert}
}
24720
// If your input video has accurate color space metadata, or if you don't know
// about color space, leave this set to the default value Follow (FOLLOW). The
// service will automatically detect your input color space. If your input video
// has metadata indicating the wrong color space, specify the accurate color
// space here. If your input video is HDR 10 and the SMPTE ST 2086 Mastering
// Display Color Volume static metadata isn't present in your video stream,
// or if that metadata is present but not accurate, choose Force HDR 10 (FORCE_HDR10)
// here and specify correct values in the input HDR 10 metadata (Hdr10Metadata)
// settings. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.
const (
	// ColorSpaceFollow is a ColorSpace enum value
	ColorSpaceFollow = "FOLLOW"

	// ColorSpaceRec601 is a ColorSpace enum value
	ColorSpaceRec601 = "REC_601"

	// ColorSpaceRec709 is a ColorSpace enum value
	ColorSpaceRec709 = "REC_709"

	// ColorSpaceHdr10 is a ColorSpace enum value
	ColorSpaceHdr10 = "HDR10"

	// ColorSpaceHlg2020 is a ColorSpace enum value
	ColorSpaceHlg2020 = "HLG_2020"
)

// ColorSpace_Values returns all elements of the ColorSpace enum.
func ColorSpace_Values() []string {
	return []string{
		ColorSpaceFollow, ColorSpaceRec601, ColorSpaceRec709,
		ColorSpaceHdr10, ColorSpaceHlg2020,
	}
}
24757
// Specify the color space you want for this output. The service supports conversion
// between HDR formats, between SDR formats, from SDR to HDR, and from HDR to
// SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted
// video has an HDR format, but visually appears the same as an unconverted
// output. HDR to SDR conversion uses Elemental tone mapping technology to approximate
// the outcome of manually regrading from HDR to SDR.
const (
	// ColorSpaceConversionNone is a ColorSpaceConversion enum value
	ColorSpaceConversionNone = "NONE"

	// ColorSpaceConversionForce601 is a ColorSpaceConversion enum value
	ColorSpaceConversionForce601 = "FORCE_601"

	// ColorSpaceConversionForce709 is a ColorSpaceConversion enum value
	ColorSpaceConversionForce709 = "FORCE_709"

	// ColorSpaceConversionForceHdr10 is a ColorSpaceConversion enum value
	ColorSpaceConversionForceHdr10 = "FORCE_HDR10"

	// ColorSpaceConversionForceHlg2020 is a ColorSpaceConversion enum value
	ColorSpaceConversionForceHlg2020 = "FORCE_HLG_2020"
)

// ColorSpaceConversion_Values returns all elements of the ColorSpaceConversion enum.
func ColorSpaceConversion_Values() []string {
	return []string{
		ColorSpaceConversionNone, ColorSpaceConversionForce601, ColorSpaceConversionForce709,
		ColorSpaceConversionForceHdr10, ColorSpaceConversionForceHlg2020,
	}
}
24791
// There are two sources for color metadata, the input file and the job input
// settings Color space (ColorSpace) and HDR master display information settings(Hdr10Metadata).
// The Color space usage setting determines which takes precedence. Choose Force
// (FORCE) to use color metadata from the input job settings. If you don't specify
// values for those settings, the service defaults to using metadata from your
// input. FALLBACK - Choose Fallback (FALLBACK) to use color metadata from the
// source when it is present. If there's no color metadata in your input file,
// the service defaults to using values you specify in the input settings.
const (
	// ColorSpaceUsageForce is a ColorSpaceUsage enum value
	ColorSpaceUsageForce = "FORCE"

	// ColorSpaceUsageFallback is a ColorSpaceUsage enum value
	ColorSpaceUsageFallback = "FALLBACK"
)

// ColorSpaceUsage_Values returns all elements of the ColorSpaceUsage enum.
func ColorSpaceUsage_Values() []string {
	return []string{ColorSpaceUsageForce, ColorSpaceUsageFallback}
}
24815
// The length of the term of your reserved queue pricing plan commitment.
const (
	// CommitmentOneYear is a Commitment enum value
	CommitmentOneYear = "ONE_YEAR"
)

// Commitment_Values returns all elements of the Commitment enum.
func Commitment_Values() []string {
	return []string{CommitmentOneYear}
}
24828
// Container for this output. Some containers require a container settings object.
// If not specified, the default object will be created.
const (
	// ContainerTypeF4v is a ContainerType enum value
	ContainerTypeF4v = "F4V"

	// ContainerTypeIsmv is a ContainerType enum value
	ContainerTypeIsmv = "ISMV"

	// ContainerTypeM2ts is a ContainerType enum value
	ContainerTypeM2ts = "M2TS"

	// ContainerTypeM3u8 is a ContainerType enum value
	ContainerTypeM3u8 = "M3U8"

	// ContainerTypeCmfc is a ContainerType enum value
	ContainerTypeCmfc = "CMFC"

	// ContainerTypeMov is a ContainerType enum value
	ContainerTypeMov = "MOV"

	// ContainerTypeMp4 is a ContainerType enum value
	ContainerTypeMp4 = "MP4"

	// ContainerTypeMpd is a ContainerType enum value
	ContainerTypeMpd = "MPD"

	// ContainerTypeMxf is a ContainerType enum value
	ContainerTypeMxf = "MXF"

	// ContainerTypeWebm is a ContainerType enum value
	ContainerTypeWebm = "WEBM"

	// ContainerTypeRaw is a ContainerType enum value
	ContainerTypeRaw = "RAW"
)

// ContainerType_Values returns all elements of the ContainerType enum.
func ContainerType_Values() []string {
	return []string{
		ContainerTypeF4v, ContainerTypeIsmv, ContainerTypeM2ts, ContainerTypeM3u8,
		ContainerTypeCmfc, ContainerTypeMov, ContainerTypeMp4, ContainerTypeMpd,
		ContainerTypeMxf, ContainerTypeWebm, ContainerTypeRaw,
	}
}
24882
// The action to take on copy and redistribution control XDS packets. If you
// select PASSTHROUGH, packets will not be changed. If you select STRIP, any
// packets will be removed in output captions.
const (
	// CopyProtectionActionPassthrough is a CopyProtectionAction enum value
	CopyProtectionActionPassthrough = "PASSTHROUGH"

	// CopyProtectionActionStrip is a CopyProtectionAction enum value
	CopyProtectionActionStrip = "STRIP"
)

// CopyProtectionAction_Values returns all elements of the CopyProtectionAction enum.
func CopyProtectionAction_Values() []string {
	return []string{CopyProtectionActionPassthrough, CopyProtectionActionStrip}
}
24901
// Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or
// Atmos) and your downstream workflow requires that your DASH manifest use
// the Dolby channel configuration tag, rather than the MPEG one. For example,
// you might need to use this to make dynamic ad insertion work. Specify which
// audio channel configuration scheme ID URI MediaConvert writes in your DASH
// manifest. Keep the default value, MPEG channel configuration (MPEG_CHANNEL_CONFIGURATION),
// to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration.
// Choose Dolby channel configuration (DOLBY_CHANNEL_CONFIGURATION) to have
// MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011.
const (
	// DashIsoGroupAudioChannelConfigSchemeIdUriMpegChannelConfiguration is a DashIsoGroupAudioChannelConfigSchemeIdUri enum value
	DashIsoGroupAudioChannelConfigSchemeIdUriMpegChannelConfiguration = "MPEG_CHANNEL_CONFIGURATION"

	// DashIsoGroupAudioChannelConfigSchemeIdUriDolbyChannelConfiguration is a DashIsoGroupAudioChannelConfigSchemeIdUri enum value
	DashIsoGroupAudioChannelConfigSchemeIdUriDolbyChannelConfiguration = "DOLBY_CHANNEL_CONFIGURATION"
)

// DashIsoGroupAudioChannelConfigSchemeIdUri_Values returns all elements of the DashIsoGroupAudioChannelConfigSchemeIdUri enum.
func DashIsoGroupAudioChannelConfigSchemeIdUri_Values() []string {
	return []string{
		DashIsoGroupAudioChannelConfigSchemeIdUriMpegChannelConfiguration,
		DashIsoGroupAudioChannelConfigSchemeIdUriDolbyChannelConfiguration,
	}
}
24926
// Supports HbbTV specification as indicated
const (
	// DashIsoHbbtvComplianceHbbtv15 is a DashIsoHbbtvCompliance enum value
	DashIsoHbbtvComplianceHbbtv15 = "HBBTV_1_5"

	// DashIsoHbbtvComplianceNone is a DashIsoHbbtvCompliance enum value
	DashIsoHbbtvComplianceNone = "NONE"
)

// DashIsoHbbtvCompliance_Values returns all elements of the DashIsoHbbtvCompliance enum.
func DashIsoHbbtvCompliance_Values() []string {
	return []string{DashIsoHbbtvComplianceHbbtv15, DashIsoHbbtvComplianceNone}
}
24943
// Specify whether MediaConvert generates images for trick play. Keep the default
// value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL)
// to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME)
// to generate tiled thumbnails and full-resolution images of single frames.
// MediaConvert adds an entry in the .mpd manifest for each set of images that
// you generate. A common application for these images is Roku trick mode. The
// thumbnails and full-frame images that MediaConvert creates with this feature
// are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md
const (
	// DashIsoImageBasedTrickPlayNone is a DashIsoImageBasedTrickPlay enum value
	DashIsoImageBasedTrickPlayNone = "NONE"

	// DashIsoImageBasedTrickPlayThumbnail is a DashIsoImageBasedTrickPlay enum value
	DashIsoImageBasedTrickPlayThumbnail = "THUMBNAIL"

	// DashIsoImageBasedTrickPlayThumbnailAndFullframe is a DashIsoImageBasedTrickPlay enum value
	DashIsoImageBasedTrickPlayThumbnailAndFullframe = "THUMBNAIL_AND_FULLFRAME"
)

// DashIsoImageBasedTrickPlay_Values returns all elements of the DashIsoImageBasedTrickPlay enum.
func DashIsoImageBasedTrickPlay_Values() []string {
	return []string{
		DashIsoImageBasedTrickPlayNone, DashIsoImageBasedTrickPlayThumbnail,
		DashIsoImageBasedTrickPlayThumbnailAndFullframe,
	}
}
24971
// Specify whether your DASH profile is on-demand or main. When you choose Main
// profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011
// in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE),
// the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd.
// When you choose On-demand, you must also set the output group setting Segment
// control (SegmentControl) to Single file (SINGLE_FILE).
const (
	// DashIsoMpdProfileMainProfile is a DashIsoMpdProfile enum value
	DashIsoMpdProfileMainProfile = "MAIN_PROFILE"

	// DashIsoMpdProfileOnDemandProfile is a DashIsoMpdProfile enum value
	DashIsoMpdProfileOnDemandProfile = "ON_DEMAND_PROFILE"
)

// DashIsoMpdProfile_Values returns all elements of the DashIsoMpdProfile enum.
func DashIsoMpdProfile_Values() []string {
	return []string{DashIsoMpdProfileMainProfile, DashIsoMpdProfileOnDemandProfile}
}
24993
// This setting can improve the compatibility of your output with video players
// on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption.
// Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback
// on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1).
// If you choose Unencrypted SEI, for that output, the service will exclude
// the access unit delimiter and will leave the SEI NAL units unencrypted.
const (
	// DashIsoPlaybackDeviceCompatibilityCencV1 is a DashIsoPlaybackDeviceCompatibility enum value
	DashIsoPlaybackDeviceCompatibilityCencV1 = "CENC_V1"

	// DashIsoPlaybackDeviceCompatibilityUnencryptedSei is a DashIsoPlaybackDeviceCompatibility enum value
	DashIsoPlaybackDeviceCompatibilityUnencryptedSei = "UNENCRYPTED_SEI"
)

// DashIsoPlaybackDeviceCompatibility_Values returns all elements of the DashIsoPlaybackDeviceCompatibility enum.
func DashIsoPlaybackDeviceCompatibility_Values() []string {
	return []string{DashIsoPlaybackDeviceCompatibilityCencV1, DashIsoPlaybackDeviceCompatibilityUnencryptedSei}
}
25015
// Use this setting only when your output video stream has B-frames, which causes
// the initial presentation time stamp (PTS) to be offset from the initial decode
// time stamp (DTS). Specify how MediaConvert handles PTS when writing time
// stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS)
// when you want MediaConvert to use the initial PTS as the first time stamp
// in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore
// the initial PTS in the video stream and instead write the initial time stamp
// as zero in the manifest. For outputs that don't have B-frames, the time stamps
// in your DASH manifests start at zero regardless of your choice here.
const (
	// DashIsoPtsOffsetHandlingForBFramesZeroBased is a DashIsoPtsOffsetHandlingForBFrames enum value
	DashIsoPtsOffsetHandlingForBFramesZeroBased = "ZERO_BASED"

	// DashIsoPtsOffsetHandlingForBFramesMatchInitialPts is a DashIsoPtsOffsetHandlingForBFrames enum value
	DashIsoPtsOffsetHandlingForBFramesMatchInitialPts = "MATCH_INITIAL_PTS"
)

// DashIsoPtsOffsetHandlingForBFrames_Values returns all elements of the DashIsoPtsOffsetHandlingForBFrames enum.
func DashIsoPtsOffsetHandlingForBFrames_Values() []string {
	return []string{DashIsoPtsOffsetHandlingForBFramesZeroBased, DashIsoPtsOffsetHandlingForBFramesMatchInitialPts}
}
25040
// When set to SINGLE_FILE, a single output file is generated, which is internally
// segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES,
// separate segment files will be created.
const (
	// DashIsoSegmentControlSingleFile is a DashIsoSegmentControl enum value
	DashIsoSegmentControlSingleFile = "SINGLE_FILE"

	// DashIsoSegmentControlSegmentedFiles is a DashIsoSegmentControl enum value
	DashIsoSegmentControlSegmentedFiles = "SEGMENTED_FILES"
)

// DashIsoSegmentControl_Values returns all elements of the DashIsoSegmentControl enum.
func DashIsoSegmentControl_Values() []string {
	return []string{DashIsoSegmentControlSingleFile, DashIsoSegmentControlSegmentedFiles}
}
25059
// When you enable Precise segment duration in manifests (writeSegmentTimelineInRepresentation),
// your DASH manifest shows precise segment durations. The segment duration
// information appears inside the SegmentTimeline element, inside SegmentTemplate
// at the Representation level. When this feature isn't enabled, the segment
// durations in your DASH manifest are approximate. The segment duration information
// appears in the duration attribute of the SegmentTemplate element.
const (
	// DashIsoWriteSegmentTimelineInRepresentationEnabled is a DashIsoWriteSegmentTimelineInRepresentation enum value
	DashIsoWriteSegmentTimelineInRepresentationEnabled = "ENABLED"

	// DashIsoWriteSegmentTimelineInRepresentationDisabled is a DashIsoWriteSegmentTimelineInRepresentation enum value
	DashIsoWriteSegmentTimelineInRepresentationDisabled = "DISABLED"
)

// DashIsoWriteSegmentTimelineInRepresentation_Values returns all elements of the DashIsoWriteSegmentTimelineInRepresentation enum.
func DashIsoWriteSegmentTimelineInRepresentation_Values() []string {
	return []string{DashIsoWriteSegmentTimelineInRepresentationEnabled, DashIsoWriteSegmentTimelineInRepresentationDisabled}
}
25081
// Specify the encryption mode that you used to encrypt your input files.
const (
	// DecryptionModeAesCtr is a DecryptionMode enum value
	DecryptionModeAesCtr = "AES_CTR"

	// DecryptionModeAesCbc is a DecryptionMode enum value
	DecryptionModeAesCbc = "AES_CBC"

	// DecryptionModeAesGcm is a DecryptionMode enum value
	DecryptionModeAesGcm = "AES_GCM"
)

// DecryptionMode_Values returns all elements of the DecryptionMode enum.
func DecryptionMode_Values() []string {
	return []string{DecryptionModeAesCtr, DecryptionModeAesCbc, DecryptionModeAesGcm}
}
25102
// Only applies when you set Deinterlacer (DeinterlaceMode) to Deinterlace (DEINTERLACE)
// or Adaptive (ADAPTIVE). Motion adaptive interpolate (INTERPOLATE) produces
// sharper pictures, while blend (BLEND) produces smoother motion. Use (INTERPOLATE_TICKER)
// or (BLEND_TICKER) if your source file includes a ticker, such as a scrolling
// headline at the bottom of the frame.
const (
	// DeinterlaceAlgorithmInterpolate is a DeinterlaceAlgorithm enum value
	DeinterlaceAlgorithmInterpolate = "INTERPOLATE"

	// DeinterlaceAlgorithmInterpolateTicker is a DeinterlaceAlgorithm enum value
	DeinterlaceAlgorithmInterpolateTicker = "INTERPOLATE_TICKER"

	// DeinterlaceAlgorithmBlend is a DeinterlaceAlgorithm enum value
	DeinterlaceAlgorithmBlend = "BLEND"

	// DeinterlaceAlgorithmBlendTicker is a DeinterlaceAlgorithm enum value
	DeinterlaceAlgorithmBlendTicker = "BLEND_TICKER"
)

// DeinterlaceAlgorithm_Values returns all elements of the DeinterlaceAlgorithm enum.
func DeinterlaceAlgorithm_Values() []string {
	return []string{
		DeinterlaceAlgorithmInterpolate, DeinterlaceAlgorithmInterpolateTicker,
		DeinterlaceAlgorithmBlend, DeinterlaceAlgorithmBlendTicker,
	}
}
25131
// - When set to NORMAL (default), the deinterlacer does not convert frames
// that are tagged in metadata as progressive. It will only convert those that
// are tagged as some other type. - When set to FORCE_ALL_FRAMES, the deinterlacer
// converts every frame to progressive - even those that are already tagged
// as progressive. Turn Force mode on only if there is a good chance that the
// metadata has tagged frames as progressive when they are not progressive.
// Do not turn on otherwise; processing frames that are already progressive
// into progressive will probably result in lower quality video.
const (
	// DeinterlacerControlForceAllFrames is a DeinterlacerControl enum value
	DeinterlacerControlForceAllFrames = "FORCE_ALL_FRAMES"

	// DeinterlacerControlNormal is a DeinterlacerControl enum value
	DeinterlacerControlNormal = "NORMAL"
)

// DeinterlacerControl_Values returns all elements of the DeinterlacerControl enum.
func DeinterlacerControl_Values() []string {
	return []string{DeinterlacerControlForceAllFrames, DeinterlacerControlNormal}
}
25155
// Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing.
// Default is Deinterlace. - Deinterlace converts interlaced to progressive.
// - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p.
// - Adaptive auto-detects and converts to progressive.
const (
	// DeinterlacerModeDeinterlace is a DeinterlacerMode enum value
	DeinterlacerModeDeinterlace = "DEINTERLACE"

	// DeinterlacerModeInverseTelecine is a DeinterlacerMode enum value
	DeinterlacerModeInverseTelecine = "INVERSE_TELECINE"

	// DeinterlacerModeAdaptive is a DeinterlacerMode enum value
	DeinterlacerModeAdaptive = "ADAPTIVE"
)

// DeinterlacerMode_Values returns all elements of the DeinterlacerMode enum.
func DeinterlacerMode_Values() []string {
	return []string{DeinterlacerModeDeinterlace, DeinterlacerModeInverseTelecine, DeinterlacerModeAdaptive}
}
25179
// Optional field, defaults to DEFAULT. Specify DEFAULT for this operation to
// return your endpoints if any exist, or to create an endpoint for you and
// return it if one doesn't already exist. Specify GET_ONLY to return your endpoints
// if any exist, or an empty list if none exist.
const (
	// DescribeEndpointsModeDefault is a DescribeEndpointsMode enum value
	DescribeEndpointsModeDefault = "DEFAULT"

	// DescribeEndpointsModeGetOnly is a DescribeEndpointsMode enum value
	DescribeEndpointsModeGetOnly = "GET_ONLY"
)

// DescribeEndpointsMode_Values returns all elements of the DescribeEndpointsMode enum.
func DescribeEndpointsMode_Values() []string {
	return []string{DescribeEndpointsModeDefault, DescribeEndpointsModeGetOnly}
}
25199
// Use Dolby Vision Mode to choose how the service will handle Dolby Vision
// MaxCLL and MaxFALL properties.
const (
	// DolbyVisionLevel6ModePassthrough is a DolbyVisionLevel6Mode enum value
	DolbyVisionLevel6ModePassthrough = "PASSTHROUGH"

	// DolbyVisionLevel6ModeRecalculate is a DolbyVisionLevel6Mode enum value
	DolbyVisionLevel6ModeRecalculate = "RECALCULATE"

	// DolbyVisionLevel6ModeSpecify is a DolbyVisionLevel6Mode enum value
	DolbyVisionLevel6ModeSpecify = "SPECIFY"
)

// DolbyVisionLevel6Mode_Values returns all elements of the DolbyVisionLevel6Mode enum.
func DolbyVisionLevel6Mode_Values() []string {
	return []string{DolbyVisionLevel6ModePassthrough, DolbyVisionLevel6ModeRecalculate, DolbyVisionLevel6ModeSpecify}
}
25221
// In the current MediaConvert implementation, the Dolby Vision profile is always
// 5 (PROFILE_5). Therefore, all of your inputs must contain Dolby Vision frame
// interleaved data.
const (
	// DolbyVisionProfileProfile5 is a DolbyVisionProfile enum value
	DolbyVisionProfileProfile5 = "PROFILE_5"
)

// DolbyVisionProfile_Values returns all elements of the DolbyVisionProfile enum.
func DolbyVisionProfile_Values() []string {
	return []string{DolbyVisionProfileProfile5}
}
25236
// Applies only to 29.97 fps outputs. When this feature is enabled, the service
// will use drop-frame timecode on outputs. If it is not possible to use drop-frame
// timecode, the system will fall back to non-drop-frame. This setting is enabled
// by default when Timecode insertion (TimecodeInsertion) is enabled.
const (
	// DropFrameTimecodeDisabled is a DropFrameTimecode enum value
	DropFrameTimecodeDisabled = "DISABLED"

	// DropFrameTimecodeEnabled is a DropFrameTimecode enum value
	DropFrameTimecodeEnabled = "ENABLED"
)

// DropFrameTimecode_Values returns all elements of the DropFrameTimecode enum.
func DropFrameTimecode_Values() []string {
	return []string{DropFrameTimecodeDisabled, DropFrameTimecodeEnabled}
}
25256
// If no explicit x_position or y_position is provided, setting alignment to
// centered will place the captions at the bottom center of the output. Similarly,
// setting a left alignment will align captions to the bottom left of the output.
// If x and y positions are given in conjunction with the alignment parameter,
// the font will be justified (either left or centered) relative to those coordinates.
// This option is not valid for source captions that are STL, 608/embedded or
// teletext. These source settings are already pre-defined by the caption stream.
// All burn-in and DVB-Sub font settings must match.
const (
	// DvbSubtitleAlignmentCentered is a DvbSubtitleAlignment enum value
	DvbSubtitleAlignmentCentered = "CENTERED"

	// DvbSubtitleAlignmentLeft is a DvbSubtitleAlignment enum value
	DvbSubtitleAlignmentLeft = "LEFT"
)

// DvbSubtitleAlignment_Values returns all elements of the DvbSubtitleAlignment enum.
func DvbSubtitleAlignment_Values() []string {
	return []string{DvbSubtitleAlignmentCentered, DvbSubtitleAlignmentLeft}
}
25280
// Specifies the color of the rectangle behind the captions. All burn-in and
// DVB-Sub font settings must match.
const (
	// DvbSubtitleBackgroundColorNone is a valid DvbSubtitleBackgroundColor enum value.
	DvbSubtitleBackgroundColorNone = "NONE"

	// DvbSubtitleBackgroundColorBlack is a valid DvbSubtitleBackgroundColor enum value.
	DvbSubtitleBackgroundColorBlack = "BLACK"

	// DvbSubtitleBackgroundColorWhite is a valid DvbSubtitleBackgroundColor enum value.
	DvbSubtitleBackgroundColorWhite = "WHITE"
)

// DvbSubtitleBackgroundColor_Values returns all elements of the DvbSubtitleBackgroundColor enum
func DvbSubtitleBackgroundColor_Values() []string {
	return []string{DvbSubtitleBackgroundColorNone, DvbSubtitleBackgroundColorBlack, DvbSubtitleBackgroundColorWhite}
}
25302
// Specifies the color of the DVB-SUB captions. Not valid for source captions
// that are STL, 608/embedded or teletext, since those source settings are
// pre-defined by the caption stream. All burn-in and DVB-Sub font settings
// must match.
const (
	// DvbSubtitleFontColorWhite is a valid DvbSubtitleFontColor enum value.
	DvbSubtitleFontColorWhite = "WHITE"

	// DvbSubtitleFontColorBlack is a valid DvbSubtitleFontColor enum value.
	DvbSubtitleFontColorBlack = "BLACK"

	// DvbSubtitleFontColorYellow is a valid DvbSubtitleFontColor enum value.
	DvbSubtitleFontColorYellow = "YELLOW"

	// DvbSubtitleFontColorRed is a valid DvbSubtitleFontColor enum value.
	DvbSubtitleFontColorRed = "RED"

	// DvbSubtitleFontColorGreen is a valid DvbSubtitleFontColor enum value.
	DvbSubtitleFontColorGreen = "GREEN"

	// DvbSubtitleFontColorBlue is a valid DvbSubtitleFontColor enum value.
	DvbSubtitleFontColorBlue = "BLUE"
)

// DvbSubtitleFontColor_Values returns all elements of the DvbSubtitleFontColor enum
func DvbSubtitleFontColor_Values() []string {
	return []string{
		DvbSubtitleFontColorWhite, DvbSubtitleFontColorBlack, DvbSubtitleFontColorYellow,
		DvbSubtitleFontColorRed, DvbSubtitleFontColorGreen, DvbSubtitleFontColorBlue,
	}
}
25338
// Specifies the font outline color. Not valid for source captions that are
// 608/embedded or teletext, since those source settings are pre-defined by
// the caption stream. All burn-in and DVB-Sub font settings must match.
const (
	// DvbSubtitleOutlineColorBlack is a valid DvbSubtitleOutlineColor enum value.
	DvbSubtitleOutlineColorBlack = "BLACK"

	// DvbSubtitleOutlineColorWhite is a valid DvbSubtitleOutlineColor enum value.
	DvbSubtitleOutlineColorWhite = "WHITE"

	// DvbSubtitleOutlineColorYellow is a valid DvbSubtitleOutlineColor enum value.
	DvbSubtitleOutlineColorYellow = "YELLOW"

	// DvbSubtitleOutlineColorRed is a valid DvbSubtitleOutlineColor enum value.
	DvbSubtitleOutlineColorRed = "RED"

	// DvbSubtitleOutlineColorGreen is a valid DvbSubtitleOutlineColor enum value.
	DvbSubtitleOutlineColorGreen = "GREEN"

	// DvbSubtitleOutlineColorBlue is a valid DvbSubtitleOutlineColor enum value.
	DvbSubtitleOutlineColorBlue = "BLUE"
)

// DvbSubtitleOutlineColor_Values returns all elements of the DvbSubtitleOutlineColor enum
func DvbSubtitleOutlineColor_Values() []string {
	return []string{
		DvbSubtitleOutlineColorBlack, DvbSubtitleOutlineColorWhite, DvbSubtitleOutlineColorYellow,
		DvbSubtitleOutlineColorRed, DvbSubtitleOutlineColorGreen, DvbSubtitleOutlineColorBlue,
	}
}
25374
// Specifies the color of the shadow cast by the captions. All burn-in and
// DVB-Sub font settings must match.
const (
	// DvbSubtitleShadowColorNone is a valid DvbSubtitleShadowColor enum value.
	DvbSubtitleShadowColorNone = "NONE"

	// DvbSubtitleShadowColorBlack is a valid DvbSubtitleShadowColor enum value.
	DvbSubtitleShadowColorBlack = "BLACK"

	// DvbSubtitleShadowColorWhite is a valid DvbSubtitleShadowColor enum value.
	DvbSubtitleShadowColorWhite = "WHITE"
)

// DvbSubtitleShadowColor_Values returns all elements of the DvbSubtitleShadowColor enum
func DvbSubtitleShadowColor_Values() []string {
	return []string{DvbSubtitleShadowColorNone, DvbSubtitleShadowColorBlack, DvbSubtitleShadowColorWhite}
}
25396
// Only applies to jobs with input captions in Teletext or STL formats. Controls
// whether letter spacing in your captions follows the captions grid or varies
// with letter width. Fixed grid conforms more accurately to the spacing in the
// captions file; proportional makes the text easier to read for closed
// captions.
const (
	// DvbSubtitleTeletextSpacingFixedGrid is a valid DvbSubtitleTeletextSpacing enum value.
	DvbSubtitleTeletextSpacingFixedGrid = "FIXED_GRID"

	// DvbSubtitleTeletextSpacingProportional is a valid DvbSubtitleTeletextSpacing enum value.
	DvbSubtitleTeletextSpacingProportional = "PROPORTIONAL"
)

// DvbSubtitleTeletextSpacing_Values returns all elements of the DvbSubtitleTeletextSpacing enum
func DvbSubtitleTeletextSpacing_Values() []string {
	return []string{DvbSubtitleTeletextSpacingFixedGrid, DvbSubtitleTeletextSpacingProportional}
}
25417
// Specify whether your DVB subtitles are standard or for hearing impaired.
// Hearing impaired means the subtitles include audio descriptions and
// dialogue; standard means they include dialogue only.
const (
	// DvbSubtitlingTypeHearingImpaired is a valid DvbSubtitlingType enum value.
	DvbSubtitlingTypeHearingImpaired = "HEARING_IMPAIRED"

	// DvbSubtitlingTypeStandard is a valid DvbSubtitlingType enum value.
	DvbSubtitlingTypeStandard = "STANDARD"
)

// DvbSubtitlingType_Values returns all elements of the DvbSubtitlingType enum
func DvbSubtitlingType_Values() []string {
	return []string{DvbSubtitlingTypeHearingImpaired, DvbSubtitlingTypeStandard}
}
25436
// Specify how MediaConvert handles the display definition segment (DDS). The
// default, None (NONE), excludes the DDS from this set of captions. No display
// window (NO_DISPLAY_WINDOW) includes the DDS but omits display window data,
// which MediaConvert writes to the page composition segment (PCS) instead.
// Specify (SPECIFIED) sets up the display window from the values in related
// job settings. For video resolutions 576 pixels or smaller in height,
// MediaConvert never includes the DDS, regardless of this setting, and writes
// no display window data to the PCS. Related settings: DDS x-coordinate
// (ddsXCoordinate) and DDS y-coordinate (ddsYCoordinate) give the offset
// between the top left corner of the display window and the top left corner
// of the video frame. All burn-in and DVB-Sub font settings must match.
const (
	// DvbddsHandlingNone is a valid DvbddsHandling enum value.
	DvbddsHandlingNone = "NONE"

	// DvbddsHandlingSpecified is a valid DvbddsHandling enum value.
	DvbddsHandlingSpecified = "SPECIFIED"

	// DvbddsHandlingNoDisplayWindow is a valid DvbddsHandling enum value.
	DvbddsHandlingNoDisplayWindow = "NO_DISPLAY_WINDOW"
)

// DvbddsHandling_Values returns all elements of the DvbddsHandling enum
func DvbddsHandling_Values() []string {
	return []string{DvbddsHandlingNone, DvbddsHandlingSpecified, DvbddsHandlingNoDisplayWindow}
}
25470
// Specify the bitstream mode for the E-AC-3 stream that the encoder emits.
// See ATSC A/52-2012 (Annex E) for more information about the EAC3 bitstream
// mode.
const (
	// Eac3AtmosBitstreamModeCompleteMain is a valid Eac3AtmosBitstreamMode enum value.
	Eac3AtmosBitstreamModeCompleteMain = "COMPLETE_MAIN"
)

// Eac3AtmosBitstreamMode_Values returns all elements of the Eac3AtmosBitstreamMode enum
func Eac3AtmosBitstreamMode_Values() []string {
	return []string{Eac3AtmosBitstreamModeCompleteMain}
}
25485
// The coding mode for Dolby Digital Plus JOC (Atmos).
const (
	// Eac3AtmosCodingModeCodingModeAuto is a valid Eac3AtmosCodingMode enum value.
	Eac3AtmosCodingModeCodingModeAuto = "CODING_MODE_AUTO"

	// Eac3AtmosCodingModeCodingMode514 is a valid Eac3AtmosCodingMode enum value.
	Eac3AtmosCodingModeCodingMode514 = "CODING_MODE_5_1_4"

	// Eac3AtmosCodingModeCodingMode714 is a valid Eac3AtmosCodingMode enum value.
	Eac3AtmosCodingModeCodingMode714 = "CODING_MODE_7_1_4"

	// Eac3AtmosCodingModeCodingMode916 is a valid Eac3AtmosCodingMode enum value.
	Eac3AtmosCodingModeCodingMode916 = "CODING_MODE_9_1_6"
)

// Eac3AtmosCodingMode_Values returns all elements of the Eac3AtmosCodingMode enum
func Eac3AtmosCodingMode_Values() []string {
	return []string{
		Eac3AtmosCodingModeCodingModeAuto, Eac3AtmosCodingModeCodingMode514,
		Eac3AtmosCodingModeCodingMode714, Eac3AtmosCodingModeCodingMode916,
	}
}
25510
// Enable Dolby Dialogue Intelligence to adjust loudness based on dialogue
// analysis.
const (
	// Eac3AtmosDialogueIntelligenceEnabled is a valid Eac3AtmosDialogueIntelligence enum value.
	Eac3AtmosDialogueIntelligenceEnabled = "ENABLED"

	// Eac3AtmosDialogueIntelligenceDisabled is a valid Eac3AtmosDialogueIntelligence enum value.
	Eac3AtmosDialogueIntelligenceDisabled = "DISABLED"
)

// Eac3AtmosDialogueIntelligence_Values returns all elements of the Eac3AtmosDialogueIntelligence enum
func Eac3AtmosDialogueIntelligence_Values() []string {
	return []string{Eac3AtmosDialogueIntelligenceEnabled, Eac3AtmosDialogueIntelligenceDisabled}
}
25527
// Specify whether MediaConvert should use any downmix metadata from your input
// file. The default, Custom (SPECIFIED), lets you provide downmix values in
// your job settings; Follow source (INITIALIZE_FROM_SOURCE) uses the metadata
// from your input. Related settings for specifying downmix values: Left
// only/Right only surround (LoRoSurroundMixLevel), Left total/Right total
// surround (LtRtSurroundMixLevel), Left total/Right total center
// (LtRtCenterMixLevel), Left only/Right only center (LoRoCenterMixLevel), and
// Stereo downmix (StereoDownmix). If you keep Custom (SPECIFIED) for Downmix
// control (DownmixControl) but don't specify the related settings,
// MediaConvert uses default values for them.
const (
	// Eac3AtmosDownmixControlSpecified is a valid Eac3AtmosDownmixControl enum value.
	Eac3AtmosDownmixControlSpecified = "SPECIFIED"

	// Eac3AtmosDownmixControlInitializeFromSource is a valid Eac3AtmosDownmixControl enum value.
	Eac3AtmosDownmixControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"
)

// Eac3AtmosDownmixControl_Values returns all elements of the Eac3AtmosDownmixControl enum
func Eac3AtmosDownmixControl_Values() []string {
	return []string{Eac3AtmosDownmixControlSpecified, Eac3AtmosDownmixControlInitializeFromSource}
}
25553
// Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses
// when encoding the metadata in the Dolby stream for the line operating mode.
// Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT). Related
// setting: keep the default, Custom (SPECIFIED), for Dynamic range control
// (DynamicRangeControl) to have MediaConvert use this value; otherwise
// Dynamic range compression line (DynamicRangeCompressionLine) is ignored.
// See the Dynamic Range Control chapter of the Dolby Metadata Guide at
// https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf
// for the Dolby DRC operating modes and profiles.
const (
	// Eac3AtmosDynamicRangeCompressionLineNone is a valid Eac3AtmosDynamicRangeCompressionLine enum value.
	Eac3AtmosDynamicRangeCompressionLineNone = "NONE"

	// Eac3AtmosDynamicRangeCompressionLineFilmStandard is a valid Eac3AtmosDynamicRangeCompressionLine enum value.
	Eac3AtmosDynamicRangeCompressionLineFilmStandard = "FILM_STANDARD"

	// Eac3AtmosDynamicRangeCompressionLineFilmLight is a valid Eac3AtmosDynamicRangeCompressionLine enum value.
	Eac3AtmosDynamicRangeCompressionLineFilmLight = "FILM_LIGHT"

	// Eac3AtmosDynamicRangeCompressionLineMusicStandard is a valid Eac3AtmosDynamicRangeCompressionLine enum value.
	Eac3AtmosDynamicRangeCompressionLineMusicStandard = "MUSIC_STANDARD"

	// Eac3AtmosDynamicRangeCompressionLineMusicLight is a valid Eac3AtmosDynamicRangeCompressionLine enum value.
	Eac3AtmosDynamicRangeCompressionLineMusicLight = "MUSIC_LIGHT"

	// Eac3AtmosDynamicRangeCompressionLineSpeech is a valid Eac3AtmosDynamicRangeCompressionLine enum value.
	Eac3AtmosDynamicRangeCompressionLineSpeech = "SPEECH"
)

// Eac3AtmosDynamicRangeCompressionLine_Values returns all elements of the Eac3AtmosDynamicRangeCompressionLine enum
func Eac3AtmosDynamicRangeCompressionLine_Values() []string {
	return []string{
		Eac3AtmosDynamicRangeCompressionLineNone,
		Eac3AtmosDynamicRangeCompressionLineFilmStandard,
		Eac3AtmosDynamicRangeCompressionLineFilmLight,
		Eac3AtmosDynamicRangeCompressionLineMusicStandard,
		Eac3AtmosDynamicRangeCompressionLineMusicLight,
		Eac3AtmosDynamicRangeCompressionLineSpeech,
	}
}
25593
// Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses
// when encoding the metadata in the Dolby stream for the RF operating mode.
// Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT). Related
// setting: keep the default, Custom (SPECIFIED), for Dynamic range control
// (DynamicRangeControl) to have MediaConvert use this value; otherwise
// Dynamic range compression RF (DynamicRangeCompressionRf) is ignored. See
// the Dynamic Range Control chapter of the Dolby Metadata Guide at
// https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf
// for the Dolby DRC operating modes and profiles.
const (
	// Eac3AtmosDynamicRangeCompressionRfNone is a valid Eac3AtmosDynamicRangeCompressionRf enum value.
	Eac3AtmosDynamicRangeCompressionRfNone = "NONE"

	// Eac3AtmosDynamicRangeCompressionRfFilmStandard is a valid Eac3AtmosDynamicRangeCompressionRf enum value.
	Eac3AtmosDynamicRangeCompressionRfFilmStandard = "FILM_STANDARD"

	// Eac3AtmosDynamicRangeCompressionRfFilmLight is a valid Eac3AtmosDynamicRangeCompressionRf enum value.
	Eac3AtmosDynamicRangeCompressionRfFilmLight = "FILM_LIGHT"

	// Eac3AtmosDynamicRangeCompressionRfMusicStandard is a valid Eac3AtmosDynamicRangeCompressionRf enum value.
	Eac3AtmosDynamicRangeCompressionRfMusicStandard = "MUSIC_STANDARD"

	// Eac3AtmosDynamicRangeCompressionRfMusicLight is a valid Eac3AtmosDynamicRangeCompressionRf enum value.
	Eac3AtmosDynamicRangeCompressionRfMusicLight = "MUSIC_LIGHT"

	// Eac3AtmosDynamicRangeCompressionRfSpeech is a valid Eac3AtmosDynamicRangeCompressionRf enum value.
	Eac3AtmosDynamicRangeCompressionRfSpeech = "SPEECH"
)

// Eac3AtmosDynamicRangeCompressionRf_Values returns all elements of the Eac3AtmosDynamicRangeCompressionRf enum
func Eac3AtmosDynamicRangeCompressionRf_Values() []string {
	return []string{
		Eac3AtmosDynamicRangeCompressionRfNone,
		Eac3AtmosDynamicRangeCompressionRfFilmStandard,
		Eac3AtmosDynamicRangeCompressionRfFilmLight,
		Eac3AtmosDynamicRangeCompressionRfMusicStandard,
		Eac3AtmosDynamicRangeCompressionRfMusicLight,
		Eac3AtmosDynamicRangeCompressionRfSpeech,
	}
}
25633
// Specify whether MediaConvert should use any dynamic range control metadata
// from your input file. The default, Custom (SPECIFIED), lets you provide
// dynamic range control values in your job settings; Follow source
// (INITIALIZE_FROM_SOURCE) uses the metadata from your input. Related settings
// for specifying dynamic range control values: Dynamic range compression line
// (DynamicRangeCompressionLine) and Dynamic range compression RF
// (DynamicRangeCompressionRf). If you keep Custom (SPECIFIED) for Dynamic
// range control (DynamicRangeControl) but don't specify the related settings,
// MediaConvert uses default values for them.
const (
	// Eac3AtmosDynamicRangeControlSpecified is a valid Eac3AtmosDynamicRangeControl enum value.
	Eac3AtmosDynamicRangeControlSpecified = "SPECIFIED"

	// Eac3AtmosDynamicRangeControlInitializeFromSource is a valid Eac3AtmosDynamicRangeControl enum value.
	Eac3AtmosDynamicRangeControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"
)

// Eac3AtmosDynamicRangeControl_Values returns all elements of the Eac3AtmosDynamicRangeControl enum
func Eac3AtmosDynamicRangeControl_Values() []string {
	return []string{Eac3AtmosDynamicRangeControlSpecified, Eac3AtmosDynamicRangeControlInitializeFromSource}
}
25658
// Choose how the service meters the loudness of your audio.
const (
	// Eac3AtmosMeteringModeLeqA is a valid Eac3AtmosMeteringMode enum value.
	Eac3AtmosMeteringModeLeqA = "LEQ_A"

	// Eac3AtmosMeteringModeItuBs17701 is a valid Eac3AtmosMeteringMode enum value.
	Eac3AtmosMeteringModeItuBs17701 = "ITU_BS_1770_1"

	// Eac3AtmosMeteringModeItuBs17702 is a valid Eac3AtmosMeteringMode enum value.
	Eac3AtmosMeteringModeItuBs17702 = "ITU_BS_1770_2"

	// Eac3AtmosMeteringModeItuBs17703 is a valid Eac3AtmosMeteringMode enum value.
	Eac3AtmosMeteringModeItuBs17703 = "ITU_BS_1770_3"

	// Eac3AtmosMeteringModeItuBs17704 is a valid Eac3AtmosMeteringMode enum value.
	Eac3AtmosMeteringModeItuBs17704 = "ITU_BS_1770_4"
)

// Eac3AtmosMeteringMode_Values returns all elements of the Eac3AtmosMeteringMode enum
func Eac3AtmosMeteringMode_Values() []string {
	return []string{
		Eac3AtmosMeteringModeLeqA, Eac3AtmosMeteringModeItuBs17701, Eac3AtmosMeteringModeItuBs17702,
		Eac3AtmosMeteringModeItuBs17703, Eac3AtmosMeteringModeItuBs17704,
	}
}
25687
// Choose how the service does stereo downmixing. Default value: Not indicated
// (ATMOS_STORAGE_DDP_DMIXMOD_NOT_INDICATED). Related setting: keep the
// default, Custom (SPECIFIED), for Downmix control (DownmixControl) to have
// MediaConvert use this value; otherwise Stereo downmix (StereoDownmix) is
// ignored.
const (
	// Eac3AtmosStereoDownmixNotIndicated is a valid Eac3AtmosStereoDownmix enum value.
	Eac3AtmosStereoDownmixNotIndicated = "NOT_INDICATED"

	// Eac3AtmosStereoDownmixStereo is a valid Eac3AtmosStereoDownmix enum value.
	Eac3AtmosStereoDownmixStereo = "STEREO"

	// Eac3AtmosStereoDownmixSurround is a valid Eac3AtmosStereoDownmix enum value.
	Eac3AtmosStereoDownmixSurround = "SURROUND"

	// Eac3AtmosStereoDownmixDpl2 is a valid Eac3AtmosStereoDownmix enum value.
	Eac3AtmosStereoDownmixDpl2 = "DPL2"
)

// Eac3AtmosStereoDownmix_Values returns all elements of the Eac3AtmosStereoDownmix enum
func Eac3AtmosStereoDownmix_Values() []string {
	return []string{
		Eac3AtmosStereoDownmixNotIndicated, Eac3AtmosStereoDownmixStereo,
		Eac3AtmosStereoDownmixSurround, Eac3AtmosStereoDownmixDpl2,
	}
}
25716
// Specify whether your input audio has an additional center rear surround
// channel matrix encoded into your left and right surround channels.
const (
	// Eac3AtmosSurroundExModeNotIndicated is a valid Eac3AtmosSurroundExMode enum value.
	Eac3AtmosSurroundExModeNotIndicated = "NOT_INDICATED"

	// Eac3AtmosSurroundExModeEnabled is a valid Eac3AtmosSurroundExMode enum value.
	Eac3AtmosSurroundExModeEnabled = "ENABLED"

	// Eac3AtmosSurroundExModeDisabled is a valid Eac3AtmosSurroundExMode enum value.
	Eac3AtmosSurroundExModeDisabled = "DISABLED"
)

// Eac3AtmosSurroundExMode_Values returns all elements of the Eac3AtmosSurroundExMode enum
func Eac3AtmosSurroundExMode_Values() []string {
	return []string{Eac3AtmosSurroundExModeNotIndicated, Eac3AtmosSurroundExModeEnabled, Eac3AtmosSurroundExModeDisabled}
}
25738
// If set to ATTENUATE_3_DB, applies a 3 dB attenuation to the surround
// channels. Only used for 3/2 coding mode.
const (
	// Eac3AttenuationControlAttenuate3Db is a valid Eac3AttenuationControl enum value.
	Eac3AttenuationControlAttenuate3Db = "ATTENUATE_3_DB"

	// Eac3AttenuationControlNone is a valid Eac3AttenuationControl enum value.
	Eac3AttenuationControlNone = "NONE"
)

// Eac3AttenuationControl_Values returns all elements of the Eac3AttenuationControl enum
func Eac3AttenuationControl_Values() []string {
	return []string{Eac3AttenuationControlAttenuate3Db, Eac3AttenuationControlNone}
}
25756
// Specify the bitstream mode for the E-AC-3 stream that the encoder emits.
// See ATSC A/52-2012 (Annex E) for more information about the EAC3 bitstream
// mode.
const (
	// Eac3BitstreamModeCompleteMain is a valid Eac3BitstreamMode enum value.
	Eac3BitstreamModeCompleteMain = "COMPLETE_MAIN"

	// Eac3BitstreamModeCommentary is a valid Eac3BitstreamMode enum value.
	Eac3BitstreamModeCommentary = "COMMENTARY"

	// Eac3BitstreamModeEmergency is a valid Eac3BitstreamMode enum value.
	Eac3BitstreamModeEmergency = "EMERGENCY"

	// Eac3BitstreamModeHearingImpaired is a valid Eac3BitstreamMode enum value.
	Eac3BitstreamModeHearingImpaired = "HEARING_IMPAIRED"

	// Eac3BitstreamModeVisuallyImpaired is a valid Eac3BitstreamMode enum value.
	Eac3BitstreamModeVisuallyImpaired = "VISUALLY_IMPAIRED"
)

// Eac3BitstreamMode_Values returns all elements of the Eac3BitstreamMode enum
func Eac3BitstreamMode_Values() []string {
	return []string{
		Eac3BitstreamModeCompleteMain, Eac3BitstreamModeCommentary, Eac3BitstreamModeEmergency,
		Eac3BitstreamModeHearingImpaired, Eac3BitstreamModeVisuallyImpaired,
	}
}
25787
// Dolby Digital Plus coding mode. Determines the number of channels.
const (
	// Eac3CodingModeCodingMode10 is a valid Eac3CodingMode enum value.
	Eac3CodingModeCodingMode10 = "CODING_MODE_1_0"

	// Eac3CodingModeCodingMode20 is a valid Eac3CodingMode enum value.
	Eac3CodingModeCodingMode20 = "CODING_MODE_2_0"

	// Eac3CodingModeCodingMode32 is a valid Eac3CodingMode enum value.
	Eac3CodingModeCodingMode32 = "CODING_MODE_3_2"
)

// Eac3CodingMode_Values returns all elements of the Eac3CodingMode enum
func Eac3CodingMode_Values() []string {
	return []string{Eac3CodingModeCodingMode10, Eac3CodingModeCodingMode20, Eac3CodingModeCodingMode32}
}
25808
// Activates a DC highpass filter for all input channels.
const (
	// Eac3DcFilterEnabled is a valid Eac3DcFilter enum value.
	Eac3DcFilterEnabled = "ENABLED"

	// Eac3DcFilterDisabled is a valid Eac3DcFilter enum value.
	Eac3DcFilterDisabled = "DISABLED"
)

// Eac3DcFilter_Values returns all elements of the Eac3DcFilter enum
func Eac3DcFilter_Values() []string {
	return []string{Eac3DcFilterEnabled, Eac3DcFilterDisabled}
}
25825
// Choose the Dolby Digital dynamic range control (DRC) profile that
// MediaConvert uses when encoding the metadata in the Dolby Digital stream
// for the line operating mode. Related setting: when you use this setting,
// MediaConvert ignores any value you provide for Dynamic range compression
// profile (DynamicRangeCompressionProfile). See the Dynamic Range Control
// chapter of the Dolby Metadata Guide at
// https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf
// for the Dolby Digital DRC operating modes and profiles.
const (
	// Eac3DynamicRangeCompressionLineNone is a valid Eac3DynamicRangeCompressionLine enum value.
	Eac3DynamicRangeCompressionLineNone = "NONE"

	// Eac3DynamicRangeCompressionLineFilmStandard is a valid Eac3DynamicRangeCompressionLine enum value.
	Eac3DynamicRangeCompressionLineFilmStandard = "FILM_STANDARD"

	// Eac3DynamicRangeCompressionLineFilmLight is a valid Eac3DynamicRangeCompressionLine enum value.
	Eac3DynamicRangeCompressionLineFilmLight = "FILM_LIGHT"

	// Eac3DynamicRangeCompressionLineMusicStandard is a valid Eac3DynamicRangeCompressionLine enum value.
	Eac3DynamicRangeCompressionLineMusicStandard = "MUSIC_STANDARD"

	// Eac3DynamicRangeCompressionLineMusicLight is a valid Eac3DynamicRangeCompressionLine enum value.
	Eac3DynamicRangeCompressionLineMusicLight = "MUSIC_LIGHT"

	// Eac3DynamicRangeCompressionLineSpeech is a valid Eac3DynamicRangeCompressionLine enum value.
	Eac3DynamicRangeCompressionLineSpeech = "SPEECH"
)

// Eac3DynamicRangeCompressionLine_Values returns all elements of the Eac3DynamicRangeCompressionLine enum
func Eac3DynamicRangeCompressionLine_Values() []string {
	return []string{
		Eac3DynamicRangeCompressionLineNone,
		Eac3DynamicRangeCompressionLineFilmStandard,
		Eac3DynamicRangeCompressionLineFilmLight,
		Eac3DynamicRangeCompressionLineMusicStandard,
		Eac3DynamicRangeCompressionLineMusicLight,
		Eac3DynamicRangeCompressionLineSpeech,
	}
}
25863
// Choose the Dolby Digital dynamic range control (DRC) profile that
// MediaConvert uses when encoding the metadata in the Dolby Digital stream
// for the RF operating mode. Related setting: when you use this setting,
// MediaConvert ignores any value you provide for Dynamic range compression
// profile (DynamicRangeCompressionProfile). See the Dynamic Range Control
// chapter of the Dolby Metadata Guide at
// https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf
// for the Dolby Digital DRC operating modes and profiles.
const (
	// Eac3DynamicRangeCompressionRfNone is a valid Eac3DynamicRangeCompressionRf enum value.
	Eac3DynamicRangeCompressionRfNone = "NONE"

	// Eac3DynamicRangeCompressionRfFilmStandard is a valid Eac3DynamicRangeCompressionRf enum value.
	Eac3DynamicRangeCompressionRfFilmStandard = "FILM_STANDARD"

	// Eac3DynamicRangeCompressionRfFilmLight is a valid Eac3DynamicRangeCompressionRf enum value.
	Eac3DynamicRangeCompressionRfFilmLight = "FILM_LIGHT"

	// Eac3DynamicRangeCompressionRfMusicStandard is a valid Eac3DynamicRangeCompressionRf enum value.
	Eac3DynamicRangeCompressionRfMusicStandard = "MUSIC_STANDARD"

	// Eac3DynamicRangeCompressionRfMusicLight is a valid Eac3DynamicRangeCompressionRf enum value.
	Eac3DynamicRangeCompressionRfMusicLight = "MUSIC_LIGHT"

	// Eac3DynamicRangeCompressionRfSpeech is a valid Eac3DynamicRangeCompressionRf enum value.
	Eac3DynamicRangeCompressionRfSpeech = "SPEECH"
)

// Eac3DynamicRangeCompressionRf_Values returns all elements of the Eac3DynamicRangeCompressionRf enum
func Eac3DynamicRangeCompressionRf_Values() []string {
	return []string{
		Eac3DynamicRangeCompressionRfNone,
		Eac3DynamicRangeCompressionRfFilmStandard,
		Eac3DynamicRangeCompressionRfFilmLight,
		Eac3DynamicRangeCompressionRfMusicStandard,
		Eac3DynamicRangeCompressionRfMusicLight,
		Eac3DynamicRangeCompressionRfSpeech,
	}
}
25901
// When encoding 3/2 audio, controls whether the LFE channel is enabled.
const (
	// Eac3LfeControlLfe is a valid Eac3LfeControl enum value.
	Eac3LfeControlLfe = "LFE"

	// Eac3LfeControlNoLfe is a valid Eac3LfeControl enum value.
	Eac3LfeControlNoLfe = "NO_LFE"
)

// Eac3LfeControl_Values returns all elements of the Eac3LfeControl enum
func Eac3LfeControl_Values() []string {
	return []string{Eac3LfeControlLfe, Eac3LfeControlNoLfe}
}
25918
// Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only
// valid with 3_2_LFE coding mode.
const (
	// Eac3LfeFilterEnabled is a valid Eac3LfeFilter enum value.
	Eac3LfeFilterEnabled = "ENABLED"

	// Eac3LfeFilterDisabled is a valid Eac3LfeFilter enum value.
	Eac3LfeFilterDisabled = "DISABLED"
)

// Eac3LfeFilter_Values returns all elements of the Eac3LfeFilter enum
func Eac3LfeFilter_Values() []string {
	return []string{Eac3LfeFilterEnabled, Eac3LfeFilterDisabled}
}
25936
// When set to FOLLOW_INPUT, encoder metadata is sourced from the DD, DD+, or
// DolbyE decoder that supplied this audio data. If the audio was not supplied
// from one of these streams, the static metadata settings are used instead.
const (
	// Eac3MetadataControlFollowInput is a valid Eac3MetadataControl enum value.
	Eac3MetadataControlFollowInput = "FOLLOW_INPUT"

	// Eac3MetadataControlUseConfigured is a valid Eac3MetadataControl enum value.
	Eac3MetadataControlUseConfigured = "USE_CONFIGURED"
)

// Eac3MetadataControl_Values returns all elements of the Eac3MetadataControl enum
func Eac3MetadataControl_Values() []string {
	return []string{Eac3MetadataControlFollowInput, Eac3MetadataControlUseConfigured}
}
25955
// When set to WHEN_POSSIBLE, input DD+ audio is passed through whenever it is
// present on the input. Detection is dynamic over the life of the transcode,
// so inputs that alternate between DD+ and non-DD+ content still produce a
// consistent DD+ output as the system alternates between passthrough and
// encoding.
const (
	// Eac3PassthroughControlWhenPossible is a valid Eac3PassthroughControl enum value.
	Eac3PassthroughControlWhenPossible = "WHEN_POSSIBLE"

	// Eac3PassthroughControlNoPassthrough is a valid Eac3PassthroughControl enum value.
	Eac3PassthroughControlNoPassthrough = "NO_PASSTHROUGH"
)

// Eac3PassthroughControl_Values returns all elements of the Eac3PassthroughControl enum
func Eac3PassthroughControl_Values() []string {
	return []string{Eac3PassthroughControlWhenPossible, Eac3PassthroughControlNoPassthrough}
}
25975
// Controls the amount of phase-shift applied to the surround channels. Only
// used for 3/2 coding mode.
const (
	// Eac3PhaseControlShift90Degrees is the Eac3PhaseControl enum value "SHIFT_90_DEGREES".
	Eac3PhaseControlShift90Degrees = "SHIFT_90_DEGREES"
	// Eac3PhaseControlNoShift is the Eac3PhaseControl enum value "NO_SHIFT".
	Eac3PhaseControlNoShift = "NO_SHIFT"
)

// Eac3PhaseControl_Values returns every element of the Eac3PhaseControl enum.
func Eac3PhaseControl_Values() []string {
	return []string{Eac3PhaseControlShift90Degrees, Eac3PhaseControlNoShift}
}
25993
// Choose how the service does stereo downmixing. This setting only applies
// if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2)
// for the setting Coding mode (Eac3CodingMode). If you choose a different value
// for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix).
const (
	// Eac3StereoDownmixNotIndicated is the Eac3StereoDownmix enum value "NOT_INDICATED".
	Eac3StereoDownmixNotIndicated = "NOT_INDICATED"
	// Eac3StereoDownmixLoRo is the Eac3StereoDownmix enum value "LO_RO".
	Eac3StereoDownmixLoRo = "LO_RO"
	// Eac3StereoDownmixLtRt is the Eac3StereoDownmix enum value "LT_RT".
	Eac3StereoDownmixLtRt = "LT_RT"
	// Eac3StereoDownmixDpl2 is the Eac3StereoDownmix enum value "DPL2".
	Eac3StereoDownmixDpl2 = "DPL2"
)

// Eac3StereoDownmix_Values returns every element of the Eac3StereoDownmix enum.
func Eac3StereoDownmix_Values() []string {
	return []string{
		Eac3StereoDownmixNotIndicated, Eac3StereoDownmixLoRo,
		Eac3StereoDownmixLtRt, Eac3StereoDownmixDpl2,
	}
}
26021
// When encoding 3/2 audio, sets whether an extra center back surround channel
// is matrix encoded into the left and right surround channels.
const (
	// Eac3SurroundExModeNotIndicated is the Eac3SurroundExMode enum value "NOT_INDICATED".
	Eac3SurroundExModeNotIndicated = "NOT_INDICATED"
	// Eac3SurroundExModeEnabled is the Eac3SurroundExMode enum value "ENABLED".
	Eac3SurroundExModeEnabled = "ENABLED"
	// Eac3SurroundExModeDisabled is the Eac3SurroundExMode enum value "DISABLED".
	Eac3SurroundExModeDisabled = "DISABLED"
)

// Eac3SurroundExMode_Values returns every element of the Eac3SurroundExMode enum.
func Eac3SurroundExMode_Values() []string {
	return []string{Eac3SurroundExModeNotIndicated, Eac3SurroundExModeEnabled, Eac3SurroundExModeDisabled}
}
26043
// When encoding 2/0 audio, sets whether Dolby Surround is matrix encoded into
// the two channels.
const (
	// Eac3SurroundModeNotIndicated is the Eac3SurroundMode enum value "NOT_INDICATED".
	Eac3SurroundModeNotIndicated = "NOT_INDICATED"
	// Eac3SurroundModeEnabled is the Eac3SurroundMode enum value "ENABLED".
	Eac3SurroundModeEnabled = "ENABLED"
	// Eac3SurroundModeDisabled is the Eac3SurroundMode enum value "DISABLED".
	Eac3SurroundModeDisabled = "DISABLED"
)

// Eac3SurroundMode_Values returns every element of the Eac3SurroundMode enum.
func Eac3SurroundMode_Values() []string {
	return []string{Eac3SurroundModeNotIndicated, Eac3SurroundModeEnabled, Eac3SurroundModeDisabled}
}
26065
// Specify whether this set of input captions appears in your outputs in both
// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes
// the captions data in two ways: it passes the 608 data through using the 608
// compatibility bytes fields of the 708 wrapper, and it also translates the
// 608 data into 708.
const (
	// EmbeddedConvert608To708Upconvert is the EmbeddedConvert608To708 enum value "UPCONVERT".
	EmbeddedConvert608To708Upconvert = "UPCONVERT"
	// EmbeddedConvert608To708Disabled is the EmbeddedConvert608To708 enum value "DISABLED".
	EmbeddedConvert608To708Disabled = "DISABLED"
)

// EmbeddedConvert608To708_Values returns every element of the EmbeddedConvert608To708 enum.
func EmbeddedConvert608To708_Values() []string {
	return []string{EmbeddedConvert608To708Upconvert, EmbeddedConvert608To708Disabled}
}
26086
// By default, the service terminates any unterminated captions at the end of
// each input. If you want the caption to continue onto your next input, disable
// this setting.
const (
	// EmbeddedTerminateCaptionsEndOfInput is the EmbeddedTerminateCaptions enum value "END_OF_INPUT".
	EmbeddedTerminateCaptionsEndOfInput = "END_OF_INPUT"
	// EmbeddedTerminateCaptionsDisabled is the EmbeddedTerminateCaptions enum value "DISABLED".
	EmbeddedTerminateCaptionsDisabled = "DISABLED"
)

// EmbeddedTerminateCaptions_Values returns every element of the EmbeddedTerminateCaptions enum.
func EmbeddedTerminateCaptions_Values() []string {
	return []string{EmbeddedTerminateCaptionsEndOfInput, EmbeddedTerminateCaptionsDisabled}
}
26105
// If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning
// of the archive as required for progressive downloading. Otherwise it is placed
// normally at the end.
const (
	// F4vMoovPlacementProgressiveDownload is the F4vMoovPlacement enum value "PROGRESSIVE_DOWNLOAD".
	F4vMoovPlacementProgressiveDownload = "PROGRESSIVE_DOWNLOAD"
	// F4vMoovPlacementNormal is the F4vMoovPlacement enum value "NORMAL".
	F4vMoovPlacementNormal = "NORMAL"
)

// F4vMoovPlacement_Values returns every element of the F4vMoovPlacement enum.
func F4vMoovPlacement_Values() []string {
	return []string{F4vMoovPlacementProgressiveDownload, F4vMoovPlacementNormal}
}
26124
// Specify whether this set of input captions appears in your outputs in both
// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes
// the captions data in two ways: it passes the 608 data through using the 608
// compatibility bytes fields of the 708 wrapper, and it also translates the
// 608 data into 708.
const (
	// FileSourceConvert608To708Upconvert is the FileSourceConvert608To708 enum value "UPCONVERT".
	FileSourceConvert608To708Upconvert = "UPCONVERT"
	// FileSourceConvert608To708Disabled is the FileSourceConvert608To708 enum value "DISABLED".
	FileSourceConvert608To708Disabled = "DISABLED"
)

// FileSourceConvert608To708_Values returns every element of the FileSourceConvert608To708 enum.
func FileSourceConvert608To708_Values() []string {
	return []string{FileSourceConvert608To708Upconvert, FileSourceConvert608To708Disabled}
}
26145
// Provide the font script, using an ISO 15924 script code, if the LanguageCode
// is not sufficient for determining the script type. Where LanguageCode or
// CustomLanguageCode is sufficient, use "AUTOMATIC" or leave unset.
const (
	// FontScriptAutomatic is the FontScript enum value "AUTOMATIC".
	FontScriptAutomatic = "AUTOMATIC"
	// FontScriptHans is the FontScript enum value "HANS".
	FontScriptHans = "HANS"
	// FontScriptHant is the FontScript enum value "HANT".
	FontScriptHant = "HANT"
)

// FontScript_Values returns every element of the FontScript enum.
func FontScript_Values() []string {
	return []string{FontScriptAutomatic, FontScriptHans, FontScriptHant}
}
26168
// Keep the default value, Auto (AUTO), for this setting to have MediaConvert
// automatically apply the best types of quantization for your video content.
// When you want to apply your quantization settings manually, you must set
// H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting
// to specify the strength of any adaptive quantization filters that you enable.
// If you don't want MediaConvert to do any adaptive quantization in this transcode,
// set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF). Related
// settings: The value that you choose here applies to the following settings:
// H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization.
const (
	// H264AdaptiveQuantizationOff is the H264AdaptiveQuantization enum value "OFF".
	H264AdaptiveQuantizationOff = "OFF"
	// H264AdaptiveQuantizationAuto is the H264AdaptiveQuantization enum value "AUTO".
	H264AdaptiveQuantizationAuto = "AUTO"
	// H264AdaptiveQuantizationLow is the H264AdaptiveQuantization enum value "LOW".
	H264AdaptiveQuantizationLow = "LOW"
	// H264AdaptiveQuantizationMedium is the H264AdaptiveQuantization enum value "MEDIUM".
	H264AdaptiveQuantizationMedium = "MEDIUM"
	// H264AdaptiveQuantizationHigh is the H264AdaptiveQuantization enum value "HIGH".
	H264AdaptiveQuantizationHigh = "HIGH"
	// H264AdaptiveQuantizationHigher is the H264AdaptiveQuantization enum value "HIGHER".
	H264AdaptiveQuantizationHigher = "HIGHER"
	// H264AdaptiveQuantizationMax is the H264AdaptiveQuantization enum value "MAX".
	H264AdaptiveQuantizationMax = "MAX"
)

// H264AdaptiveQuantization_Values returns every element of the H264AdaptiveQuantization enum.
func H264AdaptiveQuantization_Values() []string {
	return []string{
		H264AdaptiveQuantizationOff, H264AdaptiveQuantizationAuto,
		H264AdaptiveQuantizationLow, H264AdaptiveQuantizationMedium,
		H264AdaptiveQuantizationHigh, H264AdaptiveQuantizationHigher,
		H264AdaptiveQuantizationMax,
	}
}
26213
// Specify an H.264 level that is consistent with your output video settings.
// If you aren't sure what level to specify, choose Auto (AUTO).
const (
	// H264CodecLevelAuto is the H264CodecLevel enum value "AUTO".
	H264CodecLevelAuto = "AUTO"
	// H264CodecLevelLevel1 is the H264CodecLevel enum value "LEVEL_1".
	H264CodecLevelLevel1 = "LEVEL_1"
	// H264CodecLevelLevel11 is the H264CodecLevel enum value "LEVEL_1_1".
	H264CodecLevelLevel11 = "LEVEL_1_1"
	// H264CodecLevelLevel12 is the H264CodecLevel enum value "LEVEL_1_2".
	H264CodecLevelLevel12 = "LEVEL_1_2"
	// H264CodecLevelLevel13 is the H264CodecLevel enum value "LEVEL_1_3".
	H264CodecLevelLevel13 = "LEVEL_1_3"
	// H264CodecLevelLevel2 is the H264CodecLevel enum value "LEVEL_2".
	H264CodecLevelLevel2 = "LEVEL_2"
	// H264CodecLevelLevel21 is the H264CodecLevel enum value "LEVEL_2_1".
	H264CodecLevelLevel21 = "LEVEL_2_1"
	// H264CodecLevelLevel22 is the H264CodecLevel enum value "LEVEL_2_2".
	H264CodecLevelLevel22 = "LEVEL_2_2"
	// H264CodecLevelLevel3 is the H264CodecLevel enum value "LEVEL_3".
	H264CodecLevelLevel3 = "LEVEL_3"
	// H264CodecLevelLevel31 is the H264CodecLevel enum value "LEVEL_3_1".
	H264CodecLevelLevel31 = "LEVEL_3_1"
	// H264CodecLevelLevel32 is the H264CodecLevel enum value "LEVEL_3_2".
	H264CodecLevelLevel32 = "LEVEL_3_2"
	// H264CodecLevelLevel4 is the H264CodecLevel enum value "LEVEL_4".
	H264CodecLevelLevel4 = "LEVEL_4"
	// H264CodecLevelLevel41 is the H264CodecLevel enum value "LEVEL_4_1".
	H264CodecLevelLevel41 = "LEVEL_4_1"
	// H264CodecLevelLevel42 is the H264CodecLevel enum value "LEVEL_4_2".
	H264CodecLevelLevel42 = "LEVEL_4_2"
	// H264CodecLevelLevel5 is the H264CodecLevel enum value "LEVEL_5".
	H264CodecLevelLevel5 = "LEVEL_5"
	// H264CodecLevelLevel51 is the H264CodecLevel enum value "LEVEL_5_1".
	H264CodecLevelLevel51 = "LEVEL_5_1"
	// H264CodecLevelLevel52 is the H264CodecLevel enum value "LEVEL_5_2".
	H264CodecLevelLevel52 = "LEVEL_5_2"
)

// H264CodecLevel_Values returns every element of the H264CodecLevel enum.
func H264CodecLevel_Values() []string {
	return []string{
		H264CodecLevelAuto,
		H264CodecLevelLevel1, H264CodecLevelLevel11, H264CodecLevelLevel12, H264CodecLevelLevel13,
		H264CodecLevelLevel2, H264CodecLevelLevel21, H264CodecLevelLevel22,
		H264CodecLevelLevel3, H264CodecLevelLevel31, H264CodecLevelLevel32,
		H264CodecLevelLevel4, H264CodecLevelLevel41, H264CodecLevelLevel42,
		H264CodecLevelLevel5, H264CodecLevelLevel51, H264CodecLevelLevel52,
	}
}
26291
// H.264 Profile. High 4:2:2 and 10-bit profiles are only available with the
// AVC-I License.
const (
	// H264CodecProfileBaseline is the H264CodecProfile enum value "BASELINE".
	H264CodecProfileBaseline = "BASELINE"
	// H264CodecProfileHigh is the H264CodecProfile enum value "HIGH".
	H264CodecProfileHigh = "HIGH"
	// H264CodecProfileHigh10bit is the H264CodecProfile enum value "HIGH_10BIT".
	H264CodecProfileHigh10bit = "HIGH_10BIT"
	// H264CodecProfileHigh422 is the H264CodecProfile enum value "HIGH_422".
	H264CodecProfileHigh422 = "HIGH_422"
	// H264CodecProfileHigh42210bit is the H264CodecProfile enum value "HIGH_422_10BIT".
	H264CodecProfileHigh42210bit = "HIGH_422_10BIT"
	// H264CodecProfileMain is the H264CodecProfile enum value "MAIN".
	H264CodecProfileMain = "MAIN"
)

// H264CodecProfile_Values returns every element of the H264CodecProfile enum.
func H264CodecProfile_Values() []string {
	return []string{
		H264CodecProfileBaseline, H264CodecProfileHigh, H264CodecProfileHigh10bit,
		H264CodecProfileHigh422, H264CodecProfileHigh42210bit, H264CodecProfileMain,
	}
}
26325
// Choose Adaptive to improve subjective video quality for high-motion content.
// This will cause the service to use fewer B-frames (which infer information
// based on other frames) for high-motion portions of the video and more B-frames
// for low-motion portions. The maximum number of B-frames is limited by the
// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).
const (
	// H264DynamicSubGopAdaptive is the H264DynamicSubGop enum value "ADAPTIVE".
	H264DynamicSubGopAdaptive = "ADAPTIVE"
	// H264DynamicSubGopStatic is the H264DynamicSubGop enum value "STATIC".
	H264DynamicSubGopStatic = "STATIC"
)

// H264DynamicSubGop_Values returns every element of the H264DynamicSubGop enum.
func H264DynamicSubGop_Values() []string {
	return []string{H264DynamicSubGopAdaptive, H264DynamicSubGopStatic}
}
26346
// Entropy encoding mode. Use CABAC (must be in Main or High profile) or CAVLC.
const (
	// H264EntropyEncodingCabac is the H264EntropyEncoding enum value "CABAC".
	H264EntropyEncodingCabac = "CABAC"
	// H264EntropyEncodingCavlc is the H264EntropyEncoding enum value "CAVLC".
	H264EntropyEncodingCavlc = "CAVLC"
)

// H264EntropyEncoding_Values returns every element of the H264EntropyEncoding enum.
func H264EntropyEncoding_Values() []string {
	return []string{H264EntropyEncodingCabac, H264EntropyEncodingCavlc}
}
26363
// Keep the default value, PAFF, to have MediaConvert use PAFF encoding for
// interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding
// and create separate interlaced fields.
const (
	// H264FieldEncodingPaff is the H264FieldEncoding enum value "PAFF".
	H264FieldEncodingPaff = "PAFF"
	// H264FieldEncodingForceField is the H264FieldEncoding enum value "FORCE_FIELD".
	H264FieldEncodingForceField = "FORCE_FIELD"
)

// H264FieldEncoding_Values returns every element of the H264FieldEncoding enum.
func H264FieldEncoding_Values() []string {
	return []string{H264FieldEncodingPaff, H264FieldEncodingForceField}
}
26382
// Only use this setting when you change the default value, AUTO, for the setting
// H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization
// and all other adaptive quantization from your JSON job specification, MediaConvert
// automatically applies the best types of quantization for your video content.
// When you set H264AdaptiveQuantization to a value other than AUTO, the default
// value for H264FlickerAdaptiveQuantization is Disabled (DISABLED). Change
// this value to Enabled (ENABLED) to reduce I-frame pop. I-frame pop appears
// as a visual flicker that can arise when the encoder saves bits by copying
// some macroblocks many times from frame to frame, and then refreshes them
// at the I-frame. When you enable this setting, the encoder updates these macroblocks
// slightly more often to smooth out the flicker. To manually enable or disable
// H264FlickerAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization)
// to a value other than AUTO.
const (
	// H264FlickerAdaptiveQuantizationDisabled is the H264FlickerAdaptiveQuantization enum value "DISABLED".
	H264FlickerAdaptiveQuantizationDisabled = "DISABLED"
	// H264FlickerAdaptiveQuantizationEnabled is the H264FlickerAdaptiveQuantization enum value "ENABLED".
	H264FlickerAdaptiveQuantizationEnabled = "ENABLED"
)

// H264FlickerAdaptiveQuantization_Values returns every element of the H264FlickerAdaptiveQuantization enum.
func H264FlickerAdaptiveQuantization_Values() []string {
	return []string{H264FlickerAdaptiveQuantizationDisabled, H264FlickerAdaptiveQuantizationEnabled}
}
26411
// If you are using the console, use the Framerate setting to specify the frame
// rate for this output. If you want to keep the same frame rate as the input
// video, choose Follow source. If you want to do frame rate conversion, choose
// a frame rate from the dropdown list or choose Custom. The framerates shown
// in the dropdown list are decimal approximations of fractions. If you choose
// Custom, specify your frame rate as a fraction. If you are creating your transcoding
// job specification as a JSON file without the console, use FramerateControl
// to specify which value the service uses for the frame rate for this output.
// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
// from the input. Choose SPECIFIED if you want the service to use the frame
// rate you specify in the settings FramerateNumerator and FramerateDenominator.
const (
	// H264FramerateControlInitializeFromSource is the H264FramerateControl enum value "INITIALIZE_FROM_SOURCE".
	H264FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"
	// H264FramerateControlSpecified is the H264FramerateControl enum value "SPECIFIED".
	H264FramerateControlSpecified = "SPECIFIED"
)

// H264FramerateControl_Values returns every element of the H264FramerateControl enum.
func H264FramerateControl_Values() []string {
	return []string{H264FramerateControlInitializeFromSource, H264FramerateControlSpecified}
}
26438
// Choose the method that you want MediaConvert to use when increasing or decreasing
// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
// smooth picture, but might introduce undesirable video artifacts. For complex
// frame rate conversions, especially if your source video has already been
// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
// motion-compensated interpolation. FrameFormer chooses the best conversion
// method frame by frame. Note that using FrameFormer increases the transcoding
// time and incurs a significant add-on cost.
const (
	// H264FramerateConversionAlgorithmDuplicateDrop is the H264FramerateConversionAlgorithm enum value "DUPLICATE_DROP".
	H264FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"
	// H264FramerateConversionAlgorithmInterpolate is the H264FramerateConversionAlgorithm enum value "INTERPOLATE".
	H264FramerateConversionAlgorithmInterpolate = "INTERPOLATE"
	// H264FramerateConversionAlgorithmFrameformer is the H264FramerateConversionAlgorithm enum value "FRAMEFORMER".
	H264FramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// H264FramerateConversionAlgorithm_Values returns every element of the H264FramerateConversionAlgorithm enum.
func H264FramerateConversionAlgorithm_Values() []string {
	return []string{
		H264FramerateConversionAlgorithmDuplicateDrop,
		H264FramerateConversionAlgorithmInterpolate,
		H264FramerateConversionAlgorithmFrameformer,
	}
}
26468
// If enable, use reference B frames for GOP structures that have B frames >
// 1.
const (
	// H264GopBReferenceDisabled is the H264GopBReference enum value "DISABLED".
	H264GopBReferenceDisabled = "DISABLED"
	// H264GopBReferenceEnabled is the H264GopBReference enum value "ENABLED".
	H264GopBReferenceEnabled = "ENABLED"
)

// H264GopBReference_Values returns every element of the H264GopBReference enum.
func H264GopBReference_Values() []string {
	return []string{H264GopBReferenceDisabled, H264GopBReferenceEnabled}
}
26486
// Indicates if the GOP Size in H264 is specified in frames or seconds. If seconds
// the system will convert the GOP Size into a frame count at run time.
const (
	// H264GopSizeUnitsFrames is the H264GopSizeUnits enum value "FRAMES".
	H264GopSizeUnitsFrames = "FRAMES"
	// H264GopSizeUnitsSeconds is the H264GopSizeUnits enum value "SECONDS".
	H264GopSizeUnitsSeconds = "SECONDS"
)

// H264GopSizeUnits_Values returns every element of the H264GopSizeUnits enum.
func H264GopSizeUnits_Values() []string {
	return []string{H264GopSizeUnitsFrames, H264GopSizeUnitsSeconds}
}
26504
// Choose the scan line type for the output. Keep the default value, Progressive
// (PROGRESSIVE) to create a progressive output, regardless of the scan type
// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
// to create an output that's interlaced with the same field polarity throughout.
// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
// to produce outputs with the same field polarity as the source. For jobs that
// have multiple inputs, the output field polarity might change over the course
// of the output. Follow behavior depends on the input scan type. If the source
// is interlaced, the output will be interlaced with the same polarity as the
// source. If the source is progressive, the output will be interlaced with
// top field bottom field first, depending on which of the Follow options you
// choose.
const (
	// H264InterlaceModeProgressive is the H264InterlaceMode enum value "PROGRESSIVE".
	H264InterlaceModeProgressive = "PROGRESSIVE"
	// H264InterlaceModeTopField is the H264InterlaceMode enum value "TOP_FIELD".
	H264InterlaceModeTopField = "TOP_FIELD"
	// H264InterlaceModeBottomField is the H264InterlaceMode enum value "BOTTOM_FIELD".
	H264InterlaceModeBottomField = "BOTTOM_FIELD"
	// H264InterlaceModeFollowTopField is the H264InterlaceMode enum value "FOLLOW_TOP_FIELD".
	H264InterlaceModeFollowTopField = "FOLLOW_TOP_FIELD"
	// H264InterlaceModeFollowBottomField is the H264InterlaceMode enum value "FOLLOW_BOTTOM_FIELD".
	H264InterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD"
)

// H264InterlaceMode_Values returns every element of the H264InterlaceMode enum.
func H264InterlaceMode_Values() []string {
	return []string{
		H264InterlaceModeProgressive,
		H264InterlaceModeTopField, H264InterlaceModeBottomField,
		H264InterlaceModeFollowTopField, H264InterlaceModeFollowBottomField,
	}
}
26544
// Optional. Specify how the service determines the pixel aspect ratio (PAR)
// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
// uses the PAR from your input video for your output. To specify a different
// PAR in the console, choose any value other than Follow source. To specify
// a different PAR by editing the JSON job specification, choose SPECIFIED.
// When you choose SPECIFIED for this setting, you must also specify values
// for the parNumerator and parDenominator settings.
const (
	// H264ParControlInitializeFromSource is the H264ParControl enum value "INITIALIZE_FROM_SOURCE".
	H264ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"
	// H264ParControlSpecified is the H264ParControl enum value "SPECIFIED".
	H264ParControlSpecified = "SPECIFIED"
)

// H264ParControl_Values returns every element of the H264ParControl enum.
func H264ParControl_Values() []string {
	return []string{H264ParControlInitializeFromSource, H264ParControlSpecified}
}
26567
// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
// want to trade off encoding speed for output video quality. The default behavior
// is faster, lower quality, single-pass encoding.
const (
	// H264QualityTuningLevelSinglePass is the H264QualityTuningLevel enum value "SINGLE_PASS".
	H264QualityTuningLevelSinglePass = "SINGLE_PASS"
	// H264QualityTuningLevelSinglePassHq is the H264QualityTuningLevel enum value "SINGLE_PASS_HQ".
	H264QualityTuningLevelSinglePassHq = "SINGLE_PASS_HQ"
	// H264QualityTuningLevelMultiPassHq is the H264QualityTuningLevel enum value "MULTI_PASS_HQ".
	H264QualityTuningLevelMultiPassHq = "MULTI_PASS_HQ"
)

// H264QualityTuningLevel_Values returns every element of the H264QualityTuningLevel enum.
func H264QualityTuningLevel_Values() []string {
	return []string{H264QualityTuningLevelSinglePass, H264QualityTuningLevelSinglePassHq, H264QualityTuningLevelMultiPassHq}
}
26590
// Use this setting to specify whether this output has a variable bitrate (VBR),
// constant bitrate (CBR) or quality-defined variable bitrate (QVBR).
const (
	// H264RateControlModeVbr is the H264RateControlMode enum value "VBR".
	H264RateControlModeVbr = "VBR"
	// H264RateControlModeCbr is the H264RateControlMode enum value "CBR".
	H264RateControlModeCbr = "CBR"
	// H264RateControlModeQvbr is the H264RateControlMode enum value "QVBR".
	H264RateControlModeQvbr = "QVBR"
)

// H264RateControlMode_Values returns every element of the H264RateControlMode enum.
func H264RateControlMode_Values() []string {
	return []string{H264RateControlModeVbr, H264RateControlModeCbr, H264RateControlModeQvbr}
}
26612
// Places a PPS header on each encoded picture, even if repeated.
const (
	// H264RepeatPpsDisabled is the H264RepeatPps enum value "DISABLED".
	H264RepeatPpsDisabled = "DISABLED"
	// H264RepeatPpsEnabled is the H264RepeatPps enum value "ENABLED".
	H264RepeatPpsEnabled = "ENABLED"
)

// H264RepeatPps_Values returns every element of the H264RepeatPps enum.
func H264RepeatPps_Values() []string {
	return []string{H264RepeatPpsDisabled, H264RepeatPpsEnabled}
}
26629
// Use this setting for interlaced outputs, when your output frame rate is half
// of your input frame rate. In this situation, choose Optimized interlacing
// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
// case, each progressive frame from the input corresponds to an interlaced
// field in the output. Keep the default value, Basic interlacing (INTERLACED),
// for all other output frame rates. With basic interlacing, MediaConvert performs
// any frame rate conversion first and then interlaces the frames. When you
// choose Optimized interlacing and you set your output frame rate to a value
// that isn't suitable for optimized interlacing, MediaConvert automatically
// falls back to basic interlacing. Required settings: To use optimized interlacing,
// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
// use optimized interlacing for hard telecine outputs. You must also set Interlace
// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
const (
	// H264ScanTypeConversionModeInterlaced is the H264ScanTypeConversionMode enum value "INTERLACED".
	H264ScanTypeConversionModeInterlaced = "INTERLACED"
	// H264ScanTypeConversionModeInterlacedOptimize is the H264ScanTypeConversionMode enum value "INTERLACED_OPTIMIZE".
	H264ScanTypeConversionModeInterlacedOptimize = "INTERLACED_OPTIMIZE"
)

// H264ScanTypeConversionMode_Values returns every element of the H264ScanTypeConversionMode enum.
func H264ScanTypeConversionMode_Values() []string {
	return []string{H264ScanTypeConversionModeInterlaced, H264ScanTypeConversionModeInterlacedOptimize}
}
26658
// Enable this setting to insert I-frames at scene changes that the service
// automatically detects. This improves video quality and is enabled by default.
// If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION)
// for further video quality improvement. For more information about QVBR, see
// https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.
const (
	// H264SceneChangeDetectDisabled is the H264SceneChangeDetect enum value "DISABLED".
	H264SceneChangeDetectDisabled = "DISABLED"
	// H264SceneChangeDetectEnabled is the H264SceneChangeDetect enum value "ENABLED".
	H264SceneChangeDetectEnabled = "ENABLED"
	// H264SceneChangeDetectTransitionDetection is the H264SceneChangeDetect enum value "TRANSITION_DETECTION".
	H264SceneChangeDetectTransitionDetection = "TRANSITION_DETECTION"
)

// H264SceneChangeDetect_Values returns every element of the H264SceneChangeDetect enum.
func H264SceneChangeDetect_Values() []string {
	return []string{H264SceneChangeDetectDisabled, H264SceneChangeDetectEnabled, H264SceneChangeDetectTransitionDetection}
}
26683
// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
// second (fps). Enable slow PAL to create a 25 fps output. When you enable
// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
// your audio to keep it synchronized with the video. Note that enabling this
// setting will slightly reduce the duration of your video. Required settings:
// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
// 1.
const (
	// H264SlowPalDisabled is the H264SlowPal enum value "DISABLED".
	H264SlowPalDisabled = "DISABLED"
	// H264SlowPalEnabled is the H264SlowPal enum value "ENABLED".
	H264SlowPalEnabled = "ENABLED"
)

// H264SlowPal_Values returns every element of the H264SlowPal enum.
func H264SlowPal_Values() []string {
	return []string{H264SlowPalDisabled, H264SlowPalEnabled}
}
26707
// Only use this setting when you change the default value, Auto (AUTO), for
// the setting H264AdaptiveQuantization. When you keep all defaults, excluding
// H264AdaptiveQuantization and all other adaptive quantization from your JSON
// job specification, MediaConvert automatically applies the best types of quantization
// for your video content. When you set H264AdaptiveQuantization to a value
// other than AUTO, the default value for H264SpatialAdaptiveQuantization is
// Enabled (ENABLED). Keep this default value to adjust quantization within
// each frame based on spatial variation of content complexity. When you enable
// this feature, the encoder uses fewer bits on areas that can sustain more
// distortion with no noticeable visual degradation and uses more bits on areas
// where any small distortion will be noticeable. For example, complex textured
// blocks are encoded with fewer bits and smooth textured blocks are encoded
// with more bits. Enabling this feature will almost always improve your video
// quality. Note, though, that this feature doesn't take into account where
// the viewer's attention is likely to be. If viewers are likely to be focusing
// their attention on a part of the screen with a lot of complex texture, you
// might choose to set H264SpatialAdaptiveQuantization to Disabled (DISABLED).
// Related setting: When you enable spatial adaptive quantization, set the value
// for Adaptive quantization (H264AdaptiveQuantization) depending on your content.
// For homogeneous content, such as cartoons and video games, set it to Low.
// For content with a wider variety of textures, set it to High or Higher. To
// manually enable or disable H264SpatialAdaptiveQuantization, you must set
// Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO.
const (
	// H264SpatialAdaptiveQuantizationDisabled is the H264SpatialAdaptiveQuantization enum value "DISABLED".
	H264SpatialAdaptiveQuantizationDisabled = "DISABLED"
	// H264SpatialAdaptiveQuantizationEnabled is the H264SpatialAdaptiveQuantization enum value "ENABLED".
	H264SpatialAdaptiveQuantizationEnabled = "ENABLED"
)
26738
26739// H264SpatialAdaptiveQuantization_Values returns all elements of the H264SpatialAdaptiveQuantization enum
26740func H264SpatialAdaptiveQuantization_Values() []string {
26741	return []string{
26742		H264SpatialAdaptiveQuantizationDisabled,
26743		H264SpatialAdaptiveQuantizationEnabled,
26744	}
26745}
26746
// Produces a bitstream compliant with SMPTE RP-2027.
const (
	// H264SyntaxDefault is a H264Syntax enum value
	H264SyntaxDefault = "DEFAULT"

	// H264SyntaxRp2027 is a H264Syntax enum value
	H264SyntaxRp2027 = "RP2027"
)

// H264Syntax_Values returns all elements of the H264Syntax enum, in
// declaration order.
func H264Syntax_Values() []string {
	return append(make([]string, 0, 2),
		H264SyntaxDefault,
		H264SyntaxRp2027,
	)
}
26763
// When you do frame rate conversion from 23.976 frames per second (fps) to
// 29.97 fps, and your output scan type is interlaced, you can optionally enable
// hard or soft telecine to create a smoother picture. Hard telecine (HARD)
// produces a 29.97i output. Soft telecine (SOFT) produces an output with a
// 23.976 output that signals to the video player device to do the conversion
// during play back. When you keep the default value, None (NONE), MediaConvert
// does a standard frame rate conversion to 29.97 without doing anything with
// the field polarity to create a smoother picture.
const (
	// H264TelecineNone is a H264Telecine enum value
	H264TelecineNone = "NONE"

	// H264TelecineSoft is a H264Telecine enum value
	H264TelecineSoft = "SOFT"

	// H264TelecineHard is a H264Telecine enum value
	H264TelecineHard = "HARD"
)

// H264Telecine_Values returns all elements of the H264Telecine enum, in
// declaration order.
func H264Telecine_Values() []string {
	return append(make([]string, 0, 3),
		H264TelecineNone,
		H264TelecineSoft,
		H264TelecineHard,
	)
}
26791
// Only use this setting when you change the default value, AUTO, for the setting
// H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization
// and all other adaptive quantization from your JSON job specification, MediaConvert
// automatically applies the best types of quantization for your video content.
// When you set H264AdaptiveQuantization to a value other than AUTO, the default
// value for H264TemporalAdaptiveQuantization is Enabled (ENABLED). Keep this
// default value to adjust quantization within each frame based on temporal
// variation of content complexity. When you enable this feature, the encoder
// uses fewer bits on areas of the frame that aren't moving and uses more bits
// on complex objects with sharp edges that move a lot. For example, this feature
// improves the readability of text tickers on newscasts and scoreboards on
// sports matches. Enabling this feature will almost always improve your video
// quality. Note, though, that this feature doesn't take into account where
// the viewer's attention is likely to be. If viewers are likely to be focusing
// their attention on a part of the screen that doesn't have moving objects
// with sharp edges, such as sports athletes' faces, you might choose to set
// H264TemporalAdaptiveQuantization to Disabled (DISABLED). Related setting:
// When you enable temporal quantization, adjust the strength of the filter
// with the setting Adaptive quantization (adaptiveQuantization). To manually
// enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive
// quantization (H264AdaptiveQuantization) to a value other than AUTO.
const (
	// H264TemporalAdaptiveQuantizationDisabled is a H264TemporalAdaptiveQuantization enum value
	H264TemporalAdaptiveQuantizationDisabled = "DISABLED"

	// H264TemporalAdaptiveQuantizationEnabled is a H264TemporalAdaptiveQuantization enum value
	H264TemporalAdaptiveQuantizationEnabled = "ENABLED"
)

// H264TemporalAdaptiveQuantization_Values returns all elements of the
// H264TemporalAdaptiveQuantization enum, in declaration order.
func H264TemporalAdaptiveQuantization_Values() []string {
	return append(make([]string, 0, 2),
		H264TemporalAdaptiveQuantizationDisabled,
		H264TemporalAdaptiveQuantizationEnabled,
	)
}
26828
// Inserts timecode for each frame as 4 bytes of an unregistered SEI message.
const (
	// H264UnregisteredSeiTimecodeDisabled is a H264UnregisteredSeiTimecode enum value
	H264UnregisteredSeiTimecodeDisabled = "DISABLED"

	// H264UnregisteredSeiTimecodeEnabled is a H264UnregisteredSeiTimecode enum value
	H264UnregisteredSeiTimecodeEnabled = "ENABLED"
)

// H264UnregisteredSeiTimecode_Values returns all elements of the
// H264UnregisteredSeiTimecode enum, in declaration order.
func H264UnregisteredSeiTimecode_Values() []string {
	return append(make([]string, 0, 2),
		H264UnregisteredSeiTimecodeDisabled,
		H264UnregisteredSeiTimecodeEnabled,
	)
}
26845
// Specify the strength of any adaptive quantization filters that you enable.
// The value that you choose here applies to the following settings: Flicker
// adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization
// (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization).
const (
	// H265AdaptiveQuantizationOff is a H265AdaptiveQuantization enum value
	H265AdaptiveQuantizationOff = "OFF"

	// H265AdaptiveQuantizationLow is a H265AdaptiveQuantization enum value
	H265AdaptiveQuantizationLow = "LOW"

	// H265AdaptiveQuantizationMedium is a H265AdaptiveQuantization enum value
	H265AdaptiveQuantizationMedium = "MEDIUM"

	// H265AdaptiveQuantizationHigh is a H265AdaptiveQuantization enum value
	H265AdaptiveQuantizationHigh = "HIGH"

	// H265AdaptiveQuantizationHigher is a H265AdaptiveQuantization enum value
	H265AdaptiveQuantizationHigher = "HIGHER"

	// H265AdaptiveQuantizationMax is a H265AdaptiveQuantization enum value
	H265AdaptiveQuantizationMax = "MAX"
)

// H265AdaptiveQuantization_Values returns all elements of the
// H265AdaptiveQuantization enum, in declaration order.
func H265AdaptiveQuantization_Values() []string {
	return append(make([]string, 0, 6),
		H265AdaptiveQuantizationOff,
		H265AdaptiveQuantizationLow,
		H265AdaptiveQuantizationMedium,
		H265AdaptiveQuantizationHigh,
		H265AdaptiveQuantizationHigher,
		H265AdaptiveQuantizationMax,
	)
}
26881
// Enables Alternate Transfer Function SEI message for outputs using Hybrid
// Log Gamma (HLG) Electro-Optical Transfer Function (EOTF).
const (
	// H265AlternateTransferFunctionSeiDisabled is a H265AlternateTransferFunctionSei enum value
	H265AlternateTransferFunctionSeiDisabled = "DISABLED"

	// H265AlternateTransferFunctionSeiEnabled is a H265AlternateTransferFunctionSei enum value
	H265AlternateTransferFunctionSeiEnabled = "ENABLED"
)

// H265AlternateTransferFunctionSei_Values returns all elements of the
// H265AlternateTransferFunctionSei enum, in declaration order.
func H265AlternateTransferFunctionSei_Values() []string {
	return append(make([]string, 0, 2),
		H265AlternateTransferFunctionSeiDisabled,
		H265AlternateTransferFunctionSeiEnabled,
	)
}
26899
// H.265 Level.
const (
	// H265CodecLevelAuto is a H265CodecLevel enum value
	H265CodecLevelAuto = "AUTO"

	// H265CodecLevelLevel1 is a H265CodecLevel enum value
	H265CodecLevelLevel1 = "LEVEL_1"

	// H265CodecLevelLevel2 is a H265CodecLevel enum value
	H265CodecLevelLevel2 = "LEVEL_2"

	// H265CodecLevelLevel21 is a H265CodecLevel enum value
	H265CodecLevelLevel21 = "LEVEL_2_1"

	// H265CodecLevelLevel3 is a H265CodecLevel enum value
	H265CodecLevelLevel3 = "LEVEL_3"

	// H265CodecLevelLevel31 is a H265CodecLevel enum value
	H265CodecLevelLevel31 = "LEVEL_3_1"

	// H265CodecLevelLevel4 is a H265CodecLevel enum value
	H265CodecLevelLevel4 = "LEVEL_4"

	// H265CodecLevelLevel41 is a H265CodecLevel enum value
	H265CodecLevelLevel41 = "LEVEL_4_1"

	// H265CodecLevelLevel5 is a H265CodecLevel enum value
	H265CodecLevelLevel5 = "LEVEL_5"

	// H265CodecLevelLevel51 is a H265CodecLevel enum value
	H265CodecLevelLevel51 = "LEVEL_5_1"

	// H265CodecLevelLevel52 is a H265CodecLevel enum value
	H265CodecLevelLevel52 = "LEVEL_5_2"

	// H265CodecLevelLevel6 is a H265CodecLevel enum value
	H265CodecLevelLevel6 = "LEVEL_6"

	// H265CodecLevelLevel61 is a H265CodecLevel enum value
	H265CodecLevelLevel61 = "LEVEL_6_1"

	// H265CodecLevelLevel62 is a H265CodecLevel enum value
	H265CodecLevelLevel62 = "LEVEL_6_2"
)

// H265CodecLevel_Values returns all elements of the H265CodecLevel enum, in
// declaration order.
func H265CodecLevel_Values() []string {
	return append(make([]string, 0, 14),
		H265CodecLevelAuto,
		H265CodecLevelLevel1,
		H265CodecLevelLevel2,
		H265CodecLevelLevel21,
		H265CodecLevelLevel3,
		H265CodecLevelLevel31,
		H265CodecLevelLevel4,
		H265CodecLevelLevel41,
		H265CodecLevelLevel5,
		H265CodecLevelLevel51,
		H265CodecLevelLevel52,
		H265CodecLevelLevel6,
		H265CodecLevelLevel61,
		H265CodecLevelLevel62,
	)
}
26964
// Represents the Profile and Tier, per the HEVC (H.265) specification. Selections
// are grouped as [Profile] / [Tier], so "Main/High" represents Main Profile
// with High Tier. 4:2:2 profiles are only available with the HEVC 4:2:2 License.
const (
	// H265CodecProfileMainMain is a H265CodecProfile enum value
	H265CodecProfileMainMain = "MAIN_MAIN"

	// H265CodecProfileMainHigh is a H265CodecProfile enum value
	H265CodecProfileMainHigh = "MAIN_HIGH"

	// H265CodecProfileMain10Main is a H265CodecProfile enum value
	H265CodecProfileMain10Main = "MAIN10_MAIN"

	// H265CodecProfileMain10High is a H265CodecProfile enum value
	H265CodecProfileMain10High = "MAIN10_HIGH"

	// H265CodecProfileMain4228bitMain is a H265CodecProfile enum value
	H265CodecProfileMain4228bitMain = "MAIN_422_8BIT_MAIN"

	// H265CodecProfileMain4228bitHigh is a H265CodecProfile enum value
	H265CodecProfileMain4228bitHigh = "MAIN_422_8BIT_HIGH"

	// H265CodecProfileMain42210bitMain is a H265CodecProfile enum value
	H265CodecProfileMain42210bitMain = "MAIN_422_10BIT_MAIN"

	// H265CodecProfileMain42210bitHigh is a H265CodecProfile enum value
	H265CodecProfileMain42210bitHigh = "MAIN_422_10BIT_HIGH"
)

// H265CodecProfile_Values returns all elements of the H265CodecProfile enum,
// in declaration order.
func H265CodecProfile_Values() []string {
	return append(make([]string, 0, 8),
		H265CodecProfileMainMain,
		H265CodecProfileMainHigh,
		H265CodecProfileMain10Main,
		H265CodecProfileMain10High,
		H265CodecProfileMain4228bitMain,
		H265CodecProfileMain4228bitHigh,
		H265CodecProfileMain42210bitMain,
		H265CodecProfileMain42210bitHigh,
	)
}
27007
// Choose Adaptive to improve subjective video quality for high-motion content.
// This will cause the service to use fewer B-frames (which infer information
// based on other frames) for high-motion portions of the video and more B-frames
// for low-motion portions. The maximum number of B-frames is limited by the
// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).
const (
	// H265DynamicSubGopAdaptive is a H265DynamicSubGop enum value
	H265DynamicSubGopAdaptive = "ADAPTIVE"

	// H265DynamicSubGopStatic is a H265DynamicSubGop enum value
	H265DynamicSubGopStatic = "STATIC"
)

// H265DynamicSubGop_Values returns all elements of the H265DynamicSubGop enum,
// in declaration order.
func H265DynamicSubGop_Values() []string {
	return append(make([]string, 0, 2),
		H265DynamicSubGopAdaptive,
		H265DynamicSubGopStatic,
	)
}
27028
// Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears
// as a visual flicker that can arise when the encoder saves bits by copying
// some macroblocks many times from frame to frame, and then refreshes them
// at the I-frame. When you enable this setting, the encoder updates these macroblocks
// slightly more often to smooth out the flicker. This setting is disabled by
// default. Related setting: In addition to enabling this setting, you must
// also set adaptiveQuantization to a value other than Off (OFF).
const (
	// H265FlickerAdaptiveQuantizationDisabled is a H265FlickerAdaptiveQuantization enum value
	H265FlickerAdaptiveQuantizationDisabled = "DISABLED"

	// H265FlickerAdaptiveQuantizationEnabled is a H265FlickerAdaptiveQuantization enum value
	H265FlickerAdaptiveQuantizationEnabled = "ENABLED"
)

// H265FlickerAdaptiveQuantization_Values returns all elements of the
// H265FlickerAdaptiveQuantization enum, in declaration order.
func H265FlickerAdaptiveQuantization_Values() []string {
	return append(make([]string, 0, 2),
		H265FlickerAdaptiveQuantizationDisabled,
		H265FlickerAdaptiveQuantizationEnabled,
	)
}
27051
// If you are using the console, use the Framerate setting to specify the frame
// rate for this output. If you want to keep the same frame rate as the input
// video, choose Follow source. If you want to do frame rate conversion, choose
// a frame rate from the dropdown list or choose Custom. The framerates shown
// in the dropdown list are decimal approximations of fractions. If you choose
// Custom, specify your frame rate as a fraction. If you are creating your transcoding
// job specification as a JSON file without the console, use FramerateControl
// to specify which value the service uses for the frame rate for this output.
// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
// from the input. Choose SPECIFIED if you want the service to use the frame
// rate you specify in the settings FramerateNumerator and FramerateDenominator.
const (
	// H265FramerateControlInitializeFromSource is a H265FramerateControl enum value
	H265FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// H265FramerateControlSpecified is a H265FramerateControl enum value
	H265FramerateControlSpecified = "SPECIFIED"
)

// H265FramerateControl_Values returns all elements of the H265FramerateControl
// enum, in declaration order.
func H265FramerateControl_Values() []string {
	return append(make([]string, 0, 2),
		H265FramerateControlInitializeFromSource,
		H265FramerateControlSpecified,
	)
}
27078
// Choose the method that you want MediaConvert to use when increasing or decreasing
// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
// smooth picture, but might introduce undesirable video artifacts. For complex
// frame rate conversions, especially if your source video has already been
// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
// motion-compensated interpolation. FrameFormer chooses the best conversion
// method frame by frame. Note that using FrameFormer increases the transcoding
// time and incurs a significant add-on cost.
const (
	// H265FramerateConversionAlgorithmDuplicateDrop is a H265FramerateConversionAlgorithm enum value
	H265FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// H265FramerateConversionAlgorithmInterpolate is a H265FramerateConversionAlgorithm enum value
	H265FramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// H265FramerateConversionAlgorithmFrameformer is a H265FramerateConversionAlgorithm enum value
	H265FramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// H265FramerateConversionAlgorithm_Values returns all elements of the
// H265FramerateConversionAlgorithm enum, in declaration order.
func H265FramerateConversionAlgorithm_Values() []string {
	return append(make([]string, 0, 3),
		H265FramerateConversionAlgorithmDuplicateDrop,
		H265FramerateConversionAlgorithmInterpolate,
		H265FramerateConversionAlgorithmFrameformer,
	)
}
27108
// If enabled, use reference B frames for GOP structures that have B frames >
// 1.
const (
	// H265GopBReferenceDisabled is a H265GopBReference enum value
	H265GopBReferenceDisabled = "DISABLED"

	// H265GopBReferenceEnabled is a H265GopBReference enum value
	H265GopBReferenceEnabled = "ENABLED"
)

// H265GopBReference_Values returns all elements of the H265GopBReference enum,
// in declaration order.
func H265GopBReference_Values() []string {
	return append(make([]string, 0, 2),
		H265GopBReferenceDisabled,
		H265GopBReferenceEnabled,
	)
}
27126
// Indicates if the GOP Size in H265 is specified in frames or seconds. If seconds
// the system will convert the GOP Size into a frame count at run time.
const (
	// H265GopSizeUnitsFrames is a H265GopSizeUnits enum value
	H265GopSizeUnitsFrames = "FRAMES"

	// H265GopSizeUnitsSeconds is a H265GopSizeUnits enum value
	H265GopSizeUnitsSeconds = "SECONDS"
)

// H265GopSizeUnits_Values returns all elements of the H265GopSizeUnits enum,
// in declaration order.
func H265GopSizeUnits_Values() []string {
	return append(make([]string, 0, 2),
		H265GopSizeUnitsFrames,
		H265GopSizeUnitsSeconds,
	)
}
27144
// Choose the scan line type for the output. Keep the default value, Progressive
// (PROGRESSIVE) to create a progressive output, regardless of the scan type
// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
// to create an output that's interlaced with the same field polarity throughout.
// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
// to produce outputs with the same field polarity as the source. For jobs that
// have multiple inputs, the output field polarity might change over the course
// of the output. Follow behavior depends on the input scan type. If the source
// is interlaced, the output will be interlaced with the same polarity as the
// source. If the source is progressive, the output will be interlaced with
// top field bottom field first, depending on which of the Follow options you
// choose.
const (
	// H265InterlaceModeProgressive is a H265InterlaceMode enum value
	H265InterlaceModeProgressive = "PROGRESSIVE"

	// H265InterlaceModeTopField is a H265InterlaceMode enum value
	H265InterlaceModeTopField = "TOP_FIELD"

	// H265InterlaceModeBottomField is a H265InterlaceMode enum value
	H265InterlaceModeBottomField = "BOTTOM_FIELD"

	// H265InterlaceModeFollowTopField is a H265InterlaceMode enum value
	H265InterlaceModeFollowTopField = "FOLLOW_TOP_FIELD"

	// H265InterlaceModeFollowBottomField is a H265InterlaceMode enum value
	H265InterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD"
)

// H265InterlaceMode_Values returns all elements of the H265InterlaceMode enum,
// in declaration order.
func H265InterlaceMode_Values() []string {
	return append(make([]string, 0, 5),
		H265InterlaceModeProgressive,
		H265InterlaceModeTopField,
		H265InterlaceModeBottomField,
		H265InterlaceModeFollowTopField,
		H265InterlaceModeFollowBottomField,
	)
}
27184
// Optional. Specify how the service determines the pixel aspect ratio (PAR)
// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
// uses the PAR from your input video for your output. To specify a different
// PAR in the console, choose any value other than Follow source. To specify
// a different PAR by editing the JSON job specification, choose SPECIFIED.
// When you choose SPECIFIED for this setting, you must also specify values
// for the parNumerator and parDenominator settings.
const (
	// H265ParControlInitializeFromSource is a H265ParControl enum value
	H265ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// H265ParControlSpecified is a H265ParControl enum value
	H265ParControlSpecified = "SPECIFIED"
)

// H265ParControl_Values returns all elements of the H265ParControl enum, in
// declaration order.
func H265ParControl_Values() []string {
	return append(make([]string, 0, 2),
		H265ParControlInitializeFromSource,
		H265ParControlSpecified,
	)
}
27207
// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
// want to trade off encoding speed for output video quality. The default behavior
// is faster, lower quality, single-pass encoding.
const (
	// H265QualityTuningLevelSinglePass is a H265QualityTuningLevel enum value
	H265QualityTuningLevelSinglePass = "SINGLE_PASS"

	// H265QualityTuningLevelSinglePassHq is a H265QualityTuningLevel enum value
	H265QualityTuningLevelSinglePassHq = "SINGLE_PASS_HQ"

	// H265QualityTuningLevelMultiPassHq is a H265QualityTuningLevel enum value
	H265QualityTuningLevelMultiPassHq = "MULTI_PASS_HQ"
)

// H265QualityTuningLevel_Values returns all elements of the
// H265QualityTuningLevel enum, in declaration order.
func H265QualityTuningLevel_Values() []string {
	return append(make([]string, 0, 3),
		H265QualityTuningLevelSinglePass,
		H265QualityTuningLevelSinglePassHq,
		H265QualityTuningLevelMultiPassHq,
	)
}
27230
// Use this setting to specify whether this output has a variable bitrate (VBR),
// constant bitrate (CBR) or quality-defined variable bitrate (QVBR).
const (
	// H265RateControlModeVbr is a H265RateControlMode enum value
	H265RateControlModeVbr = "VBR"

	// H265RateControlModeCbr is a H265RateControlMode enum value
	H265RateControlModeCbr = "CBR"

	// H265RateControlModeQvbr is a H265RateControlMode enum value
	H265RateControlModeQvbr = "QVBR"
)

// H265RateControlMode_Values returns all elements of the H265RateControlMode
// enum, in declaration order.
func H265RateControlMode_Values() []string {
	return append(make([]string, 0, 3),
		H265RateControlModeVbr,
		H265RateControlModeCbr,
		H265RateControlModeQvbr,
	)
}
27252
// Specify Sample Adaptive Offset (SAO) filter strength. Adaptive mode dynamically
// selects best strength based on content
const (
	// H265SampleAdaptiveOffsetFilterModeDefault is a H265SampleAdaptiveOffsetFilterMode enum value
	H265SampleAdaptiveOffsetFilterModeDefault = "DEFAULT"

	// H265SampleAdaptiveOffsetFilterModeAdaptive is a H265SampleAdaptiveOffsetFilterMode enum value
	H265SampleAdaptiveOffsetFilterModeAdaptive = "ADAPTIVE"

	// H265SampleAdaptiveOffsetFilterModeOff is a H265SampleAdaptiveOffsetFilterMode enum value
	H265SampleAdaptiveOffsetFilterModeOff = "OFF"
)

// H265SampleAdaptiveOffsetFilterMode_Values returns all elements of the
// H265SampleAdaptiveOffsetFilterMode enum, in declaration order.
func H265SampleAdaptiveOffsetFilterMode_Values() []string {
	return append(make([]string, 0, 3),
		H265SampleAdaptiveOffsetFilterModeDefault,
		H265SampleAdaptiveOffsetFilterModeAdaptive,
		H265SampleAdaptiveOffsetFilterModeOff,
	)
}
27274
// Use this setting for interlaced outputs, when your output frame rate is half
// of your input frame rate. In this situation, choose Optimized interlacing
// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
// case, each progressive frame from the input corresponds to an interlaced
// field in the output. Keep the default value, Basic interlacing (INTERLACED),
// for all other output frame rates. With basic interlacing, MediaConvert performs
// any frame rate conversion first and then interlaces the frames. When you
// choose Optimized interlacing and you set your output frame rate to a value
// that isn't suitable for optimized interlacing, MediaConvert automatically
// falls back to basic interlacing. Required settings: To use optimized interlacing,
// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
// use optimized interlacing for hard telecine outputs. You must also set Interlace
// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
const (
	// H265ScanTypeConversionModeInterlaced is a H265ScanTypeConversionMode enum value
	H265ScanTypeConversionModeInterlaced = "INTERLACED"

	// H265ScanTypeConversionModeInterlacedOptimize is a H265ScanTypeConversionMode enum value
	H265ScanTypeConversionModeInterlacedOptimize = "INTERLACED_OPTIMIZE"
)

// H265ScanTypeConversionMode_Values returns all elements of the
// H265ScanTypeConversionMode enum, in declaration order.
func H265ScanTypeConversionMode_Values() []string {
	return append(make([]string, 0, 2),
		H265ScanTypeConversionModeInterlaced,
		H265ScanTypeConversionModeInterlacedOptimize,
	)
}
27303
// Enable this setting to insert I-frames at scene changes that the service
// automatically detects. This improves video quality and is enabled by default.
// If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION)
// for further video quality improvement. For more information about QVBR, see
// https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.
const (
	// H265SceneChangeDetectDisabled is a H265SceneChangeDetect enum value
	H265SceneChangeDetectDisabled = "DISABLED"

	// H265SceneChangeDetectEnabled is a H265SceneChangeDetect enum value
	H265SceneChangeDetectEnabled = "ENABLED"

	// H265SceneChangeDetectTransitionDetection is a H265SceneChangeDetect enum value
	H265SceneChangeDetectTransitionDetection = "TRANSITION_DETECTION"
)

// H265SceneChangeDetect_Values returns all elements of the
// H265SceneChangeDetect enum, in declaration order.
func H265SceneChangeDetect_Values() []string {
	return append(make([]string, 0, 3),
		H265SceneChangeDetectDisabled,
		H265SceneChangeDetectEnabled,
		H265SceneChangeDetectTransitionDetection,
	)
}
27328
// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
// second (fps). Enable slow PAL to create a 25 fps output. When you enable
// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
// your audio to keep it synchronized with the video. Note that enabling this
// setting will slightly reduce the duration of your video. Required settings:
// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
// 1.
const (
	// H265SlowPalDisabled is a H265SlowPal enum value
	H265SlowPalDisabled = "DISABLED"

	// H265SlowPalEnabled is a H265SlowPal enum value
	H265SlowPalEnabled = "ENABLED"
)

// H265SlowPal_Values returns all elements of the H265SlowPal enum, in
// declaration order.
func H265SlowPal_Values() []string {
	return append(make([]string, 0, 2),
		H265SlowPalDisabled,
		H265SlowPalEnabled,
	)
}
27352
// Keep the default value, Enabled (ENABLED), to adjust quantization within
// each frame based on spatial variation of content complexity. When you enable
// this feature, the encoder uses fewer bits on areas that can sustain more
// distortion with no noticeable visual degradation and uses more bits on areas
// where any small distortion will be noticeable. For example, complex textured
// blocks are encoded with fewer bits and smooth textured blocks are encoded
// with more bits. Enabling this feature will almost always improve your video
// quality. Note, though, that this feature doesn't take into account where
// the viewer's attention is likely to be. If viewers are likely to be focusing
// their attention on a part of the screen with a lot of complex texture, you
// might choose to disable this feature. Related setting: When you enable spatial
// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization)
// depending on your content. For homogeneous content, such as cartoons and
// video games, set it to Low. For content with a wider variety of textures,
// set it to High or Higher.
const (
	// H265SpatialAdaptiveQuantizationDisabled is a H265SpatialAdaptiveQuantization enum value
	H265SpatialAdaptiveQuantizationDisabled = "DISABLED"

	// H265SpatialAdaptiveQuantizationEnabled is a H265SpatialAdaptiveQuantization enum value
	H265SpatialAdaptiveQuantizationEnabled = "ENABLED"
)

// H265SpatialAdaptiveQuantization_Values returns all elements of the
// H265SpatialAdaptiveQuantization enum, in declaration order.
func H265SpatialAdaptiveQuantization_Values() []string {
	return append(make([]string, 0, 2),
		H265SpatialAdaptiveQuantizationDisabled,
		H265SpatialAdaptiveQuantizationEnabled,
	)
}
27383
27384// This field applies only if the Streams > Advanced > Framerate (framerate)
27385// field is set to 29.970. This field works with the Streams > Advanced > Preprocessors
27386// > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced
27387// Mode field (interlace_mode) to identify the scan type for the output: Progressive,
27388// Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output
27389// from 23.976 input. - Soft: produces 23.976; the player converts this output
27390// to 29.97i.
const (
	// H265TelecineNone is the "NONE" member of the H265Telecine enum.
	H265TelecineNone = "NONE"
	// H265TelecineSoft is the "SOFT" member of the H265Telecine enum.
	H265TelecineSoft = "SOFT"
	// H265TelecineHard is the "HARD" member of the H265Telecine enum.
	H265TelecineHard = "HARD"
)

// H265Telecine_Values lists every member of the H265Telecine enum.
func H265Telecine_Values() []string {
	return []string{H265TelecineNone, H265TelecineSoft, H265TelecineHard}
}
27410
27411// Keep the default value, Enabled (ENABLED), to adjust quantization within
27412// each frame based on temporal variation of content complexity. When you enable
27413// this feature, the encoder uses fewer bits on areas of the frame that aren't
27414// moving and uses more bits on complex objects with sharp edges that move a
27415// lot. For example, this feature improves the readability of text tickers on
27416// newscasts and scoreboards on sports matches. Enabling this feature will almost
27417// always improve your video quality. Note, though, that this feature doesn't
27418// take into account where the viewer's attention is likely to be. If viewers
27419// are likely to be focusing their attention on a part of the screen that doesn't
27420// have moving objects with sharp edges, such as sports athletes' faces, you
27421// might choose to disable this feature. Related setting: When you enable temporal
27422// quantization, adjust the strength of the filter with the setting Adaptive
27423// quantization (adaptiveQuantization).
const (
	// H265TemporalAdaptiveQuantizationDisabled is the "DISABLED" member of the H265TemporalAdaptiveQuantization enum.
	H265TemporalAdaptiveQuantizationDisabled = "DISABLED"
	// H265TemporalAdaptiveQuantizationEnabled is the "ENABLED" member of the H265TemporalAdaptiveQuantization enum.
	H265TemporalAdaptiveQuantizationEnabled = "ENABLED"
)

// H265TemporalAdaptiveQuantization_Values lists every member of the H265TemporalAdaptiveQuantization enum.
func H265TemporalAdaptiveQuantization_Values() []string {
	return []string{H265TemporalAdaptiveQuantizationDisabled, H265TemporalAdaptiveQuantizationEnabled}
}
27439
27440// Enables temporal layer identifiers in the encoded bitstream. Up to 3 layers
27441// are supported depending on GOP structure: I- and P-frames form one layer,
27442// reference B-frames can form a second layer and non-reference b-frames can
27443// form a third layer. Decoders can optionally decode only the lower temporal
27444// layers to generate a lower frame rate output. For example, given a bitstream
27445// with temporal IDs and with b-frames = 1 (i.e. IbPbPb display order), a decoder
27446// could decode all the frames for full frame rate output or only the I and
27447// P frames (lowest temporal layer) for a half frame rate output.
const (
	// H265TemporalIdsDisabled is the "DISABLED" member of the H265TemporalIds enum.
	H265TemporalIdsDisabled = "DISABLED"
	// H265TemporalIdsEnabled is the "ENABLED" member of the H265TemporalIds enum.
	H265TemporalIdsEnabled = "ENABLED"
)

// H265TemporalIds_Values lists every member of the H265TemporalIds enum.
func H265TemporalIds_Values() []string {
	return []string{H265TemporalIdsDisabled, H265TemporalIdsEnabled}
}
27463
27464// Enable use of tiles, allowing horizontal as well as vertical subdivision
27465// of the encoded pictures.
const (
	// H265TilesDisabled is the "DISABLED" member of the H265Tiles enum.
	H265TilesDisabled = "DISABLED"
	// H265TilesEnabled is the "ENABLED" member of the H265Tiles enum.
	H265TilesEnabled = "ENABLED"
)

// H265Tiles_Values lists every member of the H265Tiles enum.
func H265Tiles_Values() []string {
	return []string{H265TilesDisabled, H265TilesEnabled}
}
27481
27482// Inserts timecode for each frame as 4 bytes of an unregistered SEI message.
const (
	// H265UnregisteredSeiTimecodeDisabled is the "DISABLED" member of the H265UnregisteredSeiTimecode enum.
	H265UnregisteredSeiTimecodeDisabled = "DISABLED"
	// H265UnregisteredSeiTimecodeEnabled is the "ENABLED" member of the H265UnregisteredSeiTimecode enum.
	H265UnregisteredSeiTimecodeEnabled = "ENABLED"
)

// H265UnregisteredSeiTimecode_Values lists every member of the H265UnregisteredSeiTimecode enum.
func H265UnregisteredSeiTimecode_Values() []string {
	return []string{H265UnregisteredSeiTimecodeDisabled, H265UnregisteredSeiTimecodeEnabled}
}
27498
27499// If the location of parameter set NAL units doesn't matter in your workflow,
27500// ignore this setting. Use this setting only with CMAF or DASH outputs, or
27501// with standalone file outputs in an MPEG-4 container (MP4 outputs). Choose
27502// HVC1 to mark your output as HVC1. This makes your output compliant with the
27503// following specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15
27504// 3rd Edition. For these outputs, the service stores parameter set NAL units
27505// in the sample headers but not in the samples directly. For MP4 outputs, when
27506// you choose HVC1, your output video might not work properly with some downstream
27507// systems and video players. The service defaults to marking your output as
27508// HEV1. For these outputs, the service writes parameter set NAL units directly
27509// into the samples.
const (
	// H265WriteMp4PackagingTypeHvc1 is the "HVC1" member of the H265WriteMp4PackagingType enum.
	H265WriteMp4PackagingTypeHvc1 = "HVC1"
	// H265WriteMp4PackagingTypeHev1 is the "HEV1" member of the H265WriteMp4PackagingType enum.
	H265WriteMp4PackagingTypeHev1 = "HEV1"
)

// H265WriteMp4PackagingType_Values lists every member of the H265WriteMp4PackagingType enum.
func H265WriteMp4PackagingType_Values() []string {
	return []string{H265WriteMp4PackagingTypeHvc1, H265WriteMp4PackagingTypeHev1}
}
27525
const (
	// HlsAdMarkersElemental is the "ELEMENTAL" member of the HlsAdMarkers enum.
	HlsAdMarkersElemental = "ELEMENTAL"
	// HlsAdMarkersElementalScte35 is the "ELEMENTAL_SCTE35" member of the HlsAdMarkers enum.
	HlsAdMarkersElementalScte35 = "ELEMENTAL_SCTE35"
)

// HlsAdMarkers_Values lists every member of the HlsAdMarkers enum.
func HlsAdMarkers_Values() []string {
	return []string{HlsAdMarkersElemental, HlsAdMarkersElementalScte35}
}
27541
27542// Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream
27543// (M2TS) to create a file in an MPEG2-TS container. Keep the default value
27544// Automatic (AUTOMATIC) to create a raw audio-only file with no container.
27545// Regardless of the value that you specify here, if this output has video,
27546// the service will place outputs into an MPEG2-TS container.
const (
	// HlsAudioOnlyContainerAutomatic is the "AUTOMATIC" member of the HlsAudioOnlyContainer enum.
	HlsAudioOnlyContainerAutomatic = "AUTOMATIC"
	// HlsAudioOnlyContainerM2ts is the "M2TS" member of the HlsAudioOnlyContainer enum.
	HlsAudioOnlyContainerM2ts = "M2TS"
)

// HlsAudioOnlyContainer_Values lists every member of the HlsAudioOnlyContainer enum.
func HlsAudioOnlyContainer_Values() []string {
	return []string{HlsAudioOnlyContainerAutomatic, HlsAudioOnlyContainerM2ts}
}
27562
27563// Ignore this setting unless you are using FairPlay DRM with Verimatrix and
27564// you encounter playback issues. Keep the default value, Include (INCLUDE),
27565// to output audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only
27566// headers from your audio segments.
const (
	// HlsAudioOnlyHeaderInclude is the "INCLUDE" member of the HlsAudioOnlyHeader enum.
	HlsAudioOnlyHeaderInclude = "INCLUDE"
	// HlsAudioOnlyHeaderExclude is the "EXCLUDE" member of the HlsAudioOnlyHeader enum.
	HlsAudioOnlyHeaderExclude = "EXCLUDE"
)

// HlsAudioOnlyHeader_Values lists every member of the HlsAudioOnlyHeader enum.
func HlsAudioOnlyHeader_Values() []string {
	return []string{HlsAudioOnlyHeaderInclude, HlsAudioOnlyHeaderExclude}
}
27582
27583// Four types of audio-only tracks are supported: Audio-Only Variant Stream
27584// The client can play back this audio-only stream instead of video in low-bandwidth
27585// scenarios. Represented as an EXT-X-STREAM-INF in the HLS manifest. Alternate
27586// Audio, Auto Select, Default Alternate rendition that the client should try
27587// to play back by default. Represented as an EXT-X-MEDIA in the HLS manifest
27588// with DEFAULT=YES, AUTOSELECT=YES Alternate Audio, Auto Select, Not Default
27589// Alternate rendition that the client may try to play back by default. Represented
27590// as an EXT-X-MEDIA in the HLS manifest with DEFAULT=NO, AUTOSELECT=YES Alternate
27591// Audio, not Auto Select Alternate rendition that the client will not try to
27592// play back by default. Represented as an EXT-X-MEDIA in the HLS manifest with
27593// DEFAULT=NO, AUTOSELECT=NO
const (
	// HlsAudioTrackTypeAlternateAudioAutoSelectDefault is the "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT" member of the HlsAudioTrackType enum.
	HlsAudioTrackTypeAlternateAudioAutoSelectDefault = "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT"
	// HlsAudioTrackTypeAlternateAudioAutoSelect is the "ALTERNATE_AUDIO_AUTO_SELECT" member of the HlsAudioTrackType enum.
	HlsAudioTrackTypeAlternateAudioAutoSelect = "ALTERNATE_AUDIO_AUTO_SELECT"
	// HlsAudioTrackTypeAlternateAudioNotAutoSelect is the "ALTERNATE_AUDIO_NOT_AUTO_SELECT" member of the HlsAudioTrackType enum.
	HlsAudioTrackTypeAlternateAudioNotAutoSelect = "ALTERNATE_AUDIO_NOT_AUTO_SELECT"
	// HlsAudioTrackTypeAudioOnlyVariantStream is the "AUDIO_ONLY_VARIANT_STREAM" member of the HlsAudioTrackType enum.
	HlsAudioTrackTypeAudioOnlyVariantStream = "AUDIO_ONLY_VARIANT_STREAM"
)

// HlsAudioTrackType_Values lists every member of the HlsAudioTrackType enum.
func HlsAudioTrackType_Values() []string {
	return []string{
		HlsAudioTrackTypeAlternateAudioAutoSelectDefault, HlsAudioTrackTypeAlternateAudioAutoSelect,
		HlsAudioTrackTypeAlternateAudioNotAutoSelect, HlsAudioTrackTypeAudioOnlyVariantStream,
	}
}
27617
27618// Applies only to 608 Embedded output captions. Insert: Include CLOSED-CAPTIONS
27619// lines in the manifest. Specify at least one language in the CC1 Language
27620// Code field. One CLOSED-CAPTION line is added for each Language Code you specify.
27621// Make sure to specify the languages in the order in which they appear in the
27622// original source (if the source is embedded format) or the order of the caption
27623// selectors (if the source is other than embedded). Otherwise, languages in
27624// the manifest will not match up properly with the output captions. None: Include
27625// CLOSED-CAPTIONS=NONE line in the manifest. Omit: Omit any CLOSED-CAPTIONS
27626// line from the manifest.
const (
	// HlsCaptionLanguageSettingInsert is the "INSERT" member of the HlsCaptionLanguageSetting enum.
	HlsCaptionLanguageSettingInsert = "INSERT"
	// HlsCaptionLanguageSettingOmit is the "OMIT" member of the HlsCaptionLanguageSetting enum.
	HlsCaptionLanguageSettingOmit = "OMIT"
	// HlsCaptionLanguageSettingNone is the "NONE" member of the HlsCaptionLanguageSetting enum.
	HlsCaptionLanguageSettingNone = "NONE"
)

// HlsCaptionLanguageSetting_Values lists every member of the HlsCaptionLanguageSetting enum.
func HlsCaptionLanguageSetting_Values() []string {
	return []string{HlsCaptionLanguageSettingInsert, HlsCaptionLanguageSettingOmit, HlsCaptionLanguageSettingNone}
}
27646
27647// Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no
27648// tag. Otherwise, keep the default value Enabled (ENABLED) and control caching
27649// in your video distribution set up. For example, use the Cache-Control http
27650// header.
const (
	// HlsClientCacheDisabled is the "DISABLED" member of the HlsClientCache enum.
	HlsClientCacheDisabled = "DISABLED"
	// HlsClientCacheEnabled is the "ENABLED" member of the HlsClientCache enum.
	HlsClientCacheEnabled = "ENABLED"
)

// HlsClientCache_Values lists every member of the HlsClientCache enum.
func HlsClientCache_Values() []string {
	return []string{HlsClientCacheDisabled, HlsClientCacheEnabled}
}
27666
27667// Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist
27668// generation.
const (
	// HlsCodecSpecificationRfc6381 is the "RFC_6381" member of the HlsCodecSpecification enum.
	HlsCodecSpecificationRfc6381 = "RFC_6381"
	// HlsCodecSpecificationRfc4281 is the "RFC_4281" member of the HlsCodecSpecification enum.
	HlsCodecSpecificationRfc4281 = "RFC_4281"
)

// HlsCodecSpecification_Values lists every member of the HlsCodecSpecification enum.
func HlsCodecSpecification_Values() []string {
	return []string{HlsCodecSpecificationRfc6381, HlsCodecSpecificationRfc4281}
}
27684
27685// Specify whether to flag this audio track as descriptive video service (DVS)
27686// in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes
27687// the parameter CHARACTERISTICS="public.accessibility.describes-video" in the
27688// EXT-X-MEDIA entry for this track. When you keep the default choice, Don't
27689// flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can
27690// help with accessibility on Apple devices. For more information, see the Apple
27691// documentation.
const (
	// HlsDescriptiveVideoServiceFlagDontFlag is the "DONT_FLAG" member of the HlsDescriptiveVideoServiceFlag enum.
	HlsDescriptiveVideoServiceFlagDontFlag = "DONT_FLAG"
	// HlsDescriptiveVideoServiceFlagFlag is the "FLAG" member of the HlsDescriptiveVideoServiceFlag enum.
	HlsDescriptiveVideoServiceFlagFlag = "FLAG"
)

// HlsDescriptiveVideoServiceFlag_Values lists every member of the HlsDescriptiveVideoServiceFlag enum.
func HlsDescriptiveVideoServiceFlag_Values() []string {
	return []string{HlsDescriptiveVideoServiceFlagDontFlag, HlsDescriptiveVideoServiceFlagFlag}
}
27707
27708// Indicates whether segments should be placed in subdirectories.
const (
	// HlsDirectoryStructureSingleDirectory is the "SINGLE_DIRECTORY" member of the HlsDirectoryStructure enum.
	HlsDirectoryStructureSingleDirectory = "SINGLE_DIRECTORY"
	// HlsDirectoryStructureSubdirectoryPerStream is the "SUBDIRECTORY_PER_STREAM" member of the HlsDirectoryStructure enum.
	HlsDirectoryStructureSubdirectoryPerStream = "SUBDIRECTORY_PER_STREAM"
)

// HlsDirectoryStructure_Values lists every member of the HlsDirectoryStructure enum.
func HlsDirectoryStructure_Values() []string {
	return []string{HlsDirectoryStructureSingleDirectory, HlsDirectoryStructureSubdirectoryPerStream}
}
27724
27725// Encrypts the segments with the given encryption scheme. Leave blank to disable.
27726// Selecting 'Disabled' in the web interface also disables encryption.
const (
	// HlsEncryptionTypeAes128 is the "AES128" member of the HlsEncryptionType enum.
	HlsEncryptionTypeAes128 = "AES128"
	// HlsEncryptionTypeSampleAes is the "SAMPLE_AES" member of the HlsEncryptionType enum.
	HlsEncryptionTypeSampleAes = "SAMPLE_AES"
)

// HlsEncryptionType_Values lists every member of the HlsEncryptionType enum.
func HlsEncryptionType_Values() []string {
	return []string{HlsEncryptionTypeAes128, HlsEncryptionTypeSampleAes}
}
27742
27743// Choose Include (INCLUDE) to have MediaConvert generate a child manifest that
27744// lists only the I-frames for this rendition, in addition to your regular manifest
27745// for this rendition. You might use this manifest as part of a workflow that
27746// creates preview functions for your video. MediaConvert adds both the I-frame
27747// only child manifest and the regular child manifest to the parent manifest.
27748// When you don't need the I-frame only child manifest, keep the default value
27749// Exclude (EXCLUDE).
const (
	// HlsIFrameOnlyManifestInclude is the "INCLUDE" member of the HlsIFrameOnlyManifest enum.
	HlsIFrameOnlyManifestInclude = "INCLUDE"
	// HlsIFrameOnlyManifestExclude is the "EXCLUDE" member of the HlsIFrameOnlyManifest enum.
	HlsIFrameOnlyManifestExclude = "EXCLUDE"
)

// HlsIFrameOnlyManifest_Values lists every member of the HlsIFrameOnlyManifest enum.
func HlsIFrameOnlyManifest_Values() []string {
	return []string{HlsIFrameOnlyManifestInclude, HlsIFrameOnlyManifestExclude}
}
27765
27766// Specify whether MediaConvert generates images for trick play. Keep the default
27767// value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL)
27768// to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME)
27769// to generate tiled thumbnails and full-resolution images of single frames.
27770// MediaConvert creates a child manifest for each set of images that you generate
27771// and adds corresponding entries to the parent manifest. A common application
27772// for these images is Roku trick mode. The thumbnails and full-frame images
27773// that MediaConvert creates with this feature are compatible with this Roku
27774// specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md
const (
	// HlsImageBasedTrickPlayNone is the "NONE" member of the HlsImageBasedTrickPlay enum.
	HlsImageBasedTrickPlayNone = "NONE"
	// HlsImageBasedTrickPlayThumbnail is the "THUMBNAIL" member of the HlsImageBasedTrickPlay enum.
	HlsImageBasedTrickPlayThumbnail = "THUMBNAIL"
	// HlsImageBasedTrickPlayThumbnailAndFullframe is the "THUMBNAIL_AND_FULLFRAME" member of the HlsImageBasedTrickPlay enum.
	HlsImageBasedTrickPlayThumbnailAndFullframe = "THUMBNAIL_AND_FULLFRAME"
)

// HlsImageBasedTrickPlay_Values lists every member of the HlsImageBasedTrickPlay enum.
func HlsImageBasedTrickPlay_Values() []string {
	return []string{HlsImageBasedTrickPlayNone, HlsImageBasedTrickPlayThumbnail, HlsImageBasedTrickPlayThumbnailAndFullframe}
}
27794
27795// The Initialization Vector is a 128-bit number used in conjunction with the
27796// key for encrypting blocks. If set to INCLUDE, Initialization Vector is listed
27797// in the manifest. Otherwise Initialization Vector is not in the manifest.
const (
	// HlsInitializationVectorInManifestInclude is the "INCLUDE" member of the HlsInitializationVectorInManifest enum.
	HlsInitializationVectorInManifestInclude = "INCLUDE"
	// HlsInitializationVectorInManifestExclude is the "EXCLUDE" member of the HlsInitializationVectorInManifest enum.
	HlsInitializationVectorInManifestExclude = "EXCLUDE"
)

// HlsInitializationVectorInManifest_Values lists every member of the HlsInitializationVectorInManifest enum.
func HlsInitializationVectorInManifest_Values() []string {
	return []string{HlsInitializationVectorInManifestInclude, HlsInitializationVectorInManifestExclude}
}
27813
27814// Specify whether your DRM encryption key is static or from a key provider
27815// that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.
const (
	// HlsKeyProviderTypeSpeke is the "SPEKE" member of the HlsKeyProviderType enum.
	HlsKeyProviderTypeSpeke = "SPEKE"
	// HlsKeyProviderTypeStaticKey is the "STATIC_KEY" member of the HlsKeyProviderType enum.
	HlsKeyProviderTypeStaticKey = "STATIC_KEY"
)

// HlsKeyProviderType_Values lists every member of the HlsKeyProviderType enum.
func HlsKeyProviderType_Values() []string {
	return []string{HlsKeyProviderTypeSpeke, HlsKeyProviderTypeStaticKey}
}
27831
27832// When set to GZIP, compresses HLS playlist.
const (
	// HlsManifestCompressionGzip is the "GZIP" member of the HlsManifestCompression enum.
	HlsManifestCompressionGzip = "GZIP"
	// HlsManifestCompressionNone is the "NONE" member of the HlsManifestCompression enum.
	HlsManifestCompressionNone = "NONE"
)

// HlsManifestCompression_Values lists every member of the HlsManifestCompression enum.
func HlsManifestCompression_Values() []string {
	return []string{HlsManifestCompressionGzip, HlsManifestCompressionNone}
}
27848
27849// Indicates whether the output manifest should use floating point values for
27850// segment duration.
const (
	// HlsManifestDurationFormatFloatingPoint is the "FLOATING_POINT" member of the HlsManifestDurationFormat enum.
	HlsManifestDurationFormatFloatingPoint = "FLOATING_POINT"
	// HlsManifestDurationFormatInteger is the "INTEGER" member of the HlsManifestDurationFormat enum.
	HlsManifestDurationFormatInteger = "INTEGER"
)

// HlsManifestDurationFormat_Values lists every member of the HlsManifestDurationFormat enum.
func HlsManifestDurationFormat_Values() []string {
	return []string{HlsManifestDurationFormatFloatingPoint, HlsManifestDurationFormatInteger}
}
27866
27867// Enable this setting to insert the EXT-X-SESSION-KEY element into the master
27868// playlist. This allows for offline Apple HLS FairPlay content protection.
const (
	// HlsOfflineEncryptedEnabled is the "ENABLED" member of the HlsOfflineEncrypted enum.
	HlsOfflineEncryptedEnabled = "ENABLED"
	// HlsOfflineEncryptedDisabled is the "DISABLED" member of the HlsOfflineEncrypted enum.
	HlsOfflineEncryptedDisabled = "DISABLED"
)

// HlsOfflineEncrypted_Values lists every member of the HlsOfflineEncrypted enum.
func HlsOfflineEncrypted_Values() []string {
	return []string{HlsOfflineEncryptedEnabled, HlsOfflineEncryptedDisabled}
}
27884
27885// Indicates whether the .m3u8 manifest file should be generated for this HLS
27886// output group.
const (
	// HlsOutputSelectionManifestsAndSegments is the "MANIFESTS_AND_SEGMENTS" member of the HlsOutputSelection enum.
	HlsOutputSelectionManifestsAndSegments = "MANIFESTS_AND_SEGMENTS"
	// HlsOutputSelectionSegmentsOnly is the "SEGMENTS_ONLY" member of the HlsOutputSelection enum.
	HlsOutputSelectionSegmentsOnly = "SEGMENTS_ONLY"
)

// HlsOutputSelection_Values lists every member of the HlsOutputSelection enum.
func HlsOutputSelection_Values() []string {
	return []string{HlsOutputSelectionManifestsAndSegments, HlsOutputSelectionSegmentsOnly}
}
27902
27903// Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files.
27904// The value is calculated as follows: either the program date and time are
27905// initialized using the input timecode source, or the time is initialized using
27906// the input timecode source and the date is initialized using the timestamp_offset.
const (
	// HlsProgramDateTimeInclude is the "INCLUDE" member of the HlsProgramDateTime enum.
	HlsProgramDateTimeInclude = "INCLUDE"
	// HlsProgramDateTimeExclude is the "EXCLUDE" member of the HlsProgramDateTime enum.
	HlsProgramDateTimeExclude = "EXCLUDE"
)

// HlsProgramDateTime_Values lists every member of the HlsProgramDateTime enum.
func HlsProgramDateTime_Values() []string {
	return []string{HlsProgramDateTimeInclude, HlsProgramDateTimeExclude}
}
27922
27923// When set to SINGLE_FILE, emits program as a single media resource (.ts) file,
27924// uses #EXT-X-BYTERANGE tags to index segment for playback.
const (
	// HlsSegmentControlSingleFile is the "SINGLE_FILE" member of the HlsSegmentControl enum.
	HlsSegmentControlSingleFile = "SINGLE_FILE"
	// HlsSegmentControlSegmentedFiles is the "SEGMENTED_FILES" member of the HlsSegmentControl enum.
	HlsSegmentControlSegmentedFiles = "SEGMENTED_FILES"
)

// HlsSegmentControl_Values lists every member of the HlsSegmentControl enum.
func HlsSegmentControl_Values() []string {
	return []string{HlsSegmentControlSingleFile, HlsSegmentControlSegmentedFiles}
}
27940
27941// Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag
27942// of variant manifest.
const (
	// HlsStreamInfResolutionInclude is the "INCLUDE" member of the HlsStreamInfResolution enum.
	HlsStreamInfResolutionInclude = "INCLUDE"
	// HlsStreamInfResolutionExclude is the "EXCLUDE" member of the HlsStreamInfResolution enum.
	HlsStreamInfResolutionExclude = "EXCLUDE"
)

// HlsStreamInfResolution_Values lists every member of the HlsStreamInfResolution enum.
func HlsStreamInfResolution_Values() []string {
	return []string{HlsStreamInfResolutionInclude, HlsStreamInfResolutionExclude}
}
27958
// When set to LEGACY, the segment target duration is always rounded up to the
// nearest integer value above its current value in seconds. When set to SPEC_COMPLIANT,
27961// the segment target duration is rounded up to the nearest integer value if
27962// fraction seconds are greater than or equal to 0.5 (>= 0.5) and rounded down
27963// if less than 0.5 (< 0.5). You may need to use LEGACY if your client needs
27964// to ensure that the target duration is always longer than the actual duration
27965// of the segment. Some older players may experience interrupted playback when
27966// the actual duration of a track in a segment is longer than the target duration.
const (
	// HlsTargetDurationCompatibilityModeLegacy is the "LEGACY" member of the HlsTargetDurationCompatibilityMode enum.
	HlsTargetDurationCompatibilityModeLegacy = "LEGACY"
	// HlsTargetDurationCompatibilityModeSpecCompliant is the "SPEC_COMPLIANT" member of the HlsTargetDurationCompatibilityMode enum.
	HlsTargetDurationCompatibilityModeSpecCompliant = "SPEC_COMPLIANT"
)

// HlsTargetDurationCompatibilityMode_Values lists every member of the HlsTargetDurationCompatibilityMode enum.
func HlsTargetDurationCompatibilityMode_Values() []string {
	return []string{HlsTargetDurationCompatibilityModeLegacy, HlsTargetDurationCompatibilityModeSpecCompliant}
}
27982
27983// Indicates ID3 frame that has the timecode.
const (
	// HlsTimedMetadataId3FrameNone is the "NONE" member of the HlsTimedMetadataId3Frame enum.
	HlsTimedMetadataId3FrameNone = "NONE"
	// HlsTimedMetadataId3FramePriv is the "PRIV" member of the HlsTimedMetadataId3Frame enum.
	HlsTimedMetadataId3FramePriv = "PRIV"
	// HlsTimedMetadataId3FrameTdrl is the "TDRL" member of the HlsTimedMetadataId3Frame enum.
	HlsTimedMetadataId3FrameTdrl = "TDRL"
)

// HlsTimedMetadataId3Frame_Values lists every member of the HlsTimedMetadataId3Frame enum.
func HlsTimedMetadataId3Frame_Values() []string {
	return []string{HlsTimedMetadataId3FrameNone, HlsTimedMetadataId3FramePriv, HlsTimedMetadataId3FrameTdrl}
}
28003
28004// Keep this setting enabled to have MediaConvert use the font style and position
28005// information from the captions source in the output. This option is available
28006// only when your input captions are IMSC, SMPTE-TT, or TTML. Disable this setting
28007// for simplified output captions.
const (
	// ImscStylePassthroughEnabled is the "ENABLED" member of the ImscStylePassthrough enum.
	ImscStylePassthroughEnabled = "ENABLED"
	// ImscStylePassthroughDisabled is the "DISABLED" member of the ImscStylePassthrough enum.
	ImscStylePassthroughDisabled = "DISABLED"
)

// ImscStylePassthrough_Values lists every member of the ImscStylePassthrough enum.
func ImscStylePassthrough_Values() []string {
	return []string{ImscStylePassthroughEnabled, ImscStylePassthroughDisabled}
}
28023
28024// Enable Deblock (InputDeblockFilter) to produce smoother motion in the output.
28025// Default is disabled. Only manually controllable for MPEG2 and uncompressed
28026// video inputs.
const (
	// InputDeblockFilterEnabled is the "ENABLED" member of the InputDeblockFilter enum.
	InputDeblockFilterEnabled = "ENABLED"
	// InputDeblockFilterDisabled is the "DISABLED" member of the InputDeblockFilter enum.
	InputDeblockFilterDisabled = "DISABLED"
)

// InputDeblockFilter_Values lists every member of the InputDeblockFilter enum.
func InputDeblockFilter_Values() []string {
	return []string{InputDeblockFilterEnabled, InputDeblockFilterDisabled}
}
28042
28043// Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default
28044// is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video
28045// inputs.
const (
	// InputDenoiseFilterEnabled is the "ENABLED" member of the InputDenoiseFilter enum.
	InputDenoiseFilterEnabled = "ENABLED"
	// InputDenoiseFilterDisabled is the "DISABLED" member of the InputDenoiseFilter enum.
	InputDenoiseFilterDisabled = "DISABLED"
)

// InputDenoiseFilter_Values lists every member of the InputDenoiseFilter enum.
func InputDenoiseFilter_Values() []string {
	return []string{InputDenoiseFilterEnabled, InputDenoiseFilterDisabled}
}
28061
28062// Specify how the transcoding service applies the denoise and deblock filters.
28063// You must also enable the filters separately, with Denoise (InputDenoiseFilter)
28064// and Deblock (InputDeblockFilter). * Auto - The transcoding service determines
28065// whether to apply filtering, depending on input type and quality. * Disable
28066// - The input is not filtered. This is true even if you use the API to enable
// them in (InputDenoiseFilter) and (InputDeblockFilter). * Force - The input
28068// is filtered regardless of input type.
const (
	// InputFilterEnableAuto is the "AUTO" member of the InputFilterEnable enum.
	InputFilterEnableAuto = "AUTO"
	// InputFilterEnableDisable is the "DISABLE" member of the InputFilterEnable enum.
	InputFilterEnableDisable = "DISABLE"
	// InputFilterEnableForce is the "FORCE" member of the InputFilterEnable enum.
	InputFilterEnableForce = "FORCE"
)

// InputFilterEnable_Values lists every member of the InputFilterEnable enum.
func InputFilterEnable_Values() []string {
	return []string{InputFilterEnableAuto, InputFilterEnableDisable, InputFilterEnableForce}
}
28088
28089// Set PSI control (InputPsiControl) for transport stream inputs to specify
// which data the demux process scans. * Ignore PSI - Scan all PIDs for audio
28091// and video. * Use PSI - Scan only PSI data.
const (
	// InputPsiControlIgnorePsi is the "IGNORE_PSI" member of the InputPsiControl enum.
	InputPsiControlIgnorePsi = "IGNORE_PSI"
	// InputPsiControlUsePsi is the "USE_PSI" member of the InputPsiControl enum.
	InputPsiControlUsePsi = "USE_PSI"
)

// InputPsiControl_Values lists every member of the InputPsiControl enum.
func InputPsiControl_Values() []string {
	return []string{InputPsiControlIgnorePsi, InputPsiControlUsePsi}
}
28107
// InputRotate enum values. Use Rotate (InputRotate) to specify how the service
// rotates your video: a clockwise rotation of 0, 90, 180, or 270 degrees, or
// Automatic (AUTO) for .mov or .mp4 inputs with rotation metadata. With AUTO,
// the service rotates according to that metadata when the rotation is within
// one degree of 90, 180, or 270 degrees; any other metadata rotation falls
// back to no rotation. By default, the service does no rotation, even if the
// input has rotation metadata, and rotation metadata is not passed through.
const (
	InputRotateDegree0    = "DEGREE_0"
	InputRotateDegrees90  = "DEGREES_90"
	InputRotateDegrees180 = "DEGREES_180"
	InputRotateDegrees270 = "DEGREES_270"
	InputRotateAuto       = "AUTO"
)
28134
28135// InputRotate_Values returns all elements of the InputRotate enum
28136func InputRotate_Values() []string {
28137	return []string{
28138		InputRotateDegree0,
28139		InputRotateDegrees90,
28140		InputRotateDegrees180,
28141		InputRotateDegrees270,
28142		InputRotateAuto,
28143	}
28144}
28145
// InputSampleRange enum values. Use this setting when your input video codec
// is AVC-Intra; ignore it for all other inputs. Keep the default, Follow
// (FOLLOW), to have the service detect the sample range automatically from
// accurate input metadata. If the input metadata indicates the wrong sample
// range, specify the accurate range here and MediaConvert ignores the input
// metadata. Whichever sample range is used, MediaConvert applies it for
// transcoding and also writes it to the output metadata.
const (
	InputSampleRangeFollow       = "FOLLOW"
	InputSampleRangeFullRange    = "FULL_RANGE"
	InputSampleRangeLimitedRange = "LIMITED_RANGE"
)
28166
28167// InputSampleRange_Values returns all elements of the InputSampleRange enum
28168func InputSampleRange_Values() []string {
28169	return []string{
28170		InputSampleRangeFollow,
28171		InputSampleRangeFullRange,
28172		InputSampleRangeLimitedRange,
28173	}
28174}
28175
// InputScanType enum values. When you have a progressive segmented frame (PsF)
// input, use this setting to flag the input as PsF — MediaConvert doesn't
// detect PsF automatically. Flagging PsF input improves preservation of video
// quality during deinterlacing and frame rate conversion. The default, Auto
// (AUTO), is correct for all inputs that are not PsF. Don't set PSF for
// interlaced input; doing so creates horizontal interlacing artifacts.
const (
	InputScanTypeAuto = "AUTO"
	InputScanTypePsf  = "PSF"
)
28190
28191// InputScanType_Values returns all elements of the InputScanType enum
28192func InputScanType_Values() []string {
28193	return []string{
28194		InputScanTypeAuto,
28195		InputScanTypePsf,
28196	}
28197}
28198
// InputTimecodeSource enum values. Use this Timecode source setting, located
// under the input settings (InputTimecodeSource), to specify how the service
// counts input video frames. The input frame count affects only features that
// apply to a single input at a time, such as input clipping and synchronizing
// some captions formats. EMBEDDED uses the timecodes in your input video;
// ZEROBASED starts the first frame at zero; SPECIFIEDSTART starts the first
// frame at the timecode set in Start timecode (timecodeStart). When
// unspecified, the service uses Embedded by default. For more information
// about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.
const (
	InputTimecodeSourceEmbedded       = "EMBEDDED"
	InputTimecodeSourceZerobased      = "ZEROBASED"
	InputTimecodeSourceSpecifiedstart = "SPECIFIEDSTART"
)
28219
28220// InputTimecodeSource_Values returns all elements of the InputTimecodeSource enum
28221func InputTimecodeSource_Values() []string {
28222	return []string{
28223		InputTimecodeSourceEmbedded,
28224		InputTimecodeSourceZerobased,
28225		InputTimecodeSourceSpecifiedstart,
28226	}
28227}
28228
// JobPhase enum values. A job's phase can be PROBING, TRANSCODING, or
// UPLOADING.
const (
	JobPhaseProbing     = "PROBING"
	JobPhaseTranscoding = "TRANSCODING"
	JobPhaseUploading   = "UPLOADING"
)
28240
28241// JobPhase_Values returns all elements of the JobPhase enum
28242func JobPhase_Values() []string {
28243	return []string{
28244		JobPhaseProbing,
28245		JobPhaseTranscoding,
28246		JobPhaseUploading,
28247	}
28248}
28249
// JobStatus enum values. A job's status can be SUBMITTED, PROGRESSING,
// COMPLETE, CANCELED, or ERROR.
const (
	JobStatusSubmitted   = "SUBMITTED"
	JobStatusProgressing = "PROGRESSING"
	JobStatusComplete    = "COMPLETE"
	JobStatusCanceled    = "CANCELED"
	JobStatusError       = "ERROR"
)
28267
28268// JobStatus_Values returns all elements of the JobStatus enum
28269func JobStatus_Values() []string {
28270	return []string{
28271		JobStatusSubmitted,
28272		JobStatusProgressing,
28273		JobStatusComplete,
28274		JobStatusCanceled,
28275		JobStatusError,
28276	}
28277}
28278
// JobTemplateListBy enum values. Optional. When you request a list of job
// templates, you can choose to list them alphabetically by NAME or
// chronologically by CREATION_DATE. If you don't specify, the service lists
// them by name.
const (
	JobTemplateListByName         = "NAME"
	JobTemplateListByCreationDate = "CREATION_DATE"
	JobTemplateListBySystem       = "SYSTEM"
)
28292
28293// JobTemplateListBy_Values returns all elements of the JobTemplateListBy enum
28294func JobTemplateListBy_Values() []string {
28295	return []string{
28296		JobTemplateListByName,
28297		JobTemplateListByCreationDate,
28298		JobTemplateListBySystem,
28299	}
28300}
28301
// LanguageCode enum values. Specify the language, using the ISO 639-2
// three-letter code listed at https://www.loc.gov/standards/iso639-2/php/code_list.php.
const (
	LanguageCodeEng = "ENG"
	LanguageCodeSpa = "SPA"
	LanguageCodeFra = "FRA"
	LanguageCodeDeu = "DEU"
	LanguageCodeGer = "GER"
	LanguageCodeZho = "ZHO"
	LanguageCodeAra = "ARA"
	LanguageCodeHin = "HIN"
	LanguageCodeJpn = "JPN"
	LanguageCodeRus = "RUS"
	LanguageCodePor = "POR"
	LanguageCodeIta = "ITA"
	LanguageCodeUrd = "URD"
	LanguageCodeVie = "VIE"
	LanguageCodeKor = "KOR"
	LanguageCodePan = "PAN"
	LanguageCodeAbk = "ABK"
	LanguageCodeAar = "AAR"
	LanguageCodeAfr = "AFR"
	LanguageCodeAka = "AKA"
	LanguageCodeSqi = "SQI"
	LanguageCodeAmh = "AMH"
	LanguageCodeArg = "ARG"
	LanguageCodeHye = "HYE"
	LanguageCodeAsm = "ASM"
	LanguageCodeAva = "AVA"
	LanguageCodeAve = "AVE"
	LanguageCodeAym = "AYM"
	LanguageCodeAze = "AZE"
	LanguageCodeBam = "BAM"
	LanguageCodeBak = "BAK"
	LanguageCodeEus = "EUS"
	LanguageCodeBel = "BEL"
	LanguageCodeBen = "BEN"
	LanguageCodeBih = "BIH"
	LanguageCodeBis = "BIS"
	LanguageCodeBos = "BOS"
	LanguageCodeBre = "BRE"
	LanguageCodeBul = "BUL"
	LanguageCodeMya = "MYA"
	LanguageCodeCat = "CAT"
	LanguageCodeKhm = "KHM"
	LanguageCodeCha = "CHA"
	LanguageCodeChe = "CHE"
	LanguageCodeNya = "NYA"
	LanguageCodeChu = "CHU"
	LanguageCodeChv = "CHV"
	LanguageCodeCor = "COR"
	LanguageCodeCos = "COS"
	LanguageCodeCre = "CRE"
	LanguageCodeHrv = "HRV"
	LanguageCodeCes = "CES"
	LanguageCodeDan = "DAN"
	LanguageCodeDiv = "DIV"
	LanguageCodeNld = "NLD"
	LanguageCodeDzo = "DZO"
	LanguageCodeEnm = "ENM"
	LanguageCodeEpo = "EPO"
	LanguageCodeEst = "EST"
	LanguageCodeEwe = "EWE"
	LanguageCodeFao = "FAO"
	LanguageCodeFij = "FIJ"
	LanguageCodeFin = "FIN"
	LanguageCodeFrm = "FRM"
	LanguageCodeFul = "FUL"
	LanguageCodeGla = "GLA"
	LanguageCodeGlg = "GLG"
	LanguageCodeLug = "LUG"
	LanguageCodeKat = "KAT"
	LanguageCodeEll = "ELL"
	LanguageCodeGrn = "GRN"
	LanguageCodeGuj = "GUJ"
	LanguageCodeHat = "HAT"
	LanguageCodeHau = "HAU"
	LanguageCodeHeb = "HEB"
	LanguageCodeHer = "HER"
	LanguageCodeHmo = "HMO"
	LanguageCodeHun = "HUN"
	LanguageCodeIsl = "ISL"
	LanguageCodeIdo = "IDO"
	LanguageCodeIbo = "IBO"
	LanguageCodeInd = "IND"
	LanguageCodeIna = "INA"
	LanguageCodeIle = "ILE"
	LanguageCodeIku = "IKU"
	LanguageCodeIpk = "IPK"
	LanguageCodeGle = "GLE"
	LanguageCodeJav = "JAV"
	LanguageCodeKal = "KAL"
	LanguageCodeKan = "KAN"
	LanguageCodeKau = "KAU"
	LanguageCodeKas = "KAS"
	LanguageCodeKaz = "KAZ"
	LanguageCodeKik = "KIK"
	LanguageCodeKin = "KIN"
	LanguageCodeKir = "KIR"
	LanguageCodeKom = "KOM"
	LanguageCodeKon = "KON"
	LanguageCodeKua = "KUA"
	LanguageCodeKur = "KUR"
	LanguageCodeLao = "LAO"
	LanguageCodeLat = "LAT"
	LanguageCodeLav = "LAV"
	LanguageCodeLim = "LIM"
	LanguageCodeLin = "LIN"
	LanguageCodeLit = "LIT"
	LanguageCodeLub = "LUB"
	LanguageCodeLtz = "LTZ"
	LanguageCodeMkd = "MKD"
	LanguageCodeMlg = "MLG"
	LanguageCodeMsa = "MSA"
	LanguageCodeMal = "MAL"
	LanguageCodeMlt = "MLT"
	LanguageCodeGlv = "GLV"
	LanguageCodeMri = "MRI"
	LanguageCodeMar = "MAR"
	LanguageCodeMah = "MAH"
	LanguageCodeMon = "MON"
	LanguageCodeNau = "NAU"
	LanguageCodeNav = "NAV"
	LanguageCodeNde = "NDE"
	LanguageCodeNbl = "NBL"
	LanguageCodeNdo = "NDO"
	LanguageCodeNep = "NEP"
	LanguageCodeSme = "SME"
	LanguageCodeNor = "NOR"
	LanguageCodeNob = "NOB"
	LanguageCodeNno = "NNO"
	LanguageCodeOci = "OCI"
	LanguageCodeOji = "OJI"
	LanguageCodeOri = "ORI"
	LanguageCodeOrm = "ORM"
	LanguageCodeOss = "OSS"
	LanguageCodePli = "PLI"
	LanguageCodeFas = "FAS"
	LanguageCodePol = "POL"
	LanguageCodePus = "PUS"
	LanguageCodeQue = "QUE"
	LanguageCodeQaa = "QAA"
	LanguageCodeRon = "RON"
	LanguageCodeRoh = "ROH"
	LanguageCodeRun = "RUN"
	LanguageCodeSmo = "SMO"
	LanguageCodeSag = "SAG"
	LanguageCodeSan = "SAN"
	LanguageCodeSrd = "SRD"
	LanguageCodeSrb = "SRB"
	LanguageCodeSna = "SNA"
	LanguageCodeIii = "III"
	LanguageCodeSnd = "SND"
	LanguageCodeSin = "SIN"
	LanguageCodeSlk = "SLK"
	LanguageCodeSlv = "SLV"
	LanguageCodeSom = "SOM"
	LanguageCodeSot = "SOT"
	LanguageCodeSun = "SUN"
	LanguageCodeSwa = "SWA"
	LanguageCodeSsw = "SSW"
	LanguageCodeSwe = "SWE"
	LanguageCodeTgl = "TGL"
	LanguageCodeTah = "TAH"
	LanguageCodeTgk = "TGK"
	LanguageCodeTam = "TAM"
	LanguageCodeTat = "TAT"
	LanguageCodeTel = "TEL"
	LanguageCodeTha = "THA"
	LanguageCodeBod = "BOD"
	LanguageCodeTir = "TIR"
	LanguageCodeTon = "TON"
	LanguageCodeTso = "TSO"
	LanguageCodeTsn = "TSN"
	LanguageCodeTur = "TUR"
	LanguageCodeTuk = "TUK"
	LanguageCodeTwi = "TWI"
	LanguageCodeUig = "UIG"
	LanguageCodeUkr = "UKR"
	LanguageCodeUzb = "UZB"
	LanguageCodeVen = "VEN"
	LanguageCodeVol = "VOL"
	LanguageCodeWln = "WLN"
	LanguageCodeCym = "CYM"
	LanguageCodeFry = "FRY"
	LanguageCodeWol = "WOL"
	LanguageCodeXho = "XHO"
	LanguageCodeYid = "YID"
	LanguageCodeYor = "YOR"
	LanguageCodeZha = "ZHA"
	LanguageCodeZul = "ZUL"
	LanguageCodeOrj = "ORJ"
	LanguageCodeQpc = "QPC"
	LanguageCodeTng = "TNG"
)
28877
28878// LanguageCode_Values returns all elements of the LanguageCode enum
28879func LanguageCode_Values() []string {
28880	return []string{
28881		LanguageCodeEng,
28882		LanguageCodeSpa,
28883		LanguageCodeFra,
28884		LanguageCodeDeu,
28885		LanguageCodeGer,
28886		LanguageCodeZho,
28887		LanguageCodeAra,
28888		LanguageCodeHin,
28889		LanguageCodeJpn,
28890		LanguageCodeRus,
28891		LanguageCodePor,
28892		LanguageCodeIta,
28893		LanguageCodeUrd,
28894		LanguageCodeVie,
28895		LanguageCodeKor,
28896		LanguageCodePan,
28897		LanguageCodeAbk,
28898		LanguageCodeAar,
28899		LanguageCodeAfr,
28900		LanguageCodeAka,
28901		LanguageCodeSqi,
28902		LanguageCodeAmh,
28903		LanguageCodeArg,
28904		LanguageCodeHye,
28905		LanguageCodeAsm,
28906		LanguageCodeAva,
28907		LanguageCodeAve,
28908		LanguageCodeAym,
28909		LanguageCodeAze,
28910		LanguageCodeBam,
28911		LanguageCodeBak,
28912		LanguageCodeEus,
28913		LanguageCodeBel,
28914		LanguageCodeBen,
28915		LanguageCodeBih,
28916		LanguageCodeBis,
28917		LanguageCodeBos,
28918		LanguageCodeBre,
28919		LanguageCodeBul,
28920		LanguageCodeMya,
28921		LanguageCodeCat,
28922		LanguageCodeKhm,
28923		LanguageCodeCha,
28924		LanguageCodeChe,
28925		LanguageCodeNya,
28926		LanguageCodeChu,
28927		LanguageCodeChv,
28928		LanguageCodeCor,
28929		LanguageCodeCos,
28930		LanguageCodeCre,
28931		LanguageCodeHrv,
28932		LanguageCodeCes,
28933		LanguageCodeDan,
28934		LanguageCodeDiv,
28935		LanguageCodeNld,
28936		LanguageCodeDzo,
28937		LanguageCodeEnm,
28938		LanguageCodeEpo,
28939		LanguageCodeEst,
28940		LanguageCodeEwe,
28941		LanguageCodeFao,
28942		LanguageCodeFij,
28943		LanguageCodeFin,
28944		LanguageCodeFrm,
28945		LanguageCodeFul,
28946		LanguageCodeGla,
28947		LanguageCodeGlg,
28948		LanguageCodeLug,
28949		LanguageCodeKat,
28950		LanguageCodeEll,
28951		LanguageCodeGrn,
28952		LanguageCodeGuj,
28953		LanguageCodeHat,
28954		LanguageCodeHau,
28955		LanguageCodeHeb,
28956		LanguageCodeHer,
28957		LanguageCodeHmo,
28958		LanguageCodeHun,
28959		LanguageCodeIsl,
28960		LanguageCodeIdo,
28961		LanguageCodeIbo,
28962		LanguageCodeInd,
28963		LanguageCodeIna,
28964		LanguageCodeIle,
28965		LanguageCodeIku,
28966		LanguageCodeIpk,
28967		LanguageCodeGle,
28968		LanguageCodeJav,
28969		LanguageCodeKal,
28970		LanguageCodeKan,
28971		LanguageCodeKau,
28972		LanguageCodeKas,
28973		LanguageCodeKaz,
28974		LanguageCodeKik,
28975		LanguageCodeKin,
28976		LanguageCodeKir,
28977		LanguageCodeKom,
28978		LanguageCodeKon,
28979		LanguageCodeKua,
28980		LanguageCodeKur,
28981		LanguageCodeLao,
28982		LanguageCodeLat,
28983		LanguageCodeLav,
28984		LanguageCodeLim,
28985		LanguageCodeLin,
28986		LanguageCodeLit,
28987		LanguageCodeLub,
28988		LanguageCodeLtz,
28989		LanguageCodeMkd,
28990		LanguageCodeMlg,
28991		LanguageCodeMsa,
28992		LanguageCodeMal,
28993		LanguageCodeMlt,
28994		LanguageCodeGlv,
28995		LanguageCodeMri,
28996		LanguageCodeMar,
28997		LanguageCodeMah,
28998		LanguageCodeMon,
28999		LanguageCodeNau,
29000		LanguageCodeNav,
29001		LanguageCodeNde,
29002		LanguageCodeNbl,
29003		LanguageCodeNdo,
29004		LanguageCodeNep,
29005		LanguageCodeSme,
29006		LanguageCodeNor,
29007		LanguageCodeNob,
29008		LanguageCodeNno,
29009		LanguageCodeOci,
29010		LanguageCodeOji,
29011		LanguageCodeOri,
29012		LanguageCodeOrm,
29013		LanguageCodeOss,
29014		LanguageCodePli,
29015		LanguageCodeFas,
29016		LanguageCodePol,
29017		LanguageCodePus,
29018		LanguageCodeQue,
29019		LanguageCodeQaa,
29020		LanguageCodeRon,
29021		LanguageCodeRoh,
29022		LanguageCodeRun,
29023		LanguageCodeSmo,
29024		LanguageCodeSag,
29025		LanguageCodeSan,
29026		LanguageCodeSrd,
29027		LanguageCodeSrb,
29028		LanguageCodeSna,
29029		LanguageCodeIii,
29030		LanguageCodeSnd,
29031		LanguageCodeSin,
29032		LanguageCodeSlk,
29033		LanguageCodeSlv,
29034		LanguageCodeSom,
29035		LanguageCodeSot,
29036		LanguageCodeSun,
29037		LanguageCodeSwa,
29038		LanguageCodeSsw,
29039		LanguageCodeSwe,
29040		LanguageCodeTgl,
29041		LanguageCodeTah,
29042		LanguageCodeTgk,
29043		LanguageCodeTam,
29044		LanguageCodeTat,
29045		LanguageCodeTel,
29046		LanguageCodeTha,
29047		LanguageCodeBod,
29048		LanguageCodeTir,
29049		LanguageCodeTon,
29050		LanguageCodeTso,
29051		LanguageCodeTsn,
29052		LanguageCodeTur,
29053		LanguageCodeTuk,
29054		LanguageCodeTwi,
29055		LanguageCodeUig,
29056		LanguageCodeUkr,
29057		LanguageCodeUzb,
29058		LanguageCodeVen,
29059		LanguageCodeVol,
29060		LanguageCodeWln,
29061		LanguageCodeCym,
29062		LanguageCodeFry,
29063		LanguageCodeWol,
29064		LanguageCodeXho,
29065		LanguageCodeYid,
29066		LanguageCodeYor,
29067		LanguageCodeZha,
29068		LanguageCodeZul,
29069		LanguageCodeOrj,
29070		LanguageCodeQpc,
29071		LanguageCodeTng,
29072	}
29073}
29074
// M2tsAudioBufferModel enum values. Selects between the DVB and ATSC buffer
// models for Dolby Digital audio.
const (
	M2tsAudioBufferModelDvb  = "DVB"
	M2tsAudioBufferModelAtsc = "ATSC"
)
29083
29084// M2tsAudioBufferModel_Values returns all elements of the M2tsAudioBufferModel enum
29085func M2tsAudioBufferModel_Values() []string {
29086	return []string{
29087		M2tsAudioBufferModelDvb,
29088		M2tsAudioBufferModelAtsc,
29089	}
29090}
29091
// M2tsAudioDuration enum values. Specify this setting only when your output
// will be consumed by a downstream repackaging workflow that is sensitive to
// very small duration differences between video and audio. For that case,
// choose Match video duration (MATCH_VIDEO_DURATION): MediaConvert pads the
// output audio streams with silence or trims them so each audio stream is at
// least as long as the video stream, and after padding or trimming is no more
// than one frame longer than it. Padding or trimming is applied only to the
// end of the last segment of the output; for unsegmented outputs, padding is
// added only to the end of the file. In all other cases keep the default,
// Default codec duration (DEFAULT_CODEC_DURATION), in which any minor
// audio/video duration discrepancies depend on your output audio codec.
const (
	M2tsAudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION"
	M2tsAudioDurationMatchVideoDuration   = "MATCH_VIDEO_DURATION"
)
29112
29113// M2tsAudioDuration_Values returns all elements of the M2tsAudioDuration enum
29114func M2tsAudioDuration_Values() []string {
29115	return []string{
29116		M2tsAudioDurationDefaultCodecDuration,
29117		M2tsAudioDurationMatchVideoDuration,
29118	}
29119}
29120
// M2tsBufferModel enum values. Controls what buffer model to use for accurate
// interleaving. MULTIPLEX uses the multiplex buffer model; NONE can lead to
// lower latency, but low-memory devices may not be able to play back the
// stream without interruptions.
const (
	M2tsBufferModelMultiplex = "MULTIPLEX"
	M2tsBufferModelNone      = "NONE"
)
29131
29132// M2tsBufferModel_Values returns all elements of the M2tsBufferModel enum
29133func M2tsBufferModel_Values() []string {
29134	return []string{
29135		M2tsBufferModelMultiplex,
29136		M2tsBufferModelNone,
29137	}
29138}
29139
29140// When set to VIDEO_AND_FIXED_INTERVALS, audio EBP markers will be added to
29141// partitions 3 and 4. The interval between these additional markers will be
29142// fixed, and will be slightly shorter than the video EBP marker interval. When
29143// set to VIDEO_INTERVAL, these additional markers will not be inserted. Only
29144// applicable when EBP segmentation markers are is selected (segmentationMarkers
29145// is EBP or EBP_LEGACY).
29146const (
29147	// M2tsEbpAudioIntervalVideoAndFixedIntervals is a M2tsEbpAudioInterval enum value
29148	M2tsEbpAudioIntervalVideoAndFixedIntervals = "VIDEO_AND_FIXED_INTERVALS"
29149
29150	// M2tsEbpAudioIntervalVideoInterval is a M2tsEbpAudioInterval enum value
29151	M2tsEbpAudioIntervalVideoInterval = "VIDEO_INTERVAL"
29152)
29153
29154// M2tsEbpAudioInterval_Values returns all elements of the M2tsEbpAudioInterval enum
29155func M2tsEbpAudioInterval_Values() []string {
29156	return []string{
29157		M2tsEbpAudioIntervalVideoAndFixedIntervals,
29158		M2tsEbpAudioIntervalVideoInterval,
29159	}
29160}
29161
29162// Selects which PIDs to place EBP markers on. They can either be placed only
29163// on the video PID, or on both the video PID and all audio PIDs. Only applicable
29164// when EBP segmentation markers are is selected (segmentationMarkers is EBP
29165// or EBP_LEGACY).
29166const (
29167	// M2tsEbpPlacementVideoAndAudioPids is a M2tsEbpPlacement enum value
29168	M2tsEbpPlacementVideoAndAudioPids = "VIDEO_AND_AUDIO_PIDS"
29169
29170	// M2tsEbpPlacementVideoPid is a M2tsEbpPlacement enum value
29171	M2tsEbpPlacementVideoPid = "VIDEO_PID"
29172)
29173
29174// M2tsEbpPlacement_Values returns all elements of the M2tsEbpPlacement enum
29175func M2tsEbpPlacement_Values() []string {
29176	return []string{
29177		M2tsEbpPlacementVideoAndAudioPids,
29178		M2tsEbpPlacementVideoPid,
29179	}
29180}
29181
29182// Controls whether to include the ES Rate field in the PES header.
29183const (
29184	// M2tsEsRateInPesInclude is a M2tsEsRateInPes enum value
29185	M2tsEsRateInPesInclude = "INCLUDE"
29186
29187	// M2tsEsRateInPesExclude is a M2tsEsRateInPes enum value
29188	M2tsEsRateInPesExclude = "EXCLUDE"
29189)
29190
29191// M2tsEsRateInPes_Values returns all elements of the M2tsEsRateInPes enum
29192func M2tsEsRateInPes_Values() []string {
29193	return []string{
29194		M2tsEsRateInPesInclude,
29195		M2tsEsRateInPesExclude,
29196	}
29197}
29198
29199// Keep the default value (DEFAULT) unless you know that your audio EBP markers
29200// are incorrectly appearing before your video EBP markers. To correct this
29201// problem, set this value to Force (FORCE).
29202const (
29203	// M2tsForceTsVideoEbpOrderForce is a M2tsForceTsVideoEbpOrder enum value
29204	M2tsForceTsVideoEbpOrderForce = "FORCE"
29205
29206	// M2tsForceTsVideoEbpOrderDefault is a M2tsForceTsVideoEbpOrder enum value
29207	M2tsForceTsVideoEbpOrderDefault = "DEFAULT"
29208)
29209
29210// M2tsForceTsVideoEbpOrder_Values returns all elements of the M2tsForceTsVideoEbpOrder enum
29211func M2tsForceTsVideoEbpOrder_Values() []string {
29212	return []string{
29213		M2tsForceTsVideoEbpOrderForce,
29214		M2tsForceTsVideoEbpOrderDefault,
29215	}
29216}
29217
29218// If INSERT, Nielsen inaudible tones for media tracking will be detected in
29219// the input audio and an equivalent ID3 tag will be inserted in the output.
29220const (
29221	// M2tsNielsenId3Insert is a M2tsNielsenId3 enum value
29222	M2tsNielsenId3Insert = "INSERT"
29223
29224	// M2tsNielsenId3None is a M2tsNielsenId3 enum value
29225	M2tsNielsenId3None = "NONE"
29226)
29227
29228// M2tsNielsenId3_Values returns all elements of the M2tsNielsenId3 enum
29229func M2tsNielsenId3_Values() []string {
29230	return []string{
29231		M2tsNielsenId3Insert,
29232		M2tsNielsenId3None,
29233	}
29234}
29235
29236// When set to PCR_EVERY_PES_PACKET, a Program Clock Reference value is inserted
29237// for every Packetized Elementary Stream (PES) header. This is effective only
29238// when the PCR PID is the same as the video or audio elementary stream.
29239const (
29240	// M2tsPcrControlPcrEveryPesPacket is a M2tsPcrControl enum value
29241	M2tsPcrControlPcrEveryPesPacket = "PCR_EVERY_PES_PACKET"
29242
29243	// M2tsPcrControlConfiguredPcrPeriod is a M2tsPcrControl enum value
29244	M2tsPcrControlConfiguredPcrPeriod = "CONFIGURED_PCR_PERIOD"
29245)
29246
29247// M2tsPcrControl_Values returns all elements of the M2tsPcrControl enum
29248func M2tsPcrControl_Values() []string {
29249	return []string{
29250		M2tsPcrControlPcrEveryPesPacket,
29251		M2tsPcrControlConfiguredPcrPeriod,
29252	}
29253}
29254
29255// When set to CBR, inserts null packets into transport stream to fill specified
29256// bitrate. When set to VBR, the bitrate setting acts as the maximum bitrate,
29257// but the output will not be padded up to that bitrate.
29258const (
29259	// M2tsRateModeVbr is a M2tsRateMode enum value
29260	M2tsRateModeVbr = "VBR"
29261
29262	// M2tsRateModeCbr is a M2tsRateMode enum value
29263	M2tsRateModeCbr = "CBR"
29264)
29265
29266// M2tsRateMode_Values returns all elements of the M2tsRateMode enum
29267func M2tsRateMode_Values() []string {
29268	return []string{
29269		M2tsRateModeVbr,
29270		M2tsRateModeCbr,
29271	}
29272}
29273
29274// For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if
29275// you want SCTE-35 markers that appear in your input to also appear in this
29276// output. Choose None (NONE) if you don't want SCTE-35 markers in this output.
29277// For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). Also
29278// provide the ESAM XML as a string in the setting Signal processing notification
29279// XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam).
29280const (
29281	// M2tsScte35SourcePassthrough is a M2tsScte35Source enum value
29282	M2tsScte35SourcePassthrough = "PASSTHROUGH"
29283
29284	// M2tsScte35SourceNone is a M2tsScte35Source enum value
29285	M2tsScte35SourceNone = "NONE"
29286)
29287
29288// M2tsScte35Source_Values returns all elements of the M2tsScte35Source enum
29289func M2tsScte35Source_Values() []string {
29290	return []string{
29291		M2tsScte35SourcePassthrough,
29292		M2tsScte35SourceNone,
29293	}
29294}
29295
29296// Inserts segmentation markers at each segmentation_time period. rai_segstart
29297// sets the Random Access Indicator bit in the adaptation field. rai_adapt sets
29298// the RAI bit and adds the current timecode in the private data bytes. psi_segstart
29299// inserts PAT and PMT tables at the start of segments. ebp adds Encoder Boundary
29300// Point information to the adaptation field as per OpenCable specification
29301// OC-SP-EBP-I01-130118. ebp_legacy adds Encoder Boundary Point information
29302// to the adaptation field using a legacy proprietary format.
29303const (
29304	// M2tsSegmentationMarkersNone is a M2tsSegmentationMarkers enum value
29305	M2tsSegmentationMarkersNone = "NONE"
29306
29307	// M2tsSegmentationMarkersRaiSegstart is a M2tsSegmentationMarkers enum value
29308	M2tsSegmentationMarkersRaiSegstart = "RAI_SEGSTART"
29309
29310	// M2tsSegmentationMarkersRaiAdapt is a M2tsSegmentationMarkers enum value
29311	M2tsSegmentationMarkersRaiAdapt = "RAI_ADAPT"
29312
29313	// M2tsSegmentationMarkersPsiSegstart is a M2tsSegmentationMarkers enum value
29314	M2tsSegmentationMarkersPsiSegstart = "PSI_SEGSTART"
29315
29316	// M2tsSegmentationMarkersEbp is a M2tsSegmentationMarkers enum value
29317	M2tsSegmentationMarkersEbp = "EBP"
29318
29319	// M2tsSegmentationMarkersEbpLegacy is a M2tsSegmentationMarkers enum value
29320	M2tsSegmentationMarkersEbpLegacy = "EBP_LEGACY"
29321)
29322
29323// M2tsSegmentationMarkers_Values returns all elements of the M2tsSegmentationMarkers enum
29324func M2tsSegmentationMarkers_Values() []string {
29325	return []string{
29326		M2tsSegmentationMarkersNone,
29327		M2tsSegmentationMarkersRaiSegstart,
29328		M2tsSegmentationMarkersRaiAdapt,
29329		M2tsSegmentationMarkersPsiSegstart,
29330		M2tsSegmentationMarkersEbp,
29331		M2tsSegmentationMarkersEbpLegacy,
29332	}
29333}
29334
29335// The segmentation style parameter controls how segmentation markers are inserted
29336// into the transport stream. With avails, it is possible that segments may
29337// be truncated, which can influence where future segmentation markers are inserted.
29338// When a segmentation style of "reset_cadence" is selected and a segment is
29339// truncated due to an avail, we will reset the segmentation cadence. This means
29340// the subsequent segment will have a duration of of $segmentation_time seconds.
29341// When a segmentation style of "maintain_cadence" is selected and a segment
29342// is truncated due to an avail, we will not reset the segmentation cadence.
29343// This means the subsequent segment will likely be truncated as well. However,
29344// all segments after that will have a duration of $segmentation_time seconds.
29345// Note that EBP lookahead is a slight exception to this rule.
29346const (
29347	// M2tsSegmentationStyleMaintainCadence is a M2tsSegmentationStyle enum value
29348	M2tsSegmentationStyleMaintainCadence = "MAINTAIN_CADENCE"
29349
29350	// M2tsSegmentationStyleResetCadence is a M2tsSegmentationStyle enum value
29351	M2tsSegmentationStyleResetCadence = "RESET_CADENCE"
29352)
29353
29354// M2tsSegmentationStyle_Values returns all elements of the M2tsSegmentationStyle enum
29355func M2tsSegmentationStyle_Values() []string {
29356	return []string{
29357		M2tsSegmentationStyleMaintainCadence,
29358		M2tsSegmentationStyleResetCadence,
29359	}
29360}
29361
29362// Specify this setting only when your output will be consumed by a downstream
29363// repackaging workflow that is sensitive to very small duration differences
29364// between video and audio. For this situation, choose Match video duration
29365// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
29366// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
29367// MediaConvert pads the output audio streams with silence or trims them to
29368// ensure that the total duration of each audio stream is at least as long as
29369// the total duration of the video stream. After padding or trimming, the audio
29370// stream duration is no more than one frame longer than the video stream. MediaConvert
29371// applies audio padding or trimming only to the end of the last segment of
29372// the output. For unsegmented outputs, MediaConvert adds padding only to the
29373// end of the file. When you keep the default value, any minor discrepancies
29374// between audio and video duration will depend on your output audio codec.
29375const (
29376	// M3u8AudioDurationDefaultCodecDuration is a M3u8AudioDuration enum value
29377	M3u8AudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION"
29378
29379	// M3u8AudioDurationMatchVideoDuration is a M3u8AudioDuration enum value
29380	M3u8AudioDurationMatchVideoDuration = "MATCH_VIDEO_DURATION"
29381)
29382
29383// M3u8AudioDuration_Values returns all elements of the M3u8AudioDuration enum
29384func M3u8AudioDuration_Values() []string {
29385	return []string{
29386		M3u8AudioDurationDefaultCodecDuration,
29387		M3u8AudioDurationMatchVideoDuration,
29388	}
29389}
29390
29391// If INSERT, Nielsen inaudible tones for media tracking will be detected in
29392// the input audio and an equivalent ID3 tag will be inserted in the output.
29393const (
29394	// M3u8NielsenId3Insert is a M3u8NielsenId3 enum value
29395	M3u8NielsenId3Insert = "INSERT"
29396
29397	// M3u8NielsenId3None is a M3u8NielsenId3 enum value
29398	M3u8NielsenId3None = "NONE"
29399)
29400
29401// M3u8NielsenId3_Values returns all elements of the M3u8NielsenId3 enum
29402func M3u8NielsenId3_Values() []string {
29403	return []string{
29404		M3u8NielsenId3Insert,
29405		M3u8NielsenId3None,
29406	}
29407}
29408
29409// When set to PCR_EVERY_PES_PACKET a Program Clock Reference value is inserted
29410// for every Packetized Elementary Stream (PES) header. This parameter is effective
29411// only when the PCR PID is the same as the video or audio elementary stream.
29412const (
29413	// M3u8PcrControlPcrEveryPesPacket is a M3u8PcrControl enum value
29414	M3u8PcrControlPcrEveryPesPacket = "PCR_EVERY_PES_PACKET"
29415
29416	// M3u8PcrControlConfiguredPcrPeriod is a M3u8PcrControl enum value
29417	M3u8PcrControlConfiguredPcrPeriod = "CONFIGURED_PCR_PERIOD"
29418)
29419
29420// M3u8PcrControl_Values returns all elements of the M3u8PcrControl enum
29421func M3u8PcrControl_Values() []string {
29422	return []string{
29423		M3u8PcrControlPcrEveryPesPacket,
29424		M3u8PcrControlConfiguredPcrPeriod,
29425	}
29426}
29427
29428// For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if
29429// you want SCTE-35 markers that appear in your input to also appear in this
29430// output. Choose None (NONE) if you don't want SCTE-35 markers in this output.
29431// For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you
29432// don't want manifest conditioning. Choose Passthrough (PASSTHROUGH) and choose
29433// Ad markers (adMarkers) if you do want manifest conditioning. In both cases,
29434// also provide the ESAM XML as a string in the setting Signal processing notification
29435// XML (sccXml).
29436const (
29437	// M3u8Scte35SourcePassthrough is a M3u8Scte35Source enum value
29438	M3u8Scte35SourcePassthrough = "PASSTHROUGH"
29439
29440	// M3u8Scte35SourceNone is a M3u8Scte35Source enum value
29441	M3u8Scte35SourceNone = "NONE"
29442)
29443
29444// M3u8Scte35Source_Values returns all elements of the M3u8Scte35Source enum
29445func M3u8Scte35Source_Values() []string {
29446	return []string{
29447		M3u8Scte35SourcePassthrough,
29448		M3u8Scte35SourceNone,
29449	}
29450}
29451
29452// Choose the type of motion graphic asset that you are providing for your overlay.
29453// You can choose either a .mov file or a series of .png files.
29454const (
29455	// MotionImageInsertionModeMov is a MotionImageInsertionMode enum value
29456	MotionImageInsertionModeMov = "MOV"
29457
29458	// MotionImageInsertionModePng is a MotionImageInsertionMode enum value
29459	MotionImageInsertionModePng = "PNG"
29460)
29461
29462// MotionImageInsertionMode_Values returns all elements of the MotionImageInsertionMode enum
29463func MotionImageInsertionMode_Values() []string {
29464	return []string{
29465		MotionImageInsertionModeMov,
29466		MotionImageInsertionModePng,
29467	}
29468}
29469
29470// Specify whether your motion graphic overlay repeats on a loop or plays only
29471// once.
29472const (
29473	// MotionImagePlaybackOnce is a MotionImagePlayback enum value
29474	MotionImagePlaybackOnce = "ONCE"
29475
29476	// MotionImagePlaybackRepeat is a MotionImagePlayback enum value
29477	MotionImagePlaybackRepeat = "REPEAT"
29478)
29479
29480// MotionImagePlayback_Values returns all elements of the MotionImagePlayback enum
29481func MotionImagePlayback_Values() []string {
29482	return []string{
29483		MotionImagePlaybackOnce,
29484		MotionImagePlaybackRepeat,
29485	}
29486}
29487
29488// When enabled, include 'clap' atom if appropriate for the video output settings.
29489const (
29490	// MovClapAtomInclude is a MovClapAtom enum value
29491	MovClapAtomInclude = "INCLUDE"
29492
29493	// MovClapAtomExclude is a MovClapAtom enum value
29494	MovClapAtomExclude = "EXCLUDE"
29495)
29496
29497// MovClapAtom_Values returns all elements of the MovClapAtom enum
29498func MovClapAtom_Values() []string {
29499	return []string{
29500		MovClapAtomInclude,
29501		MovClapAtomExclude,
29502	}
29503}
29504
29505// When enabled, file composition times will start at zero, composition times
29506// in the 'ctts' (composition time to sample) box for B-frames will be negative,
29507// and a 'cslg' (composition shift least greatest) box will be included per
29508// 14496-1 amendment 1. This improves compatibility with Apple players and tools.
29509const (
29510	// MovCslgAtomInclude is a MovCslgAtom enum value
29511	MovCslgAtomInclude = "INCLUDE"
29512
29513	// MovCslgAtomExclude is a MovCslgAtom enum value
29514	MovCslgAtomExclude = "EXCLUDE"
29515)
29516
29517// MovCslgAtom_Values returns all elements of the MovCslgAtom enum
29518func MovCslgAtom_Values() []string {
29519	return []string{
29520		MovCslgAtomInclude,
29521		MovCslgAtomExclude,
29522	}
29523}
29524
29525// When set to XDCAM, writes MPEG2 video streams into the QuickTime file using
29526// XDCAM fourcc codes. This increases compatibility with Apple editors and players,
29527// but may decrease compatibility with other players. Only applicable when the
29528// video codec is MPEG2.
29529const (
29530	// MovMpeg2FourCCControlXdcam is a MovMpeg2FourCCControl enum value
29531	MovMpeg2FourCCControlXdcam = "XDCAM"
29532
29533	// MovMpeg2FourCCControlMpeg is a MovMpeg2FourCCControl enum value
29534	MovMpeg2FourCCControlMpeg = "MPEG"
29535)
29536
29537// MovMpeg2FourCCControl_Values returns all elements of the MovMpeg2FourCCControl enum
29538func MovMpeg2FourCCControl_Values() []string {
29539	return []string{
29540		MovMpeg2FourCCControlXdcam,
29541		MovMpeg2FourCCControlMpeg,
29542	}
29543}
29544
29545// To make this output compatible with Omenon, keep the default value, OMNEON.
29546// Unless you need Omneon compatibility, set this value to NONE. When you keep
29547// the default value, OMNEON, MediaConvert increases the length of the edit
29548// list atom. This might cause file rejections when a recipient of the output
29549// file doesn't expct this extra padding.
29550const (
29551	// MovPaddingControlOmneon is a MovPaddingControl enum value
29552	MovPaddingControlOmneon = "OMNEON"
29553
29554	// MovPaddingControlNone is a MovPaddingControl enum value
29555	MovPaddingControlNone = "NONE"
29556)
29557
29558// MovPaddingControl_Values returns all elements of the MovPaddingControl enum
29559func MovPaddingControl_Values() []string {
29560	return []string{
29561		MovPaddingControlOmneon,
29562		MovPaddingControlNone,
29563	}
29564}
29565
29566// Always keep the default value (SELF_CONTAINED) for this setting.
29567const (
29568	// MovReferenceSelfContained is a MovReference enum value
29569	MovReferenceSelfContained = "SELF_CONTAINED"
29570
29571	// MovReferenceExternal is a MovReference enum value
29572	MovReferenceExternal = "EXTERNAL"
29573)
29574
29575// MovReference_Values returns all elements of the MovReference enum
29576func MovReference_Values() []string {
29577	return []string{
29578		MovReferenceSelfContained,
29579		MovReferenceExternal,
29580	}
29581}
29582
29583// Specify whether the service encodes this MP3 audio output with a constant
29584// bitrate (CBR) or a variable bitrate (VBR).
29585const (
29586	// Mp3RateControlModeCbr is a Mp3RateControlMode enum value
29587	Mp3RateControlModeCbr = "CBR"
29588
29589	// Mp3RateControlModeVbr is a Mp3RateControlMode enum value
29590	Mp3RateControlModeVbr = "VBR"
29591)
29592
29593// Mp3RateControlMode_Values returns all elements of the Mp3RateControlMode enum
29594func Mp3RateControlMode_Values() []string {
29595	return []string{
29596		Mp3RateControlModeCbr,
29597		Mp3RateControlModeVbr,
29598	}
29599}
29600
29601// When enabled, file composition times will start at zero, composition times
29602// in the 'ctts' (composition time to sample) box for B-frames will be negative,
29603// and a 'cslg' (composition shift least greatest) box will be included per
29604// 14496-1 amendment 1. This improves compatibility with Apple players and tools.
29605const (
29606	// Mp4CslgAtomInclude is a Mp4CslgAtom enum value
29607	Mp4CslgAtomInclude = "INCLUDE"
29608
29609	// Mp4CslgAtomExclude is a Mp4CslgAtom enum value
29610	Mp4CslgAtomExclude = "EXCLUDE"
29611)
29612
29613// Mp4CslgAtom_Values returns all elements of the Mp4CslgAtom enum
29614func Mp4CslgAtom_Values() []string {
29615	return []string{
29616		Mp4CslgAtomInclude,
29617		Mp4CslgAtomExclude,
29618	}
29619}
29620
29621// Inserts a free-space box immediately after the moov box.
29622const (
29623	// Mp4FreeSpaceBoxInclude is a Mp4FreeSpaceBox enum value
29624	Mp4FreeSpaceBoxInclude = "INCLUDE"
29625
29626	// Mp4FreeSpaceBoxExclude is a Mp4FreeSpaceBox enum value
29627	Mp4FreeSpaceBoxExclude = "EXCLUDE"
29628)
29629
29630// Mp4FreeSpaceBox_Values returns all elements of the Mp4FreeSpaceBox enum
29631func Mp4FreeSpaceBox_Values() []string {
29632	return []string{
29633		Mp4FreeSpaceBoxInclude,
29634		Mp4FreeSpaceBoxExclude,
29635	}
29636}
29637
29638// If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning
29639// of the archive as required for progressive downloading. Otherwise it is placed
29640// normally at the end.
29641const (
29642	// Mp4MoovPlacementProgressiveDownload is a Mp4MoovPlacement enum value
29643	Mp4MoovPlacementProgressiveDownload = "PROGRESSIVE_DOWNLOAD"
29644
29645	// Mp4MoovPlacementNormal is a Mp4MoovPlacement enum value
29646	Mp4MoovPlacementNormal = "NORMAL"
29647)
29648
29649// Mp4MoovPlacement_Values returns all elements of the Mp4MoovPlacement enum
29650func Mp4MoovPlacement_Values() []string {
29651	return []string{
29652		Mp4MoovPlacementProgressiveDownload,
29653		Mp4MoovPlacementNormal,
29654	}
29655}
29656
29657// Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH
29658// manifest with elements for embedded 608 captions. This markup isn't generally
29659// required, but some video players require it to discover and play embedded
29660// 608 captions. Keep the default value, Exclude (EXCLUDE), to leave these elements
29661// out. When you enable this setting, this is the markup that MediaConvert includes
29662// in your manifest:
29663const (
29664	// MpdAccessibilityCaptionHintsInclude is a MpdAccessibilityCaptionHints enum value
29665	MpdAccessibilityCaptionHintsInclude = "INCLUDE"
29666
29667	// MpdAccessibilityCaptionHintsExclude is a MpdAccessibilityCaptionHints enum value
29668	MpdAccessibilityCaptionHintsExclude = "EXCLUDE"
29669)
29670
29671// MpdAccessibilityCaptionHints_Values returns all elements of the MpdAccessibilityCaptionHints enum
29672func MpdAccessibilityCaptionHints_Values() []string {
29673	return []string{
29674		MpdAccessibilityCaptionHintsInclude,
29675		MpdAccessibilityCaptionHintsExclude,
29676	}
29677}
29678
29679// Specify this setting only when your output will be consumed by a downstream
29680// repackaging workflow that is sensitive to very small duration differences
29681// between video and audio. For this situation, choose Match video duration
29682// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
29683// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
29684// MediaConvert pads the output audio streams with silence or trims them to
29685// ensure that the total duration of each audio stream is at least as long as
29686// the total duration of the video stream. After padding or trimming, the audio
29687// stream duration is no more than one frame longer than the video stream. MediaConvert
29688// applies audio padding or trimming only to the end of the last segment of
29689// the output. For unsegmented outputs, MediaConvert adds padding only to the
29690// end of the file. When you keep the default value, any minor discrepancies
29691// between audio and video duration will depend on your output audio codec.
29692const (
29693	// MpdAudioDurationDefaultCodecDuration is a MpdAudioDuration enum value
29694	MpdAudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION"
29695
29696	// MpdAudioDurationMatchVideoDuration is a MpdAudioDuration enum value
29697	MpdAudioDurationMatchVideoDuration = "MATCH_VIDEO_DURATION"
29698)
29699
29700// MpdAudioDuration_Values returns all elements of the MpdAudioDuration enum
29701func MpdAudioDuration_Values() []string {
29702	return []string{
29703		MpdAudioDurationDefaultCodecDuration,
29704		MpdAudioDurationMatchVideoDuration,
29705	}
29706}
29707
29708// Use this setting only in DASH output groups that include sidecar TTML or
29709// IMSC captions. You specify sidecar captions in a separate output from your
29710// audio and video. Choose Raw (RAW) for captions in a single XML file in a
29711// raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in
29712// XML format contained within fragmented MP4 files. This set of fragmented
29713// MP4 files is separate from your video and audio fragmented MP4 files.
29714const (
29715	// MpdCaptionContainerTypeRaw is a MpdCaptionContainerType enum value
29716	MpdCaptionContainerTypeRaw = "RAW"
29717
29718	// MpdCaptionContainerTypeFragmentedMp4 is a MpdCaptionContainerType enum value
29719	MpdCaptionContainerTypeFragmentedMp4 = "FRAGMENTED_MP4"
29720)
29721
29722// MpdCaptionContainerType_Values returns all elements of the MpdCaptionContainerType enum
29723func MpdCaptionContainerType_Values() []string {
29724	return []string{
29725		MpdCaptionContainerTypeRaw,
29726		MpdCaptionContainerTypeFragmentedMp4,
29727	}
29728}
29729
29730// Use this setting only when you specify SCTE-35 markers from ESAM. Choose
29731// INSERT to put SCTE-35 markers in this output at the insertion points that
29732// you specify in an ESAM XML document. Provide the document in the setting
29733// SCC XML (sccXml).
29734const (
29735	// MpdScte35EsamInsert is a MpdScte35Esam enum value
29736	MpdScte35EsamInsert = "INSERT"
29737
29738	// MpdScte35EsamNone is a MpdScte35Esam enum value
29739	MpdScte35EsamNone = "NONE"
29740)
29741
29742// MpdScte35Esam_Values returns all elements of the MpdScte35Esam enum
29743func MpdScte35Esam_Values() []string {
29744	return []string{
29745		MpdScte35EsamInsert,
29746		MpdScte35EsamNone,
29747	}
29748}
29749
29750// Ignore this setting unless you have SCTE-35 markers in your input video file.
29751// Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear
29752// in your input to also appear in this output. Choose None (NONE) if you don't
29753// want those SCTE-35 markers in this output.
29754const (
29755	// MpdScte35SourcePassthrough is a MpdScte35Source enum value
29756	MpdScte35SourcePassthrough = "PASSTHROUGH"
29757
29758	// MpdScte35SourceNone is a MpdScte35Source enum value
29759	MpdScte35SourceNone = "NONE"
29760)
29761
29762// MpdScte35Source_Values returns all elements of the MpdScte35Source enum
29763func MpdScte35Source_Values() []string {
29764	return []string{
29765		MpdScte35SourcePassthrough,
29766		MpdScte35SourceNone,
29767	}
29768}
29769
29770// Specify the strength of any adaptive quantization filters that you enable.
29771// The value that you choose here applies to the following settings: Spatial
29772// adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive
29773// quantization (temporalAdaptiveQuantization).
29774const (
29775	// Mpeg2AdaptiveQuantizationOff is a Mpeg2AdaptiveQuantization enum value
29776	Mpeg2AdaptiveQuantizationOff = "OFF"
29777
29778	// Mpeg2AdaptiveQuantizationLow is a Mpeg2AdaptiveQuantization enum value
29779	Mpeg2AdaptiveQuantizationLow = "LOW"
29780
29781	// Mpeg2AdaptiveQuantizationMedium is a Mpeg2AdaptiveQuantization enum value
29782	Mpeg2AdaptiveQuantizationMedium = "MEDIUM"
29783
29784	// Mpeg2AdaptiveQuantizationHigh is a Mpeg2AdaptiveQuantization enum value
29785	Mpeg2AdaptiveQuantizationHigh = "HIGH"
29786)
29787
29788// Mpeg2AdaptiveQuantization_Values returns all elements of the Mpeg2AdaptiveQuantization enum
29789func Mpeg2AdaptiveQuantization_Values() []string {
29790	return []string{
29791		Mpeg2AdaptiveQuantizationOff,
29792		Mpeg2AdaptiveQuantizationLow,
29793		Mpeg2AdaptiveQuantizationMedium,
29794		Mpeg2AdaptiveQuantizationHigh,
29795	}
29796}
29797
29798// Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output.
29799const (
29800	// Mpeg2CodecLevelAuto is a Mpeg2CodecLevel enum value
29801	Mpeg2CodecLevelAuto = "AUTO"
29802
29803	// Mpeg2CodecLevelLow is a Mpeg2CodecLevel enum value
29804	Mpeg2CodecLevelLow = "LOW"
29805
29806	// Mpeg2CodecLevelMain is a Mpeg2CodecLevel enum value
29807	Mpeg2CodecLevelMain = "MAIN"
29808
29809	// Mpeg2CodecLevelHigh1440 is a Mpeg2CodecLevel enum value
29810	Mpeg2CodecLevelHigh1440 = "HIGH1440"
29811
29812	// Mpeg2CodecLevelHigh is a Mpeg2CodecLevel enum value
29813	Mpeg2CodecLevelHigh = "HIGH"
29814)
29815
29816// Mpeg2CodecLevel_Values returns all elements of the Mpeg2CodecLevel enum
29817func Mpeg2CodecLevel_Values() []string {
29818	return []string{
29819		Mpeg2CodecLevelAuto,
29820		Mpeg2CodecLevelLow,
29821		Mpeg2CodecLevelMain,
29822		Mpeg2CodecLevelHigh1440,
29823		Mpeg2CodecLevelHigh,
29824	}
29825}
29826
29827// Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output.
29828const (
29829	// Mpeg2CodecProfileMain is a Mpeg2CodecProfile enum value
29830	Mpeg2CodecProfileMain = "MAIN"
29831
29832	// Mpeg2CodecProfileProfile422 is a Mpeg2CodecProfile enum value
29833	Mpeg2CodecProfileProfile422 = "PROFILE_422"
29834)
29835
29836// Mpeg2CodecProfile_Values returns all elements of the Mpeg2CodecProfile enum
29837func Mpeg2CodecProfile_Values() []string {
29838	return []string{
29839		Mpeg2CodecProfileMain,
29840		Mpeg2CodecProfileProfile422,
29841	}
29842}
29843
29844// Choose Adaptive to improve subjective video quality for high-motion content.
29845// This will cause the service to use fewer B-frames (which infer information
29846// based on other frames) for high-motion portions of the video and more B-frames
29847// for low-motion portions. The maximum number of B-frames is limited by the
29848// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).
29849const (
29850	// Mpeg2DynamicSubGopAdaptive is a Mpeg2DynamicSubGop enum value
29851	Mpeg2DynamicSubGopAdaptive = "ADAPTIVE"
29852
29853	// Mpeg2DynamicSubGopStatic is a Mpeg2DynamicSubGop enum value
29854	Mpeg2DynamicSubGopStatic = "STATIC"
29855)
29856
29857// Mpeg2DynamicSubGop_Values returns all elements of the Mpeg2DynamicSubGop enum
29858func Mpeg2DynamicSubGop_Values() []string {
29859	return []string{
29860		Mpeg2DynamicSubGopAdaptive,
29861		Mpeg2DynamicSubGopStatic,
29862	}
29863}
29864
// If you are using the console, use the Framerate setting to specify the frame
// rate for this output. If you want to keep the same frame rate as the input
// video, choose Follow source. If you want to do frame rate conversion, choose
// a frame rate from the dropdown list or choose Custom. The framerates shown
// in the dropdown list are decimal approximations of fractions. If you choose
// Custom, specify your frame rate as a fraction. If you are creating your transcoding
// job specification as a JSON file without the console, use FramerateControl
// to specify which value the service uses for the frame rate for this output.
// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
// from the input. Choose SPECIFIED if you want the service to use the frame
// rate you specify in the settings FramerateNumerator and FramerateDenominator.
const (
	// Mpeg2FramerateControlInitializeFromSource is a Mpeg2FramerateControl enum value
	Mpeg2FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Mpeg2FramerateControlSpecified is a Mpeg2FramerateControl enum value
	Mpeg2FramerateControlSpecified = "SPECIFIED"
)

// Mpeg2FramerateControl_Values returns all elements of the Mpeg2FramerateControl enum
func Mpeg2FramerateControl_Values() []string {
	return []string{Mpeg2FramerateControlInitializeFromSource, Mpeg2FramerateControlSpecified}
}
29891
// Choose the method that you want MediaConvert to use when increasing or decreasing
// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
// smooth picture, but might introduce undesirable video artifacts. For complex
// frame rate conversions, especially if your source video has already been
// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
// motion-compensated interpolation. FrameFormer chooses the best conversion
// method frame by frame. Note that using FrameFormer increases the transcoding
// time and incurs a significant add-on cost.
const (
	// Mpeg2FramerateConversionAlgorithmDuplicateDrop is a Mpeg2FramerateConversionAlgorithm enum value
	Mpeg2FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// Mpeg2FramerateConversionAlgorithmInterpolate is a Mpeg2FramerateConversionAlgorithm enum value
	Mpeg2FramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// Mpeg2FramerateConversionAlgorithmFrameformer is a Mpeg2FramerateConversionAlgorithm enum value
	Mpeg2FramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// Mpeg2FramerateConversionAlgorithm_Values returns all elements of the Mpeg2FramerateConversionAlgorithm enum
func Mpeg2FramerateConversionAlgorithm_Values() []string {
	var vals []string
	vals = append(vals,
		Mpeg2FramerateConversionAlgorithmDuplicateDrop,
		Mpeg2FramerateConversionAlgorithmInterpolate,
		Mpeg2FramerateConversionAlgorithmFrameformer,
	)
	return vals
}
29921
// Indicates if the GOP Size in MPEG2 is specified in frames or seconds. If
// seconds the system will convert the GOP Size into a frame count at run time.
const (
	// Mpeg2GopSizeUnitsFrames is a Mpeg2GopSizeUnits enum value
	Mpeg2GopSizeUnitsFrames = "FRAMES"

	// Mpeg2GopSizeUnitsSeconds is a Mpeg2GopSizeUnits enum value
	Mpeg2GopSizeUnitsSeconds = "SECONDS"
)

// Mpeg2GopSizeUnits_Values returns all elements of the Mpeg2GopSizeUnits enum
func Mpeg2GopSizeUnits_Values() []string {
	return []string{Mpeg2GopSizeUnitsFrames, Mpeg2GopSizeUnitsSeconds}
}
29939
// Choose the scan line type for the output. Keep the default value, Progressive
// (PROGRESSIVE) to create a progressive output, regardless of the scan type
// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
// to create an output that's interlaced with the same field polarity throughout.
// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
// to produce outputs with the same field polarity as the source. For jobs that
// have multiple inputs, the output field polarity might change over the course
// of the output. Follow behavior depends on the input scan type. If the source
// is interlaced, the output will be interlaced with the same polarity as the
// source. If the source is progressive, the output will be interlaced with
// top field bottom field first, depending on which of the Follow options you
// choose.
const (
	// Mpeg2InterlaceModeProgressive is a Mpeg2InterlaceMode enum value
	Mpeg2InterlaceModeProgressive = "PROGRESSIVE"

	// Mpeg2InterlaceModeTopField is a Mpeg2InterlaceMode enum value
	Mpeg2InterlaceModeTopField = "TOP_FIELD"

	// Mpeg2InterlaceModeBottomField is a Mpeg2InterlaceMode enum value
	Mpeg2InterlaceModeBottomField = "BOTTOM_FIELD"

	// Mpeg2InterlaceModeFollowTopField is a Mpeg2InterlaceMode enum value
	Mpeg2InterlaceModeFollowTopField = "FOLLOW_TOP_FIELD"

	// Mpeg2InterlaceModeFollowBottomField is a Mpeg2InterlaceMode enum value
	Mpeg2InterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD"
)

// Mpeg2InterlaceMode_Values returns all elements of the Mpeg2InterlaceMode enum
func Mpeg2InterlaceMode_Values() []string {
	var vals []string
	vals = append(vals,
		Mpeg2InterlaceModeProgressive,
		Mpeg2InterlaceModeTopField,
		Mpeg2InterlaceModeBottomField,
		Mpeg2InterlaceModeFollowTopField,
		Mpeg2InterlaceModeFollowBottomField,
	)
	return vals
}
29979
// Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision
// for intra-block DC coefficients. If you choose the value auto, the service
// will automatically select the precision based on the per-frame compression
// ratio.
const (
	// Mpeg2IntraDcPrecisionAuto is a Mpeg2IntraDcPrecision enum value
	Mpeg2IntraDcPrecisionAuto = "AUTO"

	// Mpeg2IntraDcPrecisionIntraDcPrecision8 is a Mpeg2IntraDcPrecision enum value
	Mpeg2IntraDcPrecisionIntraDcPrecision8 = "INTRA_DC_PRECISION_8"

	// Mpeg2IntraDcPrecisionIntraDcPrecision9 is a Mpeg2IntraDcPrecision enum value
	Mpeg2IntraDcPrecisionIntraDcPrecision9 = "INTRA_DC_PRECISION_9"

	// Mpeg2IntraDcPrecisionIntraDcPrecision10 is a Mpeg2IntraDcPrecision enum value
	Mpeg2IntraDcPrecisionIntraDcPrecision10 = "INTRA_DC_PRECISION_10"

	// Mpeg2IntraDcPrecisionIntraDcPrecision11 is a Mpeg2IntraDcPrecision enum value
	Mpeg2IntraDcPrecisionIntraDcPrecision11 = "INTRA_DC_PRECISION_11"
)

// Mpeg2IntraDcPrecision_Values returns all elements of the Mpeg2IntraDcPrecision enum
func Mpeg2IntraDcPrecision_Values() []string {
	var vals []string
	vals = append(vals,
		Mpeg2IntraDcPrecisionAuto,
		Mpeg2IntraDcPrecisionIntraDcPrecision8,
		Mpeg2IntraDcPrecisionIntraDcPrecision9,
		Mpeg2IntraDcPrecisionIntraDcPrecision10,
		Mpeg2IntraDcPrecisionIntraDcPrecision11,
	)
	return vals
}
30011
// Optional. Specify how the service determines the pixel aspect ratio (PAR)
// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
// uses the PAR from your input video for your output. To specify a different
// PAR in the console, choose any value other than Follow source. To specify
// a different PAR by editing the JSON job specification, choose SPECIFIED.
// When you choose SPECIFIED for this setting, you must also specify values
// for the parNumerator and parDenominator settings.
const (
	// Mpeg2ParControlInitializeFromSource is a Mpeg2ParControl enum value
	Mpeg2ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Mpeg2ParControlSpecified is a Mpeg2ParControl enum value
	Mpeg2ParControlSpecified = "SPECIFIED"
)

// Mpeg2ParControl_Values returns all elements of the Mpeg2ParControl enum
func Mpeg2ParControl_Values() []string {
	return []string{Mpeg2ParControlInitializeFromSource, Mpeg2ParControlSpecified}
}
30034
// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
// want to trade off encoding speed for output video quality. The default behavior
// is faster, lower quality, single-pass encoding.
const (
	// Mpeg2QualityTuningLevelSinglePass is a Mpeg2QualityTuningLevel enum value
	Mpeg2QualityTuningLevelSinglePass = "SINGLE_PASS"

	// Mpeg2QualityTuningLevelMultiPass is a Mpeg2QualityTuningLevel enum value
	Mpeg2QualityTuningLevelMultiPass = "MULTI_PASS"
)

// Mpeg2QualityTuningLevel_Values returns all elements of the Mpeg2QualityTuningLevel enum
func Mpeg2QualityTuningLevel_Values() []string {
	return []string{Mpeg2QualityTuningLevelSinglePass, Mpeg2QualityTuningLevelMultiPass}
}
30053
// Use Rate control mode (Mpeg2RateControlMode) to specify whether the bitrate
// is variable (vbr) or constant (cbr).
const (
	// Mpeg2RateControlModeVbr is a Mpeg2RateControlMode enum value
	Mpeg2RateControlModeVbr = "VBR"

	// Mpeg2RateControlModeCbr is a Mpeg2RateControlMode enum value
	Mpeg2RateControlModeCbr = "CBR"
)

// Mpeg2RateControlMode_Values returns all elements of the Mpeg2RateControlMode enum
func Mpeg2RateControlMode_Values() []string {
	return []string{Mpeg2RateControlModeVbr, Mpeg2RateControlModeCbr}
}
30071
// Use this setting for interlaced outputs, when your output frame rate is half
// of your input frame rate. In this situation, choose Optimized interlacing
// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
// case, each progressive frame from the input corresponds to an interlaced
// field in the output. Keep the default value, Basic interlacing (INTERLACED),
// for all other output frame rates. With basic interlacing, MediaConvert performs
// any frame rate conversion first and then interlaces the frames. When you
// choose Optimized interlacing and you set your output frame rate to a value
// that isn't suitable for optimized interlacing, MediaConvert automatically
// falls back to basic interlacing. Required settings: To use optimized interlacing,
// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
// use optimized interlacing for hard telecine outputs. You must also set Interlace
// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
const (
	// Mpeg2ScanTypeConversionModeInterlaced is a Mpeg2ScanTypeConversionMode enum value
	Mpeg2ScanTypeConversionModeInterlaced = "INTERLACED"

	// Mpeg2ScanTypeConversionModeInterlacedOptimize is a Mpeg2ScanTypeConversionMode enum value
	Mpeg2ScanTypeConversionModeInterlacedOptimize = "INTERLACED_OPTIMIZE"
)

// Mpeg2ScanTypeConversionMode_Values returns all elements of the Mpeg2ScanTypeConversionMode enum
func Mpeg2ScanTypeConversionMode_Values() []string {
	return []string{Mpeg2ScanTypeConversionModeInterlaced, Mpeg2ScanTypeConversionModeInterlacedOptimize}
}
30100
// Enable this setting to insert I-frames at scene changes that the service
// automatically detects. This improves video quality and is enabled by default.
const (
	// Mpeg2SceneChangeDetectDisabled is a Mpeg2SceneChangeDetect enum value
	Mpeg2SceneChangeDetectDisabled = "DISABLED"

	// Mpeg2SceneChangeDetectEnabled is a Mpeg2SceneChangeDetect enum value
	Mpeg2SceneChangeDetectEnabled = "ENABLED"
)

// Mpeg2SceneChangeDetect_Values returns all elements of the Mpeg2SceneChangeDetect enum
func Mpeg2SceneChangeDetect_Values() []string {
	return []string{Mpeg2SceneChangeDetectDisabled, Mpeg2SceneChangeDetectEnabled}
}
30118
// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
// second (fps). Enable slow PAL to create a 25 fps output. When you enable
// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
// your audio to keep it synchronized with the video. Note that enabling this
// setting will slightly reduce the duration of your video. Required settings:
// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
// 1.
const (
	// Mpeg2SlowPalDisabled is a Mpeg2SlowPal enum value
	Mpeg2SlowPalDisabled = "DISABLED"

	// Mpeg2SlowPalEnabled is a Mpeg2SlowPal enum value
	Mpeg2SlowPalEnabled = "ENABLED"
)

// Mpeg2SlowPal_Values returns all elements of the Mpeg2SlowPal enum
func Mpeg2SlowPal_Values() []string {
	return []string{Mpeg2SlowPalDisabled, Mpeg2SlowPalEnabled}
}
30142
// Keep the default value, Enabled (ENABLED), to adjust quantization within
// each frame based on spatial variation of content complexity. When you enable
// this feature, the encoder uses fewer bits on areas that can sustain more
// distortion with no noticeable visual degradation and uses more bits on areas
// where any small distortion will be noticeable. For example, complex textured
// blocks are encoded with fewer bits and smooth textured blocks are encoded
// with more bits. Enabling this feature will almost always improve your video
// quality. Note, though, that this feature doesn't take into account where
// the viewer's attention is likely to be. If viewers are likely to be focusing
// their attention on a part of the screen with a lot of complex texture, you
// might choose to disable this feature. Related setting: When you enable spatial
// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization)
// depending on your content. For homogeneous content, such as cartoons and
// video games, set it to Low. For content with a wider variety of textures,
// set it to High or Higher.
const (
	// Mpeg2SpatialAdaptiveQuantizationDisabled is a Mpeg2SpatialAdaptiveQuantization enum value
	Mpeg2SpatialAdaptiveQuantizationDisabled = "DISABLED"

	// Mpeg2SpatialAdaptiveQuantizationEnabled is a Mpeg2SpatialAdaptiveQuantization enum value
	Mpeg2SpatialAdaptiveQuantizationEnabled = "ENABLED"
)

// Mpeg2SpatialAdaptiveQuantization_Values returns all elements of the Mpeg2SpatialAdaptiveQuantization enum
func Mpeg2SpatialAdaptiveQuantization_Values() []string {
	return []string{Mpeg2SpatialAdaptiveQuantizationDisabled, Mpeg2SpatialAdaptiveQuantizationEnabled}
}
30173
// Specify whether this output's video uses the D10 syntax. Keep the default
// value to not use the syntax. Related settings: When you choose D10 (D_10)
// for your MXF profile (profile), you must also set this value to to D10 (D_10).
const (
	// Mpeg2SyntaxDefault is a Mpeg2Syntax enum value
	Mpeg2SyntaxDefault = "DEFAULT"

	// Mpeg2SyntaxD10 is a Mpeg2Syntax enum value
	Mpeg2SyntaxD10 = "D_10"
)

// Mpeg2Syntax_Values returns all elements of the Mpeg2Syntax enum
func Mpeg2Syntax_Values() []string {
	return []string{Mpeg2SyntaxDefault, Mpeg2SyntaxD10}
}
30192
// When you do frame rate conversion from 23.976 frames per second (fps) to
// 29.97 fps, and your output scan type is interlaced, you can optionally enable
// hard or soft telecine to create a smoother picture. Hard telecine (HARD)
// produces a 29.97i output. Soft telecine (SOFT) produces an output with a
// 23.976 output that signals to the video player device to do the conversion
// during play back. When you keep the default value, None (NONE), MediaConvert
// does a standard frame rate conversion to 29.97 without doing anything with
// the field polarity to create a smoother picture.
const (
	// Mpeg2TelecineNone is a Mpeg2Telecine enum value
	Mpeg2TelecineNone = "NONE"

	// Mpeg2TelecineSoft is a Mpeg2Telecine enum value
	Mpeg2TelecineSoft = "SOFT"

	// Mpeg2TelecineHard is a Mpeg2Telecine enum value
	Mpeg2TelecineHard = "HARD"
)

// Mpeg2Telecine_Values returns all elements of the Mpeg2Telecine enum
func Mpeg2Telecine_Values() []string {
	var vals []string
	vals = append(vals, Mpeg2TelecineNone, Mpeg2TelecineSoft, Mpeg2TelecineHard)
	return vals
}
30220
// Keep the default value, Enabled (ENABLED), to adjust quantization within
// each frame based on temporal variation of content complexity. When you enable
// this feature, the encoder uses fewer bits on areas of the frame that aren't
// moving and uses more bits on complex objects with sharp edges that move a
// lot. For example, this feature improves the readability of text tickers on
// newscasts and scoreboards on sports matches. Enabling this feature will almost
// always improve your video quality. Note, though, that this feature doesn't
// take into account where the viewer's attention is likely to be. If viewers
// are likely to be focusing their attention on a part of the screen that doesn't
// have moving objects with sharp edges, such as sports athletes' faces, you
// might choose to disable this feature. Related setting: When you enable temporal
// quantization, adjust the strength of the filter with the setting Adaptive
// quantization (adaptiveQuantization).
const (
	// Mpeg2TemporalAdaptiveQuantizationDisabled is a Mpeg2TemporalAdaptiveQuantization enum value
	Mpeg2TemporalAdaptiveQuantizationDisabled = "DISABLED"

	// Mpeg2TemporalAdaptiveQuantizationEnabled is a Mpeg2TemporalAdaptiveQuantization enum value
	Mpeg2TemporalAdaptiveQuantizationEnabled = "ENABLED"
)

// Mpeg2TemporalAdaptiveQuantization_Values returns all elements of the Mpeg2TemporalAdaptiveQuantization enum
func Mpeg2TemporalAdaptiveQuantization_Values() []string {
	return []string{Mpeg2TemporalAdaptiveQuantizationDisabled, Mpeg2TemporalAdaptiveQuantizationEnabled}
}
30249
// COMBINE_DUPLICATE_STREAMS combines identical audio encoding settings across
// a Microsoft Smooth output group into a single audio stream.
const (
	// MsSmoothAudioDeduplicationCombineDuplicateStreams is a MsSmoothAudioDeduplication enum value
	MsSmoothAudioDeduplicationCombineDuplicateStreams = "COMBINE_DUPLICATE_STREAMS"

	// MsSmoothAudioDeduplicationNone is a MsSmoothAudioDeduplication enum value
	MsSmoothAudioDeduplicationNone = "NONE"
)

// MsSmoothAudioDeduplication_Values returns all elements of the MsSmoothAudioDeduplication enum
func MsSmoothAudioDeduplication_Values() []string {
	return []string{MsSmoothAudioDeduplicationCombineDuplicateStreams, MsSmoothAudioDeduplicationNone}
}
30267
// Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding
// format for the server and client manifest. Valid options are utf8 and utf16.
const (
	// MsSmoothManifestEncodingUtf8 is a MsSmoothManifestEncoding enum value
	MsSmoothManifestEncodingUtf8 = "UTF8"

	// MsSmoothManifestEncodingUtf16 is a MsSmoothManifestEncoding enum value
	MsSmoothManifestEncodingUtf16 = "UTF16"
)

// MsSmoothManifestEncoding_Values returns all elements of the MsSmoothManifestEncoding enum
func MsSmoothManifestEncoding_Values() []string {
	return []string{MsSmoothManifestEncodingUtf8, MsSmoothManifestEncodingUtf16}
}
30285
// Optional. When you have AFD signaling set up in your output video stream,
// use this setting to choose whether to also include it in the MXF wrapper.
// Choose Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper.
// Choose Copy from video stream (COPY_FROM_VIDEO) to copy the AFD values from
// the video stream for this output to the MXF wrapper. Regardless of which
// option you choose, the AFD values remain in the video stream. Related settings:
// To set up your output to include or exclude AFD values, see AfdSignaling,
// under VideoDescription. On the console, find AFD signaling under the output's
// video encoding settings.
const (
	// MxfAfdSignalingNoCopy is a MxfAfdSignaling enum value
	MxfAfdSignalingNoCopy = "NO_COPY"

	// MxfAfdSignalingCopyFromVideo is a MxfAfdSignaling enum value
	MxfAfdSignalingCopyFromVideo = "COPY_FROM_VIDEO"
)

// MxfAfdSignaling_Values returns all elements of the MxfAfdSignaling enum
func MxfAfdSignaling_Values() []string {
	return []string{MxfAfdSignalingNoCopy, MxfAfdSignalingCopyFromVideo}
}
30310
// Specify the MXF profile, also called shim, for this output. When you choose
// Auto, MediaConvert chooses a profile based on the video codec and resolution.
// For a list of codecs supported with each MXF profile, see https://docs.aws.amazon.com/mediaconvert/latest/ug/codecs-supported-with-each-mxf-profile.html.
// For more information about the automatic selection behavior, see https://docs.aws.amazon.com/mediaconvert/latest/ug/default-automatic-selection-of-mxf-profiles.html.
const (
	// MxfProfileD10 is a MxfProfile enum value
	MxfProfileD10 = "D_10"

	// MxfProfileXdcam is a MxfProfile enum value
	MxfProfileXdcam = "XDCAM"

	// MxfProfileOp1a is a MxfProfile enum value
	MxfProfileOp1a = "OP1A"

	// MxfProfileXavc is a MxfProfile enum value
	MxfProfileXavc = "XAVC"
)

// MxfProfile_Values returns all elements of the MxfProfile enum
func MxfProfile_Values() []string {
	var vals []string
	vals = append(vals, MxfProfileD10, MxfProfileXdcam, MxfProfileOp1a, MxfProfileXavc)
	return vals
}
30338
// To create an output that complies with the XAVC file format guidelines for
// interoperability, keep the default value, Drop frames for compliance (DROP_FRAMES_FOR_COMPLIANCE).
// To include all frames from your input in this output, keep the default setting,
// Allow any duration (ALLOW_ANY_DURATION). The number of frames that MediaConvert
// excludes when you set this to Drop frames for compliance depends on the output
// frame rate and duration.
const (
	// MxfXavcDurationModeAllowAnyDuration is a MxfXavcDurationMode enum value
	MxfXavcDurationModeAllowAnyDuration = "ALLOW_ANY_DURATION"

	// MxfXavcDurationModeDropFramesForCompliance is a MxfXavcDurationMode enum value
	MxfXavcDurationModeDropFramesForCompliance = "DROP_FRAMES_FOR_COMPLIANCE"
)

// MxfXavcDurationMode_Values returns all elements of the MxfXavcDurationMode enum
func MxfXavcDurationMode_Values() []string {
	return []string{MxfXavcDurationModeAllowAnyDuration, MxfXavcDurationModeDropFramesForCompliance}
}
30360
// Choose the type of Nielsen watermarks that you want in your outputs. When
// you choose NAES 2 and NW (NAES2_AND_NW), you must provide a value for the
// setting SID (sourceId). When you choose CBET (CBET), you must provide a value
// for the setting CSID (cbetSourceId). When you choose NAES 2, NW, and CBET
// (NAES2_AND_NW_AND_CBET), you must provide values for both of these settings.
const (
	// NielsenActiveWatermarkProcessTypeNaes2AndNw is a NielsenActiveWatermarkProcessType enum value
	NielsenActiveWatermarkProcessTypeNaes2AndNw = "NAES2_AND_NW"

	// NielsenActiveWatermarkProcessTypeCbet is a NielsenActiveWatermarkProcessType enum value
	NielsenActiveWatermarkProcessTypeCbet = "CBET"

	// NielsenActiveWatermarkProcessTypeNaes2AndNwAndCbet is a NielsenActiveWatermarkProcessType enum value
	NielsenActiveWatermarkProcessTypeNaes2AndNwAndCbet = "NAES2_AND_NW_AND_CBET"
)

// NielsenActiveWatermarkProcessType_Values returns all elements of the NielsenActiveWatermarkProcessType enum
func NielsenActiveWatermarkProcessType_Values() []string {
	var vals []string
	vals = append(vals,
		NielsenActiveWatermarkProcessTypeNaes2AndNw,
		NielsenActiveWatermarkProcessTypeCbet,
		NielsenActiveWatermarkProcessTypeNaes2AndNwAndCbet,
	)
	return vals
}
30385
// Required. Specify whether your source content already contains Nielsen non-linear
// watermarks. When you set this value to Watermarked (WATERMARKED), the service
// fails the job. Nielsen requires that you add non-linear watermarking to only
// clean content that doesn't already have non-linear Nielsen watermarks.
const (
	// NielsenSourceWatermarkStatusTypeClean is a NielsenSourceWatermarkStatusType enum value
	NielsenSourceWatermarkStatusTypeClean = "CLEAN"

	// NielsenSourceWatermarkStatusTypeWatermarked is a NielsenSourceWatermarkStatusType enum value
	NielsenSourceWatermarkStatusTypeWatermarked = "WATERMARKED"
)

// NielsenSourceWatermarkStatusType_Values returns all elements of the NielsenSourceWatermarkStatusType enum
func NielsenSourceWatermarkStatusType_Values() []string {
	return []string{NielsenSourceWatermarkStatusTypeClean, NielsenSourceWatermarkStatusTypeWatermarked}
}
30405
// To create assets that have the same TIC values in each audio track, keep
// the default value Share TICs (SAME_TICS_PER_TRACK). To create assets that
// have unique TIC values for each audio track, choose Use unique TICs (RESERVE_UNIQUE_TICS_PER_TRACK).
const (
	// NielsenUniqueTicPerAudioTrackTypeReserveUniqueTicsPerTrack is a NielsenUniqueTicPerAudioTrackType enum value
	NielsenUniqueTicPerAudioTrackTypeReserveUniqueTicsPerTrack = "RESERVE_UNIQUE_TICS_PER_TRACK"

	// NielsenUniqueTicPerAudioTrackTypeSameTicsPerTrack is a NielsenUniqueTicPerAudioTrackType enum value
	NielsenUniqueTicPerAudioTrackTypeSameTicsPerTrack = "SAME_TICS_PER_TRACK"
)

// NielsenUniqueTicPerAudioTrackType_Values returns all elements of the NielsenUniqueTicPerAudioTrackType enum
func NielsenUniqueTicPerAudioTrackType_Values() []string {
	return []string{NielsenUniqueTicPerAudioTrackTypeReserveUniqueTicsPerTrack, NielsenUniqueTicPerAudioTrackTypeSameTicsPerTrack}
}
30424
// Optional. When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL),
// you can use this setting to apply sharpening. The default behavior, Auto
// (AUTO), allows the transcoder to determine whether to apply filtering, depending
// on input type and quality. When you set Noise reducer to Temporal, your output
// bandwidth is reduced. When Post temporal sharpening is also enabled, that
// bandwidth reduction is smaller.
const (
	// NoiseFilterPostTemporalSharpeningDisabled is a NoiseFilterPostTemporalSharpening enum value
	NoiseFilterPostTemporalSharpeningDisabled = "DISABLED"

	// NoiseFilterPostTemporalSharpeningEnabled is a NoiseFilterPostTemporalSharpening enum value
	NoiseFilterPostTemporalSharpeningEnabled = "ENABLED"

	// NoiseFilterPostTemporalSharpeningAuto is a NoiseFilterPostTemporalSharpening enum value
	NoiseFilterPostTemporalSharpeningAuto = "AUTO"
)

// NoiseFilterPostTemporalSharpening_Values returns all elements of the NoiseFilterPostTemporalSharpening enum
func NoiseFilterPostTemporalSharpening_Values() []string {
	var vals []string
	vals = append(vals,
		NoiseFilterPostTemporalSharpeningDisabled,
		NoiseFilterPostTemporalSharpeningEnabled,
		NoiseFilterPostTemporalSharpeningAuto,
	)
	return vals
}
30450
// Use Noise reducer filter (NoiseReducerFilter) to select one of the following
// spatial image filtering functions. To use this setting, you must also enable
// Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing
// noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution
// filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain
// filtering based on JND principles. * Temporal optimizes video quality for
// complex motion.
const (
	// NoiseReducerFilterBilateral is a NoiseReducerFilter enum value
	NoiseReducerFilterBilateral = "BILATERAL"

	// NoiseReducerFilterMean is a NoiseReducerFilter enum value
	NoiseReducerFilterMean = "MEAN"

	// NoiseReducerFilterGaussian is a NoiseReducerFilter enum value
	NoiseReducerFilterGaussian = "GAUSSIAN"

	// NoiseReducerFilterLanczos is a NoiseReducerFilter enum value
	NoiseReducerFilterLanczos = "LANCZOS"

	// NoiseReducerFilterSharpen is a NoiseReducerFilter enum value
	NoiseReducerFilterSharpen = "SHARPEN"

	// NoiseReducerFilterConserve is a NoiseReducerFilter enum value
	NoiseReducerFilterConserve = "CONSERVE"

	// NoiseReducerFilterSpatial is a NoiseReducerFilter enum value
	NoiseReducerFilterSpatial = "SPATIAL"

	// NoiseReducerFilterTemporal is a NoiseReducerFilter enum value
	NoiseReducerFilterTemporal = "TEMPORAL"
)

// NoiseReducerFilter_Values returns all elements of the NoiseReducerFilter enum
func NoiseReducerFilter_Values() []string {
	var vals []string
	vals = append(vals,
		NoiseReducerFilterBilateral,
		NoiseReducerFilterMean,
		NoiseReducerFilterGaussian,
		NoiseReducerFilterLanczos,
		NoiseReducerFilterSharpen,
		NoiseReducerFilterConserve,
		NoiseReducerFilterSpatial,
		NoiseReducerFilterTemporal,
	)
	return vals
}
30497
// Optional. When you request lists of resources, you can specify whether they
// are sorted in ASCENDING or DESCENDING order. Default varies by resource.
const (
	// OrderAscending is a Order enum value
	OrderAscending = "ASCENDING"

	// OrderDescending is a Order enum value
	OrderDescending = "DESCENDING"
)

// Order_Values returns all elements of the Order enum
func Order_Values() []string {
	return []string{OrderAscending, OrderDescending}
}
30515
// Type of output group (File group, Apple HLS, DASH ISO, Microsoft Smooth Streaming,
// CMAF)
const (
	// OutputGroupTypeHlsGroupSettings is a OutputGroupType enum value
	OutputGroupTypeHlsGroupSettings = "HLS_GROUP_SETTINGS"

	// OutputGroupTypeDashIsoGroupSettings is a OutputGroupType enum value
	OutputGroupTypeDashIsoGroupSettings = "DASH_ISO_GROUP_SETTINGS"

	// OutputGroupTypeFileGroupSettings is a OutputGroupType enum value
	OutputGroupTypeFileGroupSettings = "FILE_GROUP_SETTINGS"

	// OutputGroupTypeMsSmoothGroupSettings is a OutputGroupType enum value
	OutputGroupTypeMsSmoothGroupSettings = "MS_SMOOTH_GROUP_SETTINGS"

	// OutputGroupTypeCmafGroupSettings is a OutputGroupType enum value
	OutputGroupTypeCmafGroupSettings = "CMAF_GROUP_SETTINGS"
)

// OutputGroupType_Values returns all elements of the OutputGroupType enum
func OutputGroupType_Values() []string {
	var vals []string
	vals = append(vals,
		OutputGroupTypeHlsGroupSettings,
		OutputGroupTypeDashIsoGroupSettings,
		OutputGroupTypeFileGroupSettings,
		OutputGroupTypeMsSmoothGroupSettings,
		OutputGroupTypeCmafGroupSettings,
	)
	return vals
}
30545
// Selects method of inserting SDT information into output stream. "Follow input
// SDT" copies SDT information from input stream to output stream. "Follow input
// SDT if present" copies SDT information from input stream to output stream
// if SDT information is present in the input, otherwise it will fall back on
// the user-defined values. Enter "SDT Manually" means user will enter the SDT
// information. "No SDT" means output stream will not contain SDT information.
const (
	// OutputSdtSdtFollow is a OutputSdt enum value
	OutputSdtSdtFollow = "SDT_FOLLOW"

	// OutputSdtSdtFollowIfPresent is a OutputSdt enum value
	OutputSdtSdtFollowIfPresent = "SDT_FOLLOW_IF_PRESENT"

	// OutputSdtSdtManual is a OutputSdt enum value
	OutputSdtSdtManual = "SDT_MANUAL"

	// OutputSdtSdtNone is a OutputSdt enum value
	OutputSdtSdtNone = "SDT_NONE"
)

// OutputSdt_Values returns all elements of the OutputSdt enum
func OutputSdt_Values() []string {
	var vals []string
	vals = append(vals, OutputSdtSdtFollow, OutputSdtSdtFollowIfPresent, OutputSdtSdtManual, OutputSdtSdtNone)
	return vals
}
30575
// Optional. When you request a list of presets, you can choose to list them
// alphabetically by NAME or chronologically by CREATION_DATE. If you don't
// specify, the service will list them by name.
const (
	// PresetListByName is a PresetListBy enum value
	PresetListByName = "NAME"

	// PresetListByCreationDate is a PresetListBy enum value
	PresetListByCreationDate = "CREATION_DATE"

	// PresetListBySystem is a PresetListBy enum value
	PresetListBySystem = "SYSTEM"
)

// PresetListBy_Values returns all elements of the PresetListBy enum
func PresetListBy_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		PresetListByName,
		PresetListByCreationDate,
		PresetListBySystem,
	)
	return values
}
30598
// Specifies whether the pricing plan for the queue is on-demand or reserved.
// For on-demand, you pay per minute, billed in increments of .01 minute. For
// reserved, you pay for the transcoding capacity of the entire queue, regardless
// of how much or how little you use it. Reserved pricing requires a 12-month
// commitment.
const (
	// PricingPlanOnDemand is a PricingPlan enum value
	PricingPlanOnDemand = "ON_DEMAND"

	// PricingPlanReserved is a PricingPlan enum value
	PricingPlanReserved = "RESERVED"
)

// PricingPlan_Values returns all elements of the PricingPlan enum
func PricingPlan_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		PricingPlanOnDemand,
		PricingPlanReserved,
	)
	return values
}
30619
// This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that
// you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4
// sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma
// sampling. You must specify a value for this setting when your output codec
// profile supports 4:4:4 chroma sampling. Related Settings: When you set Chroma
// sampling to Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING), you must choose
// an output codec profile that supports 4:4:4 chroma sampling. These values
// for Profile (CodecProfile) support 4:4:4 chroma sampling: Apple ProRes 4444
// (APPLE_PRORES_4444) or Apple ProRes 4444 XQ (APPLE_PRORES_4444_XQ). When
// you set Chroma sampling to Preserve 4:4:4 sampling, you must disable all
// video preprocessors except for Nexguard file marker (PartnerWatermarking).
// When you set Chroma sampling to Preserve 4:4:4 sampling and use framerate
// conversion, you must set Frame rate conversion algorithm (FramerateConversionAlgorithm)
// to Drop duplicate (DUPLICATE_DROP).
const (
	// ProresChromaSamplingPreserve444Sampling is a ProresChromaSampling enum value
	ProresChromaSamplingPreserve444Sampling = "PRESERVE_444_SAMPLING"

	// ProresChromaSamplingSubsampleTo422 is a ProresChromaSampling enum value
	ProresChromaSamplingSubsampleTo422 = "SUBSAMPLE_TO_422"
)

// ProresChromaSampling_Values returns all elements of the ProresChromaSampling enum
func ProresChromaSampling_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		ProresChromaSamplingPreserve444Sampling,
		ProresChromaSamplingSubsampleTo422,
	)
	return values
}
30649
// Use Profile (ProResCodecProfile) to specify the type of Apple ProRes codec
// to use for this output.
const (
	// ProresCodecProfileAppleProres422 is a ProresCodecProfile enum value
	ProresCodecProfileAppleProres422 = "APPLE_PRORES_422"

	// ProresCodecProfileAppleProres422Hq is a ProresCodecProfile enum value
	ProresCodecProfileAppleProres422Hq = "APPLE_PRORES_422_HQ"

	// ProresCodecProfileAppleProres422Lt is a ProresCodecProfile enum value
	ProresCodecProfileAppleProres422Lt = "APPLE_PRORES_422_LT"

	// ProresCodecProfileAppleProres422Proxy is a ProresCodecProfile enum value
	ProresCodecProfileAppleProres422Proxy = "APPLE_PRORES_422_PROXY"

	// ProresCodecProfileAppleProres4444 is a ProresCodecProfile enum value
	ProresCodecProfileAppleProres4444 = "APPLE_PRORES_4444"

	// ProresCodecProfileAppleProres4444Xq is a ProresCodecProfile enum value
	ProresCodecProfileAppleProres4444Xq = "APPLE_PRORES_4444_XQ"
)

// ProresCodecProfile_Values returns all elements of the ProresCodecProfile enum
func ProresCodecProfile_Values() []string {
	values := make([]string, 0, 6)
	values = append(values,
		ProresCodecProfileAppleProres422,
		ProresCodecProfileAppleProres422Hq,
		ProresCodecProfileAppleProres422Lt,
		ProresCodecProfileAppleProres422Proxy,
		ProresCodecProfileAppleProres4444,
		ProresCodecProfileAppleProres4444Xq,
	)
	return values
}
30683
// If you are using the console, use the Framerate setting to specify the frame
// rate for this output. If you want to keep the same frame rate as the input
// video, choose Follow source. If you want to do frame rate conversion, choose
// a frame rate from the dropdown list or choose Custom. The framerates shown
// in the dropdown list are decimal approximations of fractions. If you choose
// Custom, specify your frame rate as a fraction. If you are creating your transcoding
// job specification as a JSON file without the console, use FramerateControl
// to specify which value the service uses for the frame rate for this output.
// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
// from the input. Choose SPECIFIED if you want the service to use the frame
// rate you specify in the settings FramerateNumerator and FramerateDenominator.
const (
	// ProresFramerateControlInitializeFromSource is a ProresFramerateControl enum value
	ProresFramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// ProresFramerateControlSpecified is a ProresFramerateControl enum value
	ProresFramerateControlSpecified = "SPECIFIED"
)

// ProresFramerateControl_Values returns all elements of the ProresFramerateControl enum
func ProresFramerateControl_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		ProresFramerateControlInitializeFromSource,
		ProresFramerateControlSpecified,
	)
	return values
}
30710
// Choose the method that you want MediaConvert to use when increasing or decreasing
// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
// smooth picture, but might introduce undesirable video artifacts. For complex
// frame rate conversions, especially if your source video has already been
// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
// motion-compensated interpolation. FrameFormer chooses the best conversion
// method frame by frame. Note that using FrameFormer increases the transcoding
// time and incurs a significant add-on cost.
const (
	// ProresFramerateConversionAlgorithmDuplicateDrop is a ProresFramerateConversionAlgorithm enum value
	ProresFramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// ProresFramerateConversionAlgorithmInterpolate is a ProresFramerateConversionAlgorithm enum value
	ProresFramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// ProresFramerateConversionAlgorithmFrameformer is a ProresFramerateConversionAlgorithm enum value
	ProresFramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// ProresFramerateConversionAlgorithm_Values returns all elements of the ProresFramerateConversionAlgorithm enum
func ProresFramerateConversionAlgorithm_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		ProresFramerateConversionAlgorithmDuplicateDrop,
		ProresFramerateConversionAlgorithmInterpolate,
		ProresFramerateConversionAlgorithmFrameformer,
	)
	return values
}
30740
// Choose the scan line type for the output. Keep the default value, Progressive
// (PROGRESSIVE) to create a progressive output, regardless of the scan type
// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
// to create an output that's interlaced with the same field polarity throughout.
// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
// to produce outputs with the same field polarity as the source. For jobs that
// have multiple inputs, the output field polarity might change over the course
// of the output. Follow behavior depends on the input scan type. If the source
// is interlaced, the output will be interlaced with the same polarity as the
// source. If the source is progressive, the output will be interlaced with
// top field first or bottom field first, depending on which of the Follow
// options you choose.
const (
	// ProresInterlaceModeProgressive is a ProresInterlaceMode enum value
	ProresInterlaceModeProgressive = "PROGRESSIVE"

	// ProresInterlaceModeTopField is a ProresInterlaceMode enum value
	ProresInterlaceModeTopField = "TOP_FIELD"

	// ProresInterlaceModeBottomField is a ProresInterlaceMode enum value
	ProresInterlaceModeBottomField = "BOTTOM_FIELD"

	// ProresInterlaceModeFollowTopField is a ProresInterlaceMode enum value
	ProresInterlaceModeFollowTopField = "FOLLOW_TOP_FIELD"

	// ProresInterlaceModeFollowBottomField is a ProresInterlaceMode enum value
	ProresInterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD"
)

// ProresInterlaceMode_Values returns all elements of the ProresInterlaceMode enum
func ProresInterlaceMode_Values() []string {
	values := make([]string, 0, 5)
	values = append(values,
		ProresInterlaceModeProgressive,
		ProresInterlaceModeTopField,
		ProresInterlaceModeBottomField,
		ProresInterlaceModeFollowTopField,
		ProresInterlaceModeFollowBottomField,
	)
	return values
}
30780
// Optional. Specify how the service determines the pixel aspect ratio (PAR)
// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
// uses the PAR from your input video for your output. To specify a different
// PAR in the console, choose any value other than Follow source. To specify
// a different PAR by editing the JSON job specification, choose SPECIFIED.
// When you choose SPECIFIED for this setting, you must also specify values
// for the parNumerator and parDenominator settings.
const (
	// ProresParControlInitializeFromSource is a ProresParControl enum value
	ProresParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// ProresParControlSpecified is a ProresParControl enum value
	ProresParControlSpecified = "SPECIFIED"
)

// ProresParControl_Values returns all elements of the ProresParControl enum
func ProresParControl_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		ProresParControlInitializeFromSource,
		ProresParControlSpecified,
	)
	return values
}
30803
// Use this setting for interlaced outputs, when your output frame rate is half
// of your input frame rate. In this situation, choose Optimized interlacing
// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
// case, each progressive frame from the input corresponds to an interlaced
// field in the output. Keep the default value, Basic interlacing (INTERLACED),
// for all other output frame rates. With basic interlacing, MediaConvert performs
// any frame rate conversion first and then interlaces the frames. When you
// choose Optimized interlacing and you set your output frame rate to a value
// that isn't suitable for optimized interlacing, MediaConvert automatically
// falls back to basic interlacing. Required settings: To use optimized interlacing,
// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
// use optimized interlacing for hard telecine outputs. You must also set Interlace
// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
const (
	// ProresScanTypeConversionModeInterlaced is a ProresScanTypeConversionMode enum value
	ProresScanTypeConversionModeInterlaced = "INTERLACED"

	// ProresScanTypeConversionModeInterlacedOptimize is a ProresScanTypeConversionMode enum value
	ProresScanTypeConversionModeInterlacedOptimize = "INTERLACED_OPTIMIZE"
)

// ProresScanTypeConversionMode_Values returns all elements of the ProresScanTypeConversionMode enum
func ProresScanTypeConversionMode_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		ProresScanTypeConversionModeInterlaced,
		ProresScanTypeConversionModeInterlacedOptimize,
	)
	return values
}
30832
// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
// second (fps). Enable slow PAL to create a 25 fps output. When you enable
// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
// your audio to keep it synchronized with the video. Note that enabling this
// setting will slightly reduce the duration of your video. Required settings:
// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
// 1.
const (
	// ProresSlowPalDisabled is a ProresSlowPal enum value
	ProresSlowPalDisabled = "DISABLED"

	// ProresSlowPalEnabled is a ProresSlowPal enum value
	ProresSlowPalEnabled = "ENABLED"
)

// ProresSlowPal_Values returns all elements of the ProresSlowPal enum
func ProresSlowPal_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		ProresSlowPalDisabled,
		ProresSlowPalEnabled,
	)
	return values
}
30856
// When you do frame rate conversion from 23.976 frames per second (fps) to
// 29.97 fps, and your output scan type is interlaced, you can optionally enable
// hard telecine (HARD) to create a smoother picture. When you keep the default
// value, None (NONE), MediaConvert does a standard frame rate conversion to
// 29.97 without doing anything with the field polarity to create a smoother
// picture.
const (
	// ProresTelecineNone is a ProresTelecine enum value
	ProresTelecineNone = "NONE"

	// ProresTelecineHard is a ProresTelecine enum value
	ProresTelecineHard = "HARD"
)

// ProresTelecine_Values returns all elements of the ProresTelecine enum
func ProresTelecine_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		ProresTelecineNone,
		ProresTelecineHard,
	)
	return values
}
30878
// Optional. When you request a list of queues, you can choose to list them
// alphabetically by NAME or chronologically by CREATION_DATE. If you don't
// specify, the service will list them by creation date.
const (
	// QueueListByName is a QueueListBy enum value
	QueueListByName = "NAME"

	// QueueListByCreationDate is a QueueListBy enum value
	QueueListByCreationDate = "CREATION_DATE"
)

// QueueListBy_Values returns all elements of the QueueListBy enum
func QueueListBy_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		QueueListByName,
		QueueListByCreationDate,
	)
	return values
}
30897
// Queues can be ACTIVE or PAUSED. If you pause a queue, jobs in that queue
// won't begin. Jobs that are running when you pause a queue continue to run
// until they finish or result in an error.
const (
	// QueueStatusActive is a QueueStatus enum value
	QueueStatusActive = "ACTIVE"

	// QueueStatusPaused is a QueueStatus enum value
	QueueStatusPaused = "PAUSED"
)

// QueueStatus_Values returns all elements of the QueueStatus enum
func QueueStatus_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		QueueStatusActive,
		QueueStatusPaused,
	)
	return values
}
30916
// Specifies whether the term of your reserved queue pricing plan is automatically
// extended (AUTO_RENEW) or expires (EXPIRE) at the end of the term.
const (
	// RenewalTypeAutoRenew is a RenewalType enum value
	RenewalTypeAutoRenew = "AUTO_RENEW"

	// RenewalTypeExpire is a RenewalType enum value
	RenewalTypeExpire = "EXPIRE"
)

// RenewalType_Values returns all elements of the RenewalType enum
func RenewalType_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		RenewalTypeAutoRenew,
		RenewalTypeExpire,
	)
	return values
}
30934
// Specifies whether the pricing plan for your reserved queue is ACTIVE or EXPIRED.
const (
	// ReservationPlanStatusActive is a ReservationPlanStatus enum value
	ReservationPlanStatusActive = "ACTIVE"

	// ReservationPlanStatusExpired is a ReservationPlanStatus enum value
	ReservationPlanStatusExpired = "EXPIRED"
)

// ReservationPlanStatus_Values returns all elements of the ReservationPlanStatus enum
func ReservationPlanStatus_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		ReservationPlanStatusActive,
		ReservationPlanStatusExpired,
	)
	return values
}
30951
// Use Respond to AFD (RespondToAfd) to specify how the service changes the
// video itself in response to AFD values in the input. * Choose Respond to
// clip the input video frame according to the AFD value, input display aspect
// ratio, and output display aspect ratio. * Choose Passthrough to include the
// input AFD values. Do not choose this when AfdSignaling is set to (NONE).
// A preferred implementation of this workflow is to set RespondToAfd to (NONE)
// and set AfdSignaling to (AUTO). * Choose None to remove all input AFD values
// from this output.
const (
	// RespondToAfdNone is a RespondToAfd enum value
	RespondToAfdNone = "NONE"

	// RespondToAfdRespond is a RespondToAfd enum value
	RespondToAfdRespond = "RESPOND"

	// RespondToAfdPassthrough is a RespondToAfd enum value
	RespondToAfdPassthrough = "PASSTHROUGH"
)

// RespondToAfd_Values returns all elements of the RespondToAfd enum
func RespondToAfd_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		RespondToAfdNone,
		RespondToAfdRespond,
		RespondToAfdPassthrough,
	)
	return values
}
30979
// Choose an Amazon S3 canned ACL for MediaConvert to apply to this output.
const (
	// S3ObjectCannedAclPublicRead is a S3ObjectCannedAcl enum value
	S3ObjectCannedAclPublicRead = "PUBLIC_READ"

	// S3ObjectCannedAclAuthenticatedRead is a S3ObjectCannedAcl enum value
	S3ObjectCannedAclAuthenticatedRead = "AUTHENTICATED_READ"

	// S3ObjectCannedAclBucketOwnerRead is a S3ObjectCannedAcl enum value
	S3ObjectCannedAclBucketOwnerRead = "BUCKET_OWNER_READ"

	// S3ObjectCannedAclBucketOwnerFullControl is a S3ObjectCannedAcl enum value
	S3ObjectCannedAclBucketOwnerFullControl = "BUCKET_OWNER_FULL_CONTROL"
)

// S3ObjectCannedAcl_Values returns all elements of the S3ObjectCannedAcl enum
func S3ObjectCannedAcl_Values() []string {
	values := make([]string, 0, 4)
	values = append(values,
		S3ObjectCannedAclPublicRead,
		S3ObjectCannedAclAuthenticatedRead,
		S3ObjectCannedAclBucketOwnerRead,
		S3ObjectCannedAclBucketOwnerFullControl,
	)
	return values
}
31004
// Specify how you want your data keys managed. AWS uses data keys to encrypt
// your content. AWS also encrypts the data keys themselves, using a customer
// master key (CMK), and then stores the encrypted data keys alongside your
// encrypted content. Use this setting to specify which AWS service manages
// the CMK. For simplest set up, choose Amazon S3 (SERVER_SIDE_ENCRYPTION_S3).
// If you want your master key to be managed by AWS Key Management Service (KMS),
// choose AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). By default, when you choose
// AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with
// Amazon S3 to encrypt your data keys. You can optionally choose to specify
// a different, customer managed CMK. Do so by specifying the Amazon Resource
// Name (ARN) of the key for the setting KMS ARN (kmsKeyArn).
const (
	// S3ServerSideEncryptionTypeServerSideEncryptionS3 is a S3ServerSideEncryptionType enum value
	S3ServerSideEncryptionTypeServerSideEncryptionS3 = "SERVER_SIDE_ENCRYPTION_S3"

	// S3ServerSideEncryptionTypeServerSideEncryptionKms is a S3ServerSideEncryptionType enum value
	S3ServerSideEncryptionTypeServerSideEncryptionKms = "SERVER_SIDE_ENCRYPTION_KMS"
)

// S3ServerSideEncryptionType_Values returns all elements of the S3ServerSideEncryptionType enum
func S3ServerSideEncryptionType_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		S3ServerSideEncryptionTypeServerSideEncryptionS3,
		S3ServerSideEncryptionTypeServerSideEncryptionKms,
	)
	return values
}
31031
// Specify the video color sample range for this output. To create a full range
// output, you must start with a full range YUV input and keep the default value,
// None (NONE). To create a limited range output from a full range input, choose
// Limited range (LIMITED_RANGE_SQUEEZE). With RGB inputs, your output is always
// limited range, regardless of your choice here. When you create a limited
// range output from a full range input, MediaConvert limits the active pixel
// values in a way that depends on the output's bit depth: 8-bit outputs contain
// only values from 16 through 235 and 10-bit outputs contain only values from
// 64 through 940. With this conversion, MediaConvert also changes the output
// metadata to note the limited range.
const (
	// SampleRangeConversionLimitedRangeSqueeze is a SampleRangeConversion enum value
	SampleRangeConversionLimitedRangeSqueeze = "LIMITED_RANGE_SQUEEZE"

	// SampleRangeConversionNone is a SampleRangeConversion enum value
	SampleRangeConversionNone = "NONE"
)

// SampleRangeConversion_Values returns all elements of the SampleRangeConversion enum
func SampleRangeConversion_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		SampleRangeConversionLimitedRangeSqueeze,
		SampleRangeConversionNone,
	)
	return values
}
31057
// Specify how the service handles outputs that have a different aspect ratio
// from the input aspect ratio. Choose Stretch to output (STRETCH_TO_OUTPUT)
// to have the service stretch your video image to fit. Keep the setting Default
// (DEFAULT) to have the service letterbox your video instead. This setting
// overrides any value that you specify for the setting Selection placement
// (position) in this output.
const (
	// ScalingBehaviorDefault is a ScalingBehavior enum value
	ScalingBehaviorDefault = "DEFAULT"

	// ScalingBehaviorStretchToOutput is a ScalingBehavior enum value
	ScalingBehaviorStretchToOutput = "STRETCH_TO_OUTPUT"
)

// ScalingBehavior_Values returns all elements of the ScalingBehavior enum
func ScalingBehavior_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		ScalingBehaviorDefault,
		ScalingBehaviorStretchToOutput,
	)
	return values
}
31079
// Set Framerate (SccDestinationFramerate) to make sure that the captions and
// the video are synchronized in the output. Specify a frame rate that matches
// the frame rate of the associated video. If the video frame rate is 29.97,
// choose 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has
// video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97
// non-dropframe (FRAMERATE_29_97_NON_DROPFRAME).
const (
	// SccDestinationFramerateFramerate2397 is a SccDestinationFramerate enum value
	SccDestinationFramerateFramerate2397 = "FRAMERATE_23_97"

	// SccDestinationFramerateFramerate24 is a SccDestinationFramerate enum value
	SccDestinationFramerateFramerate24 = "FRAMERATE_24"

	// SccDestinationFramerateFramerate25 is a SccDestinationFramerate enum value
	SccDestinationFramerateFramerate25 = "FRAMERATE_25"

	// SccDestinationFramerateFramerate2997Dropframe is a SccDestinationFramerate enum value
	SccDestinationFramerateFramerate2997Dropframe = "FRAMERATE_29_97_DROPFRAME"

	// SccDestinationFramerateFramerate2997NonDropframe is a SccDestinationFramerate enum value
	SccDestinationFramerateFramerate2997NonDropframe = "FRAMERATE_29_97_NON_DROPFRAME"
)

// SccDestinationFramerate_Values returns all elements of the SccDestinationFramerate enum
func SccDestinationFramerate_Values() []string {
	values := make([]string, 0, 5)
	values = append(values,
		SccDestinationFramerateFramerate2397,
		SccDestinationFramerateFramerate24,
		SccDestinationFramerateFramerate25,
		SccDestinationFramerateFramerate2997Dropframe,
		SccDestinationFramerateFramerate2997NonDropframe,
	)
	return values
}
31113
// Enable this setting when you run a test job to estimate how many reserved
// transcoding slots (RTS) you need. When this is enabled, MediaConvert runs
// your job from an on-demand queue with similar performance to what you will
// see with one RTS in a reserved queue. This setting is disabled by default.
const (
	// SimulateReservedQueueDisabled is a SimulateReservedQueue enum value
	SimulateReservedQueueDisabled = "DISABLED"

	// SimulateReservedQueueEnabled is a SimulateReservedQueue enum value
	SimulateReservedQueueEnabled = "ENABLED"
)

// SimulateReservedQueue_Values returns all elements of the SimulateReservedQueue enum
func SimulateReservedQueue_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		SimulateReservedQueueDisabled,
		SimulateReservedQueueEnabled,
	)
	return values
}
31133
// Choose Enabled (ENABLED) to have MediaConvert use the font style, color,
// and position information from the captions source in the input. Keep the
// default value, Disabled (DISABLED), for simplified output captions.
const (
	// SrtStylePassthroughEnabled is a SrtStylePassthrough enum value
	SrtStylePassthroughEnabled = "ENABLED"

	// SrtStylePassthroughDisabled is a SrtStylePassthrough enum value
	SrtStylePassthroughDisabled = "DISABLED"
)

// SrtStylePassthrough_Values returns all elements of the SrtStylePassthrough enum
func SrtStylePassthrough_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		SrtStylePassthroughEnabled,
		SrtStylePassthroughDisabled,
	)
	return values
}
31152
// Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch
// Events. Set the interval, in seconds, between status updates. MediaConvert
// sends an update at this interval from the time the service begins processing
// your job to the time it completes the transcode or encounters an error.
const (
	// StatusUpdateIntervalSeconds10 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds10 = "SECONDS_10"

	// StatusUpdateIntervalSeconds12 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds12 = "SECONDS_12"

	// StatusUpdateIntervalSeconds15 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds15 = "SECONDS_15"

	// StatusUpdateIntervalSeconds20 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds20 = "SECONDS_20"

	// StatusUpdateIntervalSeconds30 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds30 = "SECONDS_30"

	// StatusUpdateIntervalSeconds60 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds60 = "SECONDS_60"

	// StatusUpdateIntervalSeconds120 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds120 = "SECONDS_120"

	// StatusUpdateIntervalSeconds180 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds180 = "SECONDS_180"

	// StatusUpdateIntervalSeconds240 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds240 = "SECONDS_240"

	// StatusUpdateIntervalSeconds300 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds300 = "SECONDS_300"

	// StatusUpdateIntervalSeconds360 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds360 = "SECONDS_360"

	// StatusUpdateIntervalSeconds420 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds420 = "SECONDS_420"

	// StatusUpdateIntervalSeconds480 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds480 = "SECONDS_480"

	// StatusUpdateIntervalSeconds540 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds540 = "SECONDS_540"

	// StatusUpdateIntervalSeconds600 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds600 = "SECONDS_600"
)

// StatusUpdateInterval_Values returns all elements of the StatusUpdateInterval enum
func StatusUpdateInterval_Values() []string {
	values := make([]string, 0, 15)
	values = append(values,
		StatusUpdateIntervalSeconds10,
		StatusUpdateIntervalSeconds12,
		StatusUpdateIntervalSeconds15,
		StatusUpdateIntervalSeconds20,
		StatusUpdateIntervalSeconds30,
		StatusUpdateIntervalSeconds60,
		StatusUpdateIntervalSeconds120,
		StatusUpdateIntervalSeconds180,
		StatusUpdateIntervalSeconds240,
		StatusUpdateIntervalSeconds300,
		StatusUpdateIntervalSeconds360,
		StatusUpdateIntervalSeconds420,
		StatusUpdateIntervalSeconds480,
		StatusUpdateIntervalSeconds540,
		StatusUpdateIntervalSeconds600,
	)
	return values
}
31224
// A page type as defined in the standard ETSI EN 300 468, Table 94
const (
	// TeletextPageTypePageTypeInitial is a TeletextPageType enum value
	TeletextPageTypePageTypeInitial = "PAGE_TYPE_INITIAL"

	// TeletextPageTypePageTypeSubtitle is a TeletextPageType enum value
	TeletextPageTypePageTypeSubtitle = "PAGE_TYPE_SUBTITLE"

	// TeletextPageTypePageTypeAddlInfo is a TeletextPageType enum value
	TeletextPageTypePageTypeAddlInfo = "PAGE_TYPE_ADDL_INFO"

	// TeletextPageTypePageTypeProgramSchedule is a TeletextPageType enum value
	TeletextPageTypePageTypeProgramSchedule = "PAGE_TYPE_PROGRAM_SCHEDULE"

	// TeletextPageTypePageTypeHearingImpairedSubtitle is a TeletextPageType enum value
	TeletextPageTypePageTypeHearingImpairedSubtitle = "PAGE_TYPE_HEARING_IMPAIRED_SUBTITLE"
)

// TeletextPageType_Values returns all elements of the TeletextPageType enum
func TeletextPageType_Values() []string {
	values := make([]string, 0, 5)
	values = append(values,
		TeletextPageTypePageTypeInitial,
		TeletextPageTypePageTypeSubtitle,
		TeletextPageTypePageTypeAddlInfo,
		TeletextPageTypePageTypeProgramSchedule,
		TeletextPageTypePageTypeHearingImpairedSubtitle,
	)
	return values
}
31253
// Use Position (Position) under under Timecode burn-in (TimecodeBurnIn) to
// specify the location the burned-in timecode on output video.
const (
	// TimecodeBurninPositionTopCenter is a TimecodeBurninPosition enum value
	TimecodeBurninPositionTopCenter = "TOP_CENTER"

	// TimecodeBurninPositionTopLeft is a TimecodeBurninPosition enum value
	TimecodeBurninPositionTopLeft = "TOP_LEFT"

	// TimecodeBurninPositionTopRight is a TimecodeBurninPosition enum value
	TimecodeBurninPositionTopRight = "TOP_RIGHT"

	// TimecodeBurninPositionMiddleLeft is a TimecodeBurninPosition enum value
	TimecodeBurninPositionMiddleLeft = "MIDDLE_LEFT"

	// TimecodeBurninPositionMiddleCenter is a TimecodeBurninPosition enum value
	TimecodeBurninPositionMiddleCenter = "MIDDLE_CENTER"

	// TimecodeBurninPositionMiddleRight is a TimecodeBurninPosition enum value
	TimecodeBurninPositionMiddleRight = "MIDDLE_RIGHT"

	// TimecodeBurninPositionBottomLeft is a TimecodeBurninPosition enum value
	TimecodeBurninPositionBottomLeft = "BOTTOM_LEFT"

	// TimecodeBurninPositionBottomCenter is a TimecodeBurninPosition enum value
	TimecodeBurninPositionBottomCenter = "BOTTOM_CENTER"

	// TimecodeBurninPositionBottomRight is a TimecodeBurninPosition enum value
	TimecodeBurninPositionBottomRight = "BOTTOM_RIGHT"
)

// TimecodeBurninPosition_Values returns all elements of the TimecodeBurninPosition enum
func TimecodeBurninPosition_Values() []string {
	values := make([]string, 0, 9)
	values = append(values,
		TimecodeBurninPositionTopCenter,
		TimecodeBurninPositionTopLeft,
		TimecodeBurninPositionTopRight,
		TimecodeBurninPositionMiddleLeft,
		TimecodeBurninPositionMiddleCenter,
		TimecodeBurninPositionMiddleRight,
		TimecodeBurninPositionBottomLeft,
		TimecodeBurninPositionBottomCenter,
		TimecodeBurninPositionBottomRight,
	)
	return values
}
31299
// Use Source (TimecodeSource) to set how timecodes are handled within this
// job. To make sure that your video, audio, captions, and markers are synchronized
// and that time-based features, such as image inserter, work correctly, choose
// the Timecode source option that matches your assets. All timecodes are in
// a 24-hour format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) -
// Use the timecode that is in the input video. If no embedded timecode is in
// the source, the service will use Start at 0 (ZEROBASED) instead. * Start
// at 0 (ZEROBASED) - Set the timecode of the initial frame to 00:00:00:00.
// * Specified Start (SPECIFIEDSTART) - Set the timecode of the initial frame
// to a value other than zero. You use Start timecode (Start) to provide this
// value.
const (
	// TimecodeSourceEmbedded is a TimecodeSource enum value
	TimecodeSourceEmbedded = "EMBEDDED"

	// TimecodeSourceZerobased is a TimecodeSource enum value
	TimecodeSourceZerobased = "ZEROBASED"

	// TimecodeSourceSpecifiedstart is a TimecodeSource enum value
	TimecodeSourceSpecifiedstart = "SPECIFIEDSTART"
)

// TimecodeSource_Values returns all elements of the TimecodeSource enum
func TimecodeSource_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		TimecodeSourceEmbedded,
		TimecodeSourceZerobased,
		TimecodeSourceSpecifiedstart,
	)
	return values
}
31330
// Applies only to HLS outputs. Use this setting to specify whether the service
// inserts the ID3 timed metadata from the input in this output.
const (
	// TimedMetadataPassthrough is a TimedMetadata enum value
	TimedMetadataPassthrough = "PASSTHROUGH"

	// TimedMetadataNone is a TimedMetadata enum value
	TimedMetadataNone = "NONE"
)

// TimedMetadata_Values returns all elements of the TimedMetadata enum
func TimedMetadata_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, TimedMetadataPassthrough, TimedMetadataNone)
	return values
}
31348
// Pass through style and position information from a TTML-like input source
// (TTML, IMSC, SMPTE-TT) to the TTML output.
const (
	// TtmlStylePassthroughEnabled is a TtmlStylePassthrough enum value
	TtmlStylePassthroughEnabled = "ENABLED"

	// TtmlStylePassthroughDisabled is a TtmlStylePassthrough enum value
	TtmlStylePassthroughDisabled = "DISABLED"
)

// TtmlStylePassthrough_Values returns all elements of the TtmlStylePassthrough enum
func TtmlStylePassthrough_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, TtmlStylePassthroughEnabled, TtmlStylePassthroughDisabled)
	return values
}
31366
const (
	// TypeSystem is a Type enum value
	TypeSystem = "SYSTEM"

	// TypeCustom is a Type enum value
	TypeCustom = "CUSTOM"
)

// Type_Values returns all elements of the Type enum
func Type_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, TypeSystem, TypeCustom)
	return values
}
31382
// Specify the VC3 class to choose the quality characteristics for this output.
// VC3 class, together with the settings Framerate (framerateNumerator and framerateDenominator)
// and Resolution (height and width), determine your output bitrate. For example,
// say that your video resolution is 1920x1080 and your framerate is 29.97.
// Then Class 145 (CLASS_145) gives you an output with a bitrate of approximately
// 145 Mbps and Class 220 (CLASS_220) gives you an output with a bitrate of
// approximately 220 Mbps. VC3 class also specifies the color bit depth of your
// output.
const (
	// Vc3ClassClass1458bit is a Vc3Class enum value
	Vc3ClassClass1458bit = "CLASS_145_8BIT"

	// Vc3ClassClass2208bit is a Vc3Class enum value
	Vc3ClassClass2208bit = "CLASS_220_8BIT"

	// Vc3ClassClass22010bit is a Vc3Class enum value
	Vc3ClassClass22010bit = "CLASS_220_10BIT"
)

// Vc3Class_Values returns all elements of the Vc3Class enum
func Vc3Class_Values() []string {
	values := make([]string, 0, 3)
	values = append(values, Vc3ClassClass1458bit, Vc3ClassClass2208bit, Vc3ClassClass22010bit)
	return values
}
31410
// If you are using the console, use the Framerate setting to specify the frame
// rate for this output. If you want to keep the same frame rate as the input
// video, choose Follow source. If you want to do frame rate conversion, choose
// a frame rate from the dropdown list or choose Custom. The framerates shown
// in the dropdown list are decimal approximations of fractions. If you choose
// Custom, specify your frame rate as a fraction. If you are creating your transcoding
// job specification as a JSON file without the console, use FramerateControl
// to specify which value the service uses for the frame rate for this output.
// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
// from the input. Choose SPECIFIED if you want the service to use the frame
// rate you specify in the settings FramerateNumerator and FramerateDenominator.
const (
	// Vc3FramerateControlInitializeFromSource is a Vc3FramerateControl enum value
	Vc3FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Vc3FramerateControlSpecified is a Vc3FramerateControl enum value
	Vc3FramerateControlSpecified = "SPECIFIED"
)

// Vc3FramerateControl_Values returns all elements of the Vc3FramerateControl enum
func Vc3FramerateControl_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, Vc3FramerateControlInitializeFromSource, Vc3FramerateControlSpecified)
	return values
}
31437
// Choose the method that you want MediaConvert to use when increasing or decreasing
// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
// smooth picture, but might introduce undesirable video artifacts. For complex
// frame rate conversions, especially if your source video has already been
// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
// motion-compensated interpolation. FrameFormer chooses the best conversion
// method frame by frame. Note that using FrameFormer increases the transcoding
// time and incurs a significant add-on cost.
const (
	// Vc3FramerateConversionAlgorithmDuplicateDrop is a Vc3FramerateConversionAlgorithm enum value
	Vc3FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// Vc3FramerateConversionAlgorithmInterpolate is a Vc3FramerateConversionAlgorithm enum value
	Vc3FramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// Vc3FramerateConversionAlgorithmFrameformer is a Vc3FramerateConversionAlgorithm enum value
	Vc3FramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// Vc3FramerateConversionAlgorithm_Values returns all elements of the Vc3FramerateConversionAlgorithm enum
func Vc3FramerateConversionAlgorithm_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		Vc3FramerateConversionAlgorithmDuplicateDrop,
		Vc3FramerateConversionAlgorithmInterpolate,
		Vc3FramerateConversionAlgorithmFrameformer,
	)
	return values
}
31467
// Optional. Choose the scan line type for this output. If you don't specify
// a value, MediaConvert will create a progressive output.
const (
	// Vc3InterlaceModeInterlaced is a Vc3InterlaceMode enum value
	Vc3InterlaceModeInterlaced = "INTERLACED"

	// Vc3InterlaceModeProgressive is a Vc3InterlaceMode enum value
	Vc3InterlaceModeProgressive = "PROGRESSIVE"
)

// Vc3InterlaceMode_Values returns all elements of the Vc3InterlaceMode enum
func Vc3InterlaceMode_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, Vc3InterlaceModeInterlaced, Vc3InterlaceModeProgressive)
	return values
}
31485
// Use this setting for interlaced outputs, when your output frame rate is half
// of your input frame rate. In this situation, choose Optimized interlacing
// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
// case, each progressive frame from the input corresponds to an interlaced
// field in the output. Keep the default value, Basic interlacing (INTERLACED),
// for all other output frame rates. With basic interlacing, MediaConvert performs
// any frame rate conversion first and then interlaces the frames. When you
// choose Optimized interlacing and you set your output frame rate to a value
// that isn't suitable for optimized interlacing, MediaConvert automatically
// falls back to basic interlacing. Required settings: To use optimized interlacing,
// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
// use optimized interlacing for hard telecine outputs. You must also set Interlace
// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
const (
	// Vc3ScanTypeConversionModeInterlaced is a Vc3ScanTypeConversionMode enum value
	Vc3ScanTypeConversionModeInterlaced = "INTERLACED"

	// Vc3ScanTypeConversionModeInterlacedOptimize is a Vc3ScanTypeConversionMode enum value
	Vc3ScanTypeConversionModeInterlacedOptimize = "INTERLACED_OPTIMIZE"
)

// Vc3ScanTypeConversionMode_Values returns all elements of the Vc3ScanTypeConversionMode enum
func Vc3ScanTypeConversionMode_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, Vc3ScanTypeConversionModeInterlaced, Vc3ScanTypeConversionModeInterlacedOptimize)
	return values
}
31514
// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
// second (fps). Enable slow PAL to create a 25 fps output by relabeling the
// video frames and resampling your audio. Note that enabling this setting will
// slightly reduce the duration of your video. Related settings: You must also
// set Framerate to 25. In your JSON job specification, set (framerateControl)
// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
// 1.
const (
	// Vc3SlowPalDisabled is a Vc3SlowPal enum value
	Vc3SlowPalDisabled = "DISABLED"

	// Vc3SlowPalEnabled is a Vc3SlowPal enum value
	Vc3SlowPalEnabled = "ENABLED"
)

// Vc3SlowPal_Values returns all elements of the Vc3SlowPal enum
func Vc3SlowPal_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, Vc3SlowPalDisabled, Vc3SlowPalEnabled)
	return values
}
31537
// When you do frame rate conversion from 23.976 frames per second (fps) to
// 29.97 fps, and your output scan type is interlaced, you can optionally enable
// hard telecine (HARD) to create a smoother picture. When you keep the default
// value, None (NONE), MediaConvert does a standard frame rate conversion to
// 29.97 without doing anything with the field polarity to create a smoother
// picture.
const (
	// Vc3TelecineNone is a Vc3Telecine enum value
	Vc3TelecineNone = "NONE"

	// Vc3TelecineHard is a Vc3Telecine enum value
	Vc3TelecineHard = "HARD"
)

// Vc3Telecine_Values returns all elements of the Vc3Telecine enum
func Vc3Telecine_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, Vc3TelecineNone, Vc3TelecineHard)
	return values
}
31559
// The action to take on content advisory XDS packets. If you select PASSTHROUGH,
// packets will not be changed. If you select STRIP, any packets will be removed
// in output captions.
const (
	// VchipActionPassthrough is a VchipAction enum value
	VchipActionPassthrough = "PASSTHROUGH"

	// VchipActionStrip is a VchipAction enum value
	VchipActionStrip = "STRIP"
)

// VchipAction_Values returns all elements of the VchipAction enum
func VchipAction_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, VchipActionPassthrough, VchipActionStrip)
	return values
}
31578
// Type of video codec
const (
	// VideoCodecAv1 is a VideoCodec enum value
	VideoCodecAv1 = "AV1"

	// VideoCodecAvcIntra is a VideoCodec enum value
	VideoCodecAvcIntra = "AVC_INTRA"

	// VideoCodecFrameCapture is a VideoCodec enum value
	VideoCodecFrameCapture = "FRAME_CAPTURE"

	// VideoCodecH264 is a VideoCodec enum value
	VideoCodecH264 = "H_264"

	// VideoCodecH265 is a VideoCodec enum value
	VideoCodecH265 = "H_265"

	// VideoCodecMpeg2 is a VideoCodec enum value
	VideoCodecMpeg2 = "MPEG2"

	// VideoCodecProres is a VideoCodec enum value
	VideoCodecProres = "PRORES"

	// VideoCodecVc3 is a VideoCodec enum value
	VideoCodecVc3 = "VC3"

	// VideoCodecVp8 is a VideoCodec enum value
	VideoCodecVp8 = "VP8"

	// VideoCodecVp9 is a VideoCodec enum value
	VideoCodecVp9 = "VP9"

	// VideoCodecXavc is a VideoCodec enum value
	VideoCodecXavc = "XAVC"
)

// VideoCodec_Values returns all elements of the VideoCodec enum
func VideoCodec_Values() []string {
	values := make([]string, 0, 11)
	values = append(values,
		VideoCodecAv1,
		VideoCodecAvcIntra,
		VideoCodecFrameCapture,
		VideoCodecH264,
		VideoCodecH265,
		VideoCodecMpeg2,
		VideoCodecProres,
		VideoCodecVc3,
		VideoCodecVp8,
		VideoCodecVp9,
		VideoCodecXavc,
	)
	return values
}
31631
// Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode
// insertion when the input frame rate is identical to the output frame rate.
// To include timecodes in this output, set Timecode insertion (VideoTimecodeInsertion)
// to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED.
// When the service inserts timecodes in an output, by default, it uses any
// embedded timecodes from the input. If none are present, the service will
// set the timecode for the first output frame to zero. To change this default
// behavior, adjust the settings under Timecode configuration (TimecodeConfig).
// In the console, these settings are located under Job > Job settings > Timecode
// configuration. Note - Timecode source under input settings (InputTimecodeSource)
// does not affect the timecodes that are inserted in the output. Source under
// Job settings > Timecode configuration (TimecodeSource) does.
const (
	// VideoTimecodeInsertionDisabled is a VideoTimecodeInsertion enum value
	VideoTimecodeInsertionDisabled = "DISABLED"

	// VideoTimecodeInsertionPicTimingSei is a VideoTimecodeInsertion enum value
	VideoTimecodeInsertionPicTimingSei = "PIC_TIMING_SEI"
)

// VideoTimecodeInsertion_Values returns all elements of the VideoTimecodeInsertion enum
func VideoTimecodeInsertion_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, VideoTimecodeInsertionDisabled, VideoTimecodeInsertionPicTimingSei)
	return values
}
31659
// If you are using the console, use the Framerate setting to specify the frame
// rate for this output. If you want to keep the same frame rate as the input
// video, choose Follow source. If you want to do frame rate conversion, choose
// a frame rate from the dropdown list or choose Custom. The framerates shown
// in the dropdown list are decimal approximations of fractions. If you choose
// Custom, specify your frame rate as a fraction. If you are creating your transcoding
// job specification as a JSON file without the console, use FramerateControl
// to specify which value the service uses for the frame rate for this output.
// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
// from the input. Choose SPECIFIED if you want the service to use the frame
// rate you specify in the settings FramerateNumerator and FramerateDenominator.
const (
	// Vp8FramerateControlInitializeFromSource is a Vp8FramerateControl enum value
	Vp8FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Vp8FramerateControlSpecified is a Vp8FramerateControl enum value
	Vp8FramerateControlSpecified = "SPECIFIED"
)

// Vp8FramerateControl_Values returns all elements of the Vp8FramerateControl enum
func Vp8FramerateControl_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, Vp8FramerateControlInitializeFromSource, Vp8FramerateControlSpecified)
	return values
}
31686
// Choose the method that you want MediaConvert to use when increasing or decreasing
// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
// smooth picture, but might introduce undesirable video artifacts. For complex
// frame rate conversions, especially if your source video has already been
// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
// motion-compensated interpolation. FrameFormer chooses the best conversion
// method frame by frame. Note that using FrameFormer increases the transcoding
// time and incurs a significant add-on cost.
const (
	// Vp8FramerateConversionAlgorithmDuplicateDrop is a Vp8FramerateConversionAlgorithm enum value
	Vp8FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// Vp8FramerateConversionAlgorithmInterpolate is a Vp8FramerateConversionAlgorithm enum value
	Vp8FramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// Vp8FramerateConversionAlgorithmFrameformer is a Vp8FramerateConversionAlgorithm enum value
	Vp8FramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// Vp8FramerateConversionAlgorithm_Values returns all elements of the Vp8FramerateConversionAlgorithm enum
func Vp8FramerateConversionAlgorithm_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		Vp8FramerateConversionAlgorithmDuplicateDrop,
		Vp8FramerateConversionAlgorithmInterpolate,
		Vp8FramerateConversionAlgorithmFrameformer,
	)
	return values
}
31716
// Optional. Specify how the service determines the pixel aspect ratio (PAR)
// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
// uses the PAR from your input video for your output. To specify a different
// PAR in the console, choose any value other than Follow source. To specify
// a different PAR by editing the JSON job specification, choose SPECIFIED.
// When you choose SPECIFIED for this setting, you must also specify values
// for the parNumerator and parDenominator settings.
const (
	// Vp8ParControlInitializeFromSource is a Vp8ParControl enum value
	Vp8ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Vp8ParControlSpecified is a Vp8ParControl enum value
	Vp8ParControlSpecified = "SPECIFIED"
)

// Vp8ParControl_Values returns all elements of the Vp8ParControl enum
func Vp8ParControl_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, Vp8ParControlInitializeFromSource, Vp8ParControlSpecified)
	return values
}
31739
// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
// want to trade off encoding speed for output video quality. The default behavior
// is faster, lower quality, multi-pass encoding.
const (
	// Vp8QualityTuningLevelMultiPass is a Vp8QualityTuningLevel enum value
	Vp8QualityTuningLevelMultiPass = "MULTI_PASS"

	// Vp8QualityTuningLevelMultiPassHq is a Vp8QualityTuningLevel enum value
	Vp8QualityTuningLevelMultiPassHq = "MULTI_PASS_HQ"
)

// Vp8QualityTuningLevel_Values returns all elements of the Vp8QualityTuningLevel enum
func Vp8QualityTuningLevel_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, Vp8QualityTuningLevelMultiPass, Vp8QualityTuningLevelMultiPassHq)
	return values
}
31758
// With the VP8 codec, you can use only the variable bitrate (VBR) rate control
// mode.
const (
	// Vp8RateControlModeVbr is a Vp8RateControlMode enum value
	Vp8RateControlModeVbr = "VBR"
)

// Vp8RateControlMode_Values returns all elements of the Vp8RateControlMode enum
func Vp8RateControlMode_Values() []string {
	values := make([]string, 0, 1)
	values = append(values, Vp8RateControlModeVbr)
	return values
}
31772
// If you are using the console, use the Framerate setting to specify the frame
// rate for this output. If you want to keep the same frame rate as the input
// video, choose Follow source. If you want to do frame rate conversion, choose
// a frame rate from the dropdown list or choose Custom. The framerates shown
// in the dropdown list are decimal approximations of fractions. If you choose
// Custom, specify your frame rate as a fraction. If you are creating your transcoding
// job specification as a JSON file without the console, use FramerateControl
// to specify which value the service uses for the frame rate for this output.
// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
// from the input. Choose SPECIFIED if you want the service to use the frame
// rate you specify in the settings FramerateNumerator and FramerateDenominator.
const (
	// Vp9FramerateControlInitializeFromSource is a Vp9FramerateControl enum value
	Vp9FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Vp9FramerateControlSpecified is a Vp9FramerateControl enum value
	Vp9FramerateControlSpecified = "SPECIFIED"
)

// Vp9FramerateControl_Values returns all elements of the Vp9FramerateControl enum
func Vp9FramerateControl_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, Vp9FramerateControlInitializeFromSource, Vp9FramerateControlSpecified)
	return values
}
31799
// Choose the method that you want MediaConvert to use when increasing or decreasing
// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
// smooth picture, but might introduce undesirable video artifacts. For complex
// frame rate conversions, especially if your source video has already been
// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
// motion-compensated interpolation. FrameFormer chooses the best conversion
// method frame by frame. Note that using FrameFormer increases the transcoding
// time and incurs a significant add-on cost.
const (
	// Vp9FramerateConversionAlgorithmDuplicateDrop is a Vp9FramerateConversionAlgorithm enum value
	Vp9FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// Vp9FramerateConversionAlgorithmInterpolate is a Vp9FramerateConversionAlgorithm enum value
	Vp9FramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// Vp9FramerateConversionAlgorithmFrameformer is a Vp9FramerateConversionAlgorithm enum value
	Vp9FramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// Vp9FramerateConversionAlgorithm_Values returns all elements of the Vp9FramerateConversionAlgorithm enum
func Vp9FramerateConversionAlgorithm_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		Vp9FramerateConversionAlgorithmDuplicateDrop,
		Vp9FramerateConversionAlgorithmInterpolate,
		Vp9FramerateConversionAlgorithmFrameformer,
	)
	return values
}
31829
// Optional. Specify how the service determines the pixel aspect ratio (PAR)
// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
// uses the PAR from your input video for your output. To specify a different
// PAR in the console, choose any value other than Follow source. To specify
// a different PAR by editing the JSON job specification, choose SPECIFIED.
// When you choose SPECIFIED for this setting, you must also specify values
// for the parNumerator and parDenominator settings.
const (
	// Vp9ParControlInitializeFromSource is a Vp9ParControl enum value
	Vp9ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Vp9ParControlSpecified is a Vp9ParControl enum value
	Vp9ParControlSpecified = "SPECIFIED"
)

// Vp9ParControl_Values returns all elements of the Vp9ParControl enum
func Vp9ParControl_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, Vp9ParControlInitializeFromSource, Vp9ParControlSpecified)
	return values
}
31852
// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
// want to trade off encoding speed for output video quality. The default behavior
// is faster, lower quality, multi-pass encoding.
const (
	// Vp9QualityTuningLevelMultiPass is a Vp9QualityTuningLevel enum value
	Vp9QualityTuningLevelMultiPass = "MULTI_PASS"

	// Vp9QualityTuningLevelMultiPassHq is a Vp9QualityTuningLevel enum value
	Vp9QualityTuningLevelMultiPassHq = "MULTI_PASS_HQ"
)

// Vp9QualityTuningLevel_Values returns all elements of the Vp9QualityTuningLevel enum
func Vp9QualityTuningLevel_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, Vp9QualityTuningLevelMultiPass, Vp9QualityTuningLevelMultiPassHq)
	return values
}
31871
// With the VP9 codec, you can use only the variable bitrate (VBR) rate control
// mode.
const (
	// Vp9RateControlModeVbr is a Vp9RateControlMode enum value
	Vp9RateControlModeVbr = "VBR"
)

// Vp9RateControlMode_Values returns all elements of the Vp9RateControlMode enum
func Vp9RateControlMode_Values() []string {
	values := make([]string, 0, 1)
	values = append(values, Vp9RateControlModeVbr)
	return values
}
31885
// Optional. Ignore this setting unless Nagra support directs you to specify
// a value. When you don't specify a value here, the Nagra NexGuard library
// uses its default value.
const (
	// WatermarkingStrengthLightest is a WatermarkingStrength enum value
	WatermarkingStrengthLightest = "LIGHTEST"

	// WatermarkingStrengthLighter is a WatermarkingStrength enum value
	WatermarkingStrengthLighter = "LIGHTER"

	// WatermarkingStrengthDefault is a WatermarkingStrength enum value
	WatermarkingStrengthDefault = "DEFAULT"

	// WatermarkingStrengthStronger is a WatermarkingStrength enum value
	WatermarkingStrengthStronger = "STRONGER"

	// WatermarkingStrengthStrongest is a WatermarkingStrength enum value
	WatermarkingStrengthStrongest = "STRONGEST"
)

// WatermarkingStrength_Values returns all elements of the WatermarkingStrength enum
func WatermarkingStrength_Values() []string {
	values := make([]string, 0, 5)
	values = append(values,
		WatermarkingStrengthLightest,
		WatermarkingStrengthLighter,
		WatermarkingStrengthDefault,
		WatermarkingStrengthStronger,
		WatermarkingStrengthStrongest,
	)
	return values
}
31916
// The service defaults to using RIFF for WAV outputs. If your output audio
// is likely to exceed 4 GB in file size, or if you otherwise need the extended
// support of the RF64 format, set your output WAV file format to RF64.
const (
	// WavFormatRiff is a WavFormat enum value
	WavFormatRiff = "RIFF"

	// WavFormatRf64 is a WavFormat enum value
	WavFormatRf64 = "RF64"
)

// WavFormat_Values returns all elements of the WavFormat enum
func WavFormat_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, WavFormatRiff, WavFormatRf64)
	return values
}
31935
// Choose Enabled (ENABLED) to have MediaConvert use the font style, color,
// and position information from the captions source in the input. Keep the
// default value, Disabled (DISABLED), for simplified output captions.
const (
	// WebvttStylePassthroughEnabled is a WebvttStylePassthrough enum value
	WebvttStylePassthroughEnabled = "ENABLED"

	// WebvttStylePassthroughDisabled is a WebvttStylePassthrough enum value
	WebvttStylePassthroughDisabled = "DISABLED"
)

// WebvttStylePassthrough_Values returns all elements of the WebvttStylePassthrough enum
func WebvttStylePassthrough_Values() []string {
	values := make([]string, 0, 2)
	values = append(values, WebvttStylePassthroughEnabled, WebvttStylePassthroughDisabled)
	return values
}
31954
// Specify the XAVC Intra 4k (CBG) Class to set the bitrate of your output.
// Outputs of the same class have similar image quality over the operating points
// that are valid for that class.
const (
	// Xavc4kIntraCbgProfileClassClass100 is a Xavc4kIntraCbgProfileClass enum value
	Xavc4kIntraCbgProfileClassClass100 = "CLASS_100"

	// Xavc4kIntraCbgProfileClassClass300 is a Xavc4kIntraCbgProfileClass enum value
	Xavc4kIntraCbgProfileClassClass300 = "CLASS_300"

	// Xavc4kIntraCbgProfileClassClass480 is a Xavc4kIntraCbgProfileClass enum value
	Xavc4kIntraCbgProfileClassClass480 = "CLASS_480"
)

// Xavc4kIntraCbgProfileClass_Values returns all elements of the Xavc4kIntraCbgProfileClass enum
func Xavc4kIntraCbgProfileClass_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		Xavc4kIntraCbgProfileClassClass100,
		Xavc4kIntraCbgProfileClassClass300,
		Xavc4kIntraCbgProfileClassClass480,
	)
	return values
}
31977
// Specify the XAVC Intra 4k (VBR) Class to set the bitrate of your output.
// Outputs of the same class have similar image quality over the operating points
// that are valid for that class.
const (
	// Xavc4kIntraVbrProfileClassClass100 is a Xavc4kIntraVbrProfileClass enum value
	Xavc4kIntraVbrProfileClassClass100 = "CLASS_100"

	// Xavc4kIntraVbrProfileClassClass300 is a Xavc4kIntraVbrProfileClass enum value
	Xavc4kIntraVbrProfileClassClass300 = "CLASS_300"

	// Xavc4kIntraVbrProfileClassClass480 is a Xavc4kIntraVbrProfileClass enum value
	Xavc4kIntraVbrProfileClassClass480 = "CLASS_480"
)

// Xavc4kIntraVbrProfileClass_Values returns all elements of the Xavc4kIntraVbrProfileClass enum
func Xavc4kIntraVbrProfileClass_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		Xavc4kIntraVbrProfileClassClass100,
		Xavc4kIntraVbrProfileClassClass300,
		Xavc4kIntraVbrProfileClassClass480,
	)
	return values
}
32000
// Specify the XAVC 4k (Long GOP) Bitrate Class to set the bitrate of your output.
// Outputs of the same class have similar image quality over the operating points
// that are valid for that class.
const (
	// Xavc4kProfileBitrateClassBitrateClass100 is a Xavc4kProfileBitrateClass enum value
	Xavc4kProfileBitrateClassBitrateClass100 = "BITRATE_CLASS_100"

	// Xavc4kProfileBitrateClassBitrateClass140 is a Xavc4kProfileBitrateClass enum value
	Xavc4kProfileBitrateClassBitrateClass140 = "BITRATE_CLASS_140"

	// Xavc4kProfileBitrateClassBitrateClass200 is a Xavc4kProfileBitrateClass enum value
	Xavc4kProfileBitrateClassBitrateClass200 = "BITRATE_CLASS_200"
)

// Xavc4kProfileBitrateClass_Values returns all elements of the Xavc4kProfileBitrateClass enum
func Xavc4kProfileBitrateClass_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		Xavc4kProfileBitrateClassBitrateClass100,
		Xavc4kProfileBitrateClassBitrateClass140,
		Xavc4kProfileBitrateClassBitrateClass200,
	)
	return values
}
32023
// Specify the codec profile for this output. Choose High, 8-bit, 4:2:0 (HIGH)
// or High, 10-bit, 4:2:2 (HIGH_422). These profiles are specified in ITU-T
// H.264.
const (
	// Xavc4kProfileCodecProfileHigh is a Xavc4kProfileCodecProfile enum value
	Xavc4kProfileCodecProfileHigh = "HIGH"

	// Xavc4kProfileCodecProfileHigh422 is a Xavc4kProfileCodecProfile enum value
	Xavc4kProfileCodecProfileHigh422 = "HIGH_422"
)

// Xavc4kProfileCodecProfile_Values returns all elements of the Xavc4kProfileCodecProfile enum
func Xavc4kProfileCodecProfile_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		Xavc4kProfileCodecProfileHigh,
		Xavc4kProfileCodecProfileHigh422,
	)
	return values
}
32042
// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
// want to trade off encoding speed for output video quality. The default behavior
// is faster, lower quality, single-pass encoding.
const (
	// Xavc4kProfileQualityTuningLevelSinglePass is a Xavc4kProfileQualityTuningLevel enum value
	Xavc4kProfileQualityTuningLevelSinglePass = "SINGLE_PASS"

	// Xavc4kProfileQualityTuningLevelSinglePassHq is a Xavc4kProfileQualityTuningLevel enum value
	Xavc4kProfileQualityTuningLevelSinglePassHq = "SINGLE_PASS_HQ"

	// Xavc4kProfileQualityTuningLevelMultiPassHq is a Xavc4kProfileQualityTuningLevel enum value
	Xavc4kProfileQualityTuningLevelMultiPassHq = "MULTI_PASS_HQ"
)

// Xavc4kProfileQualityTuningLevel_Values returns all elements of the Xavc4kProfileQualityTuningLevel enum
func Xavc4kProfileQualityTuningLevel_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		Xavc4kProfileQualityTuningLevelSinglePass,
		Xavc4kProfileQualityTuningLevelSinglePassHq,
		Xavc4kProfileQualityTuningLevelMultiPassHq,
	)
	return values
}
32065
// Keep the default value, Auto (AUTO), for this setting to have MediaConvert
// automatically apply the best types of quantization for your video content.
// When you want to apply your quantization settings manually, you must set
// Adaptive quantization (adaptiveQuantization) to a value other than Auto (AUTO).
// Use this setting to specify the strength of any adaptive quantization filters
// that you enable. If you don't want MediaConvert to do any adaptive quantization
// in this transcode, set Adaptive quantization to Off (OFF). Related settings:
// The value that you choose here applies to the following settings: Flicker
// adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization
// (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization).
const (
	// XavcAdaptiveQuantizationOff is a XavcAdaptiveQuantization enum value
	XavcAdaptiveQuantizationOff = "OFF"

	// XavcAdaptiveQuantizationAuto is a XavcAdaptiveQuantization enum value
	XavcAdaptiveQuantizationAuto = "AUTO"

	// XavcAdaptiveQuantizationLow is a XavcAdaptiveQuantization enum value
	XavcAdaptiveQuantizationLow = "LOW"

	// XavcAdaptiveQuantizationMedium is a XavcAdaptiveQuantization enum value
	XavcAdaptiveQuantizationMedium = "MEDIUM"

	// XavcAdaptiveQuantizationHigh is a XavcAdaptiveQuantization enum value
	XavcAdaptiveQuantizationHigh = "HIGH"

	// XavcAdaptiveQuantizationHigher is a XavcAdaptiveQuantization enum value
	XavcAdaptiveQuantizationHigher = "HIGHER"

	// XavcAdaptiveQuantizationMax is a XavcAdaptiveQuantization enum value
	XavcAdaptiveQuantizationMax = "MAX"
)

// XavcAdaptiveQuantization_Values returns all elements of the XavcAdaptiveQuantization enum
func XavcAdaptiveQuantization_Values() []string {
	values := make([]string, 0, 7)
	values = append(values,
		XavcAdaptiveQuantizationOff,
		XavcAdaptiveQuantizationAuto,
		XavcAdaptiveQuantizationLow,
		XavcAdaptiveQuantizationMedium,
		XavcAdaptiveQuantizationHigh,
		XavcAdaptiveQuantizationHigher,
		XavcAdaptiveQuantizationMax,
	)
	return values
}
32111
// Optional. Choose a specific entropy encoding mode only when you want to override
// XAVC recommendations. If you choose the value auto, MediaConvert uses the
// mode that the XAVC file format specifies given this output's operating point.
const (
	// XavcEntropyEncodingAuto is a XavcEntropyEncoding enum value
	XavcEntropyEncodingAuto = "AUTO"

	// XavcEntropyEncodingCabac is a XavcEntropyEncoding enum value
	XavcEntropyEncodingCabac = "CABAC"

	// XavcEntropyEncodingCavlc is a XavcEntropyEncoding enum value
	XavcEntropyEncodingCavlc = "CAVLC"
)

// XavcEntropyEncoding_Values returns all elements of the XavcEntropyEncoding enum
func XavcEntropyEncoding_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		XavcEntropyEncodingAuto,
		XavcEntropyEncodingCabac,
		XavcEntropyEncodingCavlc,
	)
	return values
}
32134
// The best way to set up adaptive quantization is to keep the default value,
// Auto (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization).
// When you do so, MediaConvert automatically applies the best types of quantization
// for your video content. Include this setting in your JSON job specification
// only when you choose to change the default value for Adaptive quantization.
// Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears
// as a visual flicker that can arise when the encoder saves bits by copying
// some macroblocks many times from frame to frame, and then refreshes them
// at the I-frame. When you enable this setting, the encoder updates these macroblocks
// slightly more often to smooth out the flicker. This setting is disabled by
// default. Related setting: In addition to enabling this setting, you must
// also set Adaptive quantization (adaptiveQuantization) to a value other than
// Off (OFF) or Auto (AUTO). Use Adaptive quantization to adjust the degree
// of smoothing that Flicker adaptive quantization provides.
const (
	// XavcFlickerAdaptiveQuantizationDisabled is a XavcFlickerAdaptiveQuantization enum value
	XavcFlickerAdaptiveQuantizationDisabled = "DISABLED"

	// XavcFlickerAdaptiveQuantizationEnabled is a XavcFlickerAdaptiveQuantization enum value
	XavcFlickerAdaptiveQuantizationEnabled = "ENABLED"
)

// XavcFlickerAdaptiveQuantization_Values returns all elements of the XavcFlickerAdaptiveQuantization enum
func XavcFlickerAdaptiveQuantization_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		XavcFlickerAdaptiveQuantizationDisabled,
		XavcFlickerAdaptiveQuantizationEnabled,
	)
	return values
}
32164
// If you are using the console, use the Frame rate setting to specify the frame
// rate for this output. If you want to keep the same frame rate as the input
// video, choose Follow source. If you want to do frame rate conversion, choose
// a frame rate from the dropdown list. The framerates shown in the dropdown
// list are decimal approximations of fractions. If you are creating your transcoding
// job specification as a JSON file without the console, use FramerateControl
// to specify which value the service uses for the frame rate for this output.
// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
// from the input. Choose SPECIFIED if you want the service to use the frame
// rate that you specify in the settings FramerateNumerator and FramerateDenominator.
const (
	// XavcFramerateControlInitializeFromSource is a XavcFramerateControl enum value
	XavcFramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// XavcFramerateControlSpecified is a XavcFramerateControl enum value
	XavcFramerateControlSpecified = "SPECIFIED"
)

// XavcFramerateControl_Values returns all elements of the XavcFramerateControl enum
func XavcFramerateControl_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		XavcFramerateControlInitializeFromSource,
		XavcFramerateControlSpecified,
	)
	return values
}
32190
// Choose the method that you want MediaConvert to use when increasing or decreasing
// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
// smooth picture, but might introduce undesirable video artifacts. For complex
// frame rate conversions, especially if your source video has already been
// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
// motion-compensated interpolation. FrameFormer chooses the best conversion
// method frame by frame. Note that using FrameFormer increases the transcoding
// time and incurs a significant add-on cost.
const (
	// XavcFramerateConversionAlgorithmDuplicateDrop is a XavcFramerateConversionAlgorithm enum value
	XavcFramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// XavcFramerateConversionAlgorithmInterpolate is a XavcFramerateConversionAlgorithm enum value
	XavcFramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// XavcFramerateConversionAlgorithmFrameformer is a XavcFramerateConversionAlgorithm enum value
	XavcFramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// XavcFramerateConversionAlgorithm_Values returns all elements of the XavcFramerateConversionAlgorithm enum
func XavcFramerateConversionAlgorithm_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		XavcFramerateConversionAlgorithmDuplicateDrop,
		XavcFramerateConversionAlgorithmInterpolate,
		XavcFramerateConversionAlgorithmFrameformer,
	)
	return values
}
32220
// Specify whether the encoder uses B-frames as reference frames for other pictures
// in the same GOP. Choose Allow (ENABLED) to allow the encoder to use B-frames
// as reference frames. Choose Don't allow (DISABLED) to prevent the encoder
// from using B-frames as reference frames.
const (
	// XavcGopBReferenceDisabled is a XavcGopBReference enum value
	XavcGopBReferenceDisabled = "DISABLED"

	// XavcGopBReferenceEnabled is a XavcGopBReference enum value
	XavcGopBReferenceEnabled = "ENABLED"
)

// XavcGopBReference_Values returns all elements of the XavcGopBReference enum
func XavcGopBReference_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		XavcGopBReferenceDisabled,
		XavcGopBReferenceEnabled,
	)
	return values
}
32240
// Specify the XAVC Intra HD (CBG) Class to set the bitrate of your output.
// Outputs of the same class have similar image quality over the operating points
// that are valid for that class.
const (
	// XavcHdIntraCbgProfileClassClass50 is a XavcHdIntraCbgProfileClass enum value
	XavcHdIntraCbgProfileClassClass50 = "CLASS_50"

	// XavcHdIntraCbgProfileClassClass100 is a XavcHdIntraCbgProfileClass enum value
	XavcHdIntraCbgProfileClassClass100 = "CLASS_100"

	// XavcHdIntraCbgProfileClassClass200 is a XavcHdIntraCbgProfileClass enum value
	XavcHdIntraCbgProfileClassClass200 = "CLASS_200"
)

// XavcHdIntraCbgProfileClass_Values returns all elements of the XavcHdIntraCbgProfileClass enum
func XavcHdIntraCbgProfileClass_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		XavcHdIntraCbgProfileClassClass50,
		XavcHdIntraCbgProfileClassClass100,
		XavcHdIntraCbgProfileClassClass200,
	)
	return values
}
32263
// Specify the XAVC HD (Long GOP) Bitrate Class to set the bitrate of your output.
// Outputs of the same class have similar image quality over the operating points
// that are valid for that class.
const (
	// XavcHdProfileBitrateClassBitrateClass25 is a XavcHdProfileBitrateClass enum value
	XavcHdProfileBitrateClassBitrateClass25 = "BITRATE_CLASS_25"

	// XavcHdProfileBitrateClassBitrateClass35 is a XavcHdProfileBitrateClass enum value
	XavcHdProfileBitrateClassBitrateClass35 = "BITRATE_CLASS_35"

	// XavcHdProfileBitrateClassBitrateClass50 is a XavcHdProfileBitrateClass enum value
	XavcHdProfileBitrateClassBitrateClass50 = "BITRATE_CLASS_50"
)

// XavcHdProfileBitrateClass_Values returns all elements of the XavcHdProfileBitrateClass enum
func XavcHdProfileBitrateClass_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		XavcHdProfileBitrateClassBitrateClass25,
		XavcHdProfileBitrateClassBitrateClass35,
		XavcHdProfileBitrateClassBitrateClass50,
	)
	return values
}
32286
// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
// want to trade off encoding speed for output video quality. The default behavior
// is faster, lower quality, single-pass encoding.
const (
	// XavcHdProfileQualityTuningLevelSinglePass is a XavcHdProfileQualityTuningLevel enum value
	XavcHdProfileQualityTuningLevelSinglePass = "SINGLE_PASS"

	// XavcHdProfileQualityTuningLevelSinglePassHq is a XavcHdProfileQualityTuningLevel enum value
	XavcHdProfileQualityTuningLevelSinglePassHq = "SINGLE_PASS_HQ"

	// XavcHdProfileQualityTuningLevelMultiPassHq is a XavcHdProfileQualityTuningLevel enum value
	XavcHdProfileQualityTuningLevelMultiPassHq = "MULTI_PASS_HQ"
)

// XavcHdProfileQualityTuningLevel_Values returns all elements of the XavcHdProfileQualityTuningLevel enum
func XavcHdProfileQualityTuningLevel_Values() []string {
	values := make([]string, 0, 3)
	values = append(values,
		XavcHdProfileQualityTuningLevelSinglePass,
		XavcHdProfileQualityTuningLevelSinglePassHq,
		XavcHdProfileQualityTuningLevelMultiPassHq,
	)
	return values
}
32309
// Ignore this setting unless you set Frame rate (framerateNumerator divided
// by framerateDenominator) to 29.970. If your input framerate is 23.976, choose
// Hard (HARD). Otherwise, keep the default value None (NONE). For more information,
// see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html.
const (
	// XavcHdProfileTelecineNone is a XavcHdProfileTelecine enum value
	XavcHdProfileTelecineNone = "NONE"

	// XavcHdProfileTelecineHard is a XavcHdProfileTelecine enum value
	XavcHdProfileTelecineHard = "HARD"
)

// XavcHdProfileTelecine_Values returns all elements of the XavcHdProfileTelecine enum
func XavcHdProfileTelecine_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		XavcHdProfileTelecineNone,
		XavcHdProfileTelecineHard,
	)
	return values
}
32329
// Choose the scan line type for the output. Keep the default value, Progressive
// (PROGRESSIVE) to create a progressive output, regardless of the scan type
// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
// to create an output that's interlaced with the same field polarity throughout.
// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
// to produce outputs with the same field polarity as the source. For jobs that
// have multiple inputs, the output field polarity might change over the course
// of the output. Follow behavior depends on the input scan type. If the source
// is interlaced, the output will be interlaced with the same polarity as the
// source. If the source is progressive, the output will be interlaced with
// top field bottom field first, depending on which of the Follow options you
// choose.
const (
	// XavcInterlaceModeProgressive is a XavcInterlaceMode enum value
	XavcInterlaceModeProgressive = "PROGRESSIVE"

	// XavcInterlaceModeTopField is a XavcInterlaceMode enum value
	XavcInterlaceModeTopField = "TOP_FIELD"

	// XavcInterlaceModeBottomField is a XavcInterlaceMode enum value
	XavcInterlaceModeBottomField = "BOTTOM_FIELD"

	// XavcInterlaceModeFollowTopField is a XavcInterlaceMode enum value
	XavcInterlaceModeFollowTopField = "FOLLOW_TOP_FIELD"

	// XavcInterlaceModeFollowBottomField is a XavcInterlaceMode enum value
	XavcInterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD"
)

// XavcInterlaceMode_Values returns all elements of the XavcInterlaceMode enum
func XavcInterlaceMode_Values() []string {
	values := make([]string, 0, 5)
	values = append(values,
		XavcInterlaceModeProgressive,
		XavcInterlaceModeTopField,
		XavcInterlaceModeBottomField,
		XavcInterlaceModeFollowTopField,
		XavcInterlaceModeFollowBottomField,
	)
	return values
}
32369
// Specify the XAVC profile for this output. For more information, see the Sony
// documentation at https://www.xavc-info.org/. Note that MediaConvert doesn't
// support the interlaced video XAVC operating points for XAVC_HD_INTRA_CBG.
// To create an interlaced XAVC output, choose the profile XAVC_HD.
const (
	// XavcProfileXavcHdIntraCbg is a XavcProfile enum value
	XavcProfileXavcHdIntraCbg = "XAVC_HD_INTRA_CBG"

	// XavcProfileXavc4kIntraCbg is a XavcProfile enum value
	XavcProfileXavc4kIntraCbg = "XAVC_4K_INTRA_CBG"

	// XavcProfileXavc4kIntraVbr is a XavcProfile enum value
	XavcProfileXavc4kIntraVbr = "XAVC_4K_INTRA_VBR"

	// XavcProfileXavcHd is a XavcProfile enum value
	XavcProfileXavcHd = "XAVC_HD"

	// XavcProfileXavc4k is a XavcProfile enum value
	XavcProfileXavc4k = "XAVC_4K"
)

// XavcProfile_Values returns all elements of the XavcProfile enum
func XavcProfile_Values() []string {
	values := make([]string, 0, 5)
	values = append(values,
		XavcProfileXavcHdIntraCbg,
		XavcProfileXavc4kIntraCbg,
		XavcProfileXavc4kIntraVbr,
		XavcProfileXavcHd,
		XavcProfileXavc4k,
	)
	return values
}
32401
// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
// second (fps). Enable slow PAL to create a 25 fps output by relabeling the
// video frames and resampling your audio. Note that enabling this setting will
// slightly reduce the duration of your video. Related settings: You must also
// set Frame rate to 25. In your JSON job specification, set (framerateControl)
// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
// 1.
const (
	// XavcSlowPalDisabled is a XavcSlowPal enum value
	XavcSlowPalDisabled = "DISABLED"

	// XavcSlowPalEnabled is a XavcSlowPal enum value
	XavcSlowPalEnabled = "ENABLED"
)

// XavcSlowPal_Values returns all elements of the XavcSlowPal enum
func XavcSlowPal_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		XavcSlowPalDisabled,
		XavcSlowPalEnabled,
	)
	return values
}
32424
// The best way to set up adaptive quantization is to keep the default value,
// Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization).
// When you do so, MediaConvert automatically applies the best types of quantization
// for your video content. Include this setting in your JSON job specification
// only when you choose to change the default value for Adaptive quantization.
// For this setting, keep the default value, Enabled (ENABLED), to adjust quantization
// within each frame based on spatial variation of content complexity. When
// you enable this feature, the encoder uses fewer bits on areas that can sustain
// more distortion with no noticeable visual degradation and uses more bits
// on areas where any small distortion will be noticeable. For example, complex
// textured blocks are encoded with fewer bits and smooth textured blocks are
// encoded with more bits. Enabling this feature will almost always improve
// your video quality. Note, though, that this feature doesn't take into account
// where the viewer's attention is likely to be. If viewers are likely to be
// focusing their attention on a part of the screen with a lot of complex texture,
// you might choose to disable this feature. Related setting: When you enable
// spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization)
// depending on your content. For homogeneous content, such as cartoons and
// video games, set it to Low. For content with a wider variety of textures,
// set it to High or Higher.
const (
	// XavcSpatialAdaptiveQuantizationDisabled is a XavcSpatialAdaptiveQuantization enum value
	XavcSpatialAdaptiveQuantizationDisabled = "DISABLED"

	// XavcSpatialAdaptiveQuantizationEnabled is a XavcSpatialAdaptiveQuantization enum value
	XavcSpatialAdaptiveQuantizationEnabled = "ENABLED"
)

// XavcSpatialAdaptiveQuantization_Values returns all elements of the XavcSpatialAdaptiveQuantization enum
func XavcSpatialAdaptiveQuantization_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		XavcSpatialAdaptiveQuantizationDisabled,
		XavcSpatialAdaptiveQuantizationEnabled,
	)
	return values
}
32460
// The best way to set up adaptive quantization is to keep the default value,
// Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization).
// When you do so, MediaConvert automatically applies the best types of quantization
// for your video content. Include this setting in your JSON job specification
// only when you choose to change the default value for Adaptive quantization.
// For this setting, keep the default value, Enabled (ENABLED), to adjust quantization
// within each frame based on temporal variation of content complexity. When
// you enable this feature, the encoder uses fewer bits on areas of the frame
// that aren't moving and uses more bits on complex objects with sharp edges
// that move a lot. For example, this feature improves the readability of text
// tickers on newscasts and scoreboards on sports matches. Enabling this feature
// will almost always improve your video quality. Note, though, that this feature
// doesn't take into account where the viewer's attention is likely to be. If
// viewers are likely to be focusing their attention on a part of the screen
// that doesn't have moving objects with sharp edges, such as sports athletes'
// faces, you might choose to disable this feature. Related setting: When you
// enable temporal adaptive quantization, adjust the strength of the filter
// with the setting Adaptive quantization (adaptiveQuantization).
const (
	// XavcTemporalAdaptiveQuantizationDisabled is a XavcTemporalAdaptiveQuantization enum value
	XavcTemporalAdaptiveQuantizationDisabled = "DISABLED"

	// XavcTemporalAdaptiveQuantizationEnabled is a XavcTemporalAdaptiveQuantization enum value
	XavcTemporalAdaptiveQuantizationEnabled = "ENABLED"
)

// XavcTemporalAdaptiveQuantization_Values returns all elements of the XavcTemporalAdaptiveQuantization enum
func XavcTemporalAdaptiveQuantization_Values() []string {
	values := make([]string, 0, 2)
	values = append(values,
		XavcTemporalAdaptiveQuantizationDisabled,
		XavcTemporalAdaptiveQuantizationEnabled,
	)
	return values
}
32494