// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package mediaconvert

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol"
	"github.com/aws/aws-sdk-go/private/protocol/restjson"
)

const opAssociateCertificate = "AssociateCertificate"

// AssociateCertificateRequest generates a "aws/request.Request" representing the
// client's request for the AssociateCertificate operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See AssociateCertificate for more information on using the AssociateCertificate
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the AssociateCertificateRequest method.
//    req, resp := client.AssociateCertificateRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/AssociateCertificate
func (c *MediaConvert) AssociateCertificateRequest(input *AssociateCertificateInput) (req *request.Request, output *AssociateCertificateOutput) {
	op := &request.Operation{
		Name:       opAssociateCertificate,
		HTTPMethod: "POST",
		HTTPPath:   "/2017-08-29/certificates",
	}

	if input == nil {
		input = &AssociateCertificateInput{}
	}

	output = &AssociateCertificateOutput{}
	req = c.newRequest(op, input, output)
	// The operation has no modeled response body, so swap in the handler that
	// discards it rather than unmarshaling JSON into the output struct.
	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}

// AssociateCertificate API operation for AWS Elemental MediaConvert.
//
// Associates an AWS Certificate Manager (ACM) Amazon Resource Name (ARN) with
// AWS Elemental MediaConvert.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Elemental MediaConvert's
// API operation AssociateCertificate for usage and error information.
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/AssociateCertificate
func (c *MediaConvert) AssociateCertificate(input *AssociateCertificateInput) (*AssociateCertificateOutput, error) {
	req, out := c.AssociateCertificateRequest(input)
	return out, req.Send()
}

// AssociateCertificateWithContext is the same as AssociateCertificate with the addition of
// the ability to pass a context and additional request options.
//
// See AssociateCertificate for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) AssociateCertificateWithContext(ctx aws.Context, input *AssociateCertificateInput, opts ...request.Option) (*AssociateCertificateOutput, error) {
	req, out := c.AssociateCertificateRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opCancelJob = "CancelJob"

// CancelJobRequest generates a "aws/request.Request" representing the
// client's request for the CancelJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See CancelJob for more information on using the CancelJob // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the CancelJobRequest method. // req, resp := client.CancelJobRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CancelJob func (c *MediaConvert) CancelJobRequest(input *CancelJobInput) (req *request.Request, output *CancelJobOutput) { op := &request.Operation{ Name: opCancelJob, HTTPMethod: "DELETE", HTTPPath: "/2017-08-29/jobs/{id}", } if input == nil { input = &CancelJobInput{} } output = &CancelJobOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // CancelJob API operation for AWS Elemental MediaConvert. // // Permanently cancel a job. Once you have canceled a job, you can't start it // again. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Elemental MediaConvert's // API operation CancelJob for usage and error information. 
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CancelJob
func (c *MediaConvert) CancelJob(input *CancelJobInput) (*CancelJobOutput, error) {
	req, out := c.CancelJobRequest(input)
	return out, req.Send()
}

// CancelJobWithContext is the same as CancelJob with the addition of
// the ability to pass a context and additional request options.
//
// See CancelJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) CancelJobWithContext(ctx aws.Context, input *CancelJobInput, opts ...request.Option) (*CancelJobOutput, error) {
	req, out := c.CancelJobRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opCreateJob = "CreateJob"

// CreateJobRequest generates a "aws/request.Request" representing the
// client's request for the CreateJob operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateJob for more information on using the CreateJob
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the CreateJobRequest method.
//    req, resp := client.CreateJobRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJob
func (c *MediaConvert) CreateJobRequest(input *CreateJobInput) (req *request.Request, output *CreateJobOutput) {
	op := &request.Operation{
		Name:       opCreateJob,
		HTTPMethod: "POST",
		HTTPPath:   "/2017-08-29/jobs",
	}

	if input == nil {
		input = &CreateJobInput{}
	}

	output = &CreateJobOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateJob API operation for AWS Elemental MediaConvert.
//
// Create a new transcoding job. For information about jobs and job settings,
// see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Elemental MediaConvert's
// API operation CreateJob for usage and error information.
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJob
func (c *MediaConvert) CreateJob(input *CreateJobInput) (*CreateJobOutput, error) {
	req, out := c.CreateJobRequest(input)
	return out, req.Send()
}

// CreateJobWithContext is the same as CreateJob with the addition of
// the ability to pass a context and additional request options.
//
// See CreateJob for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) CreateJobWithContext(ctx aws.Context, input *CreateJobInput, opts ...request.Option) (*CreateJobOutput, error) {
	req, out := c.CreateJobRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opCreateJobTemplate = "CreateJobTemplate"

// CreateJobTemplateRequest generates a "aws/request.Request" representing the
// client's request for the CreateJobTemplate operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateJobTemplate for more information on using the CreateJobTemplate
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the CreateJobTemplateRequest method.
//    req, resp := client.CreateJobTemplateRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJobTemplate
func (c *MediaConvert) CreateJobTemplateRequest(input *CreateJobTemplateInput) (req *request.Request, output *CreateJobTemplateOutput) {
	op := &request.Operation{
		Name:       opCreateJobTemplate,
		HTTPMethod: "POST",
		HTTPPath:   "/2017-08-29/jobTemplates",
	}

	if input == nil {
		input = &CreateJobTemplateInput{}
	}

	output = &CreateJobTemplateOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateJobTemplate API operation for AWS Elemental MediaConvert.
//
// Create a new job template. For information about job templates see the User
// Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Elemental MediaConvert's
// API operation CreateJobTemplate for usage and error information.
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateJobTemplate
func (c *MediaConvert) CreateJobTemplate(input *CreateJobTemplateInput) (*CreateJobTemplateOutput, error) {
	req, out := c.CreateJobTemplateRequest(input)
	return out, req.Send()
}

// CreateJobTemplateWithContext is the same as CreateJobTemplate with the addition of
// the ability to pass a context and additional request options.
//
// See CreateJobTemplate for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) CreateJobTemplateWithContext(ctx aws.Context, input *CreateJobTemplateInput, opts ...request.Option) (*CreateJobTemplateOutput, error) {
	req, out := c.CreateJobTemplateRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opCreatePreset = "CreatePreset"

// CreatePresetRequest generates a "aws/request.Request" representing the
// client's request for the CreatePreset operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See CreatePreset for more information on using the CreatePreset // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the CreatePresetRequest method. // req, resp := client.CreatePresetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreatePreset func (c *MediaConvert) CreatePresetRequest(input *CreatePresetInput) (req *request.Request, output *CreatePresetOutput) { op := &request.Operation{ Name: opCreatePreset, HTTPMethod: "POST", HTTPPath: "/2017-08-29/presets", } if input == nil { input = &CreatePresetInput{} } output = &CreatePresetOutput{} req = c.newRequest(op, input, output) return } // CreatePreset API operation for AWS Elemental MediaConvert. // // Create a new preset. For information about job templates see the User Guide // at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Elemental MediaConvert's // API operation CreatePreset for usage and error information. 
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreatePreset
func (c *MediaConvert) CreatePreset(input *CreatePresetInput) (*CreatePresetOutput, error) {
	req, out := c.CreatePresetRequest(input)
	return out, req.Send()
}

// CreatePresetWithContext is the same as CreatePreset with the addition of
// the ability to pass a context and additional request options.
//
// See CreatePreset for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) CreatePresetWithContext(ctx aws.Context, input *CreatePresetInput, opts ...request.Option) (*CreatePresetOutput, error) {
	req, out := c.CreatePresetRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opCreateQueue = "CreateQueue"

// CreateQueueRequest generates a "aws/request.Request" representing the
// client's request for the CreateQueue operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateQueue for more information on using the CreateQueue
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the CreateQueueRequest method.
//    req, resp := client.CreateQueueRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateQueue
func (c *MediaConvert) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, output *CreateQueueOutput) {
	op := &request.Operation{
		Name:       opCreateQueue,
		HTTPMethod: "POST",
		HTTPPath:   "/2017-08-29/queues",
	}

	if input == nil {
		input = &CreateQueueInput{}
	}

	output = &CreateQueueOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateQueue API operation for AWS Elemental MediaConvert.
//
// Create a new transcoding queue. For information about queues, see Working
// With Queues in the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Elemental MediaConvert's
// API operation CreateQueue for usage and error information.
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/CreateQueue
func (c *MediaConvert) CreateQueue(input *CreateQueueInput) (*CreateQueueOutput, error) {
	req, out := c.CreateQueueRequest(input)
	return out, req.Send()
}

// CreateQueueWithContext is the same as CreateQueue with the addition of
// the ability to pass a context and additional request options.
//
// See CreateQueue for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) CreateQueueWithContext(ctx aws.Context, input *CreateQueueInput, opts ...request.Option) (*CreateQueueOutput, error) {
	req, out := c.CreateQueueRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteJobTemplate = "DeleteJobTemplate"

// DeleteJobTemplateRequest generates a "aws/request.Request" representing the
// client's request for the DeleteJobTemplate operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DeleteJobTemplate for more information on using the DeleteJobTemplate
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DeleteJobTemplateRequest method.
//    req, resp := client.DeleteJobTemplateRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteJobTemplate
func (c *MediaConvert) DeleteJobTemplateRequest(input *DeleteJobTemplateInput) (req *request.Request, output *DeleteJobTemplateOutput) {
	op := &request.Operation{
		Name:       opDeleteJobTemplate,
		HTTPMethod: "DELETE",
		HTTPPath:   "/2017-08-29/jobTemplates/{name}",
	}

	if input == nil {
		input = &DeleteJobTemplateInput{}
	}

	output = &DeleteJobTemplateOutput{}
	req = c.newRequest(op, input, output)
	// The operation has no modeled response body, so swap in the handler that
	// discards it rather than unmarshaling JSON into the output struct.
	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}

// DeleteJobTemplate API operation for AWS Elemental MediaConvert.
//
// Permanently delete a job template you have created.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Elemental MediaConvert's
// API operation DeleteJobTemplate for usage and error information.
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteJobTemplate
func (c *MediaConvert) DeleteJobTemplate(input *DeleteJobTemplateInput) (*DeleteJobTemplateOutput, error) {
	req, out := c.DeleteJobTemplateRequest(input)
	return out, req.Send()
}

// DeleteJobTemplateWithContext is the same as DeleteJobTemplate with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteJobTemplate for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) DeleteJobTemplateWithContext(ctx aws.Context, input *DeleteJobTemplateInput, opts ...request.Option) (*DeleteJobTemplateOutput, error) {
	req, out := c.DeleteJobTemplateRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeletePreset = "DeletePreset"

// DeletePresetRequest generates a "aws/request.Request" representing the
// client's request for the DeletePreset operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error. // // See DeletePreset for more information on using the DeletePreset // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DeletePresetRequest method. // req, resp := client.DeletePresetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeletePreset func (c *MediaConvert) DeletePresetRequest(input *DeletePresetInput) (req *request.Request, output *DeletePresetOutput) { op := &request.Operation{ Name: opDeletePreset, HTTPMethod: "DELETE", HTTPPath: "/2017-08-29/presets/{name}", } if input == nil { input = &DeletePresetInput{} } output = &DeletePresetOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // DeletePreset API operation for AWS Elemental MediaConvert. // // Permanently delete a preset you have created. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Elemental MediaConvert's // API operation DeletePreset for usage and error information. 
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeletePreset
func (c *MediaConvert) DeletePreset(input *DeletePresetInput) (*DeletePresetOutput, error) {
	req, out := c.DeletePresetRequest(input)
	return out, req.Send()
}

// DeletePresetWithContext is the same as DeletePreset with the addition of
// the ability to pass a context and additional request options.
//
// See DeletePreset for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) DeletePresetWithContext(ctx aws.Context, input *DeletePresetInput, opts ...request.Option) (*DeletePresetOutput, error) {
	req, out := c.DeletePresetRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteQueue = "DeleteQueue"

// DeleteQueueRequest generates a "aws/request.Request" representing the
// client's request for the DeleteQueue operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DeleteQueue for more information on using the DeleteQueue
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DeleteQueueRequest method.
//    req, resp := client.DeleteQueueRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteQueue
func (c *MediaConvert) DeleteQueueRequest(input *DeleteQueueInput) (req *request.Request, output *DeleteQueueOutput) {
	op := &request.Operation{
		Name:       opDeleteQueue,
		HTTPMethod: "DELETE",
		HTTPPath:   "/2017-08-29/queues/{name}",
	}

	if input == nil {
		input = &DeleteQueueInput{}
	}

	output = &DeleteQueueOutput{}
	req = c.newRequest(op, input, output)
	// The operation has no modeled response body, so swap in the handler that
	// discards it rather than unmarshaling JSON into the output struct.
	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}

// DeleteQueue API operation for AWS Elemental MediaConvert.
//
// Permanently delete a queue you have created.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Elemental MediaConvert's
// API operation DeleteQueue for usage and error information.
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DeleteQueue
func (c *MediaConvert) DeleteQueue(input *DeleteQueueInput) (*DeleteQueueOutput, error) {
	req, out := c.DeleteQueueRequest(input)
	return out, req.Send()
}

// DeleteQueueWithContext is the same as DeleteQueue with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteQueue for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) DeleteQueueWithContext(ctx aws.Context, input *DeleteQueueInput, opts ...request.Option) (*DeleteQueueOutput, error) {
	req, out := c.DeleteQueueRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDescribeEndpoints = "DescribeEndpoints"

// DescribeEndpointsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeEndpoints operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeEndpoints for more information on using the DescribeEndpoints
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DescribeEndpointsRequest method.
//    req, resp := client.DescribeEndpointsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DescribeEndpoints
func (c *MediaConvert) DescribeEndpointsRequest(input *DescribeEndpointsInput) (req *request.Request, output *DescribeEndpointsOutput) {
	op := &request.Operation{
		Name:       opDescribeEndpoints,
		HTTPMethod: "POST",
		HTTPPath:   "/2017-08-29/endpoints",
		// Pagination metadata: NextToken flows from each response into the next
		// request, and MaxResults caps the page size.
		Paginator: &request.Paginator{
			InputTokens:     []string{"NextToken"},
			OutputTokens:    []string{"NextToken"},
			LimitToken:      "MaxResults",
			TruncationToken: "",
		},
	}

	if input == nil {
		input = &DescribeEndpointsInput{}
	}

	output = &DescribeEndpointsOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DescribeEndpoints API operation for AWS Elemental MediaConvert.
//
// Send a request with an empty body to the regional API endpoint to get your
// account API endpoint.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Elemental MediaConvert's
// API operation DescribeEndpoints for usage and error information.
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DescribeEndpoints
func (c *MediaConvert) DescribeEndpoints(input *DescribeEndpointsInput) (*DescribeEndpointsOutput, error) {
	req, out := c.DescribeEndpointsRequest(input)
	return out, req.Send()
}

// DescribeEndpointsWithContext is the same as DescribeEndpoints with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeEndpoints for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) DescribeEndpointsWithContext(ctx aws.Context, input *DescribeEndpointsInput, opts ...request.Option) (*DescribeEndpointsOutput, error) {
	req, out := c.DescribeEndpointsRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

// DescribeEndpointsPages iterates over the pages of a DescribeEndpoints operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeEndpoints method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
//    // Example iterating over at most 3 pages of a DescribeEndpoints operation.
//    pageNum := 0
//    err := client.DescribeEndpointsPages(params,
//        func(page *mediaconvert.DescribeEndpointsOutput, lastPage bool) bool {
//            pageNum++
//            fmt.Println(page)
//            return pageNum <= 3
//        })
//
func (c *MediaConvert) DescribeEndpointsPages(input *DescribeEndpointsInput, fn func(*DescribeEndpointsOutput, bool) bool) error {
	return c.DescribeEndpointsPagesWithContext(aws.BackgroundContext(), input, fn)
}

// DescribeEndpointsPagesWithContext same as DescribeEndpointsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) DescribeEndpointsPagesWithContext(ctx aws.Context, input *DescribeEndpointsInput, fn func(*DescribeEndpointsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { var inCpy *DescribeEndpointsInput if input != nil { tmp := *input inCpy = &tmp } req, _ := c.DescribeEndpointsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } for p.Next() { if !fn(p.Page().(*DescribeEndpointsOutput), !p.HasNextPage()) { break } } return p.Err() } const opDisassociateCertificate = "DisassociateCertificate" // DisassociateCertificateRequest generates a "aws/request.Request" representing the // client's request for the DisassociateCertificate operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DisassociateCertificate for more information on using the DisassociateCertificate // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DisassociateCertificateRequest method. 
// req, resp := client.DisassociateCertificateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DisassociateCertificate func (c *MediaConvert) DisassociateCertificateRequest(input *DisassociateCertificateInput) (req *request.Request, output *DisassociateCertificateOutput) { op := &request.Operation{ Name: opDisassociateCertificate, HTTPMethod: "DELETE", HTTPPath: "/2017-08-29/certificates/{arn}", } if input == nil { input = &DisassociateCertificateInput{} } output = &DisassociateCertificateOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // DisassociateCertificate API operation for AWS Elemental MediaConvert. // // Removes an association between the Amazon Resource Name (ARN) of an AWS Certificate // Manager (ACM) certificate and an AWS Elemental MediaConvert resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Elemental MediaConvert's // API operation DisassociateCertificate for usage and error information. 
// // Returned Error Types: // * BadRequestException // // * InternalServerErrorException // // * ForbiddenException // // * NotFoundException // // * TooManyRequestsException // // * ConflictException // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/DisassociateCertificate func (c *MediaConvert) DisassociateCertificate(input *DisassociateCertificateInput) (*DisassociateCertificateOutput, error) { req, out := c.DisassociateCertificateRequest(input) return out, req.Send() } // DisassociateCertificateWithContext is the same as DisassociateCertificate with the addition of // the ability to pass a context and additional request options. // // See DisassociateCertificate for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *MediaConvert) DisassociateCertificateWithContext(ctx aws.Context, input *DisassociateCertificateInput, opts ...request.Option) (*DisassociateCertificateOutput, error) { req, out := c.DisassociateCertificateRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opGetJob = "GetJob" // GetJobRequest generates a "aws/request.Request" representing the // client's request for the GetJob operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See GetJob for more information on using the GetJob // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// // // // Example sending a request using the GetJobRequest method. // req, resp := client.GetJobRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJob func (c *MediaConvert) GetJobRequest(input *GetJobInput) (req *request.Request, output *GetJobOutput) { op := &request.Operation{ Name: opGetJob, HTTPMethod: "GET", HTTPPath: "/2017-08-29/jobs/{id}", } if input == nil { input = &GetJobInput{} } output = &GetJobOutput{} req = c.newRequest(op, input, output) return } // GetJob API operation for AWS Elemental MediaConvert. // // Retrieve the JSON for a specific completed transcoding job. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Elemental MediaConvert's // API operation GetJob for usage and error information. // // Returned Error Types: // * BadRequestException // // * InternalServerErrorException // // * ForbiddenException // // * NotFoundException // // * TooManyRequestsException // // * ConflictException // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJob func (c *MediaConvert) GetJob(input *GetJobInput) (*GetJobOutput, error) { req, out := c.GetJobRequest(input) return out, req.Send() } // GetJobWithContext is the same as GetJob with the addition of // the ability to pass a context and additional request options. // // See GetJob for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
func (c *MediaConvert) GetJobWithContext(ctx aws.Context, input *GetJobInput, opts ...request.Option) (*GetJobOutput, error) { req, out := c.GetJobRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opGetJobTemplate = "GetJobTemplate" // GetJobTemplateRequest generates a "aws/request.Request" representing the // client's request for the GetJobTemplate operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See GetJobTemplate for more information on using the GetJobTemplate // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the GetJobTemplateRequest method. // req, resp := client.GetJobTemplateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJobTemplate func (c *MediaConvert) GetJobTemplateRequest(input *GetJobTemplateInput) (req *request.Request, output *GetJobTemplateOutput) { op := &request.Operation{ Name: opGetJobTemplate, HTTPMethod: "GET", HTTPPath: "/2017-08-29/jobTemplates/{name}", } if input == nil { input = &GetJobTemplateInput{} } output = &GetJobTemplateOutput{} req = c.newRequest(op, input, output) return } // GetJobTemplate API operation for AWS Elemental MediaConvert. // // Retrieve the JSON for a specific job template. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for AWS Elemental MediaConvert's // API operation GetJobTemplate for usage and error information. // // Returned Error Types: // * BadRequestException // // * InternalServerErrorException // // * ForbiddenException // // * NotFoundException // // * TooManyRequestsException // // * ConflictException // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetJobTemplate func (c *MediaConvert) GetJobTemplate(input *GetJobTemplateInput) (*GetJobTemplateOutput, error) { req, out := c.GetJobTemplateRequest(input) return out, req.Send() } // GetJobTemplateWithContext is the same as GetJobTemplate with the addition of // the ability to pass a context and additional request options. // // See GetJobTemplate for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *MediaConvert) GetJobTemplateWithContext(ctx aws.Context, input *GetJobTemplateInput, opts ...request.Option) (*GetJobTemplateOutput, error) { req, out := c.GetJobTemplateRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opGetPreset = "GetPreset" // GetPresetRequest generates a "aws/request.Request" representing the // client's request for the GetPreset operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See GetPreset for more information on using the GetPreset // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the GetPresetRequest method. // req, resp := client.GetPresetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetPreset func (c *MediaConvert) GetPresetRequest(input *GetPresetInput) (req *request.Request, output *GetPresetOutput) { op := &request.Operation{ Name: opGetPreset, HTTPMethod: "GET", HTTPPath: "/2017-08-29/presets/{name}", } if input == nil { input = &GetPresetInput{} } output = &GetPresetOutput{} req = c.newRequest(op, input, output) return } // GetPreset API operation for AWS Elemental MediaConvert. // // Retrieve the JSON for a specific preset. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Elemental MediaConvert's // API operation GetPreset for usage and error information. // // Returned Error Types: // * BadRequestException // // * InternalServerErrorException // // * ForbiddenException // // * NotFoundException // // * TooManyRequestsException // // * ConflictException // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetPreset func (c *MediaConvert) GetPreset(input *GetPresetInput) (*GetPresetOutput, error) { req, out := c.GetPresetRequest(input) return out, req.Send() } // GetPresetWithContext is the same as GetPreset with the addition of // the ability to pass a context and additional request options. // // See GetPreset for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *MediaConvert) GetPresetWithContext(ctx aws.Context, input *GetPresetInput, opts ...request.Option) (*GetPresetOutput, error) { req, out := c.GetPresetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opGetQueue = "GetQueue" // GetQueueRequest generates a "aws/request.Request" representing the // client's request for the GetQueue operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See GetQueue for more information on using the GetQueue // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the GetQueueRequest method. // req, resp := client.GetQueueRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetQueue func (c *MediaConvert) GetQueueRequest(input *GetQueueInput) (req *request.Request, output *GetQueueOutput) { op := &request.Operation{ Name: opGetQueue, HTTPMethod: "GET", HTTPPath: "/2017-08-29/queues/{name}", } if input == nil { input = &GetQueueInput{} } output = &GetQueueOutput{} req = c.newRequest(op, input, output) return } // GetQueue API operation for AWS Elemental MediaConvert. // // Retrieve the JSON for a specific queue. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for AWS Elemental MediaConvert's // API operation GetQueue for usage and error information. // // Returned Error Types: // * BadRequestException // // * InternalServerErrorException // // * ForbiddenException // // * NotFoundException // // * TooManyRequestsException // // * ConflictException // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/GetQueue func (c *MediaConvert) GetQueue(input *GetQueueInput) (*GetQueueOutput, error) { req, out := c.GetQueueRequest(input) return out, req.Send() } // GetQueueWithContext is the same as GetQueue with the addition of // the ability to pass a context and additional request options. // // See GetQueue for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *MediaConvert) GetQueueWithContext(ctx aws.Context, input *GetQueueInput, opts ...request.Option) (*GetQueueOutput, error) { req, out := c.GetQueueRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opListJobTemplates = "ListJobTemplates" // ListJobTemplatesRequest generates a "aws/request.Request" representing the // client's request for the ListJobTemplates operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See ListJobTemplates for more information on using the ListJobTemplates // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // // // // Example sending a request using the ListJobTemplatesRequest method. // req, resp := client.ListJobTemplatesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobTemplates func (c *MediaConvert) ListJobTemplatesRequest(input *ListJobTemplatesInput) (req *request.Request, output *ListJobTemplatesOutput) { op := &request.Operation{ Name: opListJobTemplates, HTTPMethod: "GET", HTTPPath: "/2017-08-29/jobTemplates", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, LimitToken: "MaxResults", TruncationToken: "", }, } if input == nil { input = &ListJobTemplatesInput{} } output = &ListJobTemplatesOutput{} req = c.newRequest(op, input, output) return } // ListJobTemplates API operation for AWS Elemental MediaConvert. // // Retrieve a JSON array of up to twenty of your job templates. This will return // the templates themselves, not just a list of them. To retrieve the next twenty // templates, use the nextToken string returned with the array // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Elemental MediaConvert's // API operation ListJobTemplates for usage and error information. 
// // Returned Error Types: // * BadRequestException // // * InternalServerErrorException // // * ForbiddenException // // * NotFoundException // // * TooManyRequestsException // // * ConflictException // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobTemplates func (c *MediaConvert) ListJobTemplates(input *ListJobTemplatesInput) (*ListJobTemplatesOutput, error) { req, out := c.ListJobTemplatesRequest(input) return out, req.Send() } // ListJobTemplatesWithContext is the same as ListJobTemplates with the addition of // the ability to pass a context and additional request options. // // See ListJobTemplates for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *MediaConvert) ListJobTemplatesWithContext(ctx aws.Context, input *ListJobTemplatesInput, opts ...request.Option) (*ListJobTemplatesOutput, error) { req, out := c.ListJobTemplatesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // ListJobTemplatesPages iterates over the pages of a ListJobTemplates operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See ListJobTemplates method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a ListJobTemplates operation. 
// pageNum := 0 // err := client.ListJobTemplatesPages(params, // func(page *mediaconvert.ListJobTemplatesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *MediaConvert) ListJobTemplatesPages(input *ListJobTemplatesInput, fn func(*ListJobTemplatesOutput, bool) bool) error { return c.ListJobTemplatesPagesWithContext(aws.BackgroundContext(), input, fn) } // ListJobTemplatesPagesWithContext same as ListJobTemplatesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *MediaConvert) ListJobTemplatesPagesWithContext(ctx aws.Context, input *ListJobTemplatesInput, fn func(*ListJobTemplatesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { var inCpy *ListJobTemplatesInput if input != nil { tmp := *input inCpy = &tmp } req, _ := c.ListJobTemplatesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } for p.Next() { if !fn(p.Page().(*ListJobTemplatesOutput), !p.HasNextPage()) { break } } return p.Err() } const opListJobs = "ListJobs" // ListJobsRequest generates a "aws/request.Request" representing the // client's request for the ListJobs operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See ListJobs for more information on using the ListJobs // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the ListJobsRequest method. // req, resp := client.ListJobsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobs func (c *MediaConvert) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) { op := &request.Operation{ Name: opListJobs, HTTPMethod: "GET", HTTPPath: "/2017-08-29/jobs", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, LimitToken: "MaxResults", TruncationToken: "", }, } if input == nil { input = &ListJobsInput{} } output = &ListJobsOutput{} req = c.newRequest(op, input, output) return } // ListJobs API operation for AWS Elemental MediaConvert. // // Retrieve a JSON array of up to twenty of your most recently created jobs. // This array includes in-process, completed, and errored jobs. This will return // the jobs themselves, not just a list of the jobs. To retrieve the twenty // next most recent jobs, use the nextToken string returned with the array. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Elemental MediaConvert's // API operation ListJobs for usage and error information. 
// // Returned Error Types: // * BadRequestException // // * InternalServerErrorException // // * ForbiddenException // // * NotFoundException // // * TooManyRequestsException // // * ConflictException // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListJobs func (c *MediaConvert) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) { req, out := c.ListJobsRequest(input) return out, req.Send() } // ListJobsWithContext is the same as ListJobs with the addition of // the ability to pass a context and additional request options. // // See ListJobs for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *MediaConvert) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, opts ...request.Option) (*ListJobsOutput, error) { req, out := c.ListJobsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // ListJobsPages iterates over the pages of a ListJobs operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See ListJobs method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a ListJobs operation. 
// pageNum := 0 // err := client.ListJobsPages(params, // func(page *mediaconvert.ListJobsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *MediaConvert) ListJobsPages(input *ListJobsInput, fn func(*ListJobsOutput, bool) bool) error { return c.ListJobsPagesWithContext(aws.BackgroundContext(), input, fn) } // ListJobsPagesWithContext same as ListJobsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *MediaConvert) ListJobsPagesWithContext(ctx aws.Context, input *ListJobsInput, fn func(*ListJobsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { var inCpy *ListJobsInput if input != nil { tmp := *input inCpy = &tmp } req, _ := c.ListJobsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } for p.Next() { if !fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) { break } } return p.Err() } const opListPresets = "ListPresets" // ListPresetsRequest generates a "aws/request.Request" representing the // client's request for the ListPresets operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See ListPresets for more information on using the ListPresets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// // // // Example sending a request using the ListPresetsRequest method. // req, resp := client.ListPresetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListPresets func (c *MediaConvert) ListPresetsRequest(input *ListPresetsInput) (req *request.Request, output *ListPresetsOutput) { op := &request.Operation{ Name: opListPresets, HTTPMethod: "GET", HTTPPath: "/2017-08-29/presets", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, LimitToken: "MaxResults", TruncationToken: "", }, } if input == nil { input = &ListPresetsInput{} } output = &ListPresetsOutput{} req = c.newRequest(op, input, output) return } // ListPresets API operation for AWS Elemental MediaConvert. // // Retrieve a JSON array of up to twenty of your presets. This will return the // presets themselves, not just a list of them. To retrieve the next twenty // presets, use the nextToken string returned with the array. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Elemental MediaConvert's // API operation ListPresets for usage and error information. // // Returned Error Types: // * BadRequestException // // * InternalServerErrorException // // * ForbiddenException // // * NotFoundException // // * TooManyRequestsException // // * ConflictException // // See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListPresets func (c *MediaConvert) ListPresets(input *ListPresetsInput) (*ListPresetsOutput, error) { req, out := c.ListPresetsRequest(input) return out, req.Send() } // ListPresetsWithContext is the same as ListPresets with the addition of // the ability to pass a context and additional request options. 
// // See ListPresets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *MediaConvert) ListPresetsWithContext(ctx aws.Context, input *ListPresetsInput, opts ...request.Option) (*ListPresetsOutput, error) { req, out := c.ListPresetsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // ListPresetsPages iterates over the pages of a ListPresets operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See ListPresets method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a ListPresets operation. // pageNum := 0 // err := client.ListPresetsPages(params, // func(page *mediaconvert.ListPresetsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *MediaConvert) ListPresetsPages(input *ListPresetsInput, fn func(*ListPresetsOutput, bool) bool) error { return c.ListPresetsPagesWithContext(aws.BackgroundContext(), input, fn) } // ListPresetsPagesWithContext same as ListPresetsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
func (c *MediaConvert) ListPresetsPagesWithContext(ctx aws.Context, input *ListPresetsInput, fn func(*ListPresetsOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		NewRequest: func() (*request.Request, error) {
			// Work on a private copy of the input so the paginator can
			// advance the page token without mutating the caller's struct.
			var inCpy *ListPresetsInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			// The output placeholder (second result) is discarded here;
			// each page is retrieved below via p.Page().
			req, _ := c.ListPresetsRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	for p.Next() {
		// fn receives the page and a "lastPage" flag; returning false
		// stops iteration early.
		if !fn(p.Page().(*ListPresetsOutput), !p.HasNextPage()) {
			break
		}
	}

	return p.Err()
}

const opListQueues = "ListQueues"

// ListQueuesRequest generates a "aws/request.Request" representing the
// client's request for the ListQueues operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListQueues for more information on using the ListQueues
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the ListQueuesRequest method.
//    req, resp := client.ListQueuesRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListQueues
func (c *MediaConvert) ListQueuesRequest(input *ListQueuesInput) (req *request.Request, output *ListQueuesOutput) {
	op := &request.Operation{
		Name:       opListQueues,
		HTTPMethod: "GET",
		HTTPPath:   "/2017-08-29/queues",
		// Paginator wiring consumed by request.Pagination: the service's
		// NextToken is fed back as the request's NextToken input.
		Paginator: &request.Paginator{
			InputTokens:     []string{"NextToken"},
			OutputTokens:    []string{"NextToken"},
			LimitToken:      "MaxResults",
			TruncationToken: "",
		},
	}

	if input == nil {
		input = &ListQueuesInput{}
	}

	output = &ListQueuesOutput{}
	req = c.newRequest(op, input, output)
	return
}

// ListQueues API operation for AWS Elemental MediaConvert.
//
// Retrieve a JSON array of up to twenty of your queues. This will return the
// queues themselves, not just a list of them. To retrieve the next twenty queues,
// use the nextToken string returned with the array.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Elemental MediaConvert's
// API operation ListQueues for usage and error information.
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListQueues
func (c *MediaConvert) ListQueues(input *ListQueuesInput) (*ListQueuesOutput, error) {
	req, out := c.ListQueuesRequest(input)
	return out, req.Send()
}

// ListQueuesWithContext is the same as ListQueues with the addition of
// the ability to pass a context and additional request options.
//
// See ListQueues for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) ListQueuesWithContext(ctx aws.Context, input *ListQueuesInput, opts ...request.Option) (*ListQueuesOutput, error) {
	req, out := c.ListQueuesRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

// ListQueuesPages iterates over the pages of a ListQueues operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListQueues method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
//    // Example iterating over at most 3 pages of a ListQueues operation.
//    pageNum := 0
//    err := client.ListQueuesPages(params,
//        func(page *mediaconvert.ListQueuesOutput, lastPage bool) bool {
//            pageNum++
//            fmt.Println(page)
//            return pageNum <= 3
//        })
//
func (c *MediaConvert) ListQueuesPages(input *ListQueuesInput, fn func(*ListQueuesOutput, bool) bool) error {
	return c.ListQueuesPagesWithContext(aws.BackgroundContext(), input, fn)
}

// ListQueuesPagesWithContext same as ListQueuesPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) ListQueuesPagesWithContext(ctx aws.Context, input *ListQueuesInput, fn func(*ListQueuesOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		NewRequest: func() (*request.Request, error) {
			// Copy the input so token updates made by the paginator never
			// leak back to the caller.
			var inCpy *ListQueuesInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.ListQueuesRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	for p.Next() {
		if !fn(p.Page().(*ListQueuesOutput), !p.HasNextPage()) {
			break
		}
	}

	return p.Err()
}

const opListTagsForResource = "ListTagsForResource"

// ListTagsForResourceRequest generates a "aws/request.Request" representing the
// client's request for the ListTagsForResource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListTagsForResource for more information on using the ListTagsForResource
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the ListTagsForResourceRequest method.
//    req, resp := client.ListTagsForResourceRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListTagsForResource
func (c *MediaConvert) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
	op := &request.Operation{
		Name:       opListTagsForResource,
		HTTPMethod: "GET",
		// {arn} is filled in from the input's URI-bound field by the
		// REST-JSON marshaler.
		HTTPPath: "/2017-08-29/tags/{arn}",
	}

	if input == nil {
		input = &ListTagsForResourceInput{}
	}

	output = &ListTagsForResourceOutput{}
	req = c.newRequest(op, input, output)
	return
}

// ListTagsForResource API operation for AWS Elemental MediaConvert.
//
// Retrieve the tags for a MediaConvert resource.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Elemental MediaConvert's
// API operation ListTagsForResource for usage and error information.
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/ListTagsForResource
func (c *MediaConvert) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) {
	req, out := c.ListTagsForResourceRequest(input)
	return out, req.Send()
}

// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of
// the ability to pass a context and additional request options.
//
// See ListTagsForResource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) {
	req, out := c.ListTagsForResourceRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opTagResource = "TagResource"

// TagResourceRequest generates a "aws/request.Request" representing the
// client's request for the TagResource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See TagResource for more information on using the TagResource
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the TagResourceRequest method.
//    req, resp := client.TagResourceRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/TagResource
func (c *MediaConvert) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) {
	op := &request.Operation{
		Name:       opTagResource,
		HTTPMethod: "POST",
		HTTPPath:   "/2017-08-29/tags",
	}

	if input == nil {
		input = &TagResourceInput{}
	}

	output = &TagResourceOutput{}
	req = c.newRequest(op, input, output)
	// This operation has no modeled response payload, so the default
	// REST-JSON unmarshaler is swapped for one that discards the body.
	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}

// TagResource API operation for AWS Elemental MediaConvert.
//
// Add tags to a MediaConvert queue, preset, or job template. For information
// about tagging, see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/tagging-resources.html
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Elemental MediaConvert's
// API operation TagResource for usage and error information.
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/TagResource
func (c *MediaConvert) TagResource(input *TagResourceInput) (*TagResourceOutput, error) {
	req, out := c.TagResourceRequest(input)
	return out, req.Send()
}

// TagResourceWithContext is the same as TagResource with the addition of
// the ability to pass a context and additional request options.
//
// See TagResource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) {
	req, out := c.TagResourceRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opUntagResource = "UntagResource"

// UntagResourceRequest generates a "aws/request.Request" representing the
// client's request for the UntagResource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UntagResource for more information on using the UntagResource
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UntagResourceRequest method.
//    req, resp := client.UntagResourceRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UntagResource
func (c *MediaConvert) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) {
	op := &request.Operation{
		Name: opUntagResource,
		// NOTE: the MediaConvert API models untagging as a PUT against the
		// resource's tag collection, not a DELETE.
		HTTPMethod: "PUT",
		HTTPPath:   "/2017-08-29/tags/{arn}",
	}

	if input == nil {
		input = &UntagResourceInput{}
	}

	output = &UntagResourceOutput{}
	req = c.newRequest(op, input, output)
	// No modeled response payload: discard the body instead of running the
	// default REST-JSON unmarshaler.
	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}

// UntagResource API operation for AWS Elemental MediaConvert.
//
// Remove tags from a MediaConvert queue, preset, or job template. For information
// about tagging, see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/tagging-resources.html
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Elemental MediaConvert's
// API operation UntagResource for usage and error information.
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UntagResource
func (c *MediaConvert) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) {
	req, out := c.UntagResourceRequest(input)
	return out, req.Send()
}

// UntagResourceWithContext is the same as UntagResource with the addition of
// the ability to pass a context and additional request options.
//
// See UntagResource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
	req, out := c.UntagResourceRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opUpdateJobTemplate = "UpdateJobTemplate"

// UpdateJobTemplateRequest generates a "aws/request.Request" representing the
// client's request for the UpdateJobTemplate operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateJobTemplate for more information on using the UpdateJobTemplate
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UpdateJobTemplateRequest method.
//    req, resp := client.UpdateJobTemplateRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateJobTemplate
func (c *MediaConvert) UpdateJobTemplateRequest(input *UpdateJobTemplateInput) (req *request.Request, output *UpdateJobTemplateOutput) {
	op := &request.Operation{
		Name:       opUpdateJobTemplate,
		HTTPMethod: "PUT",
		// {name} is filled in from the input's URI-bound field.
		HTTPPath: "/2017-08-29/jobTemplates/{name}",
	}

	if input == nil {
		input = &UpdateJobTemplateInput{}
	}

	output = &UpdateJobTemplateOutput{}
	req = c.newRequest(op, input, output)
	return
}

// UpdateJobTemplate API operation for AWS Elemental MediaConvert.
//
// Modify one of your existing job templates.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Elemental MediaConvert's
// API operation UpdateJobTemplate for usage and error information.
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateJobTemplate
func (c *MediaConvert) UpdateJobTemplate(input *UpdateJobTemplateInput) (*UpdateJobTemplateOutput, error) {
	req, out := c.UpdateJobTemplateRequest(input)
	return out, req.Send()
}

// UpdateJobTemplateWithContext is the same as UpdateJobTemplate with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateJobTemplate for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) UpdateJobTemplateWithContext(ctx aws.Context, input *UpdateJobTemplateInput, opts ...request.Option) (*UpdateJobTemplateOutput, error) {
	req, out := c.UpdateJobTemplateRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opUpdatePreset = "UpdatePreset"

// UpdatePresetRequest generates a "aws/request.Request" representing the
// client's request for the UpdatePreset operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdatePreset for more information on using the UpdatePreset
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UpdatePresetRequest method.
//    req, resp := client.UpdatePresetRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdatePreset
func (c *MediaConvert) UpdatePresetRequest(input *UpdatePresetInput) (req *request.Request, output *UpdatePresetOutput) {
	op := &request.Operation{
		Name:       opUpdatePreset,
		HTTPMethod: "PUT",
		HTTPPath:   "/2017-08-29/presets/{name}",
	}

	if input == nil {
		input = &UpdatePresetInput{}
	}

	output = &UpdatePresetOutput{}
	req = c.newRequest(op, input, output)
	return
}

// UpdatePreset API operation for AWS Elemental MediaConvert.
//
// Modify one of your existing presets.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Elemental MediaConvert's
// API operation UpdatePreset for usage and error information.
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdatePreset
func (c *MediaConvert) UpdatePreset(input *UpdatePresetInput) (*UpdatePresetOutput, error) {
	req, out := c.UpdatePresetRequest(input)
	return out, req.Send()
}

// UpdatePresetWithContext is the same as UpdatePreset with the addition of
// the ability to pass a context and additional request options.
//
// See UpdatePreset for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) UpdatePresetWithContext(ctx aws.Context, input *UpdatePresetInput, opts ...request.Option) (*UpdatePresetOutput, error) {
	req, out := c.UpdatePresetRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opUpdateQueue = "UpdateQueue"

// UpdateQueueRequest generates a "aws/request.Request" representing the
// client's request for the UpdateQueue operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateQueue for more information on using the UpdateQueue
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the UpdateQueueRequest method.
//    req, resp := client.UpdateQueueRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateQueue
func (c *MediaConvert) UpdateQueueRequest(input *UpdateQueueInput) (req *request.Request, output *UpdateQueueOutput) {
	op := &request.Operation{
		Name:       opUpdateQueue,
		HTTPMethod: "PUT",
		HTTPPath:   "/2017-08-29/queues/{name}",
	}

	if input == nil {
		input = &UpdateQueueInput{}
	}

	output = &UpdateQueueOutput{}
	req = c.newRequest(op, input, output)
	return
}

// UpdateQueue API operation for AWS Elemental MediaConvert.
//
// Modify one of your existing queues.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Elemental MediaConvert's
// API operation UpdateQueue for usage and error information.
//
// Returned Error Types:
//   * BadRequestException
//
//   * InternalServerErrorException
//
//   * ForbiddenException
//
//   * NotFoundException
//
//   * TooManyRequestsException
//
//   * ConflictException
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/UpdateQueue
func (c *MediaConvert) UpdateQueue(input *UpdateQueueInput) (*UpdateQueueOutput, error) {
	req, out := c.UpdateQueueRequest(input)
	return out, req.Send()
}

// UpdateQueueWithContext is the same as UpdateQueue with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateQueue for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation.
// If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *MediaConvert) UpdateQueueWithContext(ctx aws.Context, input *UpdateQueueInput, opts ...request.Option) (*UpdateQueueOutput, error) {
	req, out := c.UpdateQueueRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value AAC. The service accepts one of two mutually exclusive groups of
// AAC settings--VBR and CBR. To select one of these modes, set the value of
// Bitrate control mode (rateControlMode) to "VBR" or "CBR". In VBR mode, you
// control the audio quality with the setting VBR quality (vbrQuality). In CBR
// mode, you use the setting Bitrate (bitrate). Defaults and valid values depend
// on the rate control mode.
type AacSettings struct {
	_ struct{} `type:"structure"`

	// Choose BROADCASTER_MIXED_AD when the input contains pre-mixed main audio
	// + audio description (AD) as a stereo pair. The value for AudioType will be
	// set to 3, which signals to downstream systems that this stream contains "broadcaster
	// mixed AD". Note that the input received by the encoder must contain pre-mixed
	// audio; the encoder does not perform the mixing. When you choose BROADCASTER_MIXED_AD,
	// the encoder ignores any values you provide in AudioType and FollowInputAudioType.
	// Choose NORMAL when the input does not contain pre-mixed audio + audio description
	// (AD). In this case, the encoder will use any values you provide for AudioType
	// and FollowInputAudioType.
	AudioDescriptionBroadcasterMix *string `locationName:"audioDescriptionBroadcasterMix" type:"string" enum:"AacAudioDescriptionBroadcasterMix"`

	// Specify the average bitrate in bits per second. The set of valid values for
	// this setting is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000,
	// 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000,
	// 192000, 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000,
	// 768000, 896000, 1024000. The value you set is also constrained by the values
	// that you choose for Profile (codecProfile), Bitrate control mode (codingMode),
	// and Sample rate (sampleRate). Default values depend on Bitrate control mode
	// and Profile.
	Bitrate *int64 `locationName:"bitrate" min:"6000" type:"integer"`

	// AAC Profile.
	CodecProfile *string `locationName:"codecProfile" type:"string" enum:"AacCodecProfile"`

	// Mono (Audio Description), Mono, Stereo, or 5.1 channel layout. Valid values
	// depend on rate control mode and profile. "1.0 - Audio Description (Receiver
	// Mix)" setting receives a stereo description plus control track and emits
	// a mono AAC encode of the description track, with control data emitted in
	// the PES header as per ETSI TS 101 154 Annex E.
	CodingMode *string `locationName:"codingMode" type:"string" enum:"AacCodingMode"`

	// Rate Control Mode.
	RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"AacRateControlMode"`

	// Enables LATM/LOAS AAC output. Note that if you use LATM/LOAS AAC in an output,
	// you must choose "No container" for the output container.
	RawFormat *string `locationName:"rawFormat" type:"string" enum:"AacRawFormat"`

	// Sample rate in Hz. Valid values depend on rate control mode and profile.
	SampleRate *int64 `locationName:"sampleRate" min:"8000" type:"integer"`

	// Use MPEG-2 AAC instead of MPEG-4 AAC audio for raw or MPEG-2 Transport Stream
	// containers.
	Specification *string `locationName:"specification" type:"string" enum:"AacSpecification"`

	// VBR Quality Level - Only used if rate_control_mode is VBR.
	VbrQuality *string `locationName:"vbrQuality" type:"string" enum:"AacVbrQuality"`
}

// String returns the string representation
func (s AacSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AacSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the min constraints declared in the struct tags above; enum
// values are validated server-side.
func (s *AacSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AacSettings"}
	if s.Bitrate != nil && *s.Bitrate < 6000 {
		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 6000))
	}
	if s.SampleRate != nil && *s.SampleRate < 8000 {
		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 8000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAudioDescriptionBroadcasterMix sets the AudioDescriptionBroadcasterMix field's value.
func (s *AacSettings) SetAudioDescriptionBroadcasterMix(v string) *AacSettings {
	s.AudioDescriptionBroadcasterMix = &v
	return s
}

// SetBitrate sets the Bitrate field's value.
func (s *AacSettings) SetBitrate(v int64) *AacSettings {
	s.Bitrate = &v
	return s
}

// SetCodecProfile sets the CodecProfile field's value.
func (s *AacSettings) SetCodecProfile(v string) *AacSettings {
	s.CodecProfile = &v
	return s
}

// SetCodingMode sets the CodingMode field's value.
func (s *AacSettings) SetCodingMode(v string) *AacSettings {
	s.CodingMode = &v
	return s
}

// SetRateControlMode sets the RateControlMode field's value.
func (s *AacSettings) SetRateControlMode(v string) *AacSettings {
	s.RateControlMode = &v
	return s
}

// SetRawFormat sets the RawFormat field's value.
func (s *AacSettings) SetRawFormat(v string) *AacSettings {
	s.RawFormat = &v
	return s
}

// SetSampleRate sets the SampleRate field's value.
func (s *AacSettings) SetSampleRate(v int64) *AacSettings {
	s.SampleRate = &v
	return s
}

// SetSpecification sets the Specification field's value.
func (s *AacSettings) SetSpecification(v string) *AacSettings {
	s.Specification = &v
	return s
}

// SetVbrQuality sets the VbrQuality field's value.
func (s *AacSettings) SetVbrQuality(v string) *AacSettings {
	s.VbrQuality = &v
	return s
}

// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value AC3.
type Ac3Settings struct {
	_ struct{} `type:"structure"`

	// Specify the average bitrate in bits per second. Valid bitrates depend on
	// the coding mode.
	Bitrate *int64 `locationName:"bitrate" min:"64000" type:"integer"`

	// Specify the bitstream mode for the AC-3 stream that the encoder emits. For
	// more information about the AC3 bitstream mode, see ATSC A/52-2012 (Annex
	// E).
	BitstreamMode *string `locationName:"bitstreamMode" type:"string" enum:"Ac3BitstreamMode"`

	// Dolby Digital coding mode. Determines number of channels.
	CodingMode *string `locationName:"codingMode" type:"string" enum:"Ac3CodingMode"`

	// Sets the dialnorm for the output. If blank and input audio is Dolby Digital,
	// dialnorm will be passed through.
	Dialnorm *int64 `locationName:"dialnorm" min:"1" type:"integer"`

	// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
	// uses when encoding the metadata in the Dolby Digital stream for the line
	// operating mode. Related setting: When you use this setting, MediaConvert
	// ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile).
	// For information about the Dolby Digital DRC operating modes and profiles,
	// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
	DynamicRangeCompressionLine *string `locationName:"dynamicRangeCompressionLine" type:"string" enum:"Ac3DynamicRangeCompressionLine"`

	// When you want to add Dolby dynamic range compression (DRC) signaling to your
	// output stream, we recommend that you use the mode-specific settings instead
	// of Dynamic range compression profile (DynamicRangeCompressionProfile). The
	// mode-specific settings are Dynamic range compression profile, line mode (dynamicRangeCompressionLine)
	// and Dynamic range compression profile, RF mode (dynamicRangeCompressionRf).
	// Note that when you specify values for all three settings, MediaConvert ignores
	// the value of this setting in favor of the mode-specific settings. If you
	// do use this setting instead of the mode-specific settings, choose None (NONE)
	// to leave out DRC signaling. Keep the default Film standard (FILM_STANDARD)
	// to set the profile to Dolby's film standard profile for all operating modes.
	DynamicRangeCompressionProfile *string `locationName:"dynamicRangeCompressionProfile" type:"string" enum:"Ac3DynamicRangeCompressionProfile"`

	// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
	// uses when encoding the metadata in the Dolby Digital stream for the RF operating
	// mode. Related setting: When you use this setting, MediaConvert ignores any
	// value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile).
	// For information about the Dolby Digital DRC operating modes and profiles,
	// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
	DynamicRangeCompressionRf *string `locationName:"dynamicRangeCompressionRf" type:"string" enum:"Ac3DynamicRangeCompressionRf"`

	// Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only
	// valid with 3_2_LFE coding mode.
	LfeFilter *string `locationName:"lfeFilter" type:"string" enum:"Ac3LfeFilter"`

	// When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+,
	// or DolbyE decoder that supplied this audio data. If audio was not supplied
	// from one of these streams, then the static metadata settings will be used.
	MetadataControl *string `locationName:"metadataControl" type:"string" enum:"Ac3MetadataControl"`

	// This value is always 48000. It represents the sample rate in Hz.
	SampleRate *int64 `locationName:"sampleRate" min:"48000" type:"integer"`
}

// String returns the string representation
func (s Ac3Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Ac3Settings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the min constraints declared in the struct tags above.
func (s *Ac3Settings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Ac3Settings"}
	if s.Bitrate != nil && *s.Bitrate < 64000 {
		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 64000))
	}
	if s.Dialnorm != nil && *s.Dialnorm < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Dialnorm", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 48000 {
		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 48000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBitrate sets the Bitrate field's value.
func (s *Ac3Settings) SetBitrate(v int64) *Ac3Settings {
	s.Bitrate = &v
	return s
}

// SetBitstreamMode sets the BitstreamMode field's value.
func (s *Ac3Settings) SetBitstreamMode(v string) *Ac3Settings {
	s.BitstreamMode = &v
	return s
}

// SetCodingMode sets the CodingMode field's value.
func (s *Ac3Settings) SetCodingMode(v string) *Ac3Settings {
	s.CodingMode = &v
	return s
}

// SetDialnorm sets the Dialnorm field's value.
func (s *Ac3Settings) SetDialnorm(v int64) *Ac3Settings {
	s.Dialnorm = &v
	return s
}

// SetDynamicRangeCompressionLine sets the DynamicRangeCompressionLine field's value.
func (s *Ac3Settings) SetDynamicRangeCompressionLine(v string) *Ac3Settings {
	s.DynamicRangeCompressionLine = &v
	return s
}

// SetDynamicRangeCompressionProfile sets the DynamicRangeCompressionProfile field's value.
func (s *Ac3Settings) SetDynamicRangeCompressionProfile(v string) *Ac3Settings {
	s.DynamicRangeCompressionProfile = &v
	return s
}

// SetDynamicRangeCompressionRf sets the DynamicRangeCompressionRf field's value.
func (s *Ac3Settings) SetDynamicRangeCompressionRf(v string) *Ac3Settings {
	s.DynamicRangeCompressionRf = &v
	return s
}

// SetLfeFilter sets the LfeFilter field's value.
func (s *Ac3Settings) SetLfeFilter(v string) *Ac3Settings {
	s.LfeFilter = &v
	return s
}

// SetMetadataControl sets the MetadataControl field's value.
func (s *Ac3Settings) SetMetadataControl(v string) *Ac3Settings {
	s.MetadataControl = &v
	return s
}

// SetSampleRate sets the SampleRate field's value.
func (s *Ac3Settings) SetSampleRate(v int64) *Ac3Settings {
	s.SampleRate = &v
	return s
}

// Accelerated transcoding can significantly speed up jobs with long, visually
// complex content.
type AccelerationSettings struct {
	_ struct{} `type:"structure"`

	// Specify the conditions when the service will run your job with accelerated
	// transcoding.
	//
	// Mode is a required field
	Mode *string `locationName:"mode" type:"string" required:"true" enum:"AccelerationMode"`
}

// String returns the string representation
func (s AccelerationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AccelerationSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AccelerationSettings) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "AccelerationSettings"}
    if s.Mode == nil {
        invalidParams.Add(request.NewErrParamRequired("Mode"))
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetMode sets the Mode field's value.
func (s *AccelerationSettings) SetMode(v string) *AccelerationSettings {
    s.Mode = &v
    return s
}

// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value AIFF.
type AiffSettings struct {
    _ struct{} `type:"structure"`

    // Specify Bit depth (BitDepth), in bits per sample, to choose the encoding
    // quality for this audio track.
    BitDepth *int64 `locationName:"bitDepth" min:"16" type:"integer"`

    // Specify the number of channels in this output audio track. Valid values are
    // 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64.
    Channels *int64 `locationName:"channels" min:"1" type:"integer"`

    // Sample rate in Hz.
    SampleRate *int64 `locationName:"sampleRate" min:"8000" type:"integer"`
}

// String returns the string representation
func (s AiffSettings) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AiffSettings) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Only the minimum-value constraints that appear in the struct tags are
// checked; nil fields are always accepted.
func (s *AiffSettings) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "AiffSettings"}
    if s.BitDepth != nil && *s.BitDepth < 16 {
        invalidParams.Add(request.NewErrParamMinValue("BitDepth", 16))
    }
    if s.Channels != nil && *s.Channels < 1 {
        invalidParams.Add(request.NewErrParamMinValue("Channels", 1))
    }
    if s.SampleRate != nil && *s.SampleRate < 8000 {
        invalidParams.Add(request.NewErrParamMinValue("SampleRate", 8000))
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetBitDepth sets the BitDepth field's value.
func (s *AiffSettings) SetBitDepth(v int64) *AiffSettings {
    s.BitDepth = &v
    return s
}

// SetChannels sets the Channels field's value.
func (s *AiffSettings) SetChannels(v int64) *AiffSettings {
    s.Channels = &v
    return s
}

// SetSampleRate sets the SampleRate field's value.
func (s *AiffSettings) SetSampleRate(v int64) *AiffSettings {
    s.SampleRate = &v
    return s
}

// Settings for ancillary captions source.
// All fields are optional; Validate enforces only the minimum on
// SourceAncillaryChannelNumber.
type AncillarySourceSettings struct {
    _ struct{} `type:"structure"`

    // Specify whether this set of input captions appears in your outputs in both
    // 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes
    // the captions data in two ways: it passes the 608 data through using the 608
    // compatibility bytes fields of the 708 wrapper, and it also translates the
    // 608 data into 708.
    Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"AncillaryConvert608To708"`

    // Specifies the 608 channel number in the ancillary data track from which to
    // extract captions. Unused for passthrough.
    SourceAncillaryChannelNumber *int64 `locationName:"sourceAncillaryChannelNumber" min:"1" type:"integer"`

    // By default, the service terminates any unterminated captions at the end of
    // each input. If you want the caption to continue onto your next input, disable
    // this setting.
    TerminateCaptions *string `locationName:"terminateCaptions" type:"string" enum:"AncillaryTerminateCaptions"`
}

// String returns the string representation
func (s AncillarySourceSettings) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AncillarySourceSettings) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AncillarySourceSettings) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "AncillarySourceSettings"}
    if s.SourceAncillaryChannelNumber != nil && *s.SourceAncillaryChannelNumber < 1 {
        invalidParams.Add(request.NewErrParamMinValue("SourceAncillaryChannelNumber", 1))
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetConvert608To708 sets the Convert608To708 field's value.
func (s *AncillarySourceSettings) SetConvert608To708(v string) *AncillarySourceSettings {
    s.Convert608To708 = &v
    return s
}

// SetSourceAncillaryChannelNumber sets the SourceAncillaryChannelNumber field's value.
func (s *AncillarySourceSettings) SetSourceAncillaryChannelNumber(v int64) *AncillarySourceSettings {
    s.SourceAncillaryChannelNumber = &v
    return s
}

// SetTerminateCaptions sets the TerminateCaptions field's value.
func (s *AncillarySourceSettings) SetTerminateCaptions(v string) *AncillarySourceSettings {
    s.TerminateCaptions = &v
    return s
}

// Associates the Amazon Resource Name (ARN) of an AWS Certificate Manager (ACM)
// certificate with an AWS Elemental MediaConvert resource.
// This is the input shape for the AssociateCertificate operation.
type AssociateCertificateInput struct {
    _ struct{} `type:"structure"`

    // The ARN of the ACM certificate that you want to associate with your MediaConvert
    // resource.
    //
    // Arn is a required field
    Arn *string `locationName:"arn" type:"string" required:"true"`
}

// String returns the string representation
func (s AssociateCertificateInput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AssociateCertificateInput) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AssociateCertificateInput) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "AssociateCertificateInput"}
    if s.Arn == nil {
        invalidParams.Add(request.NewErrParamRequired("Arn"))
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetArn sets the Arn field's value.
func (s *AssociateCertificateInput) SetArn(v string) *AssociateCertificateInput {
    s.Arn = &v
    return s
}

// Successful association of Certificate Manager Amazon Resource Name (ARN)
// with MediaConvert returns an OK message.
type AssociateCertificateOutput struct {
    _ struct{} `type:"structure"`
}

// String returns the string representation
func (s AssociateCertificateOutput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AssociateCertificateOutput) GoString() string {
    return s.String()
}

// When you mimic a multi-channel audio layout with multiple mono-channel tracks,
// you can tag each channel layout manually. For example, you would tag the
// tracks that contain your left, right, and center audio with Left (L), Right
// (R), and Center (C), respectively. When you don't specify a value, MediaConvert
// labels your track as Center (C) by default. To use audio layout tagging,
// your output must be in a QuickTime (.mov) container; your audio codec must
// be AAC, WAV, or AIFF; and you must set up your audio track to have only one
// channel.
type AudioChannelTaggingSettings struct {
    _ struct{} `type:"structure"`

    // You can add a tag for this mono-channel audio track to mimic its placement
    // in a multi-channel layout. For example, if this track is the left surround
    // channel, choose Left surround (LS).
    ChannelTag *string `locationName:"channelTag" type:"string" enum:"AudioChannelTag"`
}

// String returns the string representation
func (s AudioChannelTaggingSettings) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AudioChannelTaggingSettings) GoString() string {
    return s.String()
}

// SetChannelTag sets the ChannelTag field's value.
func (s *AudioChannelTaggingSettings) SetChannelTag(v string) *AudioChannelTaggingSettings {
    s.ChannelTag = &v
    return s
}

// Settings related to audio encoding. The settings in this group vary depending
// on the value that you choose for your audio codec.
type AudioCodecSettings struct {
    _ struct{} `type:"structure"`

    // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
    // the value AAC. The service accepts one of two mutually exclusive groups of
    // AAC settings--VBR and CBR. To select one of these modes, set the value of
    // Bitrate control mode (rateControlMode) to "VBR" or "CBR". In VBR mode, you
    // control the audio quality with the setting VBR quality (vbrQuality). In CBR
    // mode, you use the setting Bitrate (bitrate). Defaults and valid values depend
    // on the rate control mode.
    AacSettings *AacSettings `locationName:"aacSettings" type:"structure"`

    // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
    // the value AC3.
    Ac3Settings *Ac3Settings `locationName:"ac3Settings" type:"structure"`

    // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
    // the value AIFF.
    AiffSettings *AiffSettings `locationName:"aiffSettings" type:"structure"`

    // Choose the audio codec for this output. Note that the option Dolby Digital
    // passthrough (PASSTHROUGH) applies only to Dolby Digital and Dolby Digital
    // Plus audio inputs. Make sure that you choose a codec that's supported with
    // your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio
    // For audio-only outputs, make sure that both your input audio codec and your
    // output audio codec are supported for audio-only workflows. For more information,
    // see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only
    // and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output
    Codec *string `locationName:"codec" type:"string" enum:"AudioCodec"`

    // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
    // the value EAC3_ATMOS.
    Eac3AtmosSettings *Eac3AtmosSettings `locationName:"eac3AtmosSettings" type:"structure"`

    // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
    // the value EAC3.
    Eac3Settings *Eac3Settings `locationName:"eac3Settings" type:"structure"`

    // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
    // the value MP2.
    Mp2Settings *Mp2Settings `locationName:"mp2Settings" type:"structure"`

    // Required when you set Codec, under AudioDescriptions>CodecSettings, to the
    // value MP3.
    Mp3Settings *Mp3Settings `locationName:"mp3Settings" type:"structure"`

    // Required when you set Codec, under AudioDescriptions>CodecSettings, to the
    // value OPUS.
    OpusSettings *OpusSettings `locationName:"opusSettings" type:"structure"`

    // Required when you set Codec, under AudioDescriptions>CodecSettings, to the
    // value Vorbis.
    VorbisSettings *VorbisSettings `locationName:"vorbisSettings" type:"structure"`

    // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
    // the value WAV.
    WavSettings *WavSettings `locationName:"wavSettings" type:"structure"`
}

// String returns the string representation
func (s AudioCodecSettings) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AudioCodecSettings) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Validation is recursive: each non-nil codec-specific settings struct is
// validated in turn and any nested failures are collected into the returned
// error.
func (s *AudioCodecSettings) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "AudioCodecSettings"}
    if s.AacSettings != nil {
        if err := s.AacSettings.Validate(); err != nil {
            invalidParams.AddNested("AacSettings", err.(request.ErrInvalidParams))
        }
    }
    if s.Ac3Settings != nil {
        if err := s.Ac3Settings.Validate(); err != nil {
            invalidParams.AddNested("Ac3Settings", err.(request.ErrInvalidParams))
        }
    }
    if s.AiffSettings != nil {
        if err := s.AiffSettings.Validate(); err != nil {
            invalidParams.AddNested("AiffSettings", err.(request.ErrInvalidParams))
        }
    }
    if s.Eac3AtmosSettings != nil {
        if err := s.Eac3AtmosSettings.Validate(); err != nil {
            invalidParams.AddNested("Eac3AtmosSettings", err.(request.ErrInvalidParams))
        }
    }
    if s.Eac3Settings != nil {
        if err := s.Eac3Settings.Validate(); err != nil {
            invalidParams.AddNested("Eac3Settings", err.(request.ErrInvalidParams))
        }
    }
    if s.Mp2Settings != nil {
        if err := s.Mp2Settings.Validate(); err != nil {
            invalidParams.AddNested("Mp2Settings", err.(request.ErrInvalidParams))
        }
    }
    if s.Mp3Settings != nil {
        if err := s.Mp3Settings.Validate(); err != nil {
            invalidParams.AddNested("Mp3Settings", err.(request.ErrInvalidParams))
        }
    }
    if s.OpusSettings != nil {
        if err := s.OpusSettings.Validate(); err != nil {
            invalidParams.AddNested("OpusSettings", err.(request.ErrInvalidParams))
        }
    }
    if s.VorbisSettings != nil {
        if err := s.VorbisSettings.Validate(); err != nil {
            invalidParams.AddNested("VorbisSettings", err.(request.ErrInvalidParams))
        }
    }
    if s.WavSettings != nil {
        if err := s.WavSettings.Validate(); err != nil {
            invalidParams.AddNested("WavSettings", err.(request.ErrInvalidParams))
        }
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetAacSettings sets the AacSettings field's value.
func (s *AudioCodecSettings) SetAacSettings(v *AacSettings) *AudioCodecSettings {
    s.AacSettings = v
    return s
}

// SetAc3Settings sets the Ac3Settings field's value.
func (s *AudioCodecSettings) SetAc3Settings(v *Ac3Settings) *AudioCodecSettings {
    s.Ac3Settings = v
    return s
}

// SetAiffSettings sets the AiffSettings field's value.
func (s *AudioCodecSettings) SetAiffSettings(v *AiffSettings) *AudioCodecSettings {
    s.AiffSettings = v
    return s
}

// SetCodec sets the Codec field's value.
func (s *AudioCodecSettings) SetCodec(v string) *AudioCodecSettings {
    s.Codec = &v
    return s
}

// SetEac3AtmosSettings sets the Eac3AtmosSettings field's value.
func (s *AudioCodecSettings) SetEac3AtmosSettings(v *Eac3AtmosSettings) *AudioCodecSettings {
    s.Eac3AtmosSettings = v
    return s
}

// SetEac3Settings sets the Eac3Settings field's value.
func (s *AudioCodecSettings) SetEac3Settings(v *Eac3Settings) *AudioCodecSettings {
    s.Eac3Settings = v
    return s
}

// SetMp2Settings sets the Mp2Settings field's value.
func (s *AudioCodecSettings) SetMp2Settings(v *Mp2Settings) *AudioCodecSettings {
    s.Mp2Settings = v
    return s
}

// SetMp3Settings sets the Mp3Settings field's value.
func (s *AudioCodecSettings) SetMp3Settings(v *Mp3Settings) *AudioCodecSettings {
    s.Mp3Settings = v
    return s
}

// SetOpusSettings sets the OpusSettings field's value.
func (s *AudioCodecSettings) SetOpusSettings(v *OpusSettings) *AudioCodecSettings {
    s.OpusSettings = v
    return s
}

// SetVorbisSettings sets the VorbisSettings field's value.
func (s *AudioCodecSettings) SetVorbisSettings(v *VorbisSettings) *AudioCodecSettings {
    s.VorbisSettings = v
    return s
}

// SetWavSettings sets the WavSettings field's value.
func (s *AudioCodecSettings) SetWavSettings(v *WavSettings) *AudioCodecSettings {
    s.WavSettings = v
    return s
}

// Settings related to one audio tab on the MediaConvert console. In your job
// JSON, an instance of AudioDescription is equivalent to one audio tab in the
// console. Usually, one audio tab corresponds to one output audio track. Depending
// on how you set up your input audio selectors and whether you use audio selector
// groups, one audio tab can correspond to a group of output audio tracks.
type AudioDescription struct {
    _ struct{} `type:"structure"`

    // When you mimic a multi-channel audio layout with multiple mono-channel tracks,
    // you can tag each channel layout manually. For example, you would tag the
    // tracks that contain your left, right, and center audio with Left (L), Right
    // (R), and Center (C), respectively. When you don't specify a value, MediaConvert
    // labels your track as Center (C) by default. To use audio layout tagging,
    // your output must be in a QuickTime (.mov) container; your audio codec must
    // be AAC, WAV, or AIFF; and you must set up your audio track to have only one
    // channel.
    AudioChannelTaggingSettings *AudioChannelTaggingSettings `locationName:"audioChannelTaggingSettings" type:"structure"`

    // Advanced audio normalization settings. Ignore these settings unless you need
    // to comply with a loudness standard.
    AudioNormalizationSettings *AudioNormalizationSettings `locationName:"audioNormalizationSettings" type:"structure"`

    // Specifies which audio data to use from each input. In the simplest case,
    // specify an "Audio Selector":#inputs-audio_selector by name based on its order
    // within each input. For example if you specify "Audio Selector 3", then the
    // third audio selector will be used from each input. If an input does not have
    // an "Audio Selector 3", then the audio selector marked as "default" in that
    // input will be used. If there is no audio selector marked as "default", silence
    // will be inserted for the duration of that input. Alternatively, an "Audio
    // Selector Group":#inputs-audio_selector_group name may be specified, with
    // similar default/silence behavior. If no audio_source_name is specified, then
    // "Audio Selector 1" will be chosen automatically.
    AudioSourceName *string `locationName:"audioSourceName" type:"string"`

    // Applies only if Follow Input Audio Type is unchecked (false). A number between
    // 0 and 255. The following are defined in ISO-IEC 13818-1: 0 = Undefined, 1
    // = Clean Effects, 2 = Hearing Impaired, 3 = Visually Impaired Commentary,
    // 4-255 = Reserved.
    AudioType *int64 `locationName:"audioType" type:"integer"`

    // When set to FOLLOW_INPUT, if the input contains an ISO 639 audio_type, then
    // that value is passed through to the output. If the input contains no ISO
    // 639 audio_type, the value in Audio Type is included in the output. Otherwise
    // the value in Audio Type is included in the output. Note that this field and
    // audioType are both ignored if audioDescriptionBroadcasterMix is set to BROADCASTER_MIXED_AD.
    AudioTypeControl *string `locationName:"audioTypeControl" type:"string" enum:"AudioTypeControl"`

    // Settings related to audio encoding. The settings in this group vary depending
    // on the value that you choose for your audio codec.
    CodecSettings *AudioCodecSettings `locationName:"codecSettings" type:"structure"`

    // Specify the language for this audio output track. The service puts this language
    // code into your output audio track when you set Language code control (AudioLanguageCodeControl)
    // to Use configured (USE_CONFIGURED). The service also uses your specified
    // custom language code when you set Language code control (AudioLanguageCodeControl)
    // to Follow input (FOLLOW_INPUT), but your input file doesn't specify a language
    // code. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming
    // outputs, you can also use any other code in the full RFC-5646 specification.
    // Streaming outputs are those that are in one of the following output groups:
    // CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming.
    CustomLanguageCode *string `locationName:"customLanguageCode" type:"string"`

    // Indicates the language of the audio output track. The ISO 639 language specified
    // in the 'Language Code' drop down will be used when 'Follow Input Language
    // Code' is not selected or when 'Follow Input Language Code' is selected but
    // there is no ISO 639 language code specified by the input.
    LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"`

    // Specify which source for language code takes precedence for this audio track.
    // When you choose Follow input (FOLLOW_INPUT), the service uses the language
    // code from the input track if it's present. If there's no language code on
    // the input track, the service uses the code that you specify in the setting
    // Language code (languageCode or customLanguageCode). When you choose Use configured
    // (USE_CONFIGURED), the service uses the language code that you specify.
    LanguageCodeControl *string `locationName:"languageCodeControl" type:"string" enum:"AudioLanguageCodeControl"`

    // Advanced audio remixing settings.
    RemixSettings *RemixSettings `locationName:"remixSettings" type:"structure"`

    // Specify a label for this output audio stream. For example, "English", "Director
    // commentary", or "track_2". For streaming outputs, MediaConvert passes this
    // information into destination manifests for display on the end-viewer's player
    // device. For outputs in other output groups, the service ignores this setting.
    StreamName *string `locationName:"streamName" type:"string"`
}

// String returns the string representation
func (s AudioDescription) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AudioDescription) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Validation recurses into the nested normalization, codec, and remix settings.
func (s *AudioDescription) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "AudioDescription"}
    if s.AudioNormalizationSettings != nil {
        if err := s.AudioNormalizationSettings.Validate(); err != nil {
            invalidParams.AddNested("AudioNormalizationSettings", err.(request.ErrInvalidParams))
        }
    }
    if s.CodecSettings != nil {
        if err := s.CodecSettings.Validate(); err != nil {
            invalidParams.AddNested("CodecSettings", err.(request.ErrInvalidParams))
        }
    }
    if s.RemixSettings != nil {
        if err := s.RemixSettings.Validate(); err != nil {
            invalidParams.AddNested("RemixSettings", err.(request.ErrInvalidParams))
        }
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetAudioChannelTaggingSettings sets the AudioChannelTaggingSettings field's value.
func (s *AudioDescription) SetAudioChannelTaggingSettings(v *AudioChannelTaggingSettings) *AudioDescription {
    s.AudioChannelTaggingSettings = v
    return s
}

// SetAudioNormalizationSettings sets the AudioNormalizationSettings field's value.
func (s *AudioDescription) SetAudioNormalizationSettings(v *AudioNormalizationSettings) *AudioDescription {
    s.AudioNormalizationSettings = v
    return s
}

// SetAudioSourceName sets the AudioSourceName field's value.
func (s *AudioDescription) SetAudioSourceName(v string) *AudioDescription {
    s.AudioSourceName = &v
    return s
}

// SetAudioType sets the AudioType field's value.
func (s *AudioDescription) SetAudioType(v int64) *AudioDescription {
    s.AudioType = &v
    return s
}

// SetAudioTypeControl sets the AudioTypeControl field's value.
func (s *AudioDescription) SetAudioTypeControl(v string) *AudioDescription {
    s.AudioTypeControl = &v
    return s
}

// SetCodecSettings sets the CodecSettings field's value.
func (s *AudioDescription) SetCodecSettings(v *AudioCodecSettings) *AudioDescription {
    s.CodecSettings = v
    return s
}

// SetCustomLanguageCode sets the CustomLanguageCode field's value.
func (s *AudioDescription) SetCustomLanguageCode(v string) *AudioDescription {
    s.CustomLanguageCode = &v
    return s
}

// SetLanguageCode sets the LanguageCode field's value.
func (s *AudioDescription) SetLanguageCode(v string) *AudioDescription {
    s.LanguageCode = &v
    return s
}

// SetLanguageCodeControl sets the LanguageCodeControl field's value.
func (s *AudioDescription) SetLanguageCodeControl(v string) *AudioDescription {
    s.LanguageCodeControl = &v
    return s
}

// SetRemixSettings sets the RemixSettings field's value.
func (s *AudioDescription) SetRemixSettings(v *RemixSettings) *AudioDescription {
    s.RemixSettings = v
    return s
}

// SetStreamName sets the StreamName field's value.
func (s *AudioDescription) SetStreamName(v string) *AudioDescription {
    s.StreamName = &v
    return s
}

// Advanced audio normalization settings. Ignore these settings unless you need
// to comply with a loudness standard.
type AudioNormalizationSettings struct {
    _ struct{} `type:"structure"`

    // Choose one of the following audio normalization algorithms: ITU-R BS.1770-1:
    // Ungated loudness. A measurement of ungated average loudness for an entire
    // piece of content, suitable for measurement of short-form content under ATSC
    // recommendation A/85. Supports up to 5.1 audio channels. ITU-R BS.1770-2:
    // Gated loudness. A measurement of gated average loudness compliant with the
    // requirements of EBU-R128. Supports up to 5.1 audio channels. ITU-R BS.1770-3:
    // Modified peak. The same loudness measurement algorithm as 1770-2, with an
    // updated true peak measurement. ITU-R BS.1770-4: Higher channel count. Allows
    // for more audio channels than the other algorithms, including configurations
    // such as 7.1.
    Algorithm *string `locationName:"algorithm" type:"string" enum:"AudioNormalizationAlgorithm"`

    // When enabled, the output audio is corrected using the chosen algorithm. If
    // disabled, the audio will be measured but not adjusted.
    AlgorithmControl *string `locationName:"algorithmControl" type:"string" enum:"AudioNormalizationAlgorithmControl"`

    // Content measuring above this level will be corrected to the target level.
    // Content measuring below this level will not be corrected.
    CorrectionGateLevel *int64 `locationName:"correctionGateLevel" type:"integer"`

    // If set to LOG, log each output's audio track loudness to a CSV file.
    LoudnessLogging *string `locationName:"loudnessLogging" type:"string" enum:"AudioNormalizationLoudnessLogging"`

    // If set to TRUE_PEAK, calculate and log the TruePeak for each output's audio
    // track loudness.
    PeakCalculation *string `locationName:"peakCalculation" type:"string" enum:"AudioNormalizationPeakCalculation"`

    // When you use Audio normalization (AudioNormalizationSettings), optionally
    // use this setting to specify a target loudness. If you don't specify a value
    // here, the encoder chooses a value for you, based on the algorithm that you
    // choose for Algorithm (algorithm). If you choose algorithm 1770-1, the encoder
    // will choose -24 LKFS; otherwise, the encoder will choose -23 LKFS.
    TargetLkfs *float64 `locationName:"targetLkfs" type:"double"`
}

// String returns the string representation
func (s AudioNormalizationSettings) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AudioNormalizationSettings) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AudioNormalizationSettings) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "AudioNormalizationSettings"}
    // CorrectionGateLevel values below -70 are rejected; all other fields are
    // not validated locally.
    if s.CorrectionGateLevel != nil && *s.CorrectionGateLevel < -70 {
        invalidParams.Add(request.NewErrParamMinValue("CorrectionGateLevel", -70))
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetAlgorithm sets the Algorithm field's value.
func (s *AudioNormalizationSettings) SetAlgorithm(v string) *AudioNormalizationSettings {
    s.Algorithm = &v
    return s
}

// SetAlgorithmControl sets the AlgorithmControl field's value.
func (s *AudioNormalizationSettings) SetAlgorithmControl(v string) *AudioNormalizationSettings {
    s.AlgorithmControl = &v
    return s
}

// SetCorrectionGateLevel sets the CorrectionGateLevel field's value.
func (s *AudioNormalizationSettings) SetCorrectionGateLevel(v int64) *AudioNormalizationSettings {
    s.CorrectionGateLevel = &v
    return s
}

// SetLoudnessLogging sets the LoudnessLogging field's value.
func (s *AudioNormalizationSettings) SetLoudnessLogging(v string) *AudioNormalizationSettings {
    s.LoudnessLogging = &v
    return s
}

// SetPeakCalculation sets the PeakCalculation field's value.
func (s *AudioNormalizationSettings) SetPeakCalculation(v string) *AudioNormalizationSettings {
    s.PeakCalculation = &v
    return s
}

// SetTargetLkfs sets the TargetLkfs field's value.
func (s *AudioNormalizationSettings) SetTargetLkfs(v float64) *AudioNormalizationSettings {
    s.TargetLkfs = &v
    return s
}

// Use Audio selectors (AudioSelectors) to specify a track or set of tracks
// from the input that you will use in your outputs. You can use multiple Audio
// selectors per input.
type AudioSelector struct {
    _ struct{} `type:"structure"`

    // Selects a specific language code from within an audio source, using the ISO
    // 639-2 or ISO 639-3 three-letter language code
    CustomLanguageCode *string `locationName:"customLanguageCode" min:"3" type:"string"`

    // Enable this setting on one audio selector to set it as the default for the
    // job. The service uses this default for outputs where it can't find the specified
    // input audio. If you don't set a default, those outputs have no audio.
    DefaultSelection *string `locationName:"defaultSelection" type:"string" enum:"AudioDefaultSelection"`

    // Specifies audio data from an external file source.
    ExternalAudioFileInput *string `locationName:"externalAudioFileInput" type:"string"`

    // Settings specific to audio sources in an HLS alternate rendition group. Specify
    // the properties (renditionGroupId, renditionName or renditionLanguageCode)
    // to identify the unique audio track among the alternative rendition groups
    // present in the HLS manifest. If no unique track is found, or multiple tracks
    // match the properties provided, the job fails. If no properties in hlsRenditionGroupSettings
    // are specified, the default audio track within the video segment is chosen.
    // If there is no audio within video segment, the alternative audio with DEFAULT=YES
    // is chosen instead.
    HlsRenditionGroupSettings *HlsRenditionGroupSettings `locationName:"hlsRenditionGroupSettings" type:"structure"`

    // Selects a specific language code from within an audio source.
    LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"`

    // Specifies a time delta in milliseconds to offset the audio from the input
    // video.
    Offset *int64 `locationName:"offset" type:"integer"`

    // Selects a specific PID from within an audio source (e.g. 257 selects PID
    // 0x101).
    Pids []*int64 `locationName:"pids" type:"list"`

    // Use this setting for input streams that contain Dolby E, to have the service
    // extract specific program data from the track. To select multiple programs,
    // create multiple selectors with the same Track and different Program numbers.
    // In the console, this setting is visible when you set Selector type to Track.
    // Choose the program number from the dropdown list. If you are sending a JSON
    // file, provide the program ID, which is part of the audio metadata. If your
    // input file has incorrect metadata, you can choose All channels instead of
    // a program number to have the service ignore the program IDs and include all
    // the programs in the track.
    ProgramSelection *int64 `locationName:"programSelection" type:"integer"`

    // Use these settings to reorder the audio channels of one input to match those
    // of another input. This allows you to combine the two files into a single
    // output, one after the other.
    RemixSettings *RemixSettings `locationName:"remixSettings" type:"structure"`

    // Specifies the type of the audio selector.
    SelectorType *string `locationName:"selectorType" type:"string" enum:"AudioSelectorType"`

    // Identify a track from the input audio to include in this selector by entering
    // the track index number. To include several tracks in a single audio selector,
    // specify multiple tracks as follows. Using the console, enter a comma-separated
    // list. For example, type "1,2,3" to include tracks 1 through 3. Specifying
    // directly in your JSON job file, provide the track numbers in an array. For
    // example, "tracks": [1,2,3].
    Tracks []*int64 `locationName:"tracks" type:"list"`
}

// String returns the string representation
func (s AudioSelector) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AudioSelector) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AudioSelector) Validate() error { invalidParams := request.ErrInvalidParams{Context: "AudioSelector"} if s.CustomLanguageCode != nil && len(*s.CustomLanguageCode) < 3 { invalidParams.Add(request.NewErrParamMinLen("CustomLanguageCode", 3)) } if s.Offset != nil && *s.Offset < -2.147483648e+09 { invalidParams.Add(request.NewErrParamMinValue("Offset", -2.147483648e+09)) } if s.RemixSettings != nil { if err := s.RemixSettings.Validate(); err != nil { invalidParams.AddNested("RemixSettings", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetCustomLanguageCode sets the CustomLanguageCode field's value. func (s *AudioSelector) SetCustomLanguageCode(v string) *AudioSelector { s.CustomLanguageCode = &v return s } // SetDefaultSelection sets the DefaultSelection field's value. func (s *AudioSelector) SetDefaultSelection(v string) *AudioSelector { s.DefaultSelection = &v return s } // SetExternalAudioFileInput sets the ExternalAudioFileInput field's value. func (s *AudioSelector) SetExternalAudioFileInput(v string) *AudioSelector { s.ExternalAudioFileInput = &v return s } // SetHlsRenditionGroupSettings sets the HlsRenditionGroupSettings field's value. func (s *AudioSelector) SetHlsRenditionGroupSettings(v *HlsRenditionGroupSettings) *AudioSelector { s.HlsRenditionGroupSettings = v return s } // SetLanguageCode sets the LanguageCode field's value. func (s *AudioSelector) SetLanguageCode(v string) *AudioSelector { s.LanguageCode = &v return s } // SetOffset sets the Offset field's value. func (s *AudioSelector) SetOffset(v int64) *AudioSelector { s.Offset = &v return s } // SetPids sets the Pids field's value. func (s *AudioSelector) SetPids(v []*int64) *AudioSelector { s.Pids = v return s } // SetProgramSelection sets the ProgramSelection field's value. 
func (s *AudioSelector) SetProgramSelection(v int64) *AudioSelector {
	s.ProgramSelection = &v
	return s
}

// SetRemixSettings sets the RemixSettings field's value.
func (s *AudioSelector) SetRemixSettings(v *RemixSettings) *AudioSelector {
	s.RemixSettings = v
	return s
}

// SetSelectorType sets the SelectorType field's value.
func (s *AudioSelector) SetSelectorType(v string) *AudioSelector {
	s.SelectorType = &v
	return s
}

// SetTracks sets the Tracks field's value.
func (s *AudioSelector) SetTracks(v []*int64) *AudioSelector {
	s.Tracks = v
	return s
}

// Use audio selector groups to combine multiple sidecar audio inputs so that
// you can assign them to a single output audio tab (AudioDescription). Note
// that, if you're working with embedded audio, it's simpler to assign multiple
// input tracks into a single audio selector rather than use an audio selector
// group.
type AudioSelectorGroup struct {
	_ struct{} `type:"structure"`

	// Name of an Audio Selector within the same input to include in the group.
	// Audio selector names are standardized, based on their order within the input
	// (e.g., "Audio Selector 1"). The audio selector name parameter can be repeated
	// to add any number of audio selectors to the group.
	AudioSelectorNames []*string `locationName:"audioSelectorNames" type:"list"`
}

// String returns the string representation
func (s AudioSelectorGroup) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AudioSelectorGroup) GoString() string {
	return s.String()
}

// SetAudioSelectorNames sets the AudioSelectorNames field's value.
func (s *AudioSelectorGroup) SetAudioSelectorNames(v []*string) *AudioSelectorGroup {
	s.AudioSelectorNames = v
	return s
}

// Use automated ABR to have MediaConvert set up the renditions in your ABR
// package for you automatically, based on characteristics of your input video.
// This feature optimizes video quality while minimizing the overall size of
// your ABR package.
type AutomatedAbrSettings struct {
	_ struct{} `type:"structure"`

	// Optional. The maximum target bit rate used in your automated ABR stack. Use
	// this value to set an upper limit on the bandwidth consumed by the highest-quality
	// rendition. This is the rendition that is delivered to viewers with the fastest
	// internet connections. If you don't specify a value, MediaConvert uses 8,000,000
	// (8 mb/s) by default.
	MaxAbrBitrate *int64 `locationName:"maxAbrBitrate" min:"100000" type:"integer"`

	// Optional. The maximum number of renditions that MediaConvert will create
	// in your automated ABR stack. The number of renditions is determined automatically,
	// based on analysis of each job, but will never exceed this limit. When you
	// set this to Auto in the console, which is equivalent to excluding it from
	// your JSON job specification, MediaConvert defaults to a limit of 15.
	MaxRenditions *int64 `locationName:"maxRenditions" min:"3" type:"integer"`

	// Optional. The minimum target bitrate used in your automated ABR stack. Use
	// this value to set a lower limit on the bitrate of video delivered to viewers
	// with slow internet connections. If you don't specify a value, MediaConvert
	// uses 600,000 (600 kb/s) by default.
	MinAbrBitrate *int64 `locationName:"minAbrBitrate" min:"100000" type:"integer"`
}

// String returns the string representation
func (s AutomatedAbrSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AutomatedAbrSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AutomatedAbrSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AutomatedAbrSettings"}
	// Minimum bounds below mirror the min:"..." struct tags on each field.
	if s.MaxAbrBitrate != nil && *s.MaxAbrBitrate < 100000 {
		invalidParams.Add(request.NewErrParamMinValue("MaxAbrBitrate", 100000))
	}
	if s.MaxRenditions != nil && *s.MaxRenditions < 3 {
		invalidParams.Add(request.NewErrParamMinValue("MaxRenditions", 3))
	}
	if s.MinAbrBitrate != nil && *s.MinAbrBitrate < 100000 {
		invalidParams.Add(request.NewErrParamMinValue("MinAbrBitrate", 100000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetMaxAbrBitrate sets the MaxAbrBitrate field's value.
func (s *AutomatedAbrSettings) SetMaxAbrBitrate(v int64) *AutomatedAbrSettings {
	s.MaxAbrBitrate = &v
	return s
}

// SetMaxRenditions sets the MaxRenditions field's value.
func (s *AutomatedAbrSettings) SetMaxRenditions(v int64) *AutomatedAbrSettings {
	s.MaxRenditions = &v
	return s
}

// SetMinAbrBitrate sets the MinAbrBitrate field's value.
func (s *AutomatedAbrSettings) SetMinAbrBitrate(v int64) *AutomatedAbrSettings {
	s.MinAbrBitrate = &v
	return s
}

// Use automated encoding to have MediaConvert choose your encoding settings
// for you, based on characteristics of your input video.
type AutomatedEncodingSettings struct {
	_ struct{} `type:"structure"`

	// Use automated ABR to have MediaConvert set up the renditions in your ABR
	// package for you automatically, based on characteristics of your input video.
	// This feature optimizes video quality while minimizing the overall size of
	// your ABR package.
	AbrSettings *AutomatedAbrSettings `locationName:"abrSettings" type:"structure"`
}

// String returns the string representation
func (s AutomatedEncodingSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AutomatedEncodingSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AutomatedEncodingSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AutomatedEncodingSettings"}
	if s.AbrSettings != nil {
		if err := s.AbrSettings.Validate(); err != nil {
			invalidParams.AddNested("AbrSettings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAbrSettings sets the AbrSettings field's value.
func (s *AutomatedEncodingSettings) SetAbrSettings(v *AutomatedAbrSettings) *AutomatedEncodingSettings {
	s.AbrSettings = v
	return s
}

// Settings for quality-defined variable bitrate encoding with the AV1 codec.
// Required when you set Rate control mode to QVBR. Not valid when you set Rate
// control mode to a value other than QVBR, or when you don't define Rate control
// mode.
type Av1QvbrSettings struct {
	_ struct{} `type:"structure"`

	// Required when you use QVBR rate control mode. That is, when you specify qvbrSettings
	// within av1Settings. Specify the general target quality level for this output,
	// from 1 to 10. Use higher numbers for greater quality. Level 10 results in
	// nearly lossless compression. The quality level for most broadcast-quality
	// transcodes is between 6 and 9. Optionally, to specify a value between whole
	// numbers, also provide a value for the setting qvbrQualityLevelFineTune. For
	// example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel
	// to 7 and set qvbrQualityLevelFineTune to .33.
	QvbrQualityLevel *int64 `locationName:"qvbrQualityLevel" min:"1" type:"integer"`

	// Optional. Specify a value here to set the QVBR quality to a level that is
	// between whole numbers. For example, if you want your QVBR quality level to
	// be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.
	// MediaConvert rounds your QVBR quality level to the nearest third of a whole
	// number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune
	// to .25, your actual QVBR quality level is 7.33.
	QvbrQualityLevelFineTune *float64 `locationName:"qvbrQualityLevelFineTune" type:"double"`
}

// String returns the string representation
func (s Av1QvbrSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Av1QvbrSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Av1QvbrSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Av1QvbrSettings"}
	if s.QvbrQualityLevel != nil && *s.QvbrQualityLevel < 1 {
		invalidParams.Add(request.NewErrParamMinValue("QvbrQualityLevel", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetQvbrQualityLevel sets the QvbrQualityLevel field's value.
func (s *Av1QvbrSettings) SetQvbrQualityLevel(v int64) *Av1QvbrSettings {
	s.QvbrQualityLevel = &v
	return s
}

// SetQvbrQualityLevelFineTune sets the QvbrQualityLevelFineTune field's value.
func (s *Av1QvbrSettings) SetQvbrQualityLevelFineTune(v float64) *Av1QvbrSettings {
	s.QvbrQualityLevelFineTune = &v
	return s
}

// Required when you set Codec, under VideoDescription>CodecSettings to the
// value AV1.
type Av1Settings struct {
	_ struct{} `type:"structure"`

	// Specify the strength of any adaptive quantization filters that you enable.
	// The value that you choose here applies to Spatial adaptive quantization (spatialAdaptiveQuantization).
	AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"Av1AdaptiveQuantization"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Av1FramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Av1FramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// Specify the GOP length (keyframe interval) in frames. With AV1, MediaConvert
	// doesn't support GOP length in seconds. This value must be greater than zero
	// and preferably equal to 1 + ((numberBFrames + 1) * x), where x is an integer
	// value.
	GopSize *float64 `locationName:"gopSize" type:"double"`

	// Maximum bitrate in bits/second. For example, enter five megabits per second
	// as 5000000. Required when Rate control mode is QVBR.
	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`

	// Specify from the number of B-frames, in the range of 0-15. For AV1 encoding,
	// we recommend using 7 or 15. Choose a larger number for a lower bitrate and
	// smaller file size; choose a smaller number for better video quality.
	NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"`

	// Settings for quality-defined variable bitrate encoding with the AV1 codec.
	// Required when you set Rate control mode to QVBR. Not valid when you set Rate
	// control mode to a value other than QVBR, or when you don't define Rate control
	// mode.
	QvbrSettings *Av1QvbrSettings `locationName:"qvbrSettings" type:"structure"`

	// With AV1 outputs, for rate control mode, MediaConvert supports only quality-defined
	// variable bitrate (QVBR). You can't use CBR or VBR.
	RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Av1RateControlMode"`

	// Specify the number of slices per picture. This value must be 1, 2, 4, 8,
	// 16, or 32. For progressive pictures, this value must be less than or equal
	// to the number of macroblock rows. For interlaced pictures, this value must
	// be less than or equal to half the number of macroblock rows.
	Slices *int64 `locationName:"slices" min:"1" type:"integer"`

	// Keep the default value, Enabled (ENABLED), to adjust quantization within
	// each frame based on spatial variation of content complexity. When you enable
	// this feature, the encoder uses fewer bits on areas that can sustain more
	// distortion with no noticeable visual degradation and uses more bits on areas
	// where any small distortion will be noticeable. For example, complex textured
	// blocks are encoded with fewer bits and smooth textured blocks are encoded
	// with more bits. Enabling this feature will almost always improve your video
	// quality. Note, though, that this feature doesn't take into account where
	// the viewer's attention is likely to be. If viewers are likely to be focusing
	// their attention on a part of the screen with a lot of complex texture, you
	// might choose to disable this feature. Related setting: When you enable spatial
	// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization)
	// depending on your content. For homogeneous content, such as cartoons and
	// video games, set it to Low. For content with a wider variety of textures,
	// set it to High or Higher.
	SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"Av1SpatialAdaptiveQuantization"`
}

// String returns the string representation
func (s Av1Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Av1Settings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Av1Settings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Av1Settings"}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1))
	}
	if s.MaxBitrate != nil && *s.MaxBitrate < 1000 {
		invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000))
	}
	if s.Slices != nil && *s.Slices < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Slices", 1))
	}
	if s.QvbrSettings != nil {
		if err := s.QvbrSettings.Validate(); err != nil {
			invalidParams.AddNested("QvbrSettings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAdaptiveQuantization sets the AdaptiveQuantization field's value.
func (s *Av1Settings) SetAdaptiveQuantization(v string) *Av1Settings {
	s.AdaptiveQuantization = &v
	return s
}

// SetFramerateControl sets the FramerateControl field's value.
func (s *Av1Settings) SetFramerateControl(v string) *Av1Settings {
	s.FramerateControl = &v
	return s
}

// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value.
func (s *Av1Settings) SetFramerateConversionAlgorithm(v string) *Av1Settings {
	s.FramerateConversionAlgorithm = &v
	return s
}

// SetFramerateDenominator sets the FramerateDenominator field's value.
func (s *Av1Settings) SetFramerateDenominator(v int64) *Av1Settings {
	s.FramerateDenominator = &v
	return s
}

// SetFramerateNumerator sets the FramerateNumerator field's value.
func (s *Av1Settings) SetFramerateNumerator(v int64) *Av1Settings {
	s.FramerateNumerator = &v
	return s
}

// SetGopSize sets the GopSize field's value.
func (s *Av1Settings) SetGopSize(v float64) *Av1Settings {
	s.GopSize = &v
	return s
}

// SetMaxBitrate sets the MaxBitrate field's value.
func (s *Av1Settings) SetMaxBitrate(v int64) *Av1Settings {
	s.MaxBitrate = &v
	return s
}

// SetNumberBFramesBetweenReferenceFrames sets the NumberBFramesBetweenReferenceFrames field's value.
func (s *Av1Settings) SetNumberBFramesBetweenReferenceFrames(v int64) *Av1Settings {
	s.NumberBFramesBetweenReferenceFrames = &v
	return s
}

// SetQvbrSettings sets the QvbrSettings field's value.
func (s *Av1Settings) SetQvbrSettings(v *Av1QvbrSettings) *Av1Settings {
	s.QvbrSettings = v
	return s
}

// SetRateControlMode sets the RateControlMode field's value.
func (s *Av1Settings) SetRateControlMode(v string) *Av1Settings {
	s.RateControlMode = &v
	return s
}

// SetSlices sets the Slices field's value.
func (s *Av1Settings) SetSlices(v int64) *Av1Settings {
	s.Slices = &v
	return s
}

// SetSpatialAdaptiveQuantization sets the SpatialAdaptiveQuantization field's value.
func (s *Av1Settings) SetSpatialAdaptiveQuantization(v string) *Av1Settings {
	s.SpatialAdaptiveQuantization = &v
	return s
}

// Use ad avail blanking settings to specify your output content during SCTE-35
// triggered ad avails. You can blank your video or overlay it with an image.
// MediaConvert also removes any audio and embedded captions during the ad avail.
// For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ad-avail-blanking.html.
type AvailBlanking struct {
	_ struct{} `type:"structure"`

	// Blanking image to be used. Leave empty for solid black. Only bmp and png
	// images are supported.
	AvailBlankingImage *string `locationName:"availBlankingImage" min:"14" type:"string"`
}

// String returns the string representation
func (s AvailBlanking) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AvailBlanking) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AvailBlanking) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AvailBlanking"}
	if s.AvailBlankingImage != nil && len(*s.AvailBlankingImage) < 14 {
		invalidParams.Add(request.NewErrParamMinLen("AvailBlankingImage", 14))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAvailBlankingImage sets the AvailBlankingImage field's value.
func (s *AvailBlanking) SetAvailBlankingImage(v string) *AvailBlanking {
	s.AvailBlankingImage = &v
	return s
}

// Required when you choose AVC-Intra for your output video codec. For more
// information about the AVC-Intra settings, see the relevant specification.
// For detailed information about SD and HD in AVC-Intra, see https://ieeexplore.ieee.org/document/7290936.
// For information about 4K/2K in AVC-Intra, see https://pro-av.panasonic.net/en/avc-ultra/AVC-ULTRAoverview.pdf.
type AvcIntraSettings struct {
	_ struct{} `type:"structure"`

	// Specify the AVC-Intra class of your output. The AVC-Intra class selection
	// determines the output video bit rate depending on the frame rate of the output.
	// Outputs with higher class values have higher bitrates and improved image
	// quality. Note that for Class 4K/2K, MediaConvert supports only 4:2:2 chroma
	// subsampling.
	AvcIntraClass *string `locationName:"avcIntraClass" type:"string" enum:"AvcIntraClass"`

	// Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K (CLASS_4K_2K).
	// When you set AVC-Intra class to a different value, this object isn't allowed.
	AvcIntraUhdSettings *AvcIntraUhdSettings `locationName:"avcIntraUhdSettings" type:"structure"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"AvcIntraFramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"AvcIntraFramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"24" type:"integer"`

	// Choose the scan line type for the output. Keep the default value, Progressive
	// (PROGRESSIVE) to create a progressive output, regardless of the scan type
	// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
	// to create an output that's interlaced with the same field polarity throughout.
	// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
	// to produce outputs with the same field polarity as the source. For jobs that
	// have multiple inputs, the output field polarity might change over the course
	// of the output. Follow behavior depends on the input scan type. If the source
	// is interlaced, the output will be interlaced with the same polarity as the
	// source. If the source is progressive, the output will be interlaced with
	// top field first or bottom field first, depending on which of the Follow options
	// you choose.
	InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"AvcIntraInterlaceMode"`

	// Use this setting for interlaced outputs, when your output frame rate is half
	// of your input frame rate. In this situation, choose Optimized interlacing
	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
	// case, each progressive frame from the input corresponds to an interlaced
	// field in the output. Keep the default value, Basic interlacing (INTERLACED),
	// for all other output frame rates. With basic interlacing, MediaConvert performs
	// any frame rate conversion first and then interlaces the frames. When you
	// choose Optimized interlacing and you set your output frame rate to a value
	// that isn't suitable for optimized interlacing, MediaConvert automatically
	// falls back to basic interlacing. Required settings: To use optimized interlacing,
	// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
	// use optimized interlacing for hard telecine outputs. You must also set Interlace
	// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
	ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"AvcIntraScanTypeConversionMode"`

	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
	// second (fps). Enable slow PAL to create a 25 fps output. When you enable
	// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
	// your audio to keep it synchronized with the video. Note that enabling this
	// setting will slightly reduce the duration of your video. Required settings:
	// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
	// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
	// 1.
	SlowPal *string `locationName:"slowPal" type:"string" enum:"AvcIntraSlowPal"`

	// When you do frame rate conversion from 23.976 frames per second (fps) to
	// 29.97 fps, and your output scan type is interlaced, you can optionally enable
	// hard telecine (HARD) to create a smoother picture. When you keep the default
	// value, None (NONE), MediaConvert does a standard frame rate conversion to
	// 29.97 without doing anything with the field polarity to create a smoother
	// picture.
	Telecine *string `locationName:"telecine" type:"string" enum:"AvcIntraTelecine"`
}

// String returns the string representation
func (s AvcIntraSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AvcIntraSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AvcIntraSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AvcIntraSettings"}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
	}
	// Note: the AVC-Intra numerator minimum is 24 (per the min:"24" tag),
	// unlike most other codec settings shapes where the minimum is 1.
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 24 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 24))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAvcIntraClass sets the AvcIntraClass field's value.
func (s *AvcIntraSettings) SetAvcIntraClass(v string) *AvcIntraSettings {
	s.AvcIntraClass = &v
	return s
}

// SetAvcIntraUhdSettings sets the AvcIntraUhdSettings field's value.
func (s *AvcIntraSettings) SetAvcIntraUhdSettings(v *AvcIntraUhdSettings) *AvcIntraSettings {
	s.AvcIntraUhdSettings = v
	return s
}

// SetFramerateControl sets the FramerateControl field's value.
func (s *AvcIntraSettings) SetFramerateControl(v string) *AvcIntraSettings {
	s.FramerateControl = &v
	return s
}

// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value.
func (s *AvcIntraSettings) SetFramerateConversionAlgorithm(v string) *AvcIntraSettings { s.FramerateConversionAlgorithm = &v return s } // SetFramerateDenominator sets the FramerateDenominator field's value. func (s *AvcIntraSettings) SetFramerateDenominator(v int64) *AvcIntraSettings { s.FramerateDenominator = &v return s } // SetFramerateNumerator sets the FramerateNumerator field's value. func (s *AvcIntraSettings) SetFramerateNumerator(v int64) *AvcIntraSettings { s.FramerateNumerator = &v return s } // SetInterlaceMode sets the InterlaceMode field's value. func (s *AvcIntraSettings) SetInterlaceMode(v string) *AvcIntraSettings { s.InterlaceMode = &v return s } // SetScanTypeConversionMode sets the ScanTypeConversionMode field's value. func (s *AvcIntraSettings) SetScanTypeConversionMode(v string) *AvcIntraSettings { s.ScanTypeConversionMode = &v return s } // SetSlowPal sets the SlowPal field's value. func (s *AvcIntraSettings) SetSlowPal(v string) *AvcIntraSettings { s.SlowPal = &v return s } // SetTelecine sets the Telecine field's value. func (s *AvcIntraSettings) SetTelecine(v string) *AvcIntraSettings { s.Telecine = &v return s } // Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K (CLASS_4K_2K). // When you set AVC-Intra class to a different value, this object isn't allowed. type AvcIntraUhdSettings struct { _ struct{} `type:"structure"` // Optional. Use Quality tuning level (qualityTuningLevel) to choose how many // transcoding passes MediaConvert does with your video. When you choose Multi-pass // (MULTI_PASS), your video quality is better and your output bitrate is more // accurate. That is, the actual bitrate of your output is closer to the target // bitrate defined in the specification. When you choose Single-pass (SINGLE_PASS), // your encoding time is faster. The default behavior is Single-pass (SINGLE_PASS). 
QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"AvcIntraUhdQualityTuningLevel"` } // String returns the string representation func (s AvcIntraUhdSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s AvcIntraUhdSettings) GoString() string { return s.String() } // SetQualityTuningLevel sets the QualityTuningLevel field's value. func (s *AvcIntraUhdSettings) SetQualityTuningLevel(v string) *AvcIntraUhdSettings { s.QualityTuningLevel = &v return s } type BadRequestException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } // String returns the string representation func (s BadRequestException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s BadRequestException) GoString() string { return s.String() } func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ RespMetadata: v, } } // Code returns the exception type name. func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } return "" } // OrigErr always returns nil, satisfies awserr.Error interface. func (s *BadRequestException) OrigErr() error { return nil } func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. func (s *BadRequestException) StatusCode() int { return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. func (s *BadRequestException) RequestID() string { return s.RespMetadata.RequestID } // Settings related to burn-in captions. Set up burn-in captions in the same // output as your video. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html. // When you work directly in your JSON job specification, include this object // and any required children when you set destinationType to BURN_IN. type BurninDestinationSettings struct { _ struct{} `type:"structure"` // If no explicit x_position or y_position is provided, setting alignment to // centered will place the captions at the bottom center of the output. Similarly, // setting a left alignment will align captions to the bottom left of the output. // If x and y positions are given in conjunction with the alignment parameter, // the font will be justified (either left or centered) relative to those coordinates. // This option is not valid for source captions that are STL, 608/embedded or // teletext. These source settings are already pre-defined by the caption stream. // All burn-in and DVB-Sub font settings must match. Alignment *string `locationName:"alignment" type:"string" enum:"BurninSubtitleAlignment"` // Specifies the color of the rectangle behind the captions.All burn-in and // DVB-Sub font settings must match. BackgroundColor *string `locationName:"backgroundColor" type:"string" enum:"BurninSubtitleBackgroundColor"` // Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. // Leaving this parameter blank is equivalent to setting it to 0 (transparent). // All burn-in and DVB-Sub font settings must match. BackgroundOpacity *int64 `locationName:"backgroundOpacity" type:"integer"` // Specifies the color of the burned-in captions. This option is not valid for // source captions that are STL, 608/embedded or teletext. These source settings // are already pre-defined by the caption stream. All burn-in and DVB-Sub font // settings must match. FontColor *string `locationName:"fontColor" type:"string" enum:"BurninSubtitleFontColor"` // Specifies the opacity of the burned-in captions. 
255 is opaque; 0 is transparent.All // burn-in and DVB-Sub font settings must match. FontOpacity *int64 `locationName:"fontOpacity" type:"integer"` // Font resolution in DPI (dots per inch); default is 96 dpi.All burn-in and // DVB-Sub font settings must match. FontResolution *int64 `locationName:"fontResolution" min:"96" type:"integer"` // Provide the font script, using an ISO 15924 script code, if the LanguageCode // is not sufficient for determining the script type. Where LanguageCode or // CustomLanguageCode is sufficient, use "AUTOMATIC" or leave unset. This is // used to help determine the appropriate font for rendering burn-in captions. FontScript *string `locationName:"fontScript" type:"string" enum:"FontScript"` // A positive integer indicates the exact font size in points. Set to 0 for // automatic font size selection. All burn-in and DVB-Sub font settings must // match. FontSize *int64 `locationName:"fontSize" type:"integer"` // Specifies font outline color. This option is not valid for source captions // that are either 608/embedded or teletext. These source settings are already // pre-defined by the caption stream. All burn-in and DVB-Sub font settings // must match. OutlineColor *string `locationName:"outlineColor" type:"string" enum:"BurninSubtitleOutlineColor"` // Specifies font outline size in pixels. This option is not valid for source // captions that are either 608/embedded or teletext. These source settings // are already pre-defined by the caption stream. All burn-in and DVB-Sub font // settings must match. OutlineSize *int64 `locationName:"outlineSize" type:"integer"` // Specifies the color of the shadow cast by the captions.All burn-in and DVB-Sub // font settings must match. ShadowColor *string `locationName:"shadowColor" type:"string" enum:"BurninSubtitleShadowColor"` // Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving // this parameter blank is equivalent to setting it to 0 (transparent). 
All // burn-in and DVB-Sub font settings must match. ShadowOpacity *int64 `locationName:"shadowOpacity" type:"integer"` // Specifies the horizontal offset of the shadow relative to the captions in // pixels. A value of -2 would result in a shadow offset 2 pixels to the left. // All burn-in and DVB-Sub font settings must match. ShadowXOffset *int64 `locationName:"shadowXOffset" type:"integer"` // Specifies the vertical offset of the shadow relative to the captions in pixels. // A value of -2 would result in a shadow offset 2 pixels above the text. All // burn-in and DVB-Sub font settings must match. ShadowYOffset *int64 `locationName:"shadowYOffset" type:"integer"` // Only applies to jobs with input captions in Teletext or STL formats. Specify // whether the spacing between letters in your captions is set by the captions // grid or varies depending on letter width. Choose fixed grid to conform to // the spacing specified in the captions file more accurately. Choose proportional // to make the text easier to read if the captions are closed caption. TeletextSpacing *string `locationName:"teletextSpacing" type:"string" enum:"BurninSubtitleTeletextSpacing"` // Specifies the horizontal position of the caption relative to the left side // of the output in pixels. A value of 10 would result in the captions starting // 10 pixels from the left of the output. If no explicit x_position is provided, // the horizontal caption position will be determined by the alignment parameter. // This option is not valid for source captions that are STL, 608/embedded or // teletext. These source settings are already pre-defined by the caption stream. // All burn-in and DVB-Sub font settings must match. XPosition *int64 `locationName:"xPosition" type:"integer"` // Specifies the vertical position of the caption relative to the top of the // output in pixels. A value of 10 would result in the captions starting 10 // pixels from the top of the output. 
If no explicit y_position is provided, // the caption will be positioned towards the bottom of the output. This option // is not valid for source captions that are STL, 608/embedded or teletext. // These source settings are already pre-defined by the caption stream. All // burn-in and DVB-Sub font settings must match. YPosition *int64 `locationName:"yPosition" type:"integer"` } // String returns the string representation func (s BurninDestinationSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s BurninDestinationSettings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *BurninDestinationSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "BurninDestinationSettings"} if s.FontResolution != nil && *s.FontResolution < 96 { invalidParams.Add(request.NewErrParamMinValue("FontResolution", 96)) } if s.ShadowXOffset != nil && *s.ShadowXOffset < -2.147483648e+09 { invalidParams.Add(request.NewErrParamMinValue("ShadowXOffset", -2.147483648e+09)) } if s.ShadowYOffset != nil && *s.ShadowYOffset < -2.147483648e+09 { invalidParams.Add(request.NewErrParamMinValue("ShadowYOffset", -2.147483648e+09)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAlignment sets the Alignment field's value. func (s *BurninDestinationSettings) SetAlignment(v string) *BurninDestinationSettings { s.Alignment = &v return s } // SetBackgroundColor sets the BackgroundColor field's value. func (s *BurninDestinationSettings) SetBackgroundColor(v string) *BurninDestinationSettings { s.BackgroundColor = &v return s } // SetBackgroundOpacity sets the BackgroundOpacity field's value. func (s *BurninDestinationSettings) SetBackgroundOpacity(v int64) *BurninDestinationSettings { s.BackgroundOpacity = &v return s } // SetFontColor sets the FontColor field's value. 
func (s *BurninDestinationSettings) SetFontColor(v string) *BurninDestinationSettings { s.FontColor = &v return s } // SetFontOpacity sets the FontOpacity field's value. func (s *BurninDestinationSettings) SetFontOpacity(v int64) *BurninDestinationSettings { s.FontOpacity = &v return s } // SetFontResolution sets the FontResolution field's value. func (s *BurninDestinationSettings) SetFontResolution(v int64) *BurninDestinationSettings { s.FontResolution = &v return s } // SetFontScript sets the FontScript field's value. func (s *BurninDestinationSettings) SetFontScript(v string) *BurninDestinationSettings { s.FontScript = &v return s } // SetFontSize sets the FontSize field's value. func (s *BurninDestinationSettings) SetFontSize(v int64) *BurninDestinationSettings { s.FontSize = &v return s } // SetOutlineColor sets the OutlineColor field's value. func (s *BurninDestinationSettings) SetOutlineColor(v string) *BurninDestinationSettings { s.OutlineColor = &v return s } // SetOutlineSize sets the OutlineSize field's value. func (s *BurninDestinationSettings) SetOutlineSize(v int64) *BurninDestinationSettings { s.OutlineSize = &v return s } // SetShadowColor sets the ShadowColor field's value. func (s *BurninDestinationSettings) SetShadowColor(v string) *BurninDestinationSettings { s.ShadowColor = &v return s } // SetShadowOpacity sets the ShadowOpacity field's value. func (s *BurninDestinationSettings) SetShadowOpacity(v int64) *BurninDestinationSettings { s.ShadowOpacity = &v return s } // SetShadowXOffset sets the ShadowXOffset field's value. func (s *BurninDestinationSettings) SetShadowXOffset(v int64) *BurninDestinationSettings { s.ShadowXOffset = &v return s } // SetShadowYOffset sets the ShadowYOffset field's value. func (s *BurninDestinationSettings) SetShadowYOffset(v int64) *BurninDestinationSettings { s.ShadowYOffset = &v return s } // SetTeletextSpacing sets the TeletextSpacing field's value. 
func (s *BurninDestinationSettings) SetTeletextSpacing(v string) *BurninDestinationSettings { s.TeletextSpacing = &v return s } // SetXPosition sets the XPosition field's value. func (s *BurninDestinationSettings) SetXPosition(v int64) *BurninDestinationSettings { s.XPosition = &v return s } // SetYPosition sets the YPosition field's value. func (s *BurninDestinationSettings) SetYPosition(v int64) *BurninDestinationSettings { s.YPosition = &v return s } // Cancel a job by sending a request with the job ID type CancelJobInput struct { _ struct{} `type:"structure"` // The Job ID of the job to be cancelled. // // Id is a required field Id *string `location:"uri" locationName:"id" type:"string" required:"true"` } // String returns the string representation func (s CancelJobInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CancelJobInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *CancelJobInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CancelJobInput"} if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } if s.Id != nil && len(*s.Id) < 1 { invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetId sets the Id field's value. func (s *CancelJobInput) SetId(v string) *CancelJobInput { s.Id = &v return s } // A cancel job request will receive a response with an empty body. type CancelJobOutput struct { _ struct{} `type:"structure"` } // String returns the string representation func (s CancelJobOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CancelJobOutput) GoString() string { return s.String() } // This object holds groups of settings related to captions for one output. // For each output that has captions, include one instance of CaptionDescriptions. 
type CaptionDescription struct { _ struct{} `type:"structure"` // Specifies which "Caption Selector":#inputs-caption_selector to use from each // input when generating captions. The name should be of the format "Caption // Selector ", which denotes that the Nth Caption Selector will be used from // each input. CaptionSelectorName *string `locationName:"captionSelectorName" min:"1" type:"string"` // Specify the language for this captions output track. For most captions output // formats, the encoder puts this language information in the output captions // metadata. If your output captions format is DVB-Sub or Burn in, the encoder // uses this language information when automatically selecting the font script // for rendering the captions text. For all outputs, you can use an ISO 639-2 // or ISO 639-3 code. For streaming outputs, you can also use any other code // in the full RFC-5646 specification. Streaming outputs are those that are // in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft // Smooth Streaming. CustomLanguageCode *string `locationName:"customLanguageCode" type:"string"` // Settings related to one captions tab on the MediaConvert console. In your // job JSON, an instance of captions DestinationSettings is equivalent to one // captions tab in the console. Usually, one captions tab corresponds to one // output captions track. Depending on your output captions format, one tab // might correspond to a set of output captions tracks. For more information, // see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html. DestinationSettings *CaptionDestinationSettings `locationName:"destinationSettings" type:"structure"` // Specify the language of this captions output track. For most captions output // formats, the encoder puts this language information in the output captions // metadata. 
If your output captions format is DVB-Sub or Burn in, the encoder // uses this language information to choose the font language for rendering // the captions text. LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"` // Specify a label for this set of output captions. For example, "English", // "Director commentary", or "track_2". For streaming outputs, MediaConvert // passes this information into destination manifests for display on the end-viewer's // player device. For outputs in other output groups, the service ignores this // setting. LanguageDescription *string `locationName:"languageDescription" type:"string"` } // String returns the string representation func (s CaptionDescription) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CaptionDescription) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *CaptionDescription) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CaptionDescription"} if s.CaptionSelectorName != nil && len(*s.CaptionSelectorName) < 1 { invalidParams.Add(request.NewErrParamMinLen("CaptionSelectorName", 1)) } if s.DestinationSettings != nil { if err := s.DestinationSettings.Validate(); err != nil { invalidParams.AddNested("DestinationSettings", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetCaptionSelectorName sets the CaptionSelectorName field's value. func (s *CaptionDescription) SetCaptionSelectorName(v string) *CaptionDescription { s.CaptionSelectorName = &v return s } // SetCustomLanguageCode sets the CustomLanguageCode field's value. func (s *CaptionDescription) SetCustomLanguageCode(v string) *CaptionDescription { s.CustomLanguageCode = &v return s } // SetDestinationSettings sets the DestinationSettings field's value. 
func (s *CaptionDescription) SetDestinationSettings(v *CaptionDestinationSettings) *CaptionDescription { s.DestinationSettings = v return s } // SetLanguageCode sets the LanguageCode field's value. func (s *CaptionDescription) SetLanguageCode(v string) *CaptionDescription { s.LanguageCode = &v return s } // SetLanguageDescription sets the LanguageDescription field's value. func (s *CaptionDescription) SetLanguageDescription(v string) *CaptionDescription { s.LanguageDescription = &v return s } // Caption Description for preset type CaptionDescriptionPreset struct { _ struct{} `type:"structure"` // Specify the language for this captions output track. For most captions output // formats, the encoder puts this language information in the output captions // metadata. If your output captions format is DVB-Sub or Burn in, the encoder // uses this language information when automatically selecting the font script // for rendering the captions text. For all outputs, you can use an ISO 639-2 // or ISO 639-3 code. For streaming outputs, you can also use any other code // in the full RFC-5646 specification. Streaming outputs are those that are // in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft // Smooth Streaming. CustomLanguageCode *string `locationName:"customLanguageCode" type:"string"` // Settings related to one captions tab on the MediaConvert console. In your // job JSON, an instance of captions DestinationSettings is equivalent to one // captions tab in the console. Usually, one captions tab corresponds to one // output captions track. Depending on your output captions format, one tab // might correspond to a set of output captions tracks. For more information, // see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html. DestinationSettings *CaptionDestinationSettings `locationName:"destinationSettings" type:"structure"` // Specify the language of this captions output track. 
For most captions output // formats, the encoder puts this language information in the output captions // metadata. If your output captions format is DVB-Sub or Burn in, the encoder // uses this language information to choose the font language for rendering // the captions text. LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"` // Specify a label for this set of output captions. For example, "English", // "Director commentary", or "track_2". For streaming outputs, MediaConvert // passes this information into destination manifests for display on the end-viewer's // player device. For outputs in other output groups, the service ignores this // setting. LanguageDescription *string `locationName:"languageDescription" type:"string"` } // String returns the string representation func (s CaptionDescriptionPreset) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CaptionDescriptionPreset) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *CaptionDescriptionPreset) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CaptionDescriptionPreset"} if s.DestinationSettings != nil { if err := s.DestinationSettings.Validate(); err != nil { invalidParams.AddNested("DestinationSettings", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetCustomLanguageCode sets the CustomLanguageCode field's value. func (s *CaptionDescriptionPreset) SetCustomLanguageCode(v string) *CaptionDescriptionPreset { s.CustomLanguageCode = &v return s } // SetDestinationSettings sets the DestinationSettings field's value. func (s *CaptionDescriptionPreset) SetDestinationSettings(v *CaptionDestinationSettings) *CaptionDescriptionPreset { s.DestinationSettings = v return s } // SetLanguageCode sets the LanguageCode field's value. 
func (s *CaptionDescriptionPreset) SetLanguageCode(v string) *CaptionDescriptionPreset { s.LanguageCode = &v return s } // SetLanguageDescription sets the LanguageDescription field's value. func (s *CaptionDescriptionPreset) SetLanguageDescription(v string) *CaptionDescriptionPreset { s.LanguageDescription = &v return s } // Settings related to one captions tab on the MediaConvert console. In your // job JSON, an instance of captions DestinationSettings is equivalent to one // captions tab in the console. Usually, one captions tab corresponds to one // output captions track. Depending on your output captions format, one tab // might correspond to a set of output captions tracks. For more information, // see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html. type CaptionDestinationSettings struct { _ struct{} `type:"structure"` // Settings related to burn-in captions. Set up burn-in captions in the same // output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html. // When you work directly in your JSON job specification, include this object // and any required children when you set destinationType to BURN_IN. BurninDestinationSettings *BurninDestinationSettings `locationName:"burninDestinationSettings" type:"structure"` // Specify the format for this set of captions on this output. The default format // is embedded without SCTE-20. Note that your choice of video output container // constrains your choice of output captions format. For more information, see // https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. // If you are using SCTE-20 and you want to create an output that complies with // the SCTE-43 spec, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED). To // create a non-compliant output where the embedded captions come first, choose // Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20). 
DestinationType *string `locationName:"destinationType" type:"string" enum:"CaptionDestinationType"` // Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same // output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html. // When you work directly in your JSON job specification, include this object // and any required children when you set destinationType to DVB_SUB. DvbSubDestinationSettings *DvbSubDestinationSettings `locationName:"dvbSubDestinationSettings" type:"structure"` // Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or // ancillary) captions. Set up embedded captions in the same output as your // video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html. // When you work directly in your JSON job specification, include this object // and any required children when you set destinationType to EMBEDDED, EMBEDDED_PLUS_SCTE20, // or SCTE20_PLUS_EMBEDDED. EmbeddedDestinationSettings *EmbeddedDestinationSettings `locationName:"embeddedDestinationSettings" type:"structure"` // Settings related to IMSC captions. IMSC is a sidecar format that holds captions // in a file that is separate from the video container. Set up sidecar captions // in the same output group, but different output from your video. For more // information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. // When you work directly in your JSON job specification, include this object // and any required children when you set destinationType to IMSC. ImscDestinationSettings *ImscDestinationSettings `locationName:"imscDestinationSettings" type:"structure"` // Settings related to SCC captions. SCC is a sidecar format that holds captions // in a file that is separate from the video container. Set up sidecar captions // in the same output group, but different output from your video. 
For more // information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html. // When you work directly in your JSON job specification, include this object // and any required children when you set destinationType to SCC. SccDestinationSettings *SccDestinationSettings `locationName:"sccDestinationSettings" type:"structure"` // Settings related to teletext captions. Set up teletext captions in the same // output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html. // When you work directly in your JSON job specification, include this object // and any required children when you set destinationType to TELETEXT. TeletextDestinationSettings *TeletextDestinationSettings `locationName:"teletextDestinationSettings" type:"structure"` // Settings related to TTML captions. TTML is a sidecar format that holds captions // in a file that is separate from the video container. Set up sidecar captions // in the same output group, but different output from your video. For more // information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. // When you work directly in your JSON job specification, include this object // and any required children when you set destinationType to TTML. TtmlDestinationSettings *TtmlDestinationSettings `locationName:"ttmlDestinationSettings" type:"structure"` // WEBVTT Destination Settings WebvttDestinationSettings *WebvttDestinationSettings `locationName:"webvttDestinationSettings" type:"structure"` } // String returns the string representation func (s CaptionDestinationSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CaptionDestinationSettings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *CaptionDestinationSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CaptionDestinationSettings"} if s.BurninDestinationSettings != nil { if err := s.BurninDestinationSettings.Validate(); err != nil { invalidParams.AddNested("BurninDestinationSettings", err.(request.ErrInvalidParams)) } } if s.DvbSubDestinationSettings != nil { if err := s.DvbSubDestinationSettings.Validate(); err != nil { invalidParams.AddNested("DvbSubDestinationSettings", err.(request.ErrInvalidParams)) } } if s.EmbeddedDestinationSettings != nil { if err := s.EmbeddedDestinationSettings.Validate(); err != nil { invalidParams.AddNested("EmbeddedDestinationSettings", err.(request.ErrInvalidParams)) } } if s.TeletextDestinationSettings != nil { if err := s.TeletextDestinationSettings.Validate(); err != nil { invalidParams.AddNested("TeletextDestinationSettings", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetBurninDestinationSettings sets the BurninDestinationSettings field's value. func (s *CaptionDestinationSettings) SetBurninDestinationSettings(v *BurninDestinationSettings) *CaptionDestinationSettings { s.BurninDestinationSettings = v return s } // SetDestinationType sets the DestinationType field's value. func (s *CaptionDestinationSettings) SetDestinationType(v string) *CaptionDestinationSettings { s.DestinationType = &v return s } // SetDvbSubDestinationSettings sets the DvbSubDestinationSettings field's value. func (s *CaptionDestinationSettings) SetDvbSubDestinationSettings(v *DvbSubDestinationSettings) *CaptionDestinationSettings { s.DvbSubDestinationSettings = v return s } // SetEmbeddedDestinationSettings sets the EmbeddedDestinationSettings field's value. 
func (s *CaptionDestinationSettings) SetEmbeddedDestinationSettings(v *EmbeddedDestinationSettings) *CaptionDestinationSettings { s.EmbeddedDestinationSettings = v return s } // SetImscDestinationSettings sets the ImscDestinationSettings field's value. func (s *CaptionDestinationSettings) SetImscDestinationSettings(v *ImscDestinationSettings) *CaptionDestinationSettings { s.ImscDestinationSettings = v return s } // SetSccDestinationSettings sets the SccDestinationSettings field's value. func (s *CaptionDestinationSettings) SetSccDestinationSettings(v *SccDestinationSettings) *CaptionDestinationSettings { s.SccDestinationSettings = v return s } // SetTeletextDestinationSettings sets the TeletextDestinationSettings field's value. func (s *CaptionDestinationSettings) SetTeletextDestinationSettings(v *TeletextDestinationSettings) *CaptionDestinationSettings { s.TeletextDestinationSettings = v return s } // SetTtmlDestinationSettings sets the TtmlDestinationSettings field's value. func (s *CaptionDestinationSettings) SetTtmlDestinationSettings(v *TtmlDestinationSettings) *CaptionDestinationSettings { s.TtmlDestinationSettings = v return s } // SetWebvttDestinationSettings sets the WebvttDestinationSettings field's value. func (s *CaptionDestinationSettings) SetWebvttDestinationSettings(v *WebvttDestinationSettings) *CaptionDestinationSettings { s.WebvttDestinationSettings = v return s } // Use captions selectors to specify the captions data from your input that // you use in your outputs. You can use up to 20 captions selectors per input. type CaptionSelector struct { _ struct{} `type:"structure"` // The specific language to extract from source, using the ISO 639-2 or ISO // 639-3 three-letter language code. If input is SCTE-27, complete this field // and/or PID to select the caption language to extract. If input is DVB-Sub // and output is Burn-in or SMPTE-TT, complete this field and/or PID to select // the caption language to extract. 
If input is DVB-Sub that is being passed // through, omit this field (and PID field); there is no way to extract a specific // language with pass-through captions. CustomLanguageCode *string `locationName:"customLanguageCode" min:"3" type:"string"` // The specific language to extract from source. If input is SCTE-27, complete // this field and/or PID to select the caption language to extract. If input // is DVB-Sub and output is Burn-in or SMPTE-TT, complete this field and/or // PID to select the caption language to extract. If input is DVB-Sub that is // being passed through, omit this field (and PID field); there is no way to // extract a specific language with pass-through captions. LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"` // If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file, // specify the URI of the input captions source file. If your input captions // are IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings. SourceSettings *CaptionSourceSettings `locationName:"sourceSettings" type:"structure"` } // String returns the string representation func (s CaptionSelector) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CaptionSelector) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *CaptionSelector) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CaptionSelector"} if s.CustomLanguageCode != nil && len(*s.CustomLanguageCode) < 3 { invalidParams.Add(request.NewErrParamMinLen("CustomLanguageCode", 3)) } if s.SourceSettings != nil { if err := s.SourceSettings.Validate(); err != nil { invalidParams.AddNested("SourceSettings", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetCustomLanguageCode sets the CustomLanguageCode field's value. 
func (s *CaptionSelector) SetCustomLanguageCode(v string) *CaptionSelector { s.CustomLanguageCode = &v return s } // SetLanguageCode sets the LanguageCode field's value. func (s *CaptionSelector) SetLanguageCode(v string) *CaptionSelector { s.LanguageCode = &v return s } // SetSourceSettings sets the SourceSettings field's value. func (s *CaptionSelector) SetSourceSettings(v *CaptionSourceSettings) *CaptionSelector { s.SourceSettings = v return s } // Ignore this setting unless your input captions format is SCC. To have the // service compensate for differing frame rates between your input captions // and input video, specify the frame rate of the captions file. Specify this // value as a fraction, using the settings Framerate numerator (framerateNumerator) // and Framerate denominator (framerateDenominator). For example, you might // specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, // or 30000 / 1001 for 29.97 fps. type CaptionSourceFramerate struct { _ struct{} `type:"structure"` // Specify the denominator of the fraction that represents the frame rate for // the setting Caption source frame rate (CaptionSourceFramerate). Use this // setting along with the setting Framerate numerator (framerateNumerator). FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` // Specify the numerator of the fraction that represents the frame rate for // the setting Caption source frame rate (CaptionSourceFramerate). Use this // setting along with the setting Framerate denominator (framerateDenominator). FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"` } // String returns the string representation func (s CaptionSourceFramerate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CaptionSourceFramerate) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *CaptionSourceFramerate) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CaptionSourceFramerate"} if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1)) } if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 { invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetFramerateDenominator sets the FramerateDenominator field's value. func (s *CaptionSourceFramerate) SetFramerateDenominator(v int64) *CaptionSourceFramerate { s.FramerateDenominator = &v return s } // SetFramerateNumerator sets the FramerateNumerator field's value. func (s *CaptionSourceFramerate) SetFramerateNumerator(v int64) *CaptionSourceFramerate { s.FramerateNumerator = &v return s } // If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file, // specify the URI of the input captions source file. If your input captions // are IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings. type CaptionSourceSettings struct { _ struct{} `type:"structure"` // Settings for ancillary captions source. AncillarySourceSettings *AncillarySourceSettings `locationName:"ancillarySourceSettings" type:"structure"` // DVB Sub Source Settings DvbSubSourceSettings *DvbSubSourceSettings `locationName:"dvbSubSourceSettings" type:"structure"` // Settings for embedded captions Source EmbeddedSourceSettings *EmbeddedSourceSettings `locationName:"embeddedSourceSettings" type:"structure"` // If your input captions are SCC, SMI, SRT, STL, TTML, WebVTT, or IMSC 1.1 // in an xml file, specify the URI of the input caption source file. If your // caption source is IMSC in an IMF package, use TrackSourceSettings instead // of FileSoureSettings. 
FileSourceSettings *FileSourceSettings `locationName:"fileSourceSettings" type:"structure"` // Use Source (SourceType) to identify the format of your input captions. The // service cannot auto-detect caption format. SourceType *string `locationName:"sourceType" type:"string" enum:"CaptionSourceType"` // Settings specific to Teletext caption sources, including Page number. TeletextSourceSettings *TeletextSourceSettings `locationName:"teletextSourceSettings" type:"structure"` // Settings specific to caption sources that are specified by track number. // Currently, this is only IMSC captions in an IMF package. If your caption // source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead // of TrackSourceSettings. TrackSourceSettings *TrackSourceSettings `locationName:"trackSourceSettings" type:"structure"` // Settings specific to WebVTT sources in HLS alternative rendition group. Specify // the properties (renditionGroupId, renditionName or renditionLanguageCode) // to identify the unique subtitle track among the alternative rendition groups // present in the HLS manifest. If no unique track is found, or multiple tracks // match the specified properties, the job fails. If there is only one subtitle // track in the rendition group, the settings can be left empty and the default // subtitle track will be chosen. If your caption source is a sidecar file, // use FileSourceSettings instead of WebvttHlsSourceSettings. WebvttHlsSourceSettings *WebvttHlsSourceSettings `locationName:"webvttHlsSourceSettings" type:"structure"` } // String returns the string representation func (s CaptionSourceSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CaptionSourceSettings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *CaptionSourceSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CaptionSourceSettings"} if s.AncillarySourceSettings != nil { if err := s.AncillarySourceSettings.Validate(); err != nil { invalidParams.AddNested("AncillarySourceSettings", err.(request.ErrInvalidParams)) } } if s.DvbSubSourceSettings != nil { if err := s.DvbSubSourceSettings.Validate(); err != nil { invalidParams.AddNested("DvbSubSourceSettings", err.(request.ErrInvalidParams)) } } if s.EmbeddedSourceSettings != nil { if err := s.EmbeddedSourceSettings.Validate(); err != nil { invalidParams.AddNested("EmbeddedSourceSettings", err.(request.ErrInvalidParams)) } } if s.FileSourceSettings != nil { if err := s.FileSourceSettings.Validate(); err != nil { invalidParams.AddNested("FileSourceSettings", err.(request.ErrInvalidParams)) } } if s.TeletextSourceSettings != nil { if err := s.TeletextSourceSettings.Validate(); err != nil { invalidParams.AddNested("TeletextSourceSettings", err.(request.ErrInvalidParams)) } } if s.TrackSourceSettings != nil { if err := s.TrackSourceSettings.Validate(); err != nil { invalidParams.AddNested("TrackSourceSettings", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAncillarySourceSettings sets the AncillarySourceSettings field's value. func (s *CaptionSourceSettings) SetAncillarySourceSettings(v *AncillarySourceSettings) *CaptionSourceSettings { s.AncillarySourceSettings = v return s } // SetDvbSubSourceSettings sets the DvbSubSourceSettings field's value. func (s *CaptionSourceSettings) SetDvbSubSourceSettings(v *DvbSubSourceSettings) *CaptionSourceSettings { s.DvbSubSourceSettings = v return s } // SetEmbeddedSourceSettings sets the EmbeddedSourceSettings field's value. 
func (s *CaptionSourceSettings) SetEmbeddedSourceSettings(v *EmbeddedSourceSettings) *CaptionSourceSettings { s.EmbeddedSourceSettings = v return s } // SetFileSourceSettings sets the FileSourceSettings field's value. func (s *CaptionSourceSettings) SetFileSourceSettings(v *FileSourceSettings) *CaptionSourceSettings { s.FileSourceSettings = v return s } // SetSourceType sets the SourceType field's value. func (s *CaptionSourceSettings) SetSourceType(v string) *CaptionSourceSettings { s.SourceType = &v return s } // SetTeletextSourceSettings sets the TeletextSourceSettings field's value. func (s *CaptionSourceSettings) SetTeletextSourceSettings(v *TeletextSourceSettings) *CaptionSourceSettings { s.TeletextSourceSettings = v return s } // SetTrackSourceSettings sets the TrackSourceSettings field's value. func (s *CaptionSourceSettings) SetTrackSourceSettings(v *TrackSourceSettings) *CaptionSourceSettings { s.TrackSourceSettings = v return s } // SetWebvttHlsSourceSettings sets the WebvttHlsSourceSettings field's value. func (s *CaptionSourceSettings) SetWebvttHlsSourceSettings(v *WebvttHlsSourceSettings) *CaptionSourceSettings { s.WebvttHlsSourceSettings = v return s } // Channel mapping (ChannelMapping) contains the group of fields that hold the // remixing value for each channel, in dB. Specify remix values to indicate // how much of the content from your input audio channel you want in your output // audio channels. Each instance of the InputChannels or InputChannelsFineTune // array specifies these values for one output channel. Use one instance of // this array for each output channel. In the console, each array corresponds // to a column in the graphical depiction of the mapping matrix. The rows of // the graphical matrix correspond to input channels. Valid values are within // the range from -60 (mute) through 6. A setting of 0 passes the input channel // unchanged to the output channel (no attenuation or amplification). 
Use InputChannels // or InputChannelsFineTune to specify your remix values. Don't use both. type ChannelMapping struct { _ struct{} `type:"structure"` // In your JSON job specification, include one child of OutputChannels for each // audio channel that you want in your output. Each child should contain one // instance of InputChannels or InputChannelsFineTune. OutputChannels []*OutputChannelMapping `locationName:"outputChannels" type:"list"` } // String returns the string representation func (s ChannelMapping) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ChannelMapping) GoString() string { return s.String() } // SetOutputChannels sets the OutputChannels field's value. func (s *ChannelMapping) SetOutputChannels(v []*OutputChannelMapping) *ChannelMapping { s.OutputChannels = v return s } // Specify the details for each pair of HLS and DASH additional manifests that // you want the service to generate for this CMAF output group. Each pair of // manifests can reference a different subset of outputs in the group. type CmafAdditionalManifest struct { _ struct{} `type:"structure"` // Specify a name modifier that the service adds to the name of this manifest // to make it different from the file names of the other main manifests in the // output group. For example, say that the default main manifest for your HLS // group is film-name.m3u8. If you enter "-no-premium" for this setting, then // the file name the service generates for this top-level manifest is film-name-no-premium.m3u8. // For HLS output groups, specify a manifestNameModifier that is different from // the nameModifier of the output. The service uses the output name modifier // to create unique names for the individual variant manifests. ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"` // Specify the outputs that you want this additional top-level manifest to reference. 
SelectedOutputs []*string `locationName:"selectedOutputs" type:"list"` } // String returns the string representation func (s CmafAdditionalManifest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CmafAdditionalManifest) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *CmafAdditionalManifest) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CmafAdditionalManifest"} if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 { invalidParams.Add(request.NewErrParamMinLen("ManifestNameModifier", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetManifestNameModifier sets the ManifestNameModifier field's value. func (s *CmafAdditionalManifest) SetManifestNameModifier(v string) *CmafAdditionalManifest { s.ManifestNameModifier = &v return s } // SetSelectedOutputs sets the SelectedOutputs field's value. func (s *CmafAdditionalManifest) SetSelectedOutputs(v []*string) *CmafAdditionalManifest { s.SelectedOutputs = v return s } // Settings for CMAF encryption type CmafEncryptionSettings struct { _ struct{} `type:"structure"` // This is a 128-bit, 16-byte hex value represented by a 32-character text string. // If this parameter is not set then the Initialization Vector will follow the // segment number by default. ConstantInitializationVector *string `locationName:"constantInitializationVector" min:"32" type:"string"` // Specify the encryption scheme that you want the service to use when encrypting // your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR). EncryptionMethod *string `locationName:"encryptionMethod" type:"string" enum:"CmafEncryptionType"` // When you use DRM with CMAF outputs, choose whether the service writes the // 128-bit encryption initialization vector in the HLS and DASH manifests. 
InitializationVectorInManifest *string `locationName:"initializationVectorInManifest" type:"string" enum:"CmafInitializationVectorInManifest"` // If your output group type is CMAF, use these settings when doing DRM encryption // with a SPEKE-compliant key provider. If your output group type is HLS, DASH, // or Microsoft Smooth, use the SpekeKeyProvider settings instead. SpekeKeyProvider *SpekeKeyProviderCmaf `locationName:"spekeKeyProvider" type:"structure"` // Use these settings to set up encryption with a static key provider. StaticKeyProvider *StaticKeyProvider `locationName:"staticKeyProvider" type:"structure"` // Specify whether your DRM encryption key is static or from a key provider // that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html. Type *string `locationName:"type" type:"string" enum:"CmafKeyProviderType"` } // String returns the string representation func (s CmafEncryptionSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CmafEncryptionSettings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *CmafEncryptionSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CmafEncryptionSettings"} if s.ConstantInitializationVector != nil && len(*s.ConstantInitializationVector) < 32 { invalidParams.Add(request.NewErrParamMinLen("ConstantInitializationVector", 32)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetConstantInitializationVector sets the ConstantInitializationVector field's value. func (s *CmafEncryptionSettings) SetConstantInitializationVector(v string) *CmafEncryptionSettings { s.ConstantInitializationVector = &v return s } // SetEncryptionMethod sets the EncryptionMethod field's value. 
func (s *CmafEncryptionSettings) SetEncryptionMethod(v string) *CmafEncryptionSettings { s.EncryptionMethod = &v return s } // SetInitializationVectorInManifest sets the InitializationVectorInManifest field's value. func (s *CmafEncryptionSettings) SetInitializationVectorInManifest(v string) *CmafEncryptionSettings { s.InitializationVectorInManifest = &v return s } // SetSpekeKeyProvider sets the SpekeKeyProvider field's value. func (s *CmafEncryptionSettings) SetSpekeKeyProvider(v *SpekeKeyProviderCmaf) *CmafEncryptionSettings { s.SpekeKeyProvider = v return s } // SetStaticKeyProvider sets the StaticKeyProvider field's value. func (s *CmafEncryptionSettings) SetStaticKeyProvider(v *StaticKeyProvider) *CmafEncryptionSettings { s.StaticKeyProvider = v return s } // SetType sets the Type field's value. func (s *CmafEncryptionSettings) SetType(v string) *CmafEncryptionSettings { s.Type = &v return s } // Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. // When you work directly in your JSON job specification, include this object // and any required children when you set Type, under OutputGroupSettings, to // CMAF_GROUP_SETTINGS. type CmafGroupSettings struct { _ struct{} `type:"structure"` // By default, the service creates one top-level .m3u8 HLS manifest and one // top -level .mpd DASH manifest for each CMAF output group in your job. These // default manifests reference every output in the output group. To create additional // top-level manifests that reference a subset of the outputs in the output // group, specify a list of them here. For each additional manifest that you // specify, the service creates one HLS manifest and one DASH manifest. AdditionalManifests []*CmafAdditionalManifest `locationName:"additionalManifests" type:"list"` // A partial URI prefix that will be put in the manifest file at the top level // BaseURL element. 
Can be used if streams are delivered from a different URL // than the manifest file. BaseUrl *string `locationName:"baseUrl" type:"string"` // Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no // tag. Otherwise, keep the default value Enabled (ENABLED) and control caching // in your video distribution set up. For example, use the Cache-Control http // header. ClientCache *string `locationName:"clientCache" type:"string" enum:"CmafClientCache"` // Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist // generation. CodecSpecification *string `locationName:"codecSpecification" type:"string" enum:"CmafCodecSpecification"` // Use Destination (Destination) to specify the S3 output location and the output // filename base. Destination accepts format identifiers. If you do not specify // the base filename in the URI, the service will use the filename of the input // file. If your job has multiple inputs, the service uses the filename of the // first input file. Destination *string `locationName:"destination" type:"string"` // Settings associated with the destination. Will vary based on the type of // destination DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"` // DRM settings. Encryption *CmafEncryptionSettings `locationName:"encryption" type:"structure"` // Length of fragments to generate (in seconds). Fragment length must be compatible // with GOP size and Framerate. Note that fragments will end on the next keyframe // after this number of seconds, so actual fragment length may be longer. When // Emit Single File is checked, the fragmentation is internal to a single output // file and it does not cause the creation of many output files as in other // output types. FragmentLength *int64 `locationName:"fragmentLength" min:"1" type:"integer"` // Specify whether MediaConvert generates images for trick play. Keep the default // value, None (NONE), to not generate any images. 
Choose Thumbnail (THUMBNAIL) // to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) // to generate tiled thumbnails and full-resolution images of single frames. // When you enable Write HLS manifest (WriteHlsManifest), MediaConvert creates // a child manifest for each set of images that you generate and adds corresponding // entries to the parent manifest. When you enable Write DASH manifest (WriteDashManifest), // MediaConvert adds an entry in the .mpd manifest for each set of images that // you generate. A common application for these images is Roku trick mode. The // thumbnails and full-frame images that MediaConvert creates with this feature // are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md ImageBasedTrickPlay *string `locationName:"imageBasedTrickPlay" type:"string" enum:"CmafImageBasedTrickPlay"` // When set to GZIP, compresses HLS playlist. ManifestCompression *string `locationName:"manifestCompression" type:"string" enum:"CmafManifestCompression"` // Indicates whether the output manifest should use floating point values for // segment duration. ManifestDurationFormat *string `locationName:"manifestDurationFormat" type:"string" enum:"CmafManifestDurationFormat"` // Minimum time of initially buffered media that is needed to ensure smooth // playout. MinBufferTime *int64 `locationName:"minBufferTime" type:"integer"` // Keep this setting at the default value of 0, unless you are troubleshooting // a problem with how devices play back the end of your video asset. If you // know that player devices are hanging on the final segment of your video because // the length of your final segment is too short, use this setting to specify // a minimum final segment length, in seconds. Choose a value that is greater // than or equal to 1 and less than your segment length. 
When you specify a // value for this setting, the encoder will combine any final segment that is // shorter than the length that you specify with the previous segment. For example, // your segment length is 3 seconds and your final segment is .5 seconds without // a minimum final segment length; when you set the minimum final segment length // to 1, your final segment is 3.5 seconds. MinFinalSegmentLength *float64 `locationName:"minFinalSegmentLength" type:"double"` // Specify whether your DASH profile is on-demand or main. When you choose Main // profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 // in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), // the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. // When you choose On-demand, you must also set the output group setting Segment // control (SegmentControl) to Single file (SINGLE_FILE). MpdProfile *string `locationName:"mpdProfile" type:"string" enum:"CmafMpdProfile"` // Use this setting only when your output video stream has B-frames, which causes // the initial presentation time stamp (PTS) to be offset from the initial decode // time stamp (DTS). Specify how MediaConvert handles PTS when writing time // stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) // when you want MediaConvert to use the initial PTS as the first time stamp // in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore // the initial PTS in the video stream and instead write the initial time stamp // as zero in the manifest. For outputs that don't have B-frames, the time stamps // in your DASH manifests start at zero regardless of your choice here. 
PtsOffsetHandlingForBFrames *string `locationName:"ptsOffsetHandlingForBFrames" type:"string" enum:"CmafPtsOffsetHandlingForBFrames"` // When set to SINGLE_FILE, a single output file is generated, which is internally // segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, // separate segment files will be created. SegmentControl *string `locationName:"segmentControl" type:"string" enum:"CmafSegmentControl"` // Use this setting to specify the length, in seconds, of each individual CMAF // segment. This value applies to the whole package; that is, to every output // in the output group. Note that segments end on the first keyframe after this // number of seconds, so the actual segment length might be slightly longer. // If you set Segment control (CmafSegmentControl) to single file, the service // puts the content of each output in a single file that has metadata that marks // these segments. If you set it to segmented files, the service creates multiple // files for each output, each with the content of one segment. SegmentLength *int64 `locationName:"segmentLength" min:"1" type:"integer"` // Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag // of variant manifest. StreamInfResolution *string `locationName:"streamInfResolution" type:"string" enum:"CmafStreamInfResolution"` // When set to ENABLED, a DASH MPD manifest will be generated for this output. WriteDashManifest *string `locationName:"writeDashManifest" type:"string" enum:"CmafWriteDASHManifest"` // When set to ENABLED, an Apple HLS manifest will be generated for this output. WriteHlsManifest *string `locationName:"writeHlsManifest" type:"string" enum:"CmafWriteHLSManifest"` // When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation), // your DASH manifest shows precise segment durations. 
The segment duration // information appears inside the SegmentTimeline element, inside SegmentTemplate // at the Representation level. When this feature isn't enabled, the segment // durations in your DASH manifest are approximate. The segment duration information // appears in the duration attribute of the SegmentTemplate element. WriteSegmentTimelineInRepresentation *string `locationName:"writeSegmentTimelineInRepresentation" type:"string" enum:"CmafWriteSegmentTimelineInRepresentation"` } // String returns the string representation func (s CmafGroupSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CmafGroupSettings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *CmafGroupSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CmafGroupSettings"} if s.FragmentLength != nil && *s.FragmentLength < 1 { invalidParams.Add(request.NewErrParamMinValue("FragmentLength", 1)) } if s.SegmentLength != nil && *s.SegmentLength < 1 { invalidParams.Add(request.NewErrParamMinValue("SegmentLength", 1)) } if s.AdditionalManifests != nil { for i, v := range s.AdditionalManifests { if v == nil { continue } if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(request.ErrInvalidParams)) } } } if s.Encryption != nil { if err := s.Encryption.Validate(); err != nil { invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAdditionalManifests sets the AdditionalManifests field's value. func (s *CmafGroupSettings) SetAdditionalManifests(v []*CmafAdditionalManifest) *CmafGroupSettings { s.AdditionalManifests = v return s } // SetBaseUrl sets the BaseUrl field's value. 
func (s *CmafGroupSettings) SetBaseUrl(v string) *CmafGroupSettings { s.BaseUrl = &v return s } // SetClientCache sets the ClientCache field's value. func (s *CmafGroupSettings) SetClientCache(v string) *CmafGroupSettings { s.ClientCache = &v return s } // SetCodecSpecification sets the CodecSpecification field's value. func (s *CmafGroupSettings) SetCodecSpecification(v string) *CmafGroupSettings { s.CodecSpecification = &v return s } // SetDestination sets the Destination field's value. func (s *CmafGroupSettings) SetDestination(v string) *CmafGroupSettings { s.Destination = &v return s } // SetDestinationSettings sets the DestinationSettings field's value. func (s *CmafGroupSettings) SetDestinationSettings(v *DestinationSettings) *CmafGroupSettings { s.DestinationSettings = v return s } // SetEncryption sets the Encryption field's value. func (s *CmafGroupSettings) SetEncryption(v *CmafEncryptionSettings) *CmafGroupSettings { s.Encryption = v return s } // SetFragmentLength sets the FragmentLength field's value. func (s *CmafGroupSettings) SetFragmentLength(v int64) *CmafGroupSettings { s.FragmentLength = &v return s } // SetImageBasedTrickPlay sets the ImageBasedTrickPlay field's value. func (s *CmafGroupSettings) SetImageBasedTrickPlay(v string) *CmafGroupSettings { s.ImageBasedTrickPlay = &v return s } // SetManifestCompression sets the ManifestCompression field's value. func (s *CmafGroupSettings) SetManifestCompression(v string) *CmafGroupSettings { s.ManifestCompression = &v return s } // SetManifestDurationFormat sets the ManifestDurationFormat field's value. func (s *CmafGroupSettings) SetManifestDurationFormat(v string) *CmafGroupSettings { s.ManifestDurationFormat = &v return s } // SetMinBufferTime sets the MinBufferTime field's value. func (s *CmafGroupSettings) SetMinBufferTime(v int64) *CmafGroupSettings { s.MinBufferTime = &v return s } // SetMinFinalSegmentLength sets the MinFinalSegmentLength field's value. 
func (s *CmafGroupSettings) SetMinFinalSegmentLength(v float64) *CmafGroupSettings {
    s.MinFinalSegmentLength = &v
    return s
}

// SetMpdProfile sets the MpdProfile field's value.
func (s *CmafGroupSettings) SetMpdProfile(v string) *CmafGroupSettings {
    s.MpdProfile = &v
    return s
}

// SetPtsOffsetHandlingForBFrames sets the PtsOffsetHandlingForBFrames field's value.
func (s *CmafGroupSettings) SetPtsOffsetHandlingForBFrames(v string) *CmafGroupSettings {
    s.PtsOffsetHandlingForBFrames = &v
    return s
}

// SetSegmentControl sets the SegmentControl field's value.
func (s *CmafGroupSettings) SetSegmentControl(v string) *CmafGroupSettings {
    s.SegmentControl = &v
    return s
}

// SetSegmentLength sets the SegmentLength field's value.
func (s *CmafGroupSettings) SetSegmentLength(v int64) *CmafGroupSettings {
    s.SegmentLength = &v
    return s
}

// SetStreamInfResolution sets the StreamInfResolution field's value.
func (s *CmafGroupSettings) SetStreamInfResolution(v string) *CmafGroupSettings {
    s.StreamInfResolution = &v
    return s
}

// SetWriteDashManifest sets the WriteDashManifest field's value.
func (s *CmafGroupSettings) SetWriteDashManifest(v string) *CmafGroupSettings {
    s.WriteDashManifest = &v
    return s
}

// SetWriteHlsManifest sets the WriteHlsManifest field's value.
func (s *CmafGroupSettings) SetWriteHlsManifest(v string) *CmafGroupSettings {
    s.WriteHlsManifest = &v
    return s
}

// SetWriteSegmentTimelineInRepresentation sets the WriteSegmentTimelineInRepresentation field's value.
func (s *CmafGroupSettings) SetWriteSegmentTimelineInRepresentation(v string) *CmafGroupSettings {
    s.WriteSegmentTimelineInRepresentation = &v
    return s
}

// These settings relate to the fragmented MP4 container for the segments in
// your CMAF outputs.
type CmfcSettings struct {
    // Marker field used by the SDK's marshaling machinery (see the type:"structure"
    // tag); it is never populated.
    _ struct{} `type:"structure"`

    // Specify this setting only when your output will be consumed by a downstream
    // repackaging workflow that is sensitive to very small duration differences
    // between video and audio.
// For this situation, choose Match video duration
    // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
    // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
    // MediaConvert pads the output audio streams with silence or trims them to
    // ensure that the total duration of each audio stream is at least as long as
    // the total duration of the video stream. After padding or trimming, the audio
    // stream duration is no more than one frame longer than the video stream. MediaConvert
    // applies audio padding or trimming only to the end of the last segment of
    // the output. For unsegmented outputs, MediaConvert adds padding only to the
    // end of the file. When you keep the default value, any minor discrepancies
    // between audio and video duration will depend on your output audio codec.
    AudioDuration *string `locationName:"audioDuration" type:"string" enum:"CmfcAudioDuration"`

    // Specify the audio rendition group for this audio rendition. Specify up to
    // one value for each audio output in your output group. This value appears
    // in your HLS parent manifest in the EXT-X-MEDIA tag of TYPE=AUDIO, as the
    // value for the GROUP-ID attribute. For example, if you specify "audio_aac_1"
    // for Audio group ID, it appears in your manifest like this: #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio_aac_1".
    // Related setting: To associate the rendition group that this audio track belongs
    // to with a video rendition, include the same value that you provide here for
    // that video output's setting Audio rendition sets (audioRenditionSets).
    AudioGroupId *string `locationName:"audioGroupId" type:"string"`

    // List the audio rendition groups that you want included with this video rendition.
    // Use a comma-separated list. For example, say you want to include the audio
    // rendition groups that have the audio group IDs "audio_aac_1" and "audio_dolby".
    // Then you would specify this value: "audio_aac_1, audio_dolby". Related setting:
    // The rendition groups that you include in your comma-separated list should
    // all match values that you specify in the setting Audio group ID (AudioGroupId)
    // for audio renditions in the same output group as this video rendition. Default
    // behavior: If you don't specify anything here and for Audio group ID, MediaConvert
    // puts each audio variant in its own audio rendition group and associates it
    // with every video variant. Each value in your list appears in your HLS parent
    // manifest in the EXT-X-STREAM-INF tag as the value for the AUDIO attribute.
    // To continue the previous example, say that the file name for the child manifest
    // for your video rendition is "amazing_video_1.m3u8". Then, in your parent
    // manifest, each value will appear on separate lines, like this: #EXT-X-STREAM-INF:AUDIO="audio_aac_1"...
    // amazing_video_1.m3u8 #EXT-X-STREAM-INF:AUDIO="audio_dolby"... amazing_video_1.m3u8
    AudioRenditionSets *string `locationName:"audioRenditionSets" type:"string"`

    // Use this setting to control the values that MediaConvert puts in your HLS
    // parent playlist to control how the client player selects which audio track
    // to play. The other options for this setting determine the values that MediaConvert
    // writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry
    // for the audio variant. For more information about these attributes, see the
    // Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist.
    // Choose Alternate audio, auto select, default (ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT)
    // to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant
    // in your output group. Choose Alternate audio, auto select, not default (ALTERNATE_AUDIO_AUTO_SELECT)
    // to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select
    // to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this
    // setting, MediaConvert defaults to Alternate audio, auto select, default.
    // When there is more than one variant in your output group, you must explicitly
    // choose a value for this setting.
    AudioTrackType *string `locationName:"audioTrackType" type:"string" enum:"CmfcAudioTrackType"`

    // Specify whether to flag this audio track as descriptive video service (DVS)
    // in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes
    // the parameter CHARACTERISTICS="public.accessibility.describes-video" in the
    // EXT-X-MEDIA entry for this track. When you keep the default choice, Don't
    // flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can
    // help with accessibility on Apple devices. For more information, see the Apple
    // documentation.
    DescriptiveVideoServiceFlag *string `locationName:"descriptiveVideoServiceFlag" type:"string" enum:"CmfcDescriptiveVideoServiceFlag"`

    // Choose Include (INCLUDE) to have MediaConvert generate an HLS child manifest
    // that lists only the I-frames for this rendition, in addition to your regular
    // manifest for this rendition. You might use this manifest as part of a workflow
    // that creates preview functions for your video. MediaConvert adds both the
    // I-frame only child manifest and the regular child manifest to the parent
    // manifest. When you don't need the I-frame only child manifest, keep the default
    // value Exclude (EXCLUDE).
    IFrameOnlyManifest *string `locationName:"iFrameOnlyManifest" type:"string" enum:"CmfcIFrameOnlyManifest"`

    // Use this setting only when you specify SCTE-35 markers from ESAM. Choose
    // INSERT to put SCTE-35 markers in this output at the insertion points that
    // you specify in an ESAM XML document. Provide the document in the setting
    // SCC XML (sccXml).
    Scte35Esam *string `locationName:"scte35Esam" type:"string" enum:"CmfcScte35Esam"`

    // Ignore this setting unless you have SCTE-35 markers in your input video file.
// Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear
    // in your input to also appear in this output. Choose None (NONE) if you don't
    // want those SCTE-35 markers in this output.
    Scte35Source *string `locationName:"scte35Source" type:"string" enum:"CmfcScte35Source"`
}

// String returns the string representation
func (s CmfcSettings) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CmfcSettings) GoString() string {
    return s.String()
}

// SetAudioDuration sets the AudioDuration field's value.
func (s *CmfcSettings) SetAudioDuration(v string) *CmfcSettings {
    s.AudioDuration = &v
    return s
}

// SetAudioGroupId sets the AudioGroupId field's value.
func (s *CmfcSettings) SetAudioGroupId(v string) *CmfcSettings {
    s.AudioGroupId = &v
    return s
}

// SetAudioRenditionSets sets the AudioRenditionSets field's value.
func (s *CmfcSettings) SetAudioRenditionSets(v string) *CmfcSettings {
    s.AudioRenditionSets = &v
    return s
}

// SetAudioTrackType sets the AudioTrackType field's value.
func (s *CmfcSettings) SetAudioTrackType(v string) *CmfcSettings {
    s.AudioTrackType = &v
    return s
}

// SetDescriptiveVideoServiceFlag sets the DescriptiveVideoServiceFlag field's value.
func (s *CmfcSettings) SetDescriptiveVideoServiceFlag(v string) *CmfcSettings {
    s.DescriptiveVideoServiceFlag = &v
    return s
}

// SetIFrameOnlyManifest sets the IFrameOnlyManifest field's value.
func (s *CmfcSettings) SetIFrameOnlyManifest(v string) *CmfcSettings {
    s.IFrameOnlyManifest = &v
    return s
}

// SetScte35Esam sets the Scte35Esam field's value.
func (s *CmfcSettings) SetScte35Esam(v string) *CmfcSettings {
    s.Scte35Esam = &v
    return s
}

// SetScte35Source sets the Scte35Source field's value.
func (s *CmfcSettings) SetScte35Source(v string) *CmfcSettings {
    s.Scte35Source = &v
    return s
}

// Settings for color correction.
type ColorCorrector struct {
    // Marker field used by the SDK's marshaling machinery (see the type:"structure"
    // tag); it is never populated.
    _ struct{} `type:"structure"`

    // Brightness level.
Brightness *int64 `locationName:"brightness" min:"1" type:"integer"`

    // Specify the color space you want for this output. The service supports conversion
    // between HDR formats, between SDR formats, from SDR to HDR, and from HDR to
    // SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted
    // video has an HDR format, but visually appears the same as an unconverted
    // output. HDR to SDR conversion uses Elemental tone mapping technology to approximate
    // the outcome of manually regrading from HDR to SDR.
    ColorSpaceConversion *string `locationName:"colorSpaceConversion" type:"string" enum:"ColorSpaceConversion"`

    // Contrast level.
    Contrast *int64 `locationName:"contrast" min:"1" type:"integer"`

    // Use these settings when you convert to the HDR 10 color space. Specify the
    // SMPTE ST 2086 Mastering Display Color Volume static metadata that you want
    // signaled in the output. These values don't affect the pixel values that are
    // encoded in the video stream. They are intended to help the downstream video
    // player display content in a way that reflects the intentions of the content
    // creator. When you set Color space conversion (ColorSpaceConversion) to HDR
    // 10 (FORCE_HDR10), these settings are required. You must set values for Max
    // frame average light level (maxFrameAverageLightLevel) and Max content light
    // level (maxContentLightLevel); these settings don't have a default value.
    // The default values for the other HDR 10 metadata settings are defined by
    // the P3D65 color space. For more information about MediaConvert HDR jobs,
    // see https://docs.aws.amazon.com/console/mediaconvert/hdr.
    Hdr10Metadata *Hdr10Metadata `locationName:"hdr10Metadata" type:"structure"`

    // Hue in degrees.
    Hue *int64 `locationName:"hue" type:"integer"`

    // Specify the video color sample range for this output. To create a full range
    // output, you must start with a full range YUV input and keep the default value,
    // None (NONE). To create a limited range output from a full range input, choose
    // Limited range (LIMITED_RANGE_SQUEEZE). With RGB inputs, your output is always
    // limited range, regardless of your choice here. When you create a limited
    // range output from a full range input, MediaConvert limits the active pixel
    // values in a way that depends on the output's bit depth: 8-bit outputs contain
    // only values from 16 through 235 and 10-bit outputs contain only values from
    // 64 through 940. With this conversion, MediaConvert also changes the output
    // metadata to note the limited range.
    SampleRangeConversion *string `locationName:"sampleRangeConversion" type:"string" enum:"SampleRangeConversion"`

    // Saturation level.
    Saturation *int64 `locationName:"saturation" min:"1" type:"integer"`
}

// String returns the string representation
func (s ColorCorrector) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ColorCorrector) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Note that only the generated lower bounds are checked here (min 1 for
// Brightness/Contrast/Saturation, min -180 for Hue); upper bounds are enforced
// by the service.
func (s *ColorCorrector) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "ColorCorrector"}
    if s.Brightness != nil && *s.Brightness < 1 {
        invalidParams.Add(request.NewErrParamMinValue("Brightness", 1))
    }
    if s.Contrast != nil && *s.Contrast < 1 {
        invalidParams.Add(request.NewErrParamMinValue("Contrast", 1))
    }
    if s.Hue != nil && *s.Hue < -180 {
        invalidParams.Add(request.NewErrParamMinValue("Hue", -180))
    }
    if s.Saturation != nil && *s.Saturation < 1 {
        invalidParams.Add(request.NewErrParamMinValue("Saturation", 1))
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetBrightness sets the Brightness field's value.
func (s *ColorCorrector) SetBrightness(v int64) *ColorCorrector {
    s.Brightness = &v
    return s
}

// SetColorSpaceConversion sets the ColorSpaceConversion field's value.
func (s *ColorCorrector) SetColorSpaceConversion(v string) *ColorCorrector {
    s.ColorSpaceConversion = &v
    return s
}

// SetContrast sets the Contrast field's value.
func (s *ColorCorrector) SetContrast(v int64) *ColorCorrector {
    s.Contrast = &v
    return s
}

// SetHdr10Metadata sets the Hdr10Metadata field's value.
func (s *ColorCorrector) SetHdr10Metadata(v *Hdr10Metadata) *ColorCorrector {
    s.Hdr10Metadata = v
    return s
}

// SetHue sets the Hue field's value.
func (s *ColorCorrector) SetHue(v int64) *ColorCorrector {
    s.Hue = &v
    return s
}

// SetSampleRangeConversion sets the SampleRangeConversion field's value.
func (s *ColorCorrector) SetSampleRangeConversion(v string) *ColorCorrector {
    s.SampleRangeConversion = &v
    return s
}

// SetSaturation sets the Saturation field's value.
func (s *ColorCorrector) SetSaturation(v int64) *ColorCorrector {
    s.Saturation = &v
    return s
}

// ConflictException is a modeled service error returned by AWS Elemental
// MediaConvert operations (it appears in the Returned Error Types list of the
// operations above). It satisfies the awserr.Error interface via the methods
// below.
type ConflictException struct {
    _            struct{}                  `type:"structure"`
    RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

    Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s ConflictException) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ConflictException) GoString() string {
    return s.String()
}

// newErrorConflictException constructs the error from the response metadata;
// the message body is unmarshaled separately by the protocol layer.
func newErrorConflictException(v protocol.ResponseMetadata) error {
    return &ConflictException{
        RespMetadata: v,
    }
}

// Code returns the exception type name.
func (s *ConflictException) Code() string {
    return "ConflictException"
}

// Message returns the exception's message.
func (s *ConflictException) Message() string {
    if s.Message_ != nil {
        return *s.Message_
    }
    return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *ConflictException) OrigErr() error {
    return nil
}

func (s *ConflictException) Error() string {
    return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *ConflictException) StatusCode() int {
    return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *ConflictException) RequestID() string {
    return s.RespMetadata.RequestID
}

// Container specific settings.
type ContainerSettings struct {
    _ struct{} `type:"structure"`

    // These settings relate to the fragmented MP4 container for the segments in
    // your CMAF outputs.
    CmfcSettings *CmfcSettings `locationName:"cmfcSettings" type:"structure"`

    // Container for this output. Some containers require a container settings object.
    // If not specified, the default object will be created.
    Container *string `locationName:"container" type:"string" enum:"ContainerType"`

    // Settings for F4v container
    F4vSettings *F4vSettings `locationName:"f4vSettings" type:"structure"`

    // MPEG-2 TS container settings. These apply to outputs in a File output group
    // when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS).
    // In these assets, data is organized by the program map table (PMT). Each transport
    // stream program contains subsets of data, including audio, video, and metadata.
    // Each of these subsets of data has a numerical label called a packet identifier
    // (PID). Each transport stream program corresponds to one MediaConvert output.
    // The PMT lists the types of data in a program along with their PID. Downstream
    // systems and players use the program map table to look up the PID for each
    // type of data it accesses and then uses the PIDs to locate specific data within
    // the asset.
    M2tsSettings *M2tsSettings `locationName:"m2tsSettings" type:"structure"`

    // These settings relate to the MPEG-2 transport stream (MPEG2-TS) container
    // for the MPEG2-TS segments in your HLS outputs.
    M3u8Settings *M3u8Settings `locationName:"m3u8Settings" type:"structure"`

    // These settings relate to your QuickTime MOV output container.
    MovSettings *MovSettings `locationName:"movSettings" type:"structure"`

    // These settings relate to your MP4 output container. You can create audio
    // only outputs with this container. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/supported-codecs-containers-audio-only.html#output-codecs-and-containers-supported-for-audio-only.
    Mp4Settings *Mp4Settings `locationName:"mp4Settings" type:"structure"`

    // These settings relate to the fragmented MP4 container for the segments in
    // your DASH outputs.
    MpdSettings *MpdSettings `locationName:"mpdSettings" type:"structure"`

    // These settings relate to your MXF output container.
    MxfSettings *MxfSettings `locationName:"mxfSettings" type:"structure"`
}

// String returns the string representation
func (s ContainerSettings) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ContainerSettings) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Only the M2TS and M3U8 settings carry nested constraints; the other container
// settings have nothing to validate client-side.
func (s *ContainerSettings) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "ContainerSettings"}
    if s.M2tsSettings != nil {
        if err := s.M2tsSettings.Validate(); err != nil {
            invalidParams.AddNested("M2tsSettings", err.(request.ErrInvalidParams))
        }
    }
    if s.M3u8Settings != nil {
        if err := s.M3u8Settings.Validate(); err != nil {
            invalidParams.AddNested("M3u8Settings", err.(request.ErrInvalidParams))
        }
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetCmfcSettings sets the CmfcSettings field's value.
func (s *ContainerSettings) SetCmfcSettings(v *CmfcSettings) *ContainerSettings {
    s.CmfcSettings = v
    return s
}

// SetContainer sets the Container field's value.
func (s *ContainerSettings) SetContainer(v string) *ContainerSettings {
    s.Container = &v
    return s
}

// SetF4vSettings sets the F4vSettings field's value.
func (s *ContainerSettings) SetF4vSettings(v *F4vSettings) *ContainerSettings {
    s.F4vSettings = v
    return s
}

// SetM2tsSettings sets the M2tsSettings field's value.
func (s *ContainerSettings) SetM2tsSettings(v *M2tsSettings) *ContainerSettings {
    s.M2tsSettings = v
    return s
}

// SetM3u8Settings sets the M3u8Settings field's value.
func (s *ContainerSettings) SetM3u8Settings(v *M3u8Settings) *ContainerSettings {
    s.M3u8Settings = v
    return s
}

// SetMovSettings sets the MovSettings field's value.
func (s *ContainerSettings) SetMovSettings(v *MovSettings) *ContainerSettings {
    s.MovSettings = v
    return s
}

// SetMp4Settings sets the Mp4Settings field's value.
func (s *ContainerSettings) SetMp4Settings(v *Mp4Settings) *ContainerSettings {
    s.Mp4Settings = v
    return s
}

// SetMpdSettings sets the MpdSettings field's value.
func (s *ContainerSettings) SetMpdSettings(v *MpdSettings) *ContainerSettings {
    s.MpdSettings = v
    return s
}

// SetMxfSettings sets the MxfSettings field's value.
func (s *ContainerSettings) SetMxfSettings(v *MxfSettings) *ContainerSettings {
    s.MxfSettings = v
    return s
}

// Send your create job request with your job settings and IAM role. Optionally,
// include user metadata and the ARN for the queue.
type CreateJobInput struct {
    _ struct{} `type:"structure"`

    // Optional. Accelerated transcoding can significantly speed up jobs with long,
    // visually complex content. Outputs that use this feature incur pro-tier pricing.
    // For information about feature limitations, see the AWS Elemental MediaConvert
    // User Guide.
    AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"`

    // Optional. Choose a tag type that AWS Billing and Cost Management will use
    // to sort your AWS Elemental MediaConvert costs on any billing report that
    // you set up. Any transcoding outputs that don't have an associated tag will
    // appear in your billing report unsorted. If you don't choose a valid value
    // for this field, your job outputs will appear on the billing report unsorted.
    BillingTagsSource *string `locationName:"billingTagsSource" type:"string" enum:"BillingTagsSource"`

    // Optional. Idempotency token for CreateJob operation.
    ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"`

    // Optional. Use queue hopping to avoid overly long waits in the backlog of
    // the queue that you submit your job to. Specify an alternate queue and the
    // maximum time that your job will wait in the initial queue before hopping.
    // For more information about this feature, see the AWS Elemental MediaConvert
    // User Guide.
    HopDestinations []*HopDestination `locationName:"hopDestinations" type:"list"`

    // Optional. When you create a job, you can either specify a job template or
    // specify the transcoding settings individually.
    JobTemplate *string `locationName:"jobTemplate" type:"string"`

    // Optional. Specify the relative priority for this job. In any given queue,
    // the service begins processing the job with the highest value first. When
    // more than one job has the same priority, the service begins processing the
    // job that you submitted first. If you don't specify a priority, the service
    // uses the default value 0.
    Priority *int64 `locationName:"priority" type:"integer"`

    // Optional. When you create a job, you can specify a queue to send it to. If
    // you don't specify, the job will go to the default queue. For more about queues,
    // see the User Guide topic at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html.
    Queue *string `locationName:"queue" type:"string"`

    // Required. The IAM role you use for creating this job. For details about permissions,
    // see the User Guide topic at https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html.
    //
    // Role is a required field
    Role *string `locationName:"role" type:"string" required:"true"`

    // JobSettings contains all the transcode settings for a job.
    //
    // Settings is a required field
    Settings *JobSettings `locationName:"settings" type:"structure" required:"true"`

    // Optional. Enable this setting when you run a test job to estimate how many
    // reserved transcoding slots (RTS) you need. When this is enabled, MediaConvert
    // runs your job from an on-demand queue with similar performance to what you
    // will see with one RTS in a reserved queue. This setting is disabled by default.
    SimulateReservedQueue *string `locationName:"simulateReservedQueue" type:"string" enum:"SimulateReservedQueue"`

    // Optional. Specify how often MediaConvert sends STATUS_UPDATE events to Amazon
    // CloudWatch Events. Set the interval, in seconds, between status updates.
    // MediaConvert sends an update at this interval from the time the service begins
    // processing your job to the time it completes the transcode or encounters
    // an error.
    StatusUpdateInterval *string `locationName:"statusUpdateInterval" type:"string" enum:"StatusUpdateInterval"`

    // Optional. The tags that you want to add to the resource. You can tag resources
    // with a key-value pair or with only a key. Use standard AWS tags on your job
    // for automatic integration with AWS services and for custom integrations and
    // workflows.
    Tags map[string]*string `locationName:"tags" type:"map"`

    // Optional. User-defined metadata that you want to associate with an MediaConvert
    // job. You specify metadata in key/value pairs. Use only for existing integrations
    // or workflows that rely on job metadata tags. Otherwise, we recommend that
    // you use standard AWS tags.
UserMetadata map[string]*string `locationName:"userMetadata" type:"map"`
}

// String returns the string representation
func (s CreateJobInput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateJobInput) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// All violations are accumulated so the caller sees every problem at once;
// nested structures validate themselves and their errors are wrapped with the
// field path.
func (s *CreateJobInput) Validate() error {
    invalidParams := request.ErrInvalidParams{Context: "CreateJobInput"}
    if s.Priority != nil && *s.Priority < -50 {
        invalidParams.Add(request.NewErrParamMinValue("Priority", -50))
    }
    if s.Role == nil {
        invalidParams.Add(request.NewErrParamRequired("Role"))
    }
    if s.Settings == nil {
        invalidParams.Add(request.NewErrParamRequired("Settings"))
    }
    if s.AccelerationSettings != nil {
        if err := s.AccelerationSettings.Validate(); err != nil {
            invalidParams.AddNested("AccelerationSettings", err.(request.ErrInvalidParams))
        }
    }
    if s.HopDestinations != nil {
        for i, v := range s.HopDestinations {
            if v == nil {
                continue
            }
            if err := v.Validate(); err != nil {
                invalidParams.AddNested(fmt.Sprintf("%s[%v]", "HopDestinations", i), err.(request.ErrInvalidParams))
            }
        }
    }
    if s.Settings != nil {
        if err := s.Settings.Validate(); err != nil {
            invalidParams.AddNested("Settings", err.(request.ErrInvalidParams))
        }
    }

    if invalidParams.Len() > 0 {
        return invalidParams
    }
    return nil
}

// SetAccelerationSettings sets the AccelerationSettings field's value.
func (s *CreateJobInput) SetAccelerationSettings(v *AccelerationSettings) *CreateJobInput {
    s.AccelerationSettings = v
    return s
}

// SetBillingTagsSource sets the BillingTagsSource field's value.
func (s *CreateJobInput) SetBillingTagsSource(v string) *CreateJobInput {
    s.BillingTagsSource = &v
    return s
}

// SetClientRequestToken sets the ClientRequestToken field's value.
func (s *CreateJobInput) SetClientRequestToken(v string) *CreateJobInput {
    s.ClientRequestToken = &v
    return s
}

// SetHopDestinations sets the HopDestinations field's value.
func (s *CreateJobInput) SetHopDestinations(v []*HopDestination) *CreateJobInput {
    s.HopDestinations = v
    return s
}

// SetJobTemplate sets the JobTemplate field's value.
func (s *CreateJobInput) SetJobTemplate(v string) *CreateJobInput {
    s.JobTemplate = &v
    return s
}

// SetPriority sets the Priority field's value.
func (s *CreateJobInput) SetPriority(v int64) *CreateJobInput {
    s.Priority = &v
    return s
}

// SetQueue sets the Queue field's value.
func (s *CreateJobInput) SetQueue(v string) *CreateJobInput {
    s.Queue = &v
    return s
}

// SetRole sets the Role field's value.
func (s *CreateJobInput) SetRole(v string) *CreateJobInput {
    s.Role = &v
    return s
}

// SetSettings sets the Settings field's value.
func (s *CreateJobInput) SetSettings(v *JobSettings) *CreateJobInput {
    s.Settings = v
    return s
}

// SetSimulateReservedQueue sets the SimulateReservedQueue field's value.
func (s *CreateJobInput) SetSimulateReservedQueue(v string) *CreateJobInput {
    s.SimulateReservedQueue = &v
    return s
}

// SetStatusUpdateInterval sets the StatusUpdateInterval field's value.
func (s *CreateJobInput) SetStatusUpdateInterval(v string) *CreateJobInput {
    s.StatusUpdateInterval = &v
    return s
}

// SetTags sets the Tags field's value.
func (s *CreateJobInput) SetTags(v map[string]*string) *CreateJobInput {
    s.Tags = v
    return s
}

// SetUserMetadata sets the UserMetadata field's value.
func (s *CreateJobInput) SetUserMetadata(v map[string]*string) *CreateJobInput {
    s.UserMetadata = v
    return s
}

// Successful create job requests will return the job JSON.
type CreateJobOutput struct {
    _ struct{} `type:"structure"`

    // Each job converts an input file into an output file or files.
// For more information,
    // see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
    Job *Job `locationName:"job" type:"structure"`
}

// String returns the string representation
func (s CreateJobOutput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateJobOutput) GoString() string {
    return s.String()
}

// SetJob sets the Job field's value.
func (s *CreateJobOutput) SetJob(v *Job) *CreateJobOutput {
    s.Job = v
    return s
}

// Send your create job template request with the name of the template and the
// JSON for the template. The template JSON should include everything in a valid
// job, except for input location and filename, IAM role, and user metadata.
type CreateJobTemplateInput struct {
    _ struct{} `type:"structure"`

    // Accelerated transcoding can significantly speed up jobs with long, visually
    // complex content. Outputs that use this feature incur pro-tier pricing. For
    // information about feature limitations, see the AWS Elemental MediaConvert
    // User Guide.
    AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"`

    // Optional. A category for the job template you are creating.
    Category *string `locationName:"category" type:"string"`

    // Optional. A description of the job template you are creating.
    Description *string `locationName:"description" type:"string"`

    // Optional. Use queue hopping to avoid overly long waits in the backlog of
    // the queue that you submit your job to. Specify an alternate queue and the
    // maximum time that your job will wait in the initial queue before hopping.
    // For more information about this feature, see the AWS Elemental MediaConvert
    // User Guide.
    HopDestinations []*HopDestination `locationName:"hopDestinations" type:"list"`

    // The name of the job template you are creating.
    //
    // Name is a required field
    Name *string `locationName:"name" type:"string" required:"true"`

    // Specify the relative priority for this job. In any given queue, the service
    // begins processing the job with the highest value first. When more than one
    // job has the same priority, the service begins processing the job that you
    // submitted first. If you don't specify a priority, the service uses the default
    // value 0.
    Priority *int64 `locationName:"priority" type:"integer"`

    // Optional. The queue that jobs created from this template are assigned to.
    // If you don't specify this, jobs will go to the default queue.
    Queue *string `locationName:"queue" type:"string"`

    // JobTemplateSettings contains all the transcode settings saved in the template
    // that will be applied to jobs created from it.
    //
    // Settings is a required field
    Settings *JobTemplateSettings `locationName:"settings" type:"structure" required:"true"`

    // Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch
    // Events. Set the interval, in seconds, between status updates. MediaConvert
    // sends an update at this interval from the time the service begins processing
    // your job to the time it completes the transcode or encounters an error.
    StatusUpdateInterval *string `locationName:"statusUpdateInterval" type:"string" enum:"StatusUpdateInterval"`

    // The tags that you want to add to the resource. You can tag resources with
    // a key-value pair or with only a key.
    Tags map[string]*string `locationName:"tags" type:"map"`
}

// String returns the string representation
func (s CreateJobTemplateInput) String() string {
    return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateJobTemplateInput) GoString() string {
    return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateJobTemplateInput) Validate() error {
	// Client-side enforcement of the required/min constraints declared in the
	// struct tags, plus recursive validation of nested structures.
	invalidParams := request.ErrInvalidParams{Context: "CreateJobTemplateInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Priority != nil && *s.Priority < -50 {
		invalidParams.Add(request.NewErrParamMinValue("Priority", -50))
	}
	if s.Settings == nil {
		invalidParams.Add(request.NewErrParamRequired("Settings"))
	}
	if s.AccelerationSettings != nil {
		if err := s.AccelerationSettings.Validate(); err != nil {
			invalidParams.AddNested("AccelerationSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.HopDestinations != nil {
		for i, v := range s.HopDestinations {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "HopDestinations", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.Settings != nil {
		if err := s.Settings.Validate(); err != nil {
			invalidParams.AddNested("Settings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAccelerationSettings sets the AccelerationSettings field's value.
func (s *CreateJobTemplateInput) SetAccelerationSettings(v *AccelerationSettings) *CreateJobTemplateInput {
	s.AccelerationSettings = v
	return s
}

// SetCategory sets the Category field's value.
func (s *CreateJobTemplateInput) SetCategory(v string) *CreateJobTemplateInput {
	s.Category = &v
	return s
}

// SetDescription sets the Description field's value.
func (s *CreateJobTemplateInput) SetDescription(v string) *CreateJobTemplateInput {
	s.Description = &v
	return s
}

// SetHopDestinations sets the HopDestinations field's value.
func (s *CreateJobTemplateInput) SetHopDestinations(v []*HopDestination) *CreateJobTemplateInput {
	s.HopDestinations = v
	return s
}

// SetName sets the Name field's value.
func (s *CreateJobTemplateInput) SetName(v string) *CreateJobTemplateInput {
	s.Name = &v
	return s
}

// SetPriority sets the Priority field's value.
func (s *CreateJobTemplateInput) SetPriority(v int64) *CreateJobTemplateInput {
	s.Priority = &v
	return s
}

// SetQueue sets the Queue field's value.
func (s *CreateJobTemplateInput) SetQueue(v string) *CreateJobTemplateInput {
	s.Queue = &v
	return s
}

// SetSettings sets the Settings field's value.
func (s *CreateJobTemplateInput) SetSettings(v *JobTemplateSettings) *CreateJobTemplateInput {
	s.Settings = v
	return s
}

// SetStatusUpdateInterval sets the StatusUpdateInterval field's value.
func (s *CreateJobTemplateInput) SetStatusUpdateInterval(v string) *CreateJobTemplateInput {
	s.StatusUpdateInterval = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *CreateJobTemplateInput) SetTags(v map[string]*string) *CreateJobTemplateInput {
	s.Tags = v
	return s
}

// Successful create job template requests will return the template JSON.
type CreateJobTemplateOutput struct {
	_ struct{} `type:"structure"`

	// A job template is a pre-made set of encoding instructions that you can use
	// to quickly create a job.
	JobTemplate *JobTemplate `locationName:"jobTemplate" type:"structure"`
}

// String returns the string representation
func (s CreateJobTemplateOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateJobTemplateOutput) GoString() string {
	return s.String()
}

// SetJobTemplate sets the JobTemplate field's value.
func (s *CreateJobTemplateOutput) SetJobTemplate(v *JobTemplate) *CreateJobTemplateOutput {
	s.JobTemplate = v
	return s
}

// Send your create preset request with the name of the preset and the JSON
// for the output settings specified by the preset.
type CreatePresetInput struct {
	_ struct{} `type:"structure"`

	// Optional. A category for the preset you are creating.
	Category *string `locationName:"category" type:"string"`

	// Optional. A description of the preset you are creating.
	Description *string `locationName:"description" type:"string"`

	// The name of the preset you are creating.
	//
	// Name is a required field
	Name *string `locationName:"name" type:"string" required:"true"`

	// Settings for preset
	//
	// Settings is a required field
	Settings *PresetSettings `locationName:"settings" type:"structure" required:"true"`

	// The tags that you want to add to the resource. You can tag resources with
	// a key-value pair or with only a key.
	Tags map[string]*string `locationName:"tags" type:"map"`
}

// String returns the string representation
func (s CreatePresetInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreatePresetInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreatePresetInput) Validate() error {
	// Client-side enforcement of the required-field constraints declared in the
	// struct tags, plus recursive validation of the nested Settings.
	invalidParams := request.ErrInvalidParams{Context: "CreatePresetInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Settings == nil {
		invalidParams.Add(request.NewErrParamRequired("Settings"))
	}
	if s.Settings != nil {
		if err := s.Settings.Validate(); err != nil {
			invalidParams.AddNested("Settings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCategory sets the Category field's value.
func (s *CreatePresetInput) SetCategory(v string) *CreatePresetInput {
	s.Category = &v
	return s
}

// SetDescription sets the Description field's value.
func (s *CreatePresetInput) SetDescription(v string) *CreatePresetInput {
	s.Description = &v
	return s
}

// SetName sets the Name field's value.
func (s *CreatePresetInput) SetName(v string) *CreatePresetInput {
	s.Name = &v
	return s
}

// SetSettings sets the Settings field's value.
func (s *CreatePresetInput) SetSettings(v *PresetSettings) *CreatePresetInput {
	s.Settings = v
	return s
}

// SetTags sets the Tags field's value.
func (s *CreatePresetInput) SetTags(v map[string]*string) *CreatePresetInput {
	s.Tags = v
	return s
}

// Successful create preset requests will return the preset JSON.
type CreatePresetOutput struct {
	_ struct{} `type:"structure"`

	// A preset is a collection of preconfigured media conversion settings that
	// you want MediaConvert to apply to the output during the conversion process.
	Preset *Preset `locationName:"preset" type:"structure"`
}

// String returns the string representation
func (s CreatePresetOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreatePresetOutput) GoString() string {
	return s.String()
}

// SetPreset sets the Preset field's value.
func (s *CreatePresetOutput) SetPreset(v *Preset) *CreatePresetOutput {
	s.Preset = v
	return s
}

// Create an on-demand queue by sending a CreateQueue request with the name
// of the queue. Create a reserved queue by sending a CreateQueue request with
// the pricing plan set to RESERVED and with values specified for the settings
// under reservationPlanSettings. When you create a reserved queue, you enter
// into a 12-month commitment to purchase the RTS that you specify. You can't
// cancel this commitment.
type CreateQueueInput struct {
	_ struct{} `type:"structure"`

	// Optional. A description of the queue that you are creating.
	Description *string `locationName:"description" type:"string"`

	// The name of the queue that you are creating.
	//
	// Name is a required field
	Name *string `locationName:"name" type:"string" required:"true"`

	// Specifies whether the pricing plan for the queue is on-demand or reserved.
	// For on-demand, you pay per minute, billed in increments of .01 minute. For
	// reserved, you pay for the transcoding capacity of the entire queue, regardless
	// of how much or how little you use it. Reserved pricing requires a 12-month
	// commitment. When you use the API to create a queue, the default is on-demand.
	PricingPlan *string `locationName:"pricingPlan" type:"string" enum:"PricingPlan"`

	// Details about the pricing plan for your reserved queue. Required for reserved
	// queues and not applicable to on-demand queues.
	ReservationPlanSettings *ReservationPlanSettings `locationName:"reservationPlanSettings" type:"structure"`

	// Initial state of the queue. If you create a paused queue, then jobs in that
	// queue won't begin.
	Status *string `locationName:"status" type:"string" enum:"QueueStatus"`

	// The tags that you want to add to the resource. You can tag resources with
	// a key-value pair or with only a key.
	Tags map[string]*string `locationName:"tags" type:"map"`
}

// String returns the string representation
func (s CreateQueueInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateQueueInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateQueueInput) Validate() error {
	// Client-side enforcement of the required-field constraint on Name, plus
	// recursive validation of the optional ReservationPlanSettings.
	invalidParams := request.ErrInvalidParams{Context: "CreateQueueInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.ReservationPlanSettings != nil {
		if err := s.ReservationPlanSettings.Validate(); err != nil {
			invalidParams.AddNested("ReservationPlanSettings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDescription sets the Description field's value.
func (s *CreateQueueInput) SetDescription(v string) *CreateQueueInput {
	s.Description = &v
	return s
}

// SetName sets the Name field's value.
func (s *CreateQueueInput) SetName(v string) *CreateQueueInput {
	s.Name = &v
	return s
}

// SetPricingPlan sets the PricingPlan field's value.
func (s *CreateQueueInput) SetPricingPlan(v string) *CreateQueueInput {
	s.PricingPlan = &v
	return s
}

// SetReservationPlanSettings sets the ReservationPlanSettings field's value.
func (s *CreateQueueInput) SetReservationPlanSettings(v *ReservationPlanSettings) *CreateQueueInput {
	s.ReservationPlanSettings = v
	return s
}

// SetStatus sets the Status field's value.
func (s *CreateQueueInput) SetStatus(v string) *CreateQueueInput {
	s.Status = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *CreateQueueInput) SetTags(v map[string]*string) *CreateQueueInput {
	s.Tags = v
	return s
}

// Successful create queue requests return the name of the queue that you just
// created and information about it.
type CreateQueueOutput struct {
	_ struct{} `type:"structure"`

	// You can use queues to manage the resources that are available to your AWS
	// account for running multiple transcoding jobs at the same time. If you don't
	// specify a queue, the service sends all jobs through the default queue. For
	// more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html.
	Queue *Queue `locationName:"queue" type:"structure"`
}

// String returns the string representation
func (s CreateQueueOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateQueueOutput) GoString() string {
	return s.String()
}

// SetQueue sets the Queue field's value.
func (s *CreateQueueOutput) SetQueue(v *Queue) *CreateQueueOutput {
	s.Queue = v
	return s
}

// Specify the details for each additional DASH manifest that you want the service
// to generate for this output group. Each manifest can reference a different
// subset of outputs in the group.
type DashAdditionalManifest struct {
	_ struct{} `type:"structure"`

	// Specify a name modifier that the service adds to the name of this manifest
	// to make it different from the file names of the other main manifests in the
	// output group. For example, say that the default main manifest for your DASH
	// group is film-name.mpd.
If you enter "-no-premium" for this setting, then
	// the file name the service generates for this top-level manifest is film-name-no-premium.mpd.
	ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"`

	// Specify the outputs that you want this additional top-level manifest to reference.
	SelectedOutputs []*string `locationName:"selectedOutputs" type:"list"`
}

// String returns the string representation
func (s DashAdditionalManifest) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DashAdditionalManifest) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DashAdditionalManifest) Validate() error {
	// Client-side enforcement of the min:"1" length constraint declared in the
	// ManifestNameModifier struct tag.
	invalidParams := request.ErrInvalidParams{Context: "DashAdditionalManifest"}
	if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ManifestNameModifier", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetManifestNameModifier sets the ManifestNameModifier field's value.
func (s *DashAdditionalManifest) SetManifestNameModifier(v string) *DashAdditionalManifest {
	s.ManifestNameModifier = &v
	return s
}

// SetSelectedOutputs sets the SelectedOutputs field's value.
func (s *DashAdditionalManifest) SetSelectedOutputs(v []*string) *DashAdditionalManifest {
	s.SelectedOutputs = v
	return s
}

// Specifies DRM settings for DASH outputs.
type DashIsoEncryptionSettings struct {
	_ struct{} `type:"structure"`

	// This setting can improve the compatibility of your output with video players
	// on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption.
	// Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback
	// on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1).
	// If you choose Unencrypted SEI, for that output, the service will exclude
	// the access unit delimiter and will leave the SEI NAL units unencrypted.
	PlaybackDeviceCompatibility *string `locationName:"playbackDeviceCompatibility" type:"string" enum:"DashIsoPlaybackDeviceCompatibility"`

	// If your output group type is HLS, DASH, or Microsoft Smooth, use these settings
	// when doing DRM encryption with a SPEKE-compliant key provider. If your output
	// group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
	SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure"`
}

// String returns the string representation
func (s DashIsoEncryptionSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DashIsoEncryptionSettings) GoString() string {
	return s.String()
}

// SetPlaybackDeviceCompatibility sets the PlaybackDeviceCompatibility field's value.
func (s *DashIsoEncryptionSettings) SetPlaybackDeviceCompatibility(v string) *DashIsoEncryptionSettings {
	s.PlaybackDeviceCompatibility = &v
	return s
}

// SetSpekeKeyProvider sets the SpekeKeyProvider field's value.
func (s *DashIsoEncryptionSettings) SetSpekeKeyProvider(v *SpekeKeyProvider) *DashIsoEncryptionSettings {
	s.SpekeKeyProvider = v
	return s
}

// Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set Type, under OutputGroupSettings, to
// DASH_ISO_GROUP_SETTINGS.
type DashIsoGroupSettings struct {
	_ struct{} `type:"structure"`

	// By default, the service creates one .mpd DASH manifest for each DASH ISO
	// output group in your job. This default manifest references every output in
	// the output group. To create additional DASH manifests that reference a subset
	// of the outputs in the output group, specify a list of them here.
	AdditionalManifests []*DashAdditionalManifest `locationName:"additionalManifests" type:"list"`

	// Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or
	// Atmos) and your downstream workflow requires that your DASH manifest use
	// the Dolby channel configuration tag, rather than the MPEG one. For example,
	// you might need to use this to make dynamic ad insertion work. Specify which
	// audio channel configuration scheme ID URI MediaConvert writes in your DASH
	// manifest. Keep the default value, MPEG channel configuration (MPEG_CHANNEL_CONFIGURATION),
	// to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration.
	// Choose Dolby channel configuration (DOLBY_CHANNEL_CONFIGURATION) to have
	// MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011.
	AudioChannelConfigSchemeIdUri *string `locationName:"audioChannelConfigSchemeIdUri" type:"string" enum:"DashIsoGroupAudioChannelConfigSchemeIdUri"`

	// A partial URI prefix that will be put in the manifest (.mpd) file at the
	// top level BaseURL element. Can be used if streams are delivered from a different
	// URL than the manifest file.
	BaseUrl *string `locationName:"baseUrl" type:"string"`

	// Use Destination (Destination) to specify the S3 output location and the output
	// filename base. Destination accepts format identifiers. If you do not specify
	// the base filename in the URI, the service will use the filename of the input
	// file. If your job has multiple inputs, the service uses the filename of the
	// first input file.
	Destination *string `locationName:"destination" type:"string"`

	// Settings associated with the destination. Will vary based on the type of
	// destination
	DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"`

	// DRM settings.
	Encryption *DashIsoEncryptionSettings `locationName:"encryption" type:"structure"`

	// Length of fragments to generate (in seconds). Fragment length must be compatible
	// with GOP size and Framerate. Note that fragments will end on the next keyframe
	// after this number of seconds, so actual fragment length may be longer. When
	// Emit Single File is checked, the fragmentation is internal to a single output
	// file and it does not cause the creation of many output files as in other
	// output types. Minimum value is 1, enforced client-side by Validate.
	FragmentLength *int64 `locationName:"fragmentLength" min:"1" type:"integer"`

	// Supports HbbTV specification as indicated
	HbbtvCompliance *string `locationName:"hbbtvCompliance" type:"string" enum:"DashIsoHbbtvCompliance"`

	// Specify whether MediaConvert generates images for trick play. Keep the default
	// value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL)
	// to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME)
	// to generate tiled thumbnails and full-resolution images of single frames.
	// MediaConvert adds an entry in the .mpd manifest for each set of images that
	// you generate. A common application for these images is Roku trick mode. The
	// thumbnails and full-frame images that MediaConvert creates with this feature
	// are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md
	ImageBasedTrickPlay *string `locationName:"imageBasedTrickPlay" type:"string" enum:"DashIsoImageBasedTrickPlay"`

	// Minimum time of initially buffered media that is needed to ensure smooth
	// playout.
	MinBufferTime *int64 `locationName:"minBufferTime" type:"integer"`

	// Keep this setting at the default value of 0, unless you are troubleshooting
	// a problem with how devices play back the end of your video asset. If you
	// know that player devices are hanging on the final segment of your video because
	// the length of your final segment is too short, use this setting to specify
	// a minimum final segment length, in seconds. Choose a value that is greater
	// than or equal to 1 and less than your segment length. When you specify a
	// value for this setting, the encoder will combine any final segment that is
	// shorter than the length that you specify with the previous segment. For example,
	// your segment length is 3 seconds and your final segment is .5 seconds without
	// a minimum final segment length; when you set the minimum final segment length
	// to 1, your final segment is 3.5 seconds.
	MinFinalSegmentLength *float64 `locationName:"minFinalSegmentLength" type:"double"`

	// Specify whether your DASH profile is on-demand or main. When you choose Main
	// profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011
	// in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE),
	// the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd.
	// When you choose On-demand, you must also set the output group setting Segment
	// control (SegmentControl) to Single file (SINGLE_FILE).
	MpdProfile *string `locationName:"mpdProfile" type:"string" enum:"DashIsoMpdProfile"`

	// Use this setting only when your output video stream has B-frames, which causes
	// the initial presentation time stamp (PTS) to be offset from the initial decode
	// time stamp (DTS). Specify how MediaConvert handles PTS when writing time
	// stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS)
	// when you want MediaConvert to use the initial PTS as the first time stamp
	// in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore
	// the initial PTS in the video stream and instead write the initial time stamp
	// as zero in the manifest. For outputs that don't have B-frames, the time stamps
	// in your DASH manifests start at zero regardless of your choice here.
	PtsOffsetHandlingForBFrames *string `locationName:"ptsOffsetHandlingForBFrames" type:"string" enum:"DashIsoPtsOffsetHandlingForBFrames"`

	// When set to SINGLE_FILE, a single output file is generated, which is internally
	// segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES,
	// separate segment files will be created.
	SegmentControl *string `locationName:"segmentControl" type:"string" enum:"DashIsoSegmentControl"`

	// Length of mpd segments to create (in seconds). Note that segments will end
	// on the next keyframe after this number of seconds, so actual segment length
	// may be longer. When Emit Single File is checked, the segmentation is internal
	// to a single output file and it does not cause the creation of many output
	// files as in other output types. Minimum value is 1, enforced client-side
	// by Validate.
	SegmentLength *int64 `locationName:"segmentLength" min:"1" type:"integer"`

	// If you get an HTTP error in the 400 range when you play back your DASH output,
	// enable this setting and run your transcoding job again. When you enable this
	// setting, the service writes precise segment durations in the DASH manifest.
	// The segment duration information appears inside the SegmentTimeline element,
	// inside SegmentTemplate at the Representation level. When you don't enable
	// this setting, the service writes approximate segment durations in your DASH
	// manifest.
	WriteSegmentTimelineInRepresentation *string `locationName:"writeSegmentTimelineInRepresentation" type:"string" enum:"DashIsoWriteSegmentTimelineInRepresentation"`
}

// String returns the string representation
func (s DashIsoGroupSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DashIsoGroupSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DashIsoGroupSettings) Validate() error {
	// Client-side enforcement of the min:"1" constraints on FragmentLength and
	// SegmentLength, plus recursive validation of each additional manifest.
	invalidParams := request.ErrInvalidParams{Context: "DashIsoGroupSettings"}
	if s.FragmentLength != nil && *s.FragmentLength < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FragmentLength", 1))
	}
	if s.SegmentLength != nil && *s.SegmentLength < 1 {
		invalidParams.Add(request.NewErrParamMinValue("SegmentLength", 1))
	}
	if s.AdditionalManifests != nil {
		for i, v := range s.AdditionalManifests {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(request.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAdditionalManifests sets the AdditionalManifests field's value.
func (s *DashIsoGroupSettings) SetAdditionalManifests(v []*DashAdditionalManifest) *DashIsoGroupSettings {
	s.AdditionalManifests = v
	return s
}

// SetAudioChannelConfigSchemeIdUri sets the AudioChannelConfigSchemeIdUri field's value.
func (s *DashIsoGroupSettings) SetAudioChannelConfigSchemeIdUri(v string) *DashIsoGroupSettings {
	s.AudioChannelConfigSchemeIdUri = &v
	return s
}

// SetBaseUrl sets the BaseUrl field's value.
func (s *DashIsoGroupSettings) SetBaseUrl(v string) *DashIsoGroupSettings {
	s.BaseUrl = &v
	return s
}

// SetDestination sets the Destination field's value.
func (s *DashIsoGroupSettings) SetDestination(v string) *DashIsoGroupSettings {
	s.Destination = &v
	return s
}

// SetDestinationSettings sets the DestinationSettings field's value.
func (s *DashIsoGroupSettings) SetDestinationSettings(v *DestinationSettings) *DashIsoGroupSettings {
	s.DestinationSettings = v
	return s
}

// SetEncryption sets the Encryption field's value.
func (s *DashIsoGroupSettings) SetEncryption(v *DashIsoEncryptionSettings) *DashIsoGroupSettings {
	s.Encryption = v
	return s
}

// SetFragmentLength sets the FragmentLength field's value.
func (s *DashIsoGroupSettings) SetFragmentLength(v int64) *DashIsoGroupSettings {
	s.FragmentLength = &v
	return s
}

// SetHbbtvCompliance sets the HbbtvCompliance field's value.
func (s *DashIsoGroupSettings) SetHbbtvCompliance(v string) *DashIsoGroupSettings {
	s.HbbtvCompliance = &v
	return s
}

// SetImageBasedTrickPlay sets the ImageBasedTrickPlay field's value.
func (s *DashIsoGroupSettings) SetImageBasedTrickPlay(v string) *DashIsoGroupSettings {
	s.ImageBasedTrickPlay = &v
	return s
}

// SetMinBufferTime sets the MinBufferTime field's value.
func (s *DashIsoGroupSettings) SetMinBufferTime(v int64) *DashIsoGroupSettings {
	s.MinBufferTime = &v
	return s
}

// SetMinFinalSegmentLength sets the MinFinalSegmentLength field's value.
func (s *DashIsoGroupSettings) SetMinFinalSegmentLength(v float64) *DashIsoGroupSettings {
	s.MinFinalSegmentLength = &v
	return s
}

// SetMpdProfile sets the MpdProfile field's value.
func (s *DashIsoGroupSettings) SetMpdProfile(v string) *DashIsoGroupSettings {
	s.MpdProfile = &v
	return s
}

// SetPtsOffsetHandlingForBFrames sets the PtsOffsetHandlingForBFrames field's value.
func (s *DashIsoGroupSettings) SetPtsOffsetHandlingForBFrames(v string) *DashIsoGroupSettings {
	s.PtsOffsetHandlingForBFrames = &v
	return s
}

// SetSegmentControl sets the SegmentControl field's value.
func (s *DashIsoGroupSettings) SetSegmentControl(v string) *DashIsoGroupSettings {
	s.SegmentControl = &v
	return s
}

// SetSegmentLength sets the SegmentLength field's value.
func (s *DashIsoGroupSettings) SetSegmentLength(v int64) *DashIsoGroupSettings {
	s.SegmentLength = &v
	return s
}

// SetWriteSegmentTimelineInRepresentation sets the WriteSegmentTimelineInRepresentation field's value.
func (s *DashIsoGroupSettings) SetWriteSegmentTimelineInRepresentation(v string) *DashIsoGroupSettings {
	s.WriteSegmentTimelineInRepresentation = &v
	return s
}

// Settings for deinterlacer
type Deinterlacer struct {
	_ struct{} `type:"structure"`

	// Only applies when you set Deinterlacer (DeinterlaceMode) to Deinterlace (DEINTERLACE)
	// or Adaptive (ADAPTIVE). Motion adaptive interpolate (INTERPOLATE) produces
	// sharper pictures, while blend (BLEND) produces smoother motion. Use (INTERPOLATE_TICKER)
	// OR (BLEND_TICKER) if your source file includes a ticker, such as a scrolling
	// headline at the bottom of the frame.
	Algorithm *string `locationName:"algorithm" type:"string" enum:"DeinterlaceAlgorithm"`

	// - When set to NORMAL (default), the deinterlacer does not convert frames
	// that are tagged in metadata as progressive. It will only convert those that
	// are tagged as some other type. - When set to FORCE_ALL_FRAMES, the deinterlacer
	// converts every frame to progressive - even those that are already tagged
	// as progressive. Turn Force mode on only if there is a good chance that the
	// metadata has tagged frames as progressive when they are not progressive.
	// Do not turn on otherwise; processing frames that are already progressive
	// into progressive will probably result in lower quality video.
	Control *string `locationName:"control" type:"string" enum:"DeinterlacerControl"`

	// Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing.
	// Default is Deinterlace. - Deinterlace converts interlaced to progressive.
	// - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p.
	// - Adaptive auto-detects and converts to progressive.
	Mode *string `locationName:"mode" type:"string" enum:"DeinterlacerMode"`
}

// String returns the string representation
func (s Deinterlacer) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Deinterlacer) GoString() string {
	return s.String()
}

// SetAlgorithm sets the Algorithm field's value.
func (s *Deinterlacer) SetAlgorithm(v string) *Deinterlacer {
	s.Algorithm = &v
	return s
}

// SetControl sets the Control field's value.
func (s *Deinterlacer) SetControl(v string) *Deinterlacer {
	s.Control = &v
	return s
}

// SetMode sets the Mode field's value.
func (s *Deinterlacer) SetMode(v string) *Deinterlacer {
	s.Mode = &v
	return s
}

// Delete a job template by sending a request with the job template name
type DeleteJobTemplateInput struct {
	_ struct{} `type:"structure"`

	// The name of the job template to be deleted.
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`
}

// String returns the string representation
func (s DeleteJobTemplateInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteJobTemplateInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteJobTemplateInput) Validate() error {
	// Name is a URI path element, so it must be present and non-empty.
	invalidParams := request.ErrInvalidParams{Context: "DeleteJobTemplateInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetName sets the Name field's value.
func (s *DeleteJobTemplateInput) SetName(v string) *DeleteJobTemplateInput {
	s.Name = &v
	return s
}

// Delete job template requests will return an OK message or error message with
// an empty body.
type DeleteJobTemplateOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s DeleteJobTemplateOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeleteJobTemplateOutput) GoString() string {
	return s.String()
}

// Delete a preset by sending a request with the preset name
type DeletePresetInput struct {
	_ struct{} `type:"structure"`

	// The name of the preset to be deleted.
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`
}

// String returns the string representation
func (s DeletePresetInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeletePresetInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DeletePresetInput) Validate() error {
	// Name is a URI path element, so it must be present and non-empty.
	invalidParams := request.ErrInvalidParams{Context: "DeletePresetInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetName sets the Name field's value.
func (s *DeletePresetInput) SetName(v string) *DeletePresetInput {
	s.Name = &v
	return s
}

// Delete preset requests will return an OK message or error message with an
// empty body.
type DeletePresetOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s DeletePresetOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeletePresetOutput) GoString() string {
	return s.String()
}

// Delete a queue by sending a request with the queue name. You can't delete
// a queue with an active pricing plan or one that has unprocessed jobs in it.
type DeleteQueueInput struct {
	_ struct{} `type:"structure"`

	// The name of the queue that you want to delete.
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`
}

// String returns the string representation.
func (s DeleteQueueInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s DeleteQueueInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Name is required and, when present, must be at least one character long.
func (s *DeleteQueueInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DeleteQueueInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetName sets the Name field's value.
func (s *DeleteQueueInput) SetName(v string) *DeleteQueueInput {
	s.Name = &v
	return s
}

// Delete queue requests return an OK message or error message with an empty
// body.
type DeleteQueueOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation.
func (s DeleteQueueOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s DeleteQueueOutput) GoString() string {
	return s.String()
}

// Send a request with an empty body to the regional API endpoint to get your
// account API endpoint.
type DescribeEndpointsInput struct {
	_ struct{} `type:"structure"`

	// Optional. Max number of endpoints, up to twenty, that will be returned at
	// one time.
	MaxResults *int64 `locationName:"maxResults" type:"integer"`

	// Optional field, defaults to DEFAULT. Specify DEFAULT for this operation to
	// return your endpoints if any exist, or to create an endpoint for you and
	// return it if one doesn't already exist. Specify GET_ONLY to return your endpoints
	// if any exist, or an empty list if none exist.
	Mode *string `locationName:"mode" type:"string" enum:"DescribeEndpointsMode"`

	// Use this string, provided with the response to a previous request, to request
	// the next batch of endpoints.
	NextToken *string `locationName:"nextToken" type:"string"`
}

// String returns the string representation.
func (s DescribeEndpointsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s DescribeEndpointsInput) GoString() string {
	return s.String()
}

// SetMaxResults sets the MaxResults field's value.
func (s *DescribeEndpointsInput) SetMaxResults(v int64) *DescribeEndpointsInput {
	s.MaxResults = &v
	return s
}

// SetMode sets the Mode field's value.
func (s *DescribeEndpointsInput) SetMode(v string) *DescribeEndpointsInput {
	s.Mode = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *DescribeEndpointsInput) SetNextToken(v string) *DescribeEndpointsInput {
	s.NextToken = &v
	return s
}

// Successful describe endpoints requests will return your account API endpoint.
type DescribeEndpointsOutput struct {
	_ struct{} `type:"structure"`

	// List of endpoints
	Endpoints []*Endpoint `locationName:"endpoints" type:"list"`

	// Use this string to request the next batch of endpoints.
	NextToken *string `locationName:"nextToken" type:"string"`
}

// String returns the string representation.
func (s DescribeEndpointsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s DescribeEndpointsOutput) GoString() string {
	return s.String()
}

// SetEndpoints sets the Endpoints field's value.
func (s *DescribeEndpointsOutput) SetEndpoints(v []*Endpoint) *DescribeEndpointsOutput {
	s.Endpoints = v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *DescribeEndpointsOutput) SetNextToken(v string) *DescribeEndpointsOutput {
	s.NextToken = &v
	return s
}

// Settings associated with the destination. Will vary based on the type of
// destination.
type DestinationSettings struct {
	_ struct{} `type:"structure"`

	// Settings associated with S3 destination.
	S3Settings *S3DestinationSettings `locationName:"s3Settings" type:"structure"`
}

// String returns the string representation.
func (s DestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s DestinationSettings) GoString() string {
	return s.String()
}

// SetS3Settings sets the S3Settings field's value.
func (s *DestinationSettings) SetS3Settings(v *S3DestinationSettings) *DestinationSettings {
	s.S3Settings = v
	return s
}

// Removes an association between the Amazon Resource Name (ARN) of an AWS Certificate
// Manager (ACM) certificate and an AWS Elemental MediaConvert resource.
type DisassociateCertificateInput struct {
	_ struct{} `type:"structure"`

	// The ARN of the ACM certificate that you want to disassociate from your MediaConvert
	// resource.
	//
	// Arn is a required field
	Arn *string `location:"uri" locationName:"arn" type:"string" required:"true"`
}

// String returns the string representation.
func (s DisassociateCertificateInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s DisassociateCertificateInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Arn is required and, when present, must be at least one character long.
func (s *DisassociateCertificateInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DisassociateCertificateInput"}
	if s.Arn == nil {
		invalidParams.Add(request.NewErrParamRequired("Arn"))
	}
	if s.Arn != nil && len(*s.Arn) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Arn", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetArn sets the Arn field's value.
func (s *DisassociateCertificateInput) SetArn(v string) *DisassociateCertificateInput {
	s.Arn = &v
	return s
}

// Successful disassociation of Certificate Manager Amazon Resource Name (ARN)
// with MediaConvert returns an OK message.
type DisassociateCertificateOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation.
func (s DisassociateCertificateOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s DisassociateCertificateOutput) GoString() string {
	return s.String()
}

// With AWS Elemental MediaConvert, you can create profile 5 Dolby Vision outputs
// from MXF and IMF sources that contain mastering information as frame-interleaved
// Dolby Vision metadata.
type DolbyVision struct {
	_ struct{} `type:"structure"`

	// Use these settings when you set DolbyVisionLevel6Mode to SPECIFY to override
	// the MaxCLL and MaxFALL values in your input with new values.
	L6Metadata *DolbyVisionLevel6Metadata `locationName:"l6Metadata" type:"structure"`

	// Use Dolby Vision Mode to choose how the service will handle Dolby Vision
	// MaxCLL and MaxFALL properties.
	L6Mode *string `locationName:"l6Mode" type:"string" enum:"DolbyVisionLevel6Mode"`

	// In the current MediaConvert implementation, the Dolby Vision profile is always
	// 5 (PROFILE_5). Therefore, all of your inputs must contain Dolby Vision frame
	// interleaved data.
	Profile *string `locationName:"profile" type:"string" enum:"DolbyVisionProfile"`
}

// String returns the string representation.
func (s DolbyVision) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s DolbyVision) GoString() string {
	return s.String()
}

// SetL6Metadata sets the L6Metadata field's value.
func (s *DolbyVision) SetL6Metadata(v *DolbyVisionLevel6Metadata) *DolbyVision {
	s.L6Metadata = v
	return s
}

// SetL6Mode sets the L6Mode field's value.
func (s *DolbyVision) SetL6Mode(v string) *DolbyVision {
	s.L6Mode = &v
	return s
}

// SetProfile sets the Profile field's value.
func (s *DolbyVision) SetProfile(v string) *DolbyVision {
	s.Profile = &v
	return s
}

// Use these settings when you set DolbyVisionLevel6Mode to SPECIFY to override
// the MaxCLL and MaxFALL values in your input with new values.
type DolbyVisionLevel6Metadata struct {
	_ struct{} `type:"structure"`

	// Maximum Content Light Level. Static HDR metadata that corresponds to the
	// brightest pixel in the entire stream. Measured in nits.
	MaxCll *int64 `locationName:"maxCll" type:"integer"`

	// Maximum Frame-Average Light Level. Static HDR metadata that corresponds to
	// the highest frame-average brightness in the entire stream. Measured in nits.
	MaxFall *int64 `locationName:"maxFall" type:"integer"`
}

// String returns the string representation.
func (s DolbyVisionLevel6Metadata) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s DolbyVisionLevel6Metadata) GoString() string {
	return s.String()
}

// SetMaxCll sets the MaxCll field's value.
func (s *DolbyVisionLevel6Metadata) SetMaxCll(v int64) *DolbyVisionLevel6Metadata {
	s.MaxCll = &v
	return s
}

// SetMaxFall sets the MaxFall field's value.
func (s *DolbyVisionLevel6Metadata) SetMaxFall(v int64) *DolbyVisionLevel6Metadata {
	s.MaxFall = &v
	return s
}

// Use these settings to insert a DVB Network Information Table (NIT) in the
// transport stream of this output. When you work directly in your JSON job
// specification, include this object only when your job has a transport stream
// output and the container settings contain the object M2tsSettings.
type DvbNitSettings struct {
	_ struct{} `type:"structure"`

	// The numeric value placed in the Network Information Table (NIT).
	NetworkId *int64 `locationName:"networkId" type:"integer"`

	// The network name text placed in the network_name_descriptor inside the Network
	// Information Table. Maximum length is 256 characters.
	NetworkName *string `locationName:"networkName" min:"1" type:"string"`

	// The number of milliseconds between instances of this table in the output
	// transport stream.
	NitInterval *int64 `locationName:"nitInterval" min:"25" type:"integer"`
}

// String returns the string representation.
func (s DvbNitSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s DvbNitSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// The minimums enforced here mirror the min constraints in the field tags.
func (s *DvbNitSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DvbNitSettings"}
	if s.NetworkName != nil && len(*s.NetworkName) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("NetworkName", 1))
	}
	if s.NitInterval != nil && *s.NitInterval < 25 {
		invalidParams.Add(request.NewErrParamMinValue("NitInterval", 25))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetNetworkId sets the NetworkId field's value.
func (s *DvbNitSettings) SetNetworkId(v int64) *DvbNitSettings {
	s.NetworkId = &v
	return s
}

// SetNetworkName sets the NetworkName field's value.
func (s *DvbNitSettings) SetNetworkName(v string) *DvbNitSettings {
	s.NetworkName = &v
	return s
}

// SetNitInterval sets the NitInterval field's value.
func (s *DvbNitSettings) SetNitInterval(v int64) *DvbNitSettings {
	s.NitInterval = &v
	return s
}

// Use these settings to insert a DVB Service Description Table (SDT) in the
// transport stream of this output. When you work directly in your JSON job
// specification, include this object only when your job has a transport stream
// output and the container settings contain the object M2tsSettings.
type DvbSdtSettings struct {
	_ struct{} `type:"structure"`

	// Selects method of inserting SDT information into output stream. "Follow input
	// SDT" copies SDT information from input stream to output stream. "Follow input
	// SDT if present" copies SDT information from input stream to output stream
	// if SDT information is present in the input, otherwise it will fall back on
	// the user-defined values. Enter "SDT Manually" means user will enter the SDT
	// information. "No SDT" means output stream will not contain SDT information.
	OutputSdt *string `locationName:"outputSdt" type:"string" enum:"OutputSdt"`

	// The number of milliseconds between instances of this table in the output
	// transport stream.
	SdtInterval *int64 `locationName:"sdtInterval" min:"25" type:"integer"`

	// The service name placed in the service_descriptor in the Service Description
	// Table. Maximum length is 256 characters.
	ServiceName *string `locationName:"serviceName" min:"1" type:"string"`

	// The service provider name placed in the service_descriptor in the Service
	// Description Table. Maximum length is 256 characters.
	ServiceProviderName *string `locationName:"serviceProviderName" min:"1" type:"string"`
}

// String returns the string representation.
func (s DvbSdtSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s DvbSdtSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// The minimums enforced here mirror the min constraints in the field tags.
func (s *DvbSdtSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DvbSdtSettings"}
	if s.SdtInterval != nil && *s.SdtInterval < 25 {
		invalidParams.Add(request.NewErrParamMinValue("SdtInterval", 25))
	}
	if s.ServiceName != nil && len(*s.ServiceName) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ServiceName", 1))
	}
	if s.ServiceProviderName != nil && len(*s.ServiceProviderName) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ServiceProviderName", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetOutputSdt sets the OutputSdt field's value.
func (s *DvbSdtSettings) SetOutputSdt(v string) *DvbSdtSettings {
	s.OutputSdt = &v
	return s
}

// SetSdtInterval sets the SdtInterval field's value.
func (s *DvbSdtSettings) SetSdtInterval(v int64) *DvbSdtSettings {
	s.SdtInterval = &v
	return s
}

// SetServiceName sets the ServiceName field's value.
func (s *DvbSdtSettings) SetServiceName(v string) *DvbSdtSettings {
	s.ServiceName = &v
	return s
}

// SetServiceProviderName sets the ServiceProviderName field's value.
func (s *DvbSdtSettings) SetServiceProviderName(v string) *DvbSdtSettings {
	s.ServiceProviderName = &v
	return s
}

// Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same
// output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set destinationType to DVB_SUB.
type DvbSubDestinationSettings struct {
	_ struct{} `type:"structure"`

	// If no explicit x_position or y_position is provided, setting alignment to
	// centered will place the captions at the bottom center of the output. Similarly,
	// setting a left alignment will align captions to the bottom left of the output.
	// If x and y positions are given in conjunction with the alignment parameter,
	// the font will be justified (either left or centered) relative to those coordinates.
	// This option is not valid for source captions that are STL, 608/embedded or
	// teletext. These source settings are already pre-defined by the caption stream.
	// All burn-in and DVB-Sub font settings must match.
	Alignment *string `locationName:"alignment" type:"string" enum:"DvbSubtitleAlignment"`

	// Specifies the color of the rectangle behind the captions. All burn-in and
	// DVB-Sub font settings must match.
	BackgroundColor *string `locationName:"backgroundColor" type:"string" enum:"DvbSubtitleBackgroundColor"`

	// Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent.
	// Leaving this parameter blank is equivalent to setting it to 0 (transparent).
	// All burn-in and DVB-Sub font settings must match.
	BackgroundOpacity *int64 `locationName:"backgroundOpacity" type:"integer"`

	// Specify how MediaConvert handles the display definition segment (DDS). Keep
	// the default, None (NONE), to exclude the DDS from this set of captions. Choose
	// No display window (NO_DISPLAY_WINDOW) to have MediaConvert include the DDS
	// but not include display window data. In this case, MediaConvert writes that
	// information to the page composition segment (PCS) instead. Choose Specify
	// (SPECIFIED) to have MediaConvert set up the display window based on the values
	// that you specify in related job settings. For video resolutions that are
	// 576 pixels or smaller in height, MediaConvert doesn't include the DDS, regardless
	// of the value you choose for DDS handling (ddsHandling). In this case, it
	// doesn't write the display window data to the PCS either. Related settings:
	// Use the settings DDS x-coordinate (ddsXCoordinate) and DDS y-coordinate (ddsYCoordinate)
	// to specify the offset between the top left corner of the display window and
	// the top left corner of the video frame. All burn-in and DVB-Sub font settings
	// must match.
	DdsHandling *string `locationName:"ddsHandling" type:"string" enum:"DvbddsHandling"`

	// Use this setting, along with DDS y-coordinate (ddsYCoordinate), to specify
	// the upper left corner of the display definition segment (DDS) display window.
	// With this setting, specify the distance, in pixels, between the left side
	// of the frame and the left side of the DDS display window. Keep the default
	// value, 0, to have MediaConvert automatically choose this offset. Related
	// setting: When you use this setting, you must set DDS handling (ddsHandling)
	// to a value other than None (NONE). MediaConvert uses these values to determine
	// whether to write page position data to the DDS or to the page composition
	// segment (PCS). All burn-in and DVB-Sub font settings must match.
	DdsXCoordinate *int64 `locationName:"ddsXCoordinate" type:"integer"`

	// Use this setting, along with DDS x-coordinate (ddsXCoordinate), to specify
	// the upper left corner of the display definition segment (DDS) display window.
	// With this setting, specify the distance, in pixels, between the top of the
	// frame and the top of the DDS display window. Keep the default value, 0, to
	// have MediaConvert automatically choose this offset. Related setting: When
	// you use this setting, you must set DDS handling (ddsHandling) to a value
	// other than None (NONE). MediaConvert uses these values to determine whether
	// to write page position data to the DDS or to the page composition segment
	// (PCS). All burn-in and DVB-Sub font settings must match.
	DdsYCoordinate *int64 `locationName:"ddsYCoordinate" type:"integer"`

	// Specifies the color of the burned-in captions. This option is not valid for
	// source captions that are STL, 608/embedded or teletext. These source settings
	// are already pre-defined by the caption stream. All burn-in and DVB-Sub font
	// settings must match.
	FontColor *string `locationName:"fontColor" type:"string" enum:"DvbSubtitleFontColor"`

	// Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent.
	// All burn-in and DVB-Sub font settings must match.
	FontOpacity *int64 `locationName:"fontOpacity" type:"integer"`

	// Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and
	// DVB-Sub font settings must match.
	FontResolution *int64 `locationName:"fontResolution" min:"96" type:"integer"`

	// Provide the font script, using an ISO 15924 script code, if the LanguageCode
	// is not sufficient for determining the script type. Where LanguageCode or
	// CustomLanguageCode is sufficient, use "AUTOMATIC" or leave unset. This is
	// used to help determine the appropriate font for rendering DVB-Sub captions.
	FontScript *string `locationName:"fontScript" type:"string" enum:"FontScript"`

	// A positive integer indicates the exact font size in points. Set to 0 for
	// automatic font size selection. All burn-in and DVB-Sub font settings must
	// match.
	FontSize *int64 `locationName:"fontSize" type:"integer"`

	// Specify the height, in pixels, of this set of DVB-Sub captions. The default
	// value is 576 pixels. Related setting: When you use this setting, you must
	// set DDS handling (ddsHandling) to a value other than None (NONE). All burn-in
	// and DVB-Sub font settings must match.
	Height *int64 `locationName:"height" min:"1" type:"integer"`

	// Specifies font outline color. This option is not valid for source captions
	// that are either 608/embedded or teletext. These source settings are already
	// pre-defined by the caption stream. All burn-in and DVB-Sub font settings
	// must match.
	OutlineColor *string `locationName:"outlineColor" type:"string" enum:"DvbSubtitleOutlineColor"`

	// Specifies font outline size in pixels. This option is not valid for source
	// captions that are either 608/embedded or teletext. These source settings
	// are already pre-defined by the caption stream. All burn-in and DVB-Sub font
	// settings must match.
	OutlineSize *int64 `locationName:"outlineSize" type:"integer"`

	// Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub
	// font settings must match.
	ShadowColor *string `locationName:"shadowColor" type:"string" enum:"DvbSubtitleShadowColor"`

	// Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving
	// this parameter blank is equivalent to setting it to 0 (transparent). All
	// burn-in and DVB-Sub font settings must match.
	ShadowOpacity *int64 `locationName:"shadowOpacity" type:"integer"`

	// Specifies the horizontal offset of the shadow relative to the captions in
	// pixels. A value of -2 would result in a shadow offset 2 pixels to the left.
	// All burn-in and DVB-Sub font settings must match.
	ShadowXOffset *int64 `locationName:"shadowXOffset" type:"integer"`

	// Specifies the vertical offset of the shadow relative to the captions in pixels.
	// A value of -2 would result in a shadow offset 2 pixels above the text. All
	// burn-in and DVB-Sub font settings must match.
	ShadowYOffset *int64 `locationName:"shadowYOffset" type:"integer"`

	// Specify whether your DVB subtitles are standard or for hearing impaired.
	// Choose hearing impaired if your subtitles include audio descriptions and
	// dialogue. Choose standard if your subtitles include only dialogue.
	SubtitlingType *string `locationName:"subtitlingType" type:"string" enum:"DvbSubtitlingType"`

	// Only applies to jobs with input captions in Teletext or STL formats. Specify
	// whether the spacing between letters in your captions is set by the captions
	// grid or varies depending on letter width. Choose fixed grid to conform to
	// the spacing specified in the captions file more accurately. Choose proportional
	// to make the text easier to read if the captions are closed caption.
	TeletextSpacing *string `locationName:"teletextSpacing" type:"string" enum:"DvbSubtitleTeletextSpacing"`

	// Specify the width, in pixels, of this set of DVB-Sub captions. The default
	// value is 720 pixels. Related setting: When you use this setting, you must
	// set DDS handling (ddsHandling) to a value other than None (NONE). All burn-in
	// and DVB-Sub font settings must match.
	Width *int64 `locationName:"width" min:"1" type:"integer"`

	// Specifies the horizontal position of the caption relative to the left side
	// of the output in pixels. A value of 10 would result in the captions starting
	// 10 pixels from the left of the output. If no explicit x_position is provided,
	// the horizontal caption position will be determined by the alignment parameter.
	// This option is not valid for source captions that are STL, 608/embedded or
	// teletext. These source settings are already pre-defined by the caption stream.
	// All burn-in and DVB-Sub font settings must match.
	XPosition *int64 `locationName:"xPosition" type:"integer"`

	// Specifies the vertical position of the caption relative to the top of the
	// output in pixels. A value of 10 would result in the captions starting 10
	// pixels from the top of the output. If no explicit y_position is provided,
	// the caption will be positioned towards the bottom of the output. This option
	// is not valid for source captions that are STL, 608/embedded or teletext.
	// These source settings are already pre-defined by the caption stream. All
	// burn-in and DVB-Sub font settings must match.
	YPosition *int64 `locationName:"yPosition" type:"integer"`
}

// String returns the string representation.
func (s DvbSubDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s DvbSubDestinationSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DvbSubDestinationSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DvbSubDestinationSettings"} if s.FontResolution != nil && *s.FontResolution < 96 { invalidParams.Add(request.NewErrParamMinValue("FontResolution", 96)) } if s.Height != nil && *s.Height < 1 { invalidParams.Add(request.NewErrParamMinValue("Height", 1)) } if s.ShadowXOffset != nil && *s.ShadowXOffset < -2.147483648e+09 { invalidParams.Add(request.NewErrParamMinValue("ShadowXOffset", -2.147483648e+09)) } if s.ShadowYOffset != nil && *s.ShadowYOffset < -2.147483648e+09 { invalidParams.Add(request.NewErrParamMinValue("ShadowYOffset", -2.147483648e+09)) } if s.Width != nil && *s.Width < 1 { invalidParams.Add(request.NewErrParamMinValue("Width", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAlignment sets the Alignment field's value. func (s *DvbSubDestinationSettings) SetAlignment(v string) *DvbSubDestinationSettings { s.Alignment = &v return s } // SetBackgroundColor sets the BackgroundColor field's value. func (s *DvbSubDestinationSettings) SetBackgroundColor(v string) *DvbSubDestinationSettings { s.BackgroundColor = &v return s } // SetBackgroundOpacity sets the BackgroundOpacity field's value. func (s *DvbSubDestinationSettings) SetBackgroundOpacity(v int64) *DvbSubDestinationSettings { s.BackgroundOpacity = &v return s } // SetDdsHandling sets the DdsHandling field's value. func (s *DvbSubDestinationSettings) SetDdsHandling(v string) *DvbSubDestinationSettings { s.DdsHandling = &v return s } // SetDdsXCoordinate sets the DdsXCoordinate field's value. func (s *DvbSubDestinationSettings) SetDdsXCoordinate(v int64) *DvbSubDestinationSettings { s.DdsXCoordinate = &v return s } // SetDdsYCoordinate sets the DdsYCoordinate field's value. func (s *DvbSubDestinationSettings) SetDdsYCoordinate(v int64) *DvbSubDestinationSettings { s.DdsYCoordinate = &v return s } // SetFontColor sets the FontColor field's value. 
func (s *DvbSubDestinationSettings) SetFontColor(v string) *DvbSubDestinationSettings { s.FontColor = &v return s } // SetFontOpacity sets the FontOpacity field's value. func (s *DvbSubDestinationSettings) SetFontOpacity(v int64) *DvbSubDestinationSettings { s.FontOpacity = &v return s } // SetFontResolution sets the FontResolution field's value. func (s *DvbSubDestinationSettings) SetFontResolution(v int64) *DvbSubDestinationSettings { s.FontResolution = &v return s } // SetFontScript sets the FontScript field's value. func (s *DvbSubDestinationSettings) SetFontScript(v string) *DvbSubDestinationSettings { s.FontScript = &v return s } // SetFontSize sets the FontSize field's value. func (s *DvbSubDestinationSettings) SetFontSize(v int64) *DvbSubDestinationSettings { s.FontSize = &v return s } // SetHeight sets the Height field's value. func (s *DvbSubDestinationSettings) SetHeight(v int64) *DvbSubDestinationSettings { s.Height = &v return s } // SetOutlineColor sets the OutlineColor field's value. func (s *DvbSubDestinationSettings) SetOutlineColor(v string) *DvbSubDestinationSettings { s.OutlineColor = &v return s } // SetOutlineSize sets the OutlineSize field's value. func (s *DvbSubDestinationSettings) SetOutlineSize(v int64) *DvbSubDestinationSettings { s.OutlineSize = &v return s } // SetShadowColor sets the ShadowColor field's value. func (s *DvbSubDestinationSettings) SetShadowColor(v string) *DvbSubDestinationSettings { s.ShadowColor = &v return s } // SetShadowOpacity sets the ShadowOpacity field's value. func (s *DvbSubDestinationSettings) SetShadowOpacity(v int64) *DvbSubDestinationSettings { s.ShadowOpacity = &v return s } // SetShadowXOffset sets the ShadowXOffset field's value. func (s *DvbSubDestinationSettings) SetShadowXOffset(v int64) *DvbSubDestinationSettings { s.ShadowXOffset = &v return s } // SetShadowYOffset sets the ShadowYOffset field's value. 
func (s *DvbSubDestinationSettings) SetShadowYOffset(v int64) *DvbSubDestinationSettings {
	s.ShadowYOffset = &v
	return s
}

// SetSubtitlingType sets the SubtitlingType field's value.
func (s *DvbSubDestinationSettings) SetSubtitlingType(v string) *DvbSubDestinationSettings {
	s.SubtitlingType = &v
	return s
}

// SetTeletextSpacing sets the TeletextSpacing field's value.
func (s *DvbSubDestinationSettings) SetTeletextSpacing(v string) *DvbSubDestinationSettings {
	s.TeletextSpacing = &v
	return s
}

// SetWidth sets the Width field's value.
func (s *DvbSubDestinationSettings) SetWidth(v int64) *DvbSubDestinationSettings {
	s.Width = &v
	return s
}

// SetXPosition sets the XPosition field's value.
func (s *DvbSubDestinationSettings) SetXPosition(v int64) *DvbSubDestinationSettings {
	s.XPosition = &v
	return s
}

// SetYPosition sets the YPosition field's value.
func (s *DvbSubDestinationSettings) SetYPosition(v int64) *DvbSubDestinationSettings {
	s.YPosition = &v
	return s
}

// DVB Sub Source Settings.
type DvbSubSourceSettings struct {
	_ struct{} `type:"structure"`

	// When using DVB-Sub with Burn-In or SMPTE-TT, use this PID for the source
	// content. Unused for DVB-Sub passthrough. All DVB-Sub content is passed through,
	// regardless of selectors.
	Pid *int64 `locationName:"pid" min:"1" type:"integer"`
}

// String returns the string representation.
func (s DvbSubSourceSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s DvbSubSourceSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Pid, when set, must be at least 1, matching the min constraint in its tag.
func (s *DvbSubSourceSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DvbSubSourceSettings"}
	if s.Pid != nil && *s.Pid < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Pid", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetPid sets the Pid field's value.
func (s *DvbSubSourceSettings) SetPid(v int64) *DvbSubSourceSettings {
	s.Pid = &v
	return s
}

// Use these settings to insert a DVB Time and Date Table (TDT) in the transport
// stream of this output. When you work directly in your JSON job specification,
// include this object only when your job has a transport stream output and
// the container settings contain the object M2tsSettings.
type DvbTdtSettings struct {
	_ struct{} `type:"structure"`

	// The number of milliseconds between instances of this table in the output
	// transport stream.
	TdtInterval *int64 `locationName:"tdtInterval" min:"1000" type:"integer"`
}

// String returns the string representation
func (s DvbTdtSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DvbTdtSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the minimum constraint declared in the TdtInterval field tag.
func (s *DvbTdtSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DvbTdtSettings"}
	if s.TdtInterval != nil && *s.TdtInterval < 1000 {
		invalidParams.Add(request.NewErrParamMinValue("TdtInterval", 1000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetTdtInterval sets the TdtInterval field's value.
func (s *DvbTdtSettings) SetTdtInterval(v int64) *DvbTdtSettings {
	s.TdtInterval = &v
	return s
}

// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value EAC3_ATMOS.
type Eac3AtmosSettings struct {
	_ struct{} `type:"structure"`

	// Specify the average bitrate for this output in bits per second. Valid values:
	// 384k, 448k, 576k, 640k, 768k, 1024k Default value: 448k Note that MediaConvert
	// supports 384k only with channel-based immersive (CBI) 7.1.4 and 5.1.4 inputs.
	// For CBI 9.1.6 and other input types, MediaConvert automatically increases
	// your output bitrate to 448k.
	Bitrate *int64 `locationName:"bitrate" min:"384000" type:"integer"`

	// Specify the bitstream mode for the E-AC-3 stream that the encoder emits.
	// For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex
	// E).
	BitstreamMode *string `locationName:"bitstreamMode" type:"string" enum:"Eac3AtmosBitstreamMode"`

	// The coding mode for Dolby Digital Plus JOC (Atmos).
	CodingMode *string `locationName:"codingMode" type:"string" enum:"Eac3AtmosCodingMode"`

	// Enable Dolby Dialogue Intelligence to adjust loudness based on dialogue analysis.
	DialogueIntelligence *string `locationName:"dialogueIntelligence" type:"string" enum:"Eac3AtmosDialogueIntelligence"`

	// Specify whether MediaConvert should use any downmix metadata from your input
	// file. Keep the default value, Custom (SPECIFIED) to provide downmix values
	// in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use
	// the metadata from your input. Related settings--Use these settings to specify
	// your downmix values: Left only/Right only surround (LoRoSurroundMixLevel),
	// Left total/Right total surround (LtRtSurroundMixLevel), Left total/Right
	// total center (LtRtCenterMixLevel), Left only/Right only center (LoRoCenterMixLevel),
	// and Stereo downmix (StereoDownmix). When you keep Custom (SPECIFIED) for
	// Downmix control (DownmixControl) and you don't specify values for the related
	// settings, MediaConvert uses default values for those settings.
	DownmixControl *string `locationName:"downmixControl" type:"string" enum:"Eac3AtmosDownmixControl"`

	// Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses
	// when encoding the metadata in the Dolby stream for the line operating mode.
	// Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting:
	// To have MediaConvert use the value you specify here, keep the default value,
	// Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl).
	// Otherwise, MediaConvert ignores Dynamic range compression line (DynamicRangeCompressionLine).
	// For information about the Dolby DRC operating modes and profiles, see the
	// Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
	DynamicRangeCompressionLine *string `locationName:"dynamicRangeCompressionLine" type:"string" enum:"Eac3AtmosDynamicRangeCompressionLine"`

	// Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses
	// when encoding the metadata in the Dolby stream for the RF operating mode.
	// Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting:
	// To have MediaConvert use the value you specify here, keep the default value,
	// Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl).
	// Otherwise, MediaConvert ignores Dynamic range compression RF (DynamicRangeCompressionRf).
	// For information about the Dolby DRC operating modes and profiles, see the
	// Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
	DynamicRangeCompressionRf *string `locationName:"dynamicRangeCompressionRf" type:"string" enum:"Eac3AtmosDynamicRangeCompressionRf"`

	// Specify whether MediaConvert should use any dynamic range control metadata
	// from your input file. Keep the default value, Custom (SPECIFIED), to provide
	// dynamic range control values in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE)
	// to use the metadata from your input. Related settings--Use these settings
	// to specify your dynamic range control values: Dynamic range compression line
	// (DynamicRangeCompressionLine) and Dynamic range compression RF (DynamicRangeCompressionRf).
	// When you keep the value Custom (SPECIFIED) for Dynamic range control (DynamicRangeControl)
	// and you don't specify values for the related settings, MediaConvert uses
	// default values for those settings.
	DynamicRangeControl *string `locationName:"dynamicRangeControl" type:"string" enum:"Eac3AtmosDynamicRangeControl"`

	// Specify a value for the following Dolby Atmos setting: Left only/Right only
	// center mix (Lo/Ro center). MediaConvert uses this value for downmixing. Default
	// value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB). Valid values: 3.0, 1.5,
	// 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this
	// value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix).
	// Related setting: To have MediaConvert use this value, keep the default value,
	// Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise,
	// MediaConvert ignores Left only/Right only center (LoRoCenterMixLevel).
	LoRoCenterMixLevel *float64 `locationName:"loRoCenterMixLevel" type:"double"`

	// Specify a value for the following Dolby Atmos setting: Left only/Right only
	// (Lo/Ro surround). MediaConvert uses this value for downmixing. Default value:
	// -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB). Valid values: -1.5, -3.0, -4.5,
	// -6.0, and -60. The value -60 mutes the channel. Related setting: How the
	// service uses this value depends on the value that you choose for Stereo downmix
	// (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this
	// value, keep the default value, Custom (SPECIFIED) for the setting Downmix
	// control (DownmixControl). Otherwise, MediaConvert ignores Left only/Right
	// only surround (LoRoSurroundMixLevel).
	LoRoSurroundMixLevel *float64 `locationName:"loRoSurroundMixLevel" type:"double"`

	// Specify a value for the following Dolby Atmos setting: Left total/Right total
	// center mix (Lt/Rt center). MediaConvert uses this value for downmixing. Default
	// value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB) Valid values: 3.0, 1.5,
	// 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this
	// value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix).
	// Related setting: To have MediaConvert use this value, keep the default value,
	// Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise,
	// MediaConvert ignores Left total/Right total center (LtRtCenterMixLevel).
	LtRtCenterMixLevel *float64 `locationName:"ltRtCenterMixLevel" type:"double"`

	// Specify a value for the following Dolby Atmos setting: Left total/Right total
	// surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing.
	// Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB) Valid values:
	// -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related
	// setting: How the service uses this value depends on the value that you choose
	// for Stereo downmix (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert
	// use this value, keep the default value, Custom (SPECIFIED) for the setting
	// Downmix control (DownmixControl). Otherwise, the service ignores Left total/Right
	// total surround (LtRtSurroundMixLevel).
	LtRtSurroundMixLevel *float64 `locationName:"ltRtSurroundMixLevel" type:"double"`

	// Choose how the service meters the loudness of your audio.
	MeteringMode *string `locationName:"meteringMode" type:"string" enum:"Eac3AtmosMeteringMode"`

	// This value is always 48000. It represents the sample rate in Hz.
	SampleRate *int64 `locationName:"sampleRate" min:"48000" type:"integer"`

	// Specify the percentage of audio content, from 0% to 100%, that must be speech
	// in order for the encoder to use the measured speech loudness as the overall
	// program loudness. Default value: 15%
	SpeechThreshold *int64 `locationName:"speechThreshold" type:"integer"`

	// Choose how the service does stereo downmixing. Default value: Not indicated
	// (ATMOS_STORAGE_DDP_DMIXMOD_NOT_INDICATED) Related setting: To have MediaConvert
	// use this value, keep the default value, Custom (SPECIFIED) for the setting
	// Downmix control (DownmixControl). Otherwise, MediaConvert ignores Stereo
	// downmix (StereoDownmix).
	StereoDownmix *string `locationName:"stereoDownmix" type:"string" enum:"Eac3AtmosStereoDownmix"`

	// Specify whether your input audio has an additional center rear surround channel
	// matrix encoded into your left and right surround channels.
	SurroundExMode *string `locationName:"surroundExMode" type:"string" enum:"Eac3AtmosSurroundExMode"`
}

// String returns the string representation
func (s Eac3AtmosSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Eac3AtmosSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the minimum constraints declared in the Bitrate and SampleRate
// field tags.
func (s *Eac3AtmosSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Eac3AtmosSettings"}
	if s.Bitrate != nil && *s.Bitrate < 384000 {
		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 384000))
	}
	if s.SampleRate != nil && *s.SampleRate < 48000 {
		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 48000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBitrate sets the Bitrate field's value.
func (s *Eac3AtmosSettings) SetBitrate(v int64) *Eac3AtmosSettings {
	s.Bitrate = &v
	return s
}

// SetBitstreamMode sets the BitstreamMode field's value.
func (s *Eac3AtmosSettings) SetBitstreamMode(v string) *Eac3AtmosSettings {
	s.BitstreamMode = &v
	return s
}

// SetCodingMode sets the CodingMode field's value.
func (s *Eac3AtmosSettings) SetCodingMode(v string) *Eac3AtmosSettings {
	s.CodingMode = &v
	return s
}

// SetDialogueIntelligence sets the DialogueIntelligence field's value.
func (s *Eac3AtmosSettings) SetDialogueIntelligence(v string) *Eac3AtmosSettings { s.DialogueIntelligence = &v return s } // SetDownmixControl sets the DownmixControl field's value. func (s *Eac3AtmosSettings) SetDownmixControl(v string) *Eac3AtmosSettings { s.DownmixControl = &v return s } // SetDynamicRangeCompressionLine sets the DynamicRangeCompressionLine field's value. func (s *Eac3AtmosSettings) SetDynamicRangeCompressionLine(v string) *Eac3AtmosSettings { s.DynamicRangeCompressionLine = &v return s } // SetDynamicRangeCompressionRf sets the DynamicRangeCompressionRf field's value. func (s *Eac3AtmosSettings) SetDynamicRangeCompressionRf(v string) *Eac3AtmosSettings { s.DynamicRangeCompressionRf = &v return s } // SetDynamicRangeControl sets the DynamicRangeControl field's value. func (s *Eac3AtmosSettings) SetDynamicRangeControl(v string) *Eac3AtmosSettings { s.DynamicRangeControl = &v return s } // SetLoRoCenterMixLevel sets the LoRoCenterMixLevel field's value. func (s *Eac3AtmosSettings) SetLoRoCenterMixLevel(v float64) *Eac3AtmosSettings { s.LoRoCenterMixLevel = &v return s } // SetLoRoSurroundMixLevel sets the LoRoSurroundMixLevel field's value. func (s *Eac3AtmosSettings) SetLoRoSurroundMixLevel(v float64) *Eac3AtmosSettings { s.LoRoSurroundMixLevel = &v return s } // SetLtRtCenterMixLevel sets the LtRtCenterMixLevel field's value. func (s *Eac3AtmosSettings) SetLtRtCenterMixLevel(v float64) *Eac3AtmosSettings { s.LtRtCenterMixLevel = &v return s } // SetLtRtSurroundMixLevel sets the LtRtSurroundMixLevel field's value. func (s *Eac3AtmosSettings) SetLtRtSurroundMixLevel(v float64) *Eac3AtmosSettings { s.LtRtSurroundMixLevel = &v return s } // SetMeteringMode sets the MeteringMode field's value. func (s *Eac3AtmosSettings) SetMeteringMode(v string) *Eac3AtmosSettings { s.MeteringMode = &v return s } // SetSampleRate sets the SampleRate field's value. 
func (s *Eac3AtmosSettings) SetSampleRate(v int64) *Eac3AtmosSettings {
	s.SampleRate = &v
	return s
}

// SetSpeechThreshold sets the SpeechThreshold field's value.
func (s *Eac3AtmosSettings) SetSpeechThreshold(v int64) *Eac3AtmosSettings {
	s.SpeechThreshold = &v
	return s
}

// SetStereoDownmix sets the StereoDownmix field's value.
func (s *Eac3AtmosSettings) SetStereoDownmix(v string) *Eac3AtmosSettings {
	s.StereoDownmix = &v
	return s
}

// SetSurroundExMode sets the SurroundExMode field's value.
func (s *Eac3AtmosSettings) SetSurroundExMode(v string) *Eac3AtmosSettings {
	s.SurroundExMode = &v
	return s
}

// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value EAC3.
type Eac3Settings struct {
	_ struct{} `type:"structure"`

	// If set to ATTENUATE_3_DB, applies a 3 dB attenuation to the surround channels.
	// Only used for 3/2 coding mode.
	AttenuationControl *string `locationName:"attenuationControl" type:"string" enum:"Eac3AttenuationControl"`

	// Specify the average bitrate in bits per second. Valid bitrates depend on
	// the coding mode.
	Bitrate *int64 `locationName:"bitrate" min:"64000" type:"integer"`

	// Specify the bitstream mode for the E-AC-3 stream that the encoder emits.
	// For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex
	// E).
	BitstreamMode *string `locationName:"bitstreamMode" type:"string" enum:"Eac3BitstreamMode"`

	// Dolby Digital Plus coding mode. Determines number of channels.
	CodingMode *string `locationName:"codingMode" type:"string" enum:"Eac3CodingMode"`

	// Activates a DC highpass filter for all input channels.
	DcFilter *string `locationName:"dcFilter" type:"string" enum:"Eac3DcFilter"`

	// Sets the dialnorm for the output. If blank and input audio is Dolby Digital
	// Plus, dialnorm will be passed through.
	Dialnorm *int64 `locationName:"dialnorm" min:"1" type:"integer"`

	// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
	// uses when encoding the metadata in the Dolby Digital stream for the line
	// operating mode. Related setting: When you use this setting, MediaConvert
	// ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile).
	// For information about the Dolby Digital DRC operating modes and profiles,
	// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
	DynamicRangeCompressionLine *string `locationName:"dynamicRangeCompressionLine" type:"string" enum:"Eac3DynamicRangeCompressionLine"`

	// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
	// uses when encoding the metadata in the Dolby Digital stream for the RF operating
	// mode. Related setting: When you use this setting, MediaConvert ignores any
	// value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile).
	// For information about the Dolby Digital DRC operating modes and profiles,
	// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
	DynamicRangeCompressionRf *string `locationName:"dynamicRangeCompressionRf" type:"string" enum:"Eac3DynamicRangeCompressionRf"`

	// When encoding 3/2 audio, controls whether the LFE channel is enabled
	LfeControl *string `locationName:"lfeControl" type:"string" enum:"Eac3LfeControl"`

	// Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only
	// valid with 3_2_LFE coding mode.
	LfeFilter *string `locationName:"lfeFilter" type:"string" enum:"Eac3LfeFilter"`

	// Specify a value for the following Dolby Digital Plus setting: Left only/Right
	// only center mix (Lo/Ro center). MediaConvert uses this value for downmixing.
	// How the service uses this value depends on the value that you choose for
	// Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0,
	// -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies
	// only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2)
	// for the setting Coding mode (Eac3CodingMode). If you choose a different value
	// for Coding mode, the service ignores Left only/Right only center (loRoCenterMixLevel).
	LoRoCenterMixLevel *float64 `locationName:"loRoCenterMixLevel" type:"double"`

	// Specify a value for the following Dolby Digital Plus setting: Left only/Right
	// only (Lo/Ro surround). MediaConvert uses this value for downmixing. How the
	// service uses this value depends on the value that you choose for Stereo downmix
	// (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value
	// -60 mutes the channel. This setting applies only if you keep the default
	// value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode
	// (Eac3CodingMode). If you choose a different value for Coding mode, the service
	// ignores Left only/Right only surround (loRoSurroundMixLevel).
	LoRoSurroundMixLevel *float64 `locationName:"loRoSurroundMixLevel" type:"double"`

	// Specify a value for the following Dolby Digital Plus setting: Left total/Right
	// total center mix (Lt/Rt center). MediaConvert uses this value for downmixing.
	// How the service uses this value depends on the value that you choose for
	// Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0,
	// -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies
	// only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2)
	// for the setting Coding mode (Eac3CodingMode). If you choose a different value
	// for Coding mode, the service ignores Left total/Right total center (ltRtCenterMixLevel).
	LtRtCenterMixLevel *float64 `locationName:"ltRtCenterMixLevel" type:"double"`

	// Specify a value for the following Dolby Digital Plus setting: Left total/Right
	// total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing.
	// How the service uses this value depends on the value that you choose for
	// Stereo downmix (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0,
	// and -60. The value -60 mutes the channel. This setting applies only if you
	// keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the
	// setting Coding mode (Eac3CodingMode). If you choose a different value for
	// Coding mode, the service ignores Left total/Right total surround (ltRtSurroundMixLevel).
	LtRtSurroundMixLevel *float64 `locationName:"ltRtSurroundMixLevel" type:"double"`

	// When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+,
	// or DolbyE decoder that supplied this audio data. If audio was not supplied
	// from one of these streams, then the static metadata settings will be used.
	MetadataControl *string `locationName:"metadataControl" type:"string" enum:"Eac3MetadataControl"`

	// When set to WHEN_POSSIBLE, input DD+ audio will be passed through if it is
	// present on the input. this detection is dynamic over the life of the transcode.
	// Inputs that alternate between DD+ and non-DD+ content will have a consistent
	// DD+ output as the system alternates between passthrough and encoding.
	PassthroughControl *string `locationName:"passthroughControl" type:"string" enum:"Eac3PassthroughControl"`

	// Controls the amount of phase-shift applied to the surround channels. Only
	// used for 3/2 coding mode.
	PhaseControl *string `locationName:"phaseControl" type:"string" enum:"Eac3PhaseControl"`

	// This value is always 48000. It represents the sample rate in Hz.
	SampleRate *int64 `locationName:"sampleRate" min:"48000" type:"integer"`

	// Choose how the service does stereo downmixing. This setting only applies
	// if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2)
	// for the setting Coding mode (Eac3CodingMode). If you choose a different value
	// for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix).
	StereoDownmix *string `locationName:"stereoDownmix" type:"string" enum:"Eac3StereoDownmix"`

	// When encoding 3/2 audio, sets whether an extra center back surround channel
	// is matrix encoded into the left and right surround channels.
	SurroundExMode *string `locationName:"surroundExMode" type:"string" enum:"Eac3SurroundExMode"`

	// When encoding 2/0 audio, sets whether Dolby Surround is matrix encoded into
	// the two channels.
	SurroundMode *string `locationName:"surroundMode" type:"string" enum:"Eac3SurroundMode"`
}

// String returns the string representation
func (s Eac3Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Eac3Settings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the minimum constraints declared in the Bitrate, Dialnorm, and
// SampleRate field tags.
func (s *Eac3Settings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Eac3Settings"}
	if s.Bitrate != nil && *s.Bitrate < 64000 {
		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 64000))
	}
	if s.Dialnorm != nil && *s.Dialnorm < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Dialnorm", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 48000 {
		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 48000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAttenuationControl sets the AttenuationControl field's value.
func (s *Eac3Settings) SetAttenuationControl(v string) *Eac3Settings {
	s.AttenuationControl = &v
	return s
}

// SetBitrate sets the Bitrate field's value.
func (s *Eac3Settings) SetBitrate(v int64) *Eac3Settings {
	s.Bitrate = &v
	return s
}

// SetBitstreamMode sets the BitstreamMode field's value.
func (s *Eac3Settings) SetBitstreamMode(v string) *Eac3Settings { s.BitstreamMode = &v return s } // SetCodingMode sets the CodingMode field's value. func (s *Eac3Settings) SetCodingMode(v string) *Eac3Settings { s.CodingMode = &v return s } // SetDcFilter sets the DcFilter field's value. func (s *Eac3Settings) SetDcFilter(v string) *Eac3Settings { s.DcFilter = &v return s } // SetDialnorm sets the Dialnorm field's value. func (s *Eac3Settings) SetDialnorm(v int64) *Eac3Settings { s.Dialnorm = &v return s } // SetDynamicRangeCompressionLine sets the DynamicRangeCompressionLine field's value. func (s *Eac3Settings) SetDynamicRangeCompressionLine(v string) *Eac3Settings { s.DynamicRangeCompressionLine = &v return s } // SetDynamicRangeCompressionRf sets the DynamicRangeCompressionRf field's value. func (s *Eac3Settings) SetDynamicRangeCompressionRf(v string) *Eac3Settings { s.DynamicRangeCompressionRf = &v return s } // SetLfeControl sets the LfeControl field's value. func (s *Eac3Settings) SetLfeControl(v string) *Eac3Settings { s.LfeControl = &v return s } // SetLfeFilter sets the LfeFilter field's value. func (s *Eac3Settings) SetLfeFilter(v string) *Eac3Settings { s.LfeFilter = &v return s } // SetLoRoCenterMixLevel sets the LoRoCenterMixLevel field's value. func (s *Eac3Settings) SetLoRoCenterMixLevel(v float64) *Eac3Settings { s.LoRoCenterMixLevel = &v return s } // SetLoRoSurroundMixLevel sets the LoRoSurroundMixLevel field's value. func (s *Eac3Settings) SetLoRoSurroundMixLevel(v float64) *Eac3Settings { s.LoRoSurroundMixLevel = &v return s } // SetLtRtCenterMixLevel sets the LtRtCenterMixLevel field's value. func (s *Eac3Settings) SetLtRtCenterMixLevel(v float64) *Eac3Settings { s.LtRtCenterMixLevel = &v return s } // SetLtRtSurroundMixLevel sets the LtRtSurroundMixLevel field's value. func (s *Eac3Settings) SetLtRtSurroundMixLevel(v float64) *Eac3Settings { s.LtRtSurroundMixLevel = &v return s } // SetMetadataControl sets the MetadataControl field's value. 
func (s *Eac3Settings) SetMetadataControl(v string) *Eac3Settings { s.MetadataControl = &v return s } // SetPassthroughControl sets the PassthroughControl field's value. func (s *Eac3Settings) SetPassthroughControl(v string) *Eac3Settings { s.PassthroughControl = &v return s } // SetPhaseControl sets the PhaseControl field's value. func (s *Eac3Settings) SetPhaseControl(v string) *Eac3Settings { s.PhaseControl = &v return s } // SetSampleRate sets the SampleRate field's value. func (s *Eac3Settings) SetSampleRate(v int64) *Eac3Settings { s.SampleRate = &v return s } // SetStereoDownmix sets the StereoDownmix field's value. func (s *Eac3Settings) SetStereoDownmix(v string) *Eac3Settings { s.StereoDownmix = &v return s } // SetSurroundExMode sets the SurroundExMode field's value. func (s *Eac3Settings) SetSurroundExMode(v string) *Eac3Settings { s.SurroundExMode = &v return s } // SetSurroundMode sets the SurroundMode field's value. func (s *Eac3Settings) SetSurroundMode(v string) *Eac3Settings { s.SurroundMode = &v return s } // Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or // ancillary) captions. Set up embedded captions in the same output as your // video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html. // When you work directly in your JSON job specification, include this object // and any required children when you set destinationType to EMBEDDED, EMBEDDED_PLUS_SCTE20, // or SCTE20_PLUS_EMBEDDED. type EmbeddedDestinationSettings struct { _ struct{} `type:"structure"` // Ignore this setting unless your input captions are SCC format and your output // captions are embedded in the video stream. Specify a CC number for each captions // channel in this output. If you have two channels, choose CC numbers that // aren't in the same field. For example, choose 1 and 3. For more information, // see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded. 
Destination608ChannelNumber *int64 `locationName:"destination608ChannelNumber" min:"1" type:"integer"` // Ignore this setting unless your input captions are SCC format and you want // both 608 and 708 captions embedded in your output stream. Optionally, specify // the 708 service number for each output captions channel. Choose a different // number for each channel. To use this setting, also set Force 608 to 708 upconvert // (Convert608To708) to Upconvert (UPCONVERT) in your input captions selector // settings. If you choose to upconvert but don't specify a 708 service number, // MediaConvert uses the number that you specify for CC channel number (destination608ChannelNumber) // for the 708 service number. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded. Destination708ServiceNumber *int64 `locationName:"destination708ServiceNumber" min:"1" type:"integer"` } // String returns the string representation func (s EmbeddedDestinationSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s EmbeddedDestinationSettings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *EmbeddedDestinationSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "EmbeddedDestinationSettings"} if s.Destination608ChannelNumber != nil && *s.Destination608ChannelNumber < 1 { invalidParams.Add(request.NewErrParamMinValue("Destination608ChannelNumber", 1)) } if s.Destination708ServiceNumber != nil && *s.Destination708ServiceNumber < 1 { invalidParams.Add(request.NewErrParamMinValue("Destination708ServiceNumber", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetDestination608ChannelNumber sets the Destination608ChannelNumber field's value. 
func (s *EmbeddedDestinationSettings) SetDestination608ChannelNumber(v int64) *EmbeddedDestinationSettings { s.Destination608ChannelNumber = &v return s } // SetDestination708ServiceNumber sets the Destination708ServiceNumber field's value. func (s *EmbeddedDestinationSettings) SetDestination708ServiceNumber(v int64) *EmbeddedDestinationSettings { s.Destination708ServiceNumber = &v return s } // Settings for embedded captions Source type EmbeddedSourceSettings struct { _ struct{} `type:"structure"` // Specify whether this set of input captions appears in your outputs in both // 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes // the captions data in two ways: it passes the 608 data through using the 608 // compatibility bytes fields of the 708 wrapper, and it also translates the // 608 data into 708. Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"EmbeddedConvert608To708"` // Specifies the 608/708 channel number within the video track from which to // extract captions. Unused for passthrough. Source608ChannelNumber *int64 `locationName:"source608ChannelNumber" min:"1" type:"integer"` // Specifies the video track index used for extracting captions. The system // only supports one input video track, so this should always be set to '1'. Source608TrackNumber *int64 `locationName:"source608TrackNumber" min:"1" type:"integer"` // By default, the service terminates any unterminated captions at the end of // each input. If you want the caption to continue onto your next input, disable // this setting. 
TerminateCaptions *string `locationName:"terminateCaptions" type:"string" enum:"EmbeddedTerminateCaptions"` } // String returns the string representation func (s EmbeddedSourceSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s EmbeddedSourceSettings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *EmbeddedSourceSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "EmbeddedSourceSettings"} if s.Source608ChannelNumber != nil && *s.Source608ChannelNumber < 1 { invalidParams.Add(request.NewErrParamMinValue("Source608ChannelNumber", 1)) } if s.Source608TrackNumber != nil && *s.Source608TrackNumber < 1 { invalidParams.Add(request.NewErrParamMinValue("Source608TrackNumber", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetConvert608To708 sets the Convert608To708 field's value. func (s *EmbeddedSourceSettings) SetConvert608To708(v string) *EmbeddedSourceSettings { s.Convert608To708 = &v return s } // SetSource608ChannelNumber sets the Source608ChannelNumber field's value. func (s *EmbeddedSourceSettings) SetSource608ChannelNumber(v int64) *EmbeddedSourceSettings { s.Source608ChannelNumber = &v return s } // SetSource608TrackNumber sets the Source608TrackNumber field's value. func (s *EmbeddedSourceSettings) SetSource608TrackNumber(v int64) *EmbeddedSourceSettings { s.Source608TrackNumber = &v return s } // SetTerminateCaptions sets the TerminateCaptions field's value. func (s *EmbeddedSourceSettings) SetTerminateCaptions(v string) *EmbeddedSourceSettings { s.TerminateCaptions = &v return s } // Describes an account-specific API endpoint. 
type Endpoint struct {
	_ struct{} `type:"structure"`

	// URL of the endpoint.
	Url *string `locationName:"url" type:"string"`
}

// String returns the string representation
func (s Endpoint) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Endpoint) GoString() string {
	return s.String()
}

// SetUrl sets the Url field's value.
func (s *Endpoint) SetUrl(v string) *Endpoint {
	s.Url = &v
	return s
}

// ESAM ManifestConfirmConditionNotification defined by OC-SP-ESAM-API-I03-131025.
type EsamManifestConfirmConditionNotification struct {
	_ struct{} `type:"structure"`

	// Provide your ESAM ManifestConfirmConditionNotification XML document inside
	// your JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025.
	// The transcoder will use the Manifest Conditioning instructions in the message
	// that you supply.
	MccXml *string `locationName:"mccXml" type:"string"`
}

// String returns the string representation
func (s EsamManifestConfirmConditionNotification) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s EsamManifestConfirmConditionNotification) GoString() string {
	return s.String()
}

// SetMccXml sets the MccXml field's value.
func (s *EsamManifestConfirmConditionNotification) SetMccXml(v string) *EsamManifestConfirmConditionNotification {
	s.MccXml = &v
	return s
}

// Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion,
// you can ignore these settings.
type EsamSettings struct {
	_ struct{} `type:"structure"`

	// Specifies an ESAM ManifestConfirmConditionNotification XML as per OC-SP-ESAM-API-I03-131025.
	// The transcoder uses the manifest conditioning instructions that you provide
	// in the setting MCC XML (mccXml).
	ManifestConfirmConditionNotification *EsamManifestConfirmConditionNotification `locationName:"manifestConfirmConditionNotification" type:"structure"`

	// Specifies the stream distance, in milliseconds, between the SCTE 35 messages
	// that the transcoder places and the splice points that they refer to. If the
	// time between the start of the asset and the SCTE-35 message is less than
	// this value, then the transcoder places the SCTE-35 marker at the beginning
	// of the stream.
	ResponseSignalPreroll *int64 `locationName:"responseSignalPreroll" type:"integer"`

	// Specifies an ESAM SignalProcessingNotification XML as per OC-SP-ESAM-API-I03-131025.
	// The transcoder uses the signal processing instructions that you provide in
	// the setting SCC XML (sccXml).
	SignalProcessingNotification *EsamSignalProcessingNotification `locationName:"signalProcessingNotification" type:"structure"`
}

// String returns the string representation
func (s EsamSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s EsamSettings) GoString() string {
	return s.String()
}

// SetManifestConfirmConditionNotification sets the ManifestConfirmConditionNotification field's value.
func (s *EsamSettings) SetManifestConfirmConditionNotification(v *EsamManifestConfirmConditionNotification) *EsamSettings {
	s.ManifestConfirmConditionNotification = v
	return s
}

// SetResponseSignalPreroll sets the ResponseSignalPreroll field's value.
func (s *EsamSettings) SetResponseSignalPreroll(v int64) *EsamSettings {
	s.ResponseSignalPreroll = &v
	return s
}

// SetSignalProcessingNotification sets the SignalProcessingNotification field's value.
func (s *EsamSettings) SetSignalProcessingNotification(v *EsamSignalProcessingNotification) *EsamSettings {
	s.SignalProcessingNotification = v
	return s
}

// ESAM SignalProcessingNotification data defined by OC-SP-ESAM-API-I03-131025.
type EsamSignalProcessingNotification struct {
	_ struct{} `type:"structure"`

	// Provide your ESAM SignalProcessingNotification XML document inside your JSON
	// job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The
	// transcoder will use the signal processing instructions in the message that
	// you supply. Provide your ESAM SignalProcessingNotification XML document inside
	// your JSON job settings. For your MPEG2-TS file outputs, if you want the service
	// to place SCTE-35 markers at the insertion points you specify in the XML document,
	// you must also enable SCTE-35 ESAM (scte35Esam). Note that you can either
	// specify an ESAM XML document or enable SCTE-35 passthrough. You can't do
	// both.
	SccXml *string `locationName:"sccXml" type:"string"`
}

// String returns the string representation
func (s EsamSignalProcessingNotification) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s EsamSignalProcessingNotification) GoString() string {
	return s.String()
}

// SetSccXml sets the SccXml field's value.
func (s *EsamSignalProcessingNotification) SetSccXml(v string) *EsamSignalProcessingNotification {
	s.SccXml = &v
	return s
}

// Settings for F4v container
type F4vSettings struct {
	_ struct{} `type:"structure"`

	// If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning
	// of the archive as required for progressive downloading. Otherwise it is placed
	// normally at the end.
	MoovPlacement *string `locationName:"moovPlacement" type:"string" enum:"F4vMoovPlacement"`
}

// String returns the string representation
func (s F4vSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s F4vSettings) GoString() string {
	return s.String()
}

// SetMoovPlacement sets the MoovPlacement field's value.
func (s *F4vSettings) SetMoovPlacement(v string) *F4vSettings {
	s.MoovPlacement = &v
	return s
}

// Settings related to your File output group. MediaConvert uses this group
// of settings to generate a single standalone file, rather than a streaming
// package. When you work directly in your JSON job specification, include this
// object and any required children when you set Type, under OutputGroupSettings,
// to FILE_GROUP_SETTINGS.
type FileGroupSettings struct {
	_ struct{} `type:"structure"`

	// Use Destination (Destination) to specify the S3 output location and the output
	// filename base. Destination accepts format identifiers. If you do not specify
	// the base filename in the URI, the service will use the filename of the input
	// file. If your job has multiple inputs, the service uses the filename of the
	// first input file.
	Destination *string `locationName:"destination" type:"string"`

	// Settings associated with the destination. Will vary based on the type of
	// destination
	DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"`
}

// String returns the string representation
func (s FileGroupSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s FileGroupSettings) GoString() string {
	return s.String()
}

// SetDestination sets the Destination field's value.
func (s *FileGroupSettings) SetDestination(v string) *FileGroupSettings {
	s.Destination = &v
	return s
}

// SetDestinationSettings sets the DestinationSettings field's value.
func (s *FileGroupSettings) SetDestinationSettings(v *DestinationSettings) *FileGroupSettings {
	s.DestinationSettings = v
	return s
}

// If your input captions are SCC, SMI, SRT, STL, TTML, WebVTT, or IMSC 1.1
// in an xml file, specify the URI of the input caption source file. If your
// caption source is IMSC in an IMF package, use TrackSourceSettings instead
// of FileSourceSettings.
type FileSourceSettings struct {
	_ struct{} `type:"structure"`

	// Specify whether this set of input captions appears in your outputs in both
	// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes
	// the captions data in two ways: it passes the 608 data through using the 608
	// compatibility bytes fields of the 708 wrapper, and it also translates the
	// 608 data into 708.
	Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"FileSourceConvert608To708"`

	// Ignore this setting unless your input captions format is SCC. To have the
	// service compensate for differing frame rates between your input captions
	// and input video, specify the frame rate of the captions file. Specify this
	// value as a fraction, using the settings Framerate numerator (framerateNumerator)
	// and Framerate denominator (framerateDenominator). For example, you might
	// specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps,
	// or 30000 / 1001 for 29.97 fps.
	Framerate *CaptionSourceFramerate `locationName:"framerate" type:"structure"`

	// External caption file used for loading captions. Accepted file extensions
	// are 'scc', 'ttml', 'dfxp', 'stl', 'srt', 'xml', 'smi', 'webvtt', and 'vtt'.
	SourceFile *string `locationName:"sourceFile" min:"14" type:"string"`

	// Specifies a time delta in seconds to offset the captions from the source
	// file.
	TimeDelta *int64 `locationName:"timeDelta" type:"integer"`
}

// String returns the string representation
func (s FileSourceSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s FileSourceSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *FileSourceSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "FileSourceSettings"} if s.SourceFile != nil && len(*s.SourceFile) < 14 { invalidParams.Add(request.NewErrParamMinLen("SourceFile", 14)) } if s.TimeDelta != nil && *s.TimeDelta < -2.147483648e+09 { invalidParams.Add(request.NewErrParamMinValue("TimeDelta", -2.147483648e+09)) } if s.Framerate != nil { if err := s.Framerate.Validate(); err != nil { invalidParams.AddNested("Framerate", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetConvert608To708 sets the Convert608To708 field's value. func (s *FileSourceSettings) SetConvert608To708(v string) *FileSourceSettings { s.Convert608To708 = &v return s } // SetFramerate sets the Framerate field's value. func (s *FileSourceSettings) SetFramerate(v *CaptionSourceFramerate) *FileSourceSettings { s.Framerate = v return s } // SetSourceFile sets the SourceFile field's value. func (s *FileSourceSettings) SetSourceFile(v string) *FileSourceSettings { s.SourceFile = &v return s } // SetTimeDelta sets the TimeDelta field's value. func (s *FileSourceSettings) SetTimeDelta(v int64) *FileSourceSettings { s.TimeDelta = &v return s } type ForbiddenException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } // String returns the string representation func (s ForbiddenException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ForbiddenException) GoString() string { return s.String() } func newErrorForbiddenException(v protocol.ResponseMetadata) error { return &ForbiddenException{ RespMetadata: v, } } // Code returns the exception type name. func (s *ForbiddenException) Code() string { return "ForbiddenException" } // Message returns the exception's message. 
func (s *ForbiddenException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *ForbiddenException) OrigErr() error {
	return nil
}

// Error satisfies the error interface, combining the exception code and message.
func (s *ForbiddenException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *ForbiddenException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *ForbiddenException) RequestID() string {
	return s.RespMetadata.RequestID
}

// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value FRAME_CAPTURE.
type FrameCaptureSettings struct {
	_ struct{} `type:"structure"`

	// Frame capture will encode the first frame of the output stream, then one
	// frame every framerateDenominator/framerateNumerator seconds. For example,
	// settings of framerateNumerator = 1 and framerateDenominator = 3 (a rate of
	// 1/3 frame per second) will capture the first frame, then 1 frame every 3s.
	// Files will be named as filename.n.jpg where n is the 0-based sequence number
	// of each Capture.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// Frame capture will encode the first frame of the output stream, then one
	// frame every framerateDenominator/framerateNumerator seconds. For example,
	// settings of framerateNumerator = 1 and framerateDenominator = 3 (a rate of
	// 1/3 frame per second) will capture the first frame, then 1 frame every 3s.
	// Files will be named as filename.NNNNNNN.jpg where N is the 0-based frame
	// sequence number zero padded to 7 decimal places.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// Maximum number of captures (encoded jpg output files).
	MaxCaptures *int64 `locationName:"maxCaptures" min:"1" type:"integer"`

	// JPEG Quality - a higher value equals higher quality.
	Quality *int64 `locationName:"quality" min:"1" type:"integer"`
}

// String returns the string representation
func (s FrameCaptureSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s FrameCaptureSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *FrameCaptureSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "FrameCaptureSettings"}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1))
	}
	if s.MaxCaptures != nil && *s.MaxCaptures < 1 {
		invalidParams.Add(request.NewErrParamMinValue("MaxCaptures", 1))
	}
	if s.Quality != nil && *s.Quality < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Quality", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetFramerateDenominator sets the FramerateDenominator field's value.
func (s *FrameCaptureSettings) SetFramerateDenominator(v int64) *FrameCaptureSettings {
	s.FramerateDenominator = &v
	return s
}

// SetFramerateNumerator sets the FramerateNumerator field's value.
func (s *FrameCaptureSettings) SetFramerateNumerator(v int64) *FrameCaptureSettings {
	s.FramerateNumerator = &v
	return s
}

// SetMaxCaptures sets the MaxCaptures field's value.
func (s *FrameCaptureSettings) SetMaxCaptures(v int64) *FrameCaptureSettings {
	s.MaxCaptures = &v
	return s
}

// SetQuality sets the Quality field's value.
func (s *FrameCaptureSettings) SetQuality(v int64) *FrameCaptureSettings {
	s.Quality = &v
	return s
}

// Query a job by sending a request with the job ID.
type GetJobInput struct {
	_ struct{} `type:"structure"`

	// The ID of the job.
	//
	// Id is a required field
	Id *string `location:"uri" locationName:"id" type:"string" required:"true"`
}

// String returns the string representation
func (s GetJobInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetJobInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetJobInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetJobInput"}
	if s.Id == nil {
		invalidParams.Add(request.NewErrParamRequired("Id"))
	}
	if s.Id != nil && len(*s.Id) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Id", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetId sets the Id field's value.
func (s *GetJobInput) SetId(v string) *GetJobInput {
	s.Id = &v
	return s
}

// Successful get job requests will return an OK message and the job JSON.
type GetJobOutput struct {
	_ struct{} `type:"structure"`

	// Each job converts an input file into an output file or files. For more information,
	// see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
	Job *Job `locationName:"job" type:"structure"`
}

// String returns the string representation
func (s GetJobOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetJobOutput) GoString() string {
	return s.String()
}

// SetJob sets the Job field's value.
func (s *GetJobOutput) SetJob(v *Job) *GetJobOutput {
	s.Job = v
	return s
}

// Query a job template by sending a request with the job template name.
type GetJobTemplateInput struct {
	_ struct{} `type:"structure"`

	// The name of the job template.
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`
}

// String returns the string representation
func (s GetJobTemplateInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetJobTemplateInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetJobTemplateInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetJobTemplateInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetName sets the Name field's value.
func (s *GetJobTemplateInput) SetName(v string) *GetJobTemplateInput {
	s.Name = &v
	return s
}

// Successful get job template requests will return an OK message and the job
// template JSON.
type GetJobTemplateOutput struct {
	_ struct{} `type:"structure"`

	// A job template is a pre-made set of encoding instructions that you can use
	// to quickly create a job.
	JobTemplate *JobTemplate `locationName:"jobTemplate" type:"structure"`
}

// String returns the string representation
func (s GetJobTemplateOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetJobTemplateOutput) GoString() string {
	return s.String()
}

// SetJobTemplate sets the JobTemplate field's value.
func (s *GetJobTemplateOutput) SetJobTemplate(v *JobTemplate) *GetJobTemplateOutput {
	s.JobTemplate = v
	return s
}

// Query a preset by sending a request with the preset name.
type GetPresetInput struct {
	_ struct{} `type:"structure"`

	// The name of the preset.
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`
}

// String returns the string representation
func (s GetPresetInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetPresetInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetPresetInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetPresetInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetName sets the Name field's value.
func (s *GetPresetInput) SetName(v string) *GetPresetInput {
	s.Name = &v
	return s
}

// Successful get preset requests will return an OK message and the preset JSON.
type GetPresetOutput struct {
	_ struct{} `type:"structure"`

	// A preset is a collection of preconfigured media conversion settings that
	// you want MediaConvert to apply to the output during the conversion process.
	Preset *Preset `locationName:"preset" type:"structure"`
}

// String returns the string representation
func (s GetPresetOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetPresetOutput) GoString() string {
	return s.String()
}

// SetPreset sets the Preset field's value.
func (s *GetPresetOutput) SetPreset(v *Preset) *GetPresetOutput {
	s.Preset = v
	return s
}

// Get information about a queue by sending a request with the queue name.
type GetQueueInput struct {
	_ struct{} `type:"structure"`

	// The name of the queue that you want information about.
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`
}

// String returns the string representation
func (s GetQueueInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetQueueInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetQueueInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetQueueInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetName sets the Name field's value.
func (s *GetQueueInput) SetName(v string) *GetQueueInput {
	s.Name = &v
	return s
}

// Successful get queue requests return an OK message and information about
// the queue in JSON.
type GetQueueOutput struct {
	_ struct{} `type:"structure"`

	// You can use queues to manage the resources that are available to your AWS
	// account for running multiple transcoding jobs at the same time. If you don't
	// specify a queue, the service sends all jobs through the default queue. For
	// more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html.
	Queue *Queue `locationName:"queue" type:"structure"`
}

// String returns the string representation
func (s GetQueueOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetQueueOutput) GoString() string {
	return s.String()
}

// SetQueue sets the Queue field's value.
func (s *GetQueueOutput) SetQueue(v *Queue) *GetQueueOutput {
	s.Queue = v
	return s
}

// Settings for quality-defined variable bitrate encoding with the H.264 codec.
// Required when you set Rate control mode to QVBR. Not valid when you set Rate
// control mode to a value other than QVBR, or when you don't define Rate control
// mode.
type H264QvbrSettings struct {
	_ struct{} `type:"structure"`

	// Use this setting only when Rate control mode is QVBR and Quality tuning level
	// is Multi-pass HQ. For Max average bitrate values suited to the complexity
	// of your input video, the service limits the average bitrate of the video
	// part of this output to the value that you choose. That is, the total size
	// of the video element is less than or equal to the value you set multiplied
	// by the number of seconds of encoded output.
	MaxAverageBitrate *int64 `locationName:"maxAverageBitrate" min:"1000" type:"integer"`

	// Required when you use QVBR rate control mode. That is, when you specify qvbrSettings
	// within h264Settings. Specify the general target quality level for this output,
	// from 1 to 10. Use higher numbers for greater quality. Level 10 results in
	// nearly lossless compression. The quality level for most broadcast-quality
	// transcodes is between 6 and 9. Optionally, to specify a value between whole
	// numbers, also provide a value for the setting qvbrQualityLevelFineTune. For
	// example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel
	// to 7 and set qvbrQualityLevelFineTune to .33.
	QvbrQualityLevel *int64 `locationName:"qvbrQualityLevel" min:"1" type:"integer"`

	// Optional. Specify a value here to set the QVBR quality to a level that is
	// between whole numbers. For example, if you want your QVBR quality level to
	// be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.
	// MediaConvert rounds your QVBR quality level to the nearest third of a whole
	// number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune
	// to .25, your actual QVBR quality level is 7.33.
	QvbrQualityLevelFineTune *float64 `locationName:"qvbrQualityLevelFineTune" type:"double"`
}

// String returns the string representation
func (s H264QvbrSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s H264QvbrSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *H264QvbrSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "H264QvbrSettings"}
	if s.MaxAverageBitrate != nil && *s.MaxAverageBitrate < 1000 {
		invalidParams.Add(request.NewErrParamMinValue("MaxAverageBitrate", 1000))
	}
	if s.QvbrQualityLevel != nil && *s.QvbrQualityLevel < 1 {
		invalidParams.Add(request.NewErrParamMinValue("QvbrQualityLevel", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetMaxAverageBitrate sets the MaxAverageBitrate field's value.
func (s *H264QvbrSettings) SetMaxAverageBitrate(v int64) *H264QvbrSettings {
	s.MaxAverageBitrate = &v
	return s
}

// SetQvbrQualityLevel sets the QvbrQualityLevel field's value.
func (s *H264QvbrSettings) SetQvbrQualityLevel(v int64) *H264QvbrSettings {
	s.QvbrQualityLevel = &v
	return s
}

// SetQvbrQualityLevelFineTune sets the QvbrQualityLevelFineTune field's value.
func (s *H264QvbrSettings) SetQvbrQualityLevelFineTune(v float64) *H264QvbrSettings {
	s.QvbrQualityLevelFineTune = &v
	return s
}

// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value H_264.
type H264Settings struct {
	_ struct{} `type:"structure"`

	// Keep the default value, Auto (AUTO), for this setting to have MediaConvert
	// automatically apply the best types of quantization for your video content.
	// When you want to apply your quantization settings manually, you must set
	// H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting
	// to specify the strength of any adaptive quantization filters that you enable.
// If you don't want MediaConvert to do any adaptive quantization in this transcode, // set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF). Related // settings: The value that you choose here applies to the following settings: // H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization. AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"H264AdaptiveQuantization"` // Specify the average bitrate in bits per second. Required for VBR and CBR. // For MS Smooth outputs, bitrates must be unique when rounded down to the nearest // multiple of 1000. Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"` // Specify an H.264 level that is consistent with your output video settings. // If you aren't sure what level to specify, choose Auto (AUTO). CodecLevel *string `locationName:"codecLevel" type:"string" enum:"H264CodecLevel"` // H.264 Profile. High 4:2:2 and 10-bit profiles are only available with the // AVC-I License. CodecProfile *string `locationName:"codecProfile" type:"string" enum:"H264CodecProfile"` // Choose Adaptive to improve subjective video quality for high-motion content. // This will cause the service to use fewer B-frames (which infer information // based on other frames) for high-motion portions of the video and more B-frames // for low-motion portions. The maximum number of B-frames is limited by the // value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames). DynamicSubGop *string `locationName:"dynamicSubGop" type:"string" enum:"H264DynamicSubGop"` // Entropy encoding mode. Use CABAC (must be in Main or High profile) or CAVLC. EntropyEncoding *string `locationName:"entropyEncoding" type:"string" enum:"H264EntropyEncoding"` // Keep the default value, PAFF, to have MediaConvert use PAFF encoding for // interlaced outputs. 
Choose Force field (FORCE_FIELD) to disable PAFF encoding // and create separate interlaced fields. FieldEncoding *string `locationName:"fieldEncoding" type:"string" enum:"H264FieldEncoding"` // Only use this setting when you change the default value, AUTO, for the setting // H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization // and all other adaptive quantization from your JSON job specification, MediaConvert // automatically applies the best types of quantization for your video content. // When you set H264AdaptiveQuantization to a value other than AUTO, the default // value for H264FlickerAdaptiveQuantization is Disabled (DISABLED). Change // this value to Enabled (ENABLED) to reduce I-frame pop. I-frame pop appears // as a visual flicker that can arise when the encoder saves bits by copying // some macroblocks many times from frame to frame, and then refreshes them // at the I-frame. When you enable this setting, the encoder updates these macroblocks // slightly more often to smooth out the flicker. To manually enable or disable // H264FlickerAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) // to a value other than AUTO. FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"H264FlickerAdaptiveQuantization"` // If you are using the console, use the Framerate setting to specify the frame // rate for this output. If you want to keep the same frame rate as the input // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose // Custom, specify your frame rate as a fraction. If you are creating your transcoding // job specification as a JSON file without the console, use FramerateControl // to specify which value the service uses for the frame rate for this output. 
// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate // from the input. Choose SPECIFIED if you want the service to use the frame // rate you specify in the settings FramerateNumerator and FramerateDenominator. FramerateControl *string `locationName:"framerateControl" type:"string" enum:"H264FramerateControl"` // Choose the method that you want MediaConvert to use when increasing or decreasing // the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically // simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, // you can use interpolate (INTERPOLATE) to avoid stutter. This results in a // smooth picture, but might introduce undesirable video artifacts. For complex // frame rate conversions, especially if your source video has already been // converted from its original cadence, use FrameFormer (FRAMEFORMER) to do // motion-compensated interpolation. FrameFormer chooses the best conversion // method frame by frame. Note that using FrameFormer increases the transcoding // time and incurs a significant add-on cost. FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"H264FramerateConversionAlgorithm"` // When you use the API for transcode jobs that use frame rate conversion, specify // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use // FramerateDenominator to specify the denominator of this fraction. In this // example, use 1001 for the value of FramerateDenominator. When you use the // console for transcode jobs that use frame rate conversion, provide the value // as a decimal number for Framerate. In this example, specify 23.976. FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` // When you use the API for transcode jobs that use frame rate conversion, specify // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. 
Use // FramerateNumerator to specify the numerator of this fraction. In this example, // use 24000 for the value of FramerateNumerator. When you use the console for // transcode jobs that use frame rate conversion, provide the value as a decimal // number for Framerate. In this example, specify 23.976. FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"` // If enable, use reference B frames for GOP structures that have B frames > // 1. GopBReference *string `locationName:"gopBReference" type:"string" enum:"H264GopBReference"` // Frequency of closed GOPs. In streaming applications, it is recommended that // this be set to 1 so a decoder joining mid-stream will receive an IDR frame // as quickly as possible. Setting this value to 0 will break output segmenting. GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"` // GOP Length (keyframe interval) in frames or seconds. Must be greater than // zero. GopSize *float64 `locationName:"gopSize" type:"double"` // Indicates if the GOP Size in H264 is specified in frames or seconds. If seconds // the system will convert the GOP Size into a frame count at run time. GopSizeUnits *string `locationName:"gopSizeUnits" type:"string" enum:"H264GopSizeUnits"` // Percentage of the buffer that should initially be filled (HRD buffer model). HrdBufferInitialFillPercentage *int64 `locationName:"hrdBufferInitialFillPercentage" type:"integer"` // Size of buffer (HRD buffer model) in bits. For example, enter five megabits // as 5000000. HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"` // Choose the scan line type for the output. Keep the default value, Progressive // (PROGRESSIVE) to create a progressive output, regardless of the scan type // of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) // to create an output that's interlaced with the same field polarity throughout. 
// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) // to produce outputs with the same field polarity as the source. For jobs that // have multiple inputs, the output field polarity might change over the course // of the output. Follow behavior depends on the input scan type. If the source // is interlaced, the output will be interlaced with the same polarity as the // source. If the source is progressive, the output will be interlaced with // top field bottom field first, depending on which of the Follow options you // choose. InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"H264InterlaceMode"` // Maximum bitrate in bits/second. For example, enter five megabits per second // as 5000000. Required when Rate control mode is QVBR. MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"` // Enforces separation between repeated (cadence) I-frames and I-frames inserted // by Scene Change Detection. If a scene change I-frame is within I-interval // frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene // change I-frame. GOP stretch requires enabling lookahead as well as setting // I-interval. The normal cadence resumes for the next GOP. This setting is // only used when Scene Change Detect is enabled. Note: Maximum GOP stretch // = GOP size + Min-I-interval - 1 MinIInterval *int64 `locationName:"minIInterval" type:"integer"` // Number of B-frames between reference frames. NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"` // Number of reference frames to use. The encoder may use more than requested // if using B-frames and/or interlaced encoding. NumberReferenceFrames *int64 `locationName:"numberReferenceFrames" min:"1" type:"integer"` // Optional. Specify how the service determines the pixel aspect ratio (PAR) // for this output. 
The default behavior, Follow source (INITIALIZE_FROM_SOURCE), // uses the PAR from your input video for your output. To specify a different // PAR in the console, choose any value other than Follow source. To specify // a different PAR by editing the JSON job specification, choose SPECIFIED. // When you choose SPECIFIED for this setting, you must also specify values // for the parNumerator and parDenominator settings. ParControl *string `locationName:"parControl" type:"string" enum:"H264ParControl"` // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the // console, this corresponds to any value other than Follow source. When you // specify an output pixel aspect ratio (PAR) that is different from your input // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC // widescreen, you would specify the ratio 40:33. In this example, the value // for parDenominator is 33. ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the // console, this corresponds to any value other than Follow source. When you // specify an output pixel aspect ratio (PAR) that is different from your input // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC // widescreen, you would specify the ratio 40:33. In this example, the value // for parNumerator is 40. ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you // want to trade off encoding speed for output video quality. The default behavior // is faster, lower quality, single-pass encoding. QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"H264QualityTuningLevel"` // Settings for quality-defined variable bitrate encoding with the H.264 codec. // Required when you set Rate control mode to QVBR. 
Not valid when you set Rate // control mode to a value other than QVBR, or when you don't define Rate control // mode. QvbrSettings *H264QvbrSettings `locationName:"qvbrSettings" type:"structure"` // Use this setting to specify whether this output has a variable bitrate (VBR), // constant bitrate (CBR) or quality-defined variable bitrate (QVBR). RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"H264RateControlMode"` // Places a PPS header on each encoded picture, even if repeated. RepeatPps *string `locationName:"repeatPps" type:"string" enum:"H264RepeatPps"` // Use this setting for interlaced outputs, when your output frame rate is half // of your input frame rate. In this situation, choose Optimized interlacing // (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this // case, each progressive frame from the input corresponds to an interlaced // field in the output. Keep the default value, Basic interlacing (INTERLACED), // for all other output frame rates. With basic interlacing, MediaConvert performs // any frame rate conversion first and then interlaces the frames. When you // choose Optimized interlacing and you set your output frame rate to a value // that isn't suitable for optimized interlacing, MediaConvert automatically // falls back to basic interlacing. Required settings: To use optimized interlacing, // you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't // use optimized interlacing for hard telecine outputs. You must also set Interlace // mode (interlaceMode) to a value other than Progressive (PROGRESSIVE). ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"H264ScanTypeConversionMode"` // Enable this setting to insert I-frames at scene changes that the service // automatically detects. This improves video quality and is enabled by default. 
// If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) // for further video quality improvement. For more information about QVBR, see // https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"H264SceneChangeDetect"` // Number of slices per picture. Must be less than or equal to the number of // macroblock rows for progressive pictures, and less than or equal to half // the number of macroblock rows for interlaced pictures. Slices *int64 `locationName:"slices" min:"1" type:"integer"` // Ignore this setting unless your input frame rate is 23.976 or 24 frames per // second (fps). Enable slow PAL to create a 25 fps output. When you enable // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples // your audio to keep it synchronized with the video. Note that enabling this // setting will slightly reduce the duration of your video. Required settings: // You must also set Framerate to 25. In your JSON job specification, set (framerateControl) // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to // 1. SlowPal *string `locationName:"slowPal" type:"string" enum:"H264SlowPal"` // Ignore this setting unless you need to comply with a specification that requires // a specific value. If you don't have a specification requirement, we recommend // that you adjust the softness of your output by using a lower value for the // setting Sharpness (sharpness) or by enabling a noise reducer filter (noiseReducerFilter). // The Softness (softness) setting specifies the quantization matrices that // the encoder uses. Keep the default value, 0, for flat quantization. Choose // the value 1 or 16 to use the default JVT softening quantization matricies // from the H.264 specification. Choose a value from 17 to 128 to use planar // interpolation. Increasing values from 17 to 128 result in increasing reduction // of high-frequency data. 
The value 128 results in the softest video. Softness *int64 `locationName:"softness" type:"integer"` // Only use this setting when you change the default value, Auto (AUTO), for // the setting H264AdaptiveQuantization. When you keep all defaults, excluding // H264AdaptiveQuantization and all other adaptive quantization from your JSON // job specification, MediaConvert automatically applies the best types of quantization // for your video content. When you set H264AdaptiveQuantization to a value // other than AUTO, the default value for H264SpatialAdaptiveQuantization is // Enabled (ENABLED). Keep this default value to adjust quantization within // each frame based on spatial variation of content complexity. When you enable // this feature, the encoder uses fewer bits on areas that can sustain more // distortion with no noticeable visual degradation and uses more bits on areas // where any small distortion will be noticeable. For example, complex textured // blocks are encoded with fewer bits and smooth textured blocks are encoded // with more bits. Enabling this feature will almost always improve your video // quality. Note, though, that this feature doesn't take into account where // the viewer's attention is likely to be. If viewers are likely to be focusing // their attention on a part of the screen with a lot of complex texture, you // might choose to set H264SpatialAdaptiveQuantization to Disabled (DISABLED). // Related setting: When you enable spatial adaptive quantization, set the value // for Adaptive quantization (H264AdaptiveQuantization) depending on your content. // For homogeneous content, such as cartoons and video games, set it to Low. // For content with a wider variety of textures, set it to High or Higher. To // manually enable or disable H264SpatialAdaptiveQuantization, you must set // Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO. 
SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"H264SpatialAdaptiveQuantization"` // Produces a bitstream compliant with SMPTE RP-2027. Syntax *string `locationName:"syntax" type:"string" enum:"H264Syntax"` // When you do frame rate conversion from 23.976 frames per second (fps) to // 29.97 fps, and your output scan type is interlaced, you can optionally enable // hard or soft telecine to create a smoother picture. Hard telecine (HARD) // produces a 29.97i output. Soft telecine (SOFT) produces an output with a // 23.976 output that signals to the video player device to do the conversion // during play back. When you keep the default value, None (NONE), MediaConvert // does a standard frame rate conversion to 29.97 without doing anything with // the field polarity to create a smoother picture. Telecine *string `locationName:"telecine" type:"string" enum:"H264Telecine"` // Only use this setting when you change the default value, AUTO, for the setting // H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization // and all other adaptive quantization from your JSON job specification, MediaConvert // automatically applies the best types of quantization for your video content. // When you set H264AdaptiveQuantization to a value other than AUTO, the default // value for H264TemporalAdaptiveQuantization is Enabled (ENABLED). Keep this // default value to adjust quantization within each frame based on temporal // variation of content complexity. When you enable this feature, the encoder // uses fewer bits on areas of the frame that aren't moving and uses more bits // on complex objects with sharp edges that move a lot. For example, this feature // improves the readability of text tickers on newscasts and scoreboards on // sports matches. Enabling this feature will almost always improve your video // quality. 
	// Note, though, that this feature doesn't take into account where
	// the viewer's attention is likely to be. If viewers are likely to be focusing
	// their attention on a part of the screen that doesn't have moving objects
	// with sharp edges, such as sports athletes' faces, you might choose to set
	// H264TemporalAdaptiveQuantization to Disabled (DISABLED). Related setting:
	// When you enable temporal quantization, adjust the strength of the filter
	// with the setting Adaptive quantization (adaptiveQuantization). To manually
	// enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive
	// quantization (H264AdaptiveQuantization) to a value other than AUTO.
	TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"H264TemporalAdaptiveQuantization"`

	// Inserts timecode for each frame as 4 bytes of an unregistered SEI message.
	UnregisteredSeiTimecode *string `locationName:"unregisteredSeiTimecode" type:"string" enum:"H264UnregisteredSeiTimecode"`
}

// String returns the string representation of the value, produced by
// awsutil.Prettify.
func (s H264Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is identical to String.
func (s H264Settings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *H264Settings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "H264Settings"} if s.Bitrate != nil && *s.Bitrate < 1000 { invalidParams.Add(request.NewErrParamMinValue("Bitrate", 1000)) } if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1)) } if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 { invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1)) } if s.MaxBitrate != nil && *s.MaxBitrate < 1000 { invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000)) } if s.NumberReferenceFrames != nil && *s.NumberReferenceFrames < 1 { invalidParams.Add(request.NewErrParamMinValue("NumberReferenceFrames", 1)) } if s.ParDenominator != nil && *s.ParDenominator < 1 { invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1)) } if s.ParNumerator != nil && *s.ParNumerator < 1 { invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1)) } if s.Slices != nil && *s.Slices < 1 { invalidParams.Add(request.NewErrParamMinValue("Slices", 1)) } if s.QvbrSettings != nil { if err := s.QvbrSettings.Validate(); err != nil { invalidParams.AddNested("QvbrSettings", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAdaptiveQuantization sets the AdaptiveQuantization field's value. func (s *H264Settings) SetAdaptiveQuantization(v string) *H264Settings { s.AdaptiveQuantization = &v return s } // SetBitrate sets the Bitrate field's value. func (s *H264Settings) SetBitrate(v int64) *H264Settings { s.Bitrate = &v return s } // SetCodecLevel sets the CodecLevel field's value. func (s *H264Settings) SetCodecLevel(v string) *H264Settings { s.CodecLevel = &v return s } // SetCodecProfile sets the CodecProfile field's value. func (s *H264Settings) SetCodecProfile(v string) *H264Settings { s.CodecProfile = &v return s } // SetDynamicSubGop sets the DynamicSubGop field's value. 
func (s *H264Settings) SetDynamicSubGop(v string) *H264Settings { s.DynamicSubGop = &v return s } // SetEntropyEncoding sets the EntropyEncoding field's value. func (s *H264Settings) SetEntropyEncoding(v string) *H264Settings { s.EntropyEncoding = &v return s } // SetFieldEncoding sets the FieldEncoding field's value. func (s *H264Settings) SetFieldEncoding(v string) *H264Settings { s.FieldEncoding = &v return s } // SetFlickerAdaptiveQuantization sets the FlickerAdaptiveQuantization field's value. func (s *H264Settings) SetFlickerAdaptiveQuantization(v string) *H264Settings { s.FlickerAdaptiveQuantization = &v return s } // SetFramerateControl sets the FramerateControl field's value. func (s *H264Settings) SetFramerateControl(v string) *H264Settings { s.FramerateControl = &v return s } // SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value. func (s *H264Settings) SetFramerateConversionAlgorithm(v string) *H264Settings { s.FramerateConversionAlgorithm = &v return s } // SetFramerateDenominator sets the FramerateDenominator field's value. func (s *H264Settings) SetFramerateDenominator(v int64) *H264Settings { s.FramerateDenominator = &v return s } // SetFramerateNumerator sets the FramerateNumerator field's value. func (s *H264Settings) SetFramerateNumerator(v int64) *H264Settings { s.FramerateNumerator = &v return s } // SetGopBReference sets the GopBReference field's value. func (s *H264Settings) SetGopBReference(v string) *H264Settings { s.GopBReference = &v return s } // SetGopClosedCadence sets the GopClosedCadence field's value. func (s *H264Settings) SetGopClosedCadence(v int64) *H264Settings { s.GopClosedCadence = &v return s } // SetGopSize sets the GopSize field's value. func (s *H264Settings) SetGopSize(v float64) *H264Settings { s.GopSize = &v return s } // SetGopSizeUnits sets the GopSizeUnits field's value. 
func (s *H264Settings) SetGopSizeUnits(v string) *H264Settings { s.GopSizeUnits = &v return s } // SetHrdBufferInitialFillPercentage sets the HrdBufferInitialFillPercentage field's value. func (s *H264Settings) SetHrdBufferInitialFillPercentage(v int64) *H264Settings { s.HrdBufferInitialFillPercentage = &v return s } // SetHrdBufferSize sets the HrdBufferSize field's value. func (s *H264Settings) SetHrdBufferSize(v int64) *H264Settings { s.HrdBufferSize = &v return s } // SetInterlaceMode sets the InterlaceMode field's value. func (s *H264Settings) SetInterlaceMode(v string) *H264Settings { s.InterlaceMode = &v return s } // SetMaxBitrate sets the MaxBitrate field's value. func (s *H264Settings) SetMaxBitrate(v int64) *H264Settings { s.MaxBitrate = &v return s } // SetMinIInterval sets the MinIInterval field's value. func (s *H264Settings) SetMinIInterval(v int64) *H264Settings { s.MinIInterval = &v return s } // SetNumberBFramesBetweenReferenceFrames sets the NumberBFramesBetweenReferenceFrames field's value. func (s *H264Settings) SetNumberBFramesBetweenReferenceFrames(v int64) *H264Settings { s.NumberBFramesBetweenReferenceFrames = &v return s } // SetNumberReferenceFrames sets the NumberReferenceFrames field's value. func (s *H264Settings) SetNumberReferenceFrames(v int64) *H264Settings { s.NumberReferenceFrames = &v return s } // SetParControl sets the ParControl field's value. func (s *H264Settings) SetParControl(v string) *H264Settings { s.ParControl = &v return s } // SetParDenominator sets the ParDenominator field's value. func (s *H264Settings) SetParDenominator(v int64) *H264Settings { s.ParDenominator = &v return s } // SetParNumerator sets the ParNumerator field's value. func (s *H264Settings) SetParNumerator(v int64) *H264Settings { s.ParNumerator = &v return s } // SetQualityTuningLevel sets the QualityTuningLevel field's value. 
func (s *H264Settings) SetQualityTuningLevel(v string) *H264Settings { s.QualityTuningLevel = &v return s } // SetQvbrSettings sets the QvbrSettings field's value. func (s *H264Settings) SetQvbrSettings(v *H264QvbrSettings) *H264Settings { s.QvbrSettings = v return s } // SetRateControlMode sets the RateControlMode field's value. func (s *H264Settings) SetRateControlMode(v string) *H264Settings { s.RateControlMode = &v return s } // SetRepeatPps sets the RepeatPps field's value. func (s *H264Settings) SetRepeatPps(v string) *H264Settings { s.RepeatPps = &v return s } // SetScanTypeConversionMode sets the ScanTypeConversionMode field's value. func (s *H264Settings) SetScanTypeConversionMode(v string) *H264Settings { s.ScanTypeConversionMode = &v return s } // SetSceneChangeDetect sets the SceneChangeDetect field's value. func (s *H264Settings) SetSceneChangeDetect(v string) *H264Settings { s.SceneChangeDetect = &v return s } // SetSlices sets the Slices field's value. func (s *H264Settings) SetSlices(v int64) *H264Settings { s.Slices = &v return s } // SetSlowPal sets the SlowPal field's value. func (s *H264Settings) SetSlowPal(v string) *H264Settings { s.SlowPal = &v return s } // SetSoftness sets the Softness field's value. func (s *H264Settings) SetSoftness(v int64) *H264Settings { s.Softness = &v return s } // SetSpatialAdaptiveQuantization sets the SpatialAdaptiveQuantization field's value. func (s *H264Settings) SetSpatialAdaptiveQuantization(v string) *H264Settings { s.SpatialAdaptiveQuantization = &v return s } // SetSyntax sets the Syntax field's value. func (s *H264Settings) SetSyntax(v string) *H264Settings { s.Syntax = &v return s } // SetTelecine sets the Telecine field's value. func (s *H264Settings) SetTelecine(v string) *H264Settings { s.Telecine = &v return s } // SetTemporalAdaptiveQuantization sets the TemporalAdaptiveQuantization field's value. 
func (s *H264Settings) SetTemporalAdaptiveQuantization(v string) *H264Settings {
	s.TemporalAdaptiveQuantization = &v
	return s
}

// SetUnregisteredSeiTimecode sets the UnregisteredSeiTimecode field's value.
func (s *H264Settings) SetUnregisteredSeiTimecode(v string) *H264Settings {
	s.UnregisteredSeiTimecode = &v
	return s
}

// Settings for quality-defined variable bitrate encoding with the H.265 codec.
// Required when you set Rate control mode to QVBR. Not valid when you set Rate
// control mode to a value other than QVBR, or when you don't define Rate control
// mode.
type H265QvbrSettings struct {
	_ struct{} `type:"structure"`

	// Use this setting only when Rate control mode is QVBR and Quality tuning level
	// is Multi-pass HQ. For Max average bitrate values suited to the complexity
	// of your input video, the service limits the average bitrate of the video
	// part of this output to the value that you choose. That is, the total size
	// of the video element is less than or equal to the value you set multiplied
	// by the number of seconds of encoded output.
	MaxAverageBitrate *int64 `locationName:"maxAverageBitrate" min:"1000" type:"integer"`

	// Required when you use QVBR rate control mode. That is, when you specify qvbrSettings
	// within h265Settings. Specify the general target quality level for this output,
	// from 1 to 10. Use higher numbers for greater quality. Level 10 results in
	// nearly lossless compression. The quality level for most broadcast-quality
	// transcodes is between 6 and 9. Optionally, to specify a value between whole
	// numbers, also provide a value for the setting qvbrQualityLevelFineTune. For
	// example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel
	// to 7 and set qvbrQualityLevelFineTune to .33.
	QvbrQualityLevel *int64 `locationName:"qvbrQualityLevel" min:"1" type:"integer"`

	// Optional. Specify a value here to set the QVBR quality to a level that is
	// between whole numbers. For example, if you want your QVBR quality level to
	// be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.
	// MediaConvert rounds your QVBR quality level to the nearest third of a whole
	// number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune
	// to .25, your actual QVBR quality level is 7.33.
	QvbrQualityLevelFineTune *float64 `locationName:"qvbrQualityLevelFineTune" type:"double"`
}

// String returns the string representation of the value, produced by
// awsutil.Prettify.
func (s H265QvbrSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it is identical to String.
func (s H265QvbrSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the min constraints declared in the field tags and returns
// request.ErrInvalidParams on any violation, or nil when all fields are valid.
func (s *H265QvbrSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "H265QvbrSettings"}
	if s.MaxAverageBitrate != nil && *s.MaxAverageBitrate < 1000 {
		invalidParams.Add(request.NewErrParamMinValue("MaxAverageBitrate", 1000))
	}
	if s.QvbrQualityLevel != nil && *s.QvbrQualityLevel < 1 {
		invalidParams.Add(request.NewErrParamMinValue("QvbrQualityLevel", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetMaxAverageBitrate sets the MaxAverageBitrate field's value.
func (s *H265QvbrSettings) SetMaxAverageBitrate(v int64) *H265QvbrSettings {
	s.MaxAverageBitrate = &v
	return s
}

// SetQvbrQualityLevel sets the QvbrQualityLevel field's value.
func (s *H265QvbrSettings) SetQvbrQualityLevel(v int64) *H265QvbrSettings {
	s.QvbrQualityLevel = &v
	return s
}

// SetQvbrQualityLevelFineTune sets the QvbrQualityLevelFineTune field's value.
func (s *H265QvbrSettings) SetQvbrQualityLevelFineTune(v float64) *H265QvbrSettings {
	s.QvbrQualityLevelFineTune = &v
	return s
}

// Settings for H265 codec
type H265Settings struct {
	_ struct{} `type:"structure"`

	// Specify the strength of any adaptive quantization filters that you enable.
// The value that you choose here applies to the following settings: Flicker // adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization // (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization). AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"H265AdaptiveQuantization"` // Enables Alternate Transfer Function SEI message for outputs using Hybrid // Log Gamma (HLG) Electro-Optical Transfer Function (EOTF). AlternateTransferFunctionSei *string `locationName:"alternateTransferFunctionSei" type:"string" enum:"H265AlternateTransferFunctionSei"` // Specify the average bitrate in bits per second. Required for VBR and CBR. // For MS Smooth outputs, bitrates must be unique when rounded down to the nearest // multiple of 1000. Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"` // H.265 Level. CodecLevel *string `locationName:"codecLevel" type:"string" enum:"H265CodecLevel"` // Represents the Profile and Tier, per the HEVC (H.265) specification. Selections // are grouped as [Profile] / [Tier], so "Main/High" represents Main Profile // with High Tier. 4:2:2 profiles are only available with the HEVC 4:2:2 License. CodecProfile *string `locationName:"codecProfile" type:"string" enum:"H265CodecProfile"` // Choose Adaptive to improve subjective video quality for high-motion content. // This will cause the service to use fewer B-frames (which infer information // based on other frames) for high-motion portions of the video and more B-frames // for low-motion portions. The maximum number of B-frames is limited by the // value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames). DynamicSubGop *string `locationName:"dynamicSubGop" type:"string" enum:"H265DynamicSubGop"` // Enable this setting to have the encoder reduce I-frame pop. 
I-frame pop appears // as a visual flicker that can arise when the encoder saves bits by copying // some macroblocks many times from frame to frame, and then refreshes them // at the I-frame. When you enable this setting, the encoder updates these macroblocks // slightly more often to smooth out the flicker. This setting is disabled by // default. Related setting: In addition to enabling this setting, you must // also set adaptiveQuantization to a value other than Off (OFF). FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"H265FlickerAdaptiveQuantization"` // If you are using the console, use the Framerate setting to specify the frame // rate for this output. If you want to keep the same frame rate as the input // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose // Custom, specify your frame rate as a fraction. If you are creating your transcoding // job specification as a JSON file without the console, use FramerateControl // to specify which value the service uses for the frame rate for this output. // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate // from the input. Choose SPECIFIED if you want the service to use the frame // rate you specify in the settings FramerateNumerator and FramerateDenominator. FramerateControl *string `locationName:"framerateControl" type:"string" enum:"H265FramerateControl"` // Choose the method that you want MediaConvert to use when increasing or decreasing // the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically // simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, // you can use interpolate (INTERPOLATE) to avoid stutter. This results in a // smooth picture, but might introduce undesirable video artifacts. 
For complex // frame rate conversions, especially if your source video has already been // converted from its original cadence, use FrameFormer (FRAMEFORMER) to do // motion-compensated interpolation. FrameFormer chooses the best conversion // method frame by frame. Note that using FrameFormer increases the transcoding // time and incurs a significant add-on cost. FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"H265FramerateConversionAlgorithm"` // When you use the API for transcode jobs that use frame rate conversion, specify // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use // FramerateDenominator to specify the denominator of this fraction. In this // example, use 1001 for the value of FramerateDenominator. When you use the // console for transcode jobs that use frame rate conversion, provide the value // as a decimal number for Framerate. In this example, specify 23.976. FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` // When you use the API for transcode jobs that use frame rate conversion, specify // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use // FramerateNumerator to specify the numerator of this fraction. In this example, // use 24000 for the value of FramerateNumerator. When you use the console for // transcode jobs that use frame rate conversion, provide the value as a decimal // number for Framerate. In this example, specify 23.976. FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"` // If enable, use reference B frames for GOP structures that have B frames > // 1. GopBReference *string `locationName:"gopBReference" type:"string" enum:"H265GopBReference"` // Frequency of closed GOPs. In streaming applications, it is recommended that // this be set to 1 so a decoder joining mid-stream will receive an IDR frame // as quickly as possible. 
Setting this value to 0 will break output segmenting. GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"` // GOP Length (keyframe interval) in frames or seconds. Must be greater than // zero. GopSize *float64 `locationName:"gopSize" type:"double"` // Indicates if the GOP Size in H265 is specified in frames or seconds. If seconds // the system will convert the GOP Size into a frame count at run time. GopSizeUnits *string `locationName:"gopSizeUnits" type:"string" enum:"H265GopSizeUnits"` // Percentage of the buffer that should initially be filled (HRD buffer model). HrdBufferInitialFillPercentage *int64 `locationName:"hrdBufferInitialFillPercentage" type:"integer"` // Size of buffer (HRD buffer model) in bits. For example, enter five megabits // as 5000000. HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"` // Choose the scan line type for the output. Keep the default value, Progressive // (PROGRESSIVE) to create a progressive output, regardless of the scan type // of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) // to create an output that's interlaced with the same field polarity throughout. // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) // to produce outputs with the same field polarity as the source. For jobs that // have multiple inputs, the output field polarity might change over the course // of the output. Follow behavior depends on the input scan type. If the source // is interlaced, the output will be interlaced with the same polarity as the // source. If the source is progressive, the output will be interlaced with // top field bottom field first, depending on which of the Follow options you // choose. InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"H265InterlaceMode"` // Maximum bitrate in bits/second. For example, enter five megabits per second // as 5000000. Required when Rate control mode is QVBR. 
MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"` // Enforces separation between repeated (cadence) I-frames and I-frames inserted // by Scene Change Detection. If a scene change I-frame is within I-interval // frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene // change I-frame. GOP stretch requires enabling lookahead as well as setting // I-interval. The normal cadence resumes for the next GOP. This setting is // only used when Scene Change Detect is enabled. Note: Maximum GOP stretch // = GOP size + Min-I-interval - 1 MinIInterval *int64 `locationName:"minIInterval" type:"integer"` // Number of B-frames between reference frames. NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"` // Number of reference frames to use. The encoder may use more than requested // if using B-frames and/or interlaced encoding. NumberReferenceFrames *int64 `locationName:"numberReferenceFrames" min:"1" type:"integer"` // Optional. Specify how the service determines the pixel aspect ratio (PAR) // for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), // uses the PAR from your input video for your output. To specify a different // PAR in the console, choose any value other than Follow source. To specify // a different PAR by editing the JSON job specification, choose SPECIFIED. // When you choose SPECIFIED for this setting, you must also specify values // for the parNumerator and parDenominator settings. ParControl *string `locationName:"parControl" type:"string" enum:"H265ParControl"` // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the // console, this corresponds to any value other than Follow source. When you // specify an output pixel aspect ratio (PAR) that is different from your input // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC // widescreen, you would specify the ratio 40:33. 
In this example, the value // for parDenominator is 33. ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the // console, this corresponds to any value other than Follow source. When you // specify an output pixel aspect ratio (PAR) that is different from your input // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC // widescreen, you would specify the ratio 40:33. In this example, the value // for parNumerator is 40. ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you // want to trade off encoding speed for output video quality. The default behavior // is faster, lower quality, single-pass encoding. QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"H265QualityTuningLevel"` // Settings for quality-defined variable bitrate encoding with the H.265 codec. // Required when you set Rate control mode to QVBR. Not valid when you set Rate // control mode to a value other than QVBR, or when you don't define Rate control // mode. QvbrSettings *H265QvbrSettings `locationName:"qvbrSettings" type:"structure"` // Use this setting to specify whether this output has a variable bitrate (VBR), // constant bitrate (CBR) or quality-defined variable bitrate (QVBR). RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"H265RateControlMode"` // Specify Sample Adaptive Offset (SAO) filter strength. Adaptive mode dynamically // selects best strength based on content SampleAdaptiveOffsetFilterMode *string `locationName:"sampleAdaptiveOffsetFilterMode" type:"string" enum:"H265SampleAdaptiveOffsetFilterMode"` // Use this setting for interlaced outputs, when your output frame rate is half // of your input frame rate. 
In this situation, choose Optimized interlacing // (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this // case, each progressive frame from the input corresponds to an interlaced // field in the output. Keep the default value, Basic interlacing (INTERLACED), // for all other output frame rates. With basic interlacing, MediaConvert performs // any frame rate conversion first and then interlaces the frames. When you // choose Optimized interlacing and you set your output frame rate to a value // that isn't suitable for optimized interlacing, MediaConvert automatically // falls back to basic interlacing. Required settings: To use optimized interlacing, // you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't // use optimized interlacing for hard telecine outputs. You must also set Interlace // mode (interlaceMode) to a value other than Progressive (PROGRESSIVE). ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"H265ScanTypeConversionMode"` // Enable this setting to insert I-frames at scene changes that the service // automatically detects. This improves video quality and is enabled by default. // If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) // for further video quality improvement. For more information about QVBR, see // https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr. SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"H265SceneChangeDetect"` // Number of slices per picture. Must be less than or equal to the number of // macroblock rows for progressive pictures, and less than or equal to half // the number of macroblock rows for interlaced pictures. Slices *int64 `locationName:"slices" min:"1" type:"integer"` // Ignore this setting unless your input frame rate is 23.976 or 24 frames per // second (fps). Enable slow PAL to create a 25 fps output. 
When you enable // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples // your audio to keep it synchronized with the video. Note that enabling this // setting will slightly reduce the duration of your video. Required settings: // You must also set Framerate to 25. In your JSON job specification, set (framerateControl) // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to // 1. SlowPal *string `locationName:"slowPal" type:"string" enum:"H265SlowPal"` // Keep the default value, Enabled (ENABLED), to adjust quantization within // each frame based on spatial variation of content complexity. When you enable // this feature, the encoder uses fewer bits on areas that can sustain more // distortion with no noticeable visual degradation and uses more bits on areas // where any small distortion will be noticeable. For example, complex textured // blocks are encoded with fewer bits and smooth textured blocks are encoded // with more bits. Enabling this feature will almost always improve your video // quality. Note, though, that this feature doesn't take into account where // the viewer's attention is likely to be. If viewers are likely to be focusing // their attention on a part of the screen with a lot of complex texture, you // might choose to disable this feature. Related setting: When you enable spatial // adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) // depending on your content. For homogeneous content, such as cartoons and // video games, set it to Low. For content with a wider variety of textures, // set it to High or Higher. SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"H265SpatialAdaptiveQuantization"` // This field applies only if the Streams > Advanced > Framerate (framerate) // field is set to 29.970. 
This field works with the Streams > Advanced > Preprocessors // > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced // Mode field (interlace_mode) to identify the scan type for the output: Progressive, // Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output // from 23.976 input. - Soft: produces 23.976; the player converts this output // to 29.97i. Telecine *string `locationName:"telecine" type:"string" enum:"H265Telecine"` // Keep the default value, Enabled (ENABLED), to adjust quantization within // each frame based on temporal variation of content complexity. When you enable // this feature, the encoder uses fewer bits on areas of the frame that aren't // moving and uses more bits on complex objects with sharp edges that move a // lot. For example, this feature improves the readability of text tickers on // newscasts and scoreboards on sports matches. Enabling this feature will almost // always improve your video quality. Note, though, that this feature doesn't // take into account where the viewer's attention is likely to be. If viewers // are likely to be focusing their attention on a part of the screen that doesn't // have moving objects with sharp edges, such as sports athletes' faces, you // might choose to disable this feature. Related setting: When you enable temporal // quantization, adjust the strength of the filter with the setting Adaptive // quantization (adaptiveQuantization). TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"H265TemporalAdaptiveQuantization"` // Enables temporal layer identifiers in the encoded bitstream. Up to 3 layers // are supported depending on GOP structure: I- and P-frames form one layer, // reference B-frames can form a second layer and non-reference b-frames can // form a third layer. Decoders can optionally decode only the lower temporal // layers to generate a lower frame rate output. 
For example, given a bitstream // with temporal IDs and with b-frames = 1 (i.e. IbPbPb display order), a decoder // could decode all the frames for full frame rate output or only the I and // P frames (lowest temporal layer) for a half frame rate output. TemporalIds *string `locationName:"temporalIds" type:"string" enum:"H265TemporalIds"` // Enable use of tiles, allowing horizontal as well as vertical subdivision // of the encoded pictures. Tiles *string `locationName:"tiles" type:"string" enum:"H265Tiles"` // Inserts timecode for each frame as 4 bytes of an unregistered SEI message. UnregisteredSeiTimecode *string `locationName:"unregisteredSeiTimecode" type:"string" enum:"H265UnregisteredSeiTimecode"` // If the location of parameter set NAL units doesn't matter in your workflow, // ignore this setting. Use this setting only with CMAF or DASH outputs, or // with standalone file outputs in an MPEG-4 container (MP4 outputs). Choose // HVC1 to mark your output as HVC1. This makes your output compliant with the // following specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 // 3rd Edition. For these outputs, the service stores parameter set NAL units // in the sample headers but not in the samples directly. For MP4 outputs, when // you choose HVC1, your output video might not work properly with some downstream // systems and video players. The service defaults to marking your output as // HEV1. For these outputs, the service writes parameter set NAL units directly // into the samples. WriteMp4PackagingType *string `locationName:"writeMp4PackagingType" type:"string" enum:"H265WriteMp4PackagingType"` } // String returns the string representation func (s H265Settings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s H265Settings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *H265Settings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "H265Settings"}
	// Enforce the minimum-value constraints declared in the struct field tags
	// (min:"1000" for the bitrates, min:"1" for frame-rate fractions, PAR
	// components, reference frames, and slices). Only set (non-nil) fields
	// are checked.
	if s.Bitrate != nil && *s.Bitrate < 1000 {
		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 1000))
	}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1))
	}
	if s.MaxBitrate != nil && *s.MaxBitrate < 1000 {
		invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000))
	}
	if s.NumberReferenceFrames != nil && *s.NumberReferenceFrames < 1 {
		invalidParams.Add(request.NewErrParamMinValue("NumberReferenceFrames", 1))
	}
	if s.ParDenominator != nil && *s.ParDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1))
	}
	if s.ParNumerator != nil && *s.ParNumerator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1))
	}
	if s.Slices != nil && *s.Slices < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Slices", 1))
	}
	// Nested structures validate themselves; any failure is surfaced under
	// this struct's context via AddNested.
	if s.QvbrSettings != nil {
		if err := s.QvbrSettings.Validate(); err != nil {
			invalidParams.AddNested("QvbrSettings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAdaptiveQuantization sets the AdaptiveQuantization field's value.
func (s *H265Settings) SetAdaptiveQuantization(v string) *H265Settings {
	s.AdaptiveQuantization = &v
	return s
}

// SetAlternateTransferFunctionSei sets the AlternateTransferFunctionSei field's value.
func (s *H265Settings) SetAlternateTransferFunctionSei(v string) *H265Settings {
	s.AlternateTransferFunctionSei = &v
	return s
}

// SetBitrate sets the Bitrate field's value.
func (s *H265Settings) SetBitrate(v int64) *H265Settings {
	s.Bitrate = &v
	return s
}

// SetCodecLevel sets the CodecLevel field's value.
func (s *H265Settings) SetCodecLevel(v string) *H265Settings { s.CodecLevel = &v return s } // SetCodecProfile sets the CodecProfile field's value. func (s *H265Settings) SetCodecProfile(v string) *H265Settings { s.CodecProfile = &v return s } // SetDynamicSubGop sets the DynamicSubGop field's value. func (s *H265Settings) SetDynamicSubGop(v string) *H265Settings { s.DynamicSubGop = &v return s } // SetFlickerAdaptiveQuantization sets the FlickerAdaptiveQuantization field's value. func (s *H265Settings) SetFlickerAdaptiveQuantization(v string) *H265Settings { s.FlickerAdaptiveQuantization = &v return s } // SetFramerateControl sets the FramerateControl field's value. func (s *H265Settings) SetFramerateControl(v string) *H265Settings { s.FramerateControl = &v return s } // SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value. func (s *H265Settings) SetFramerateConversionAlgorithm(v string) *H265Settings { s.FramerateConversionAlgorithm = &v return s } // SetFramerateDenominator sets the FramerateDenominator field's value. func (s *H265Settings) SetFramerateDenominator(v int64) *H265Settings { s.FramerateDenominator = &v return s } // SetFramerateNumerator sets the FramerateNumerator field's value. func (s *H265Settings) SetFramerateNumerator(v int64) *H265Settings { s.FramerateNumerator = &v return s } // SetGopBReference sets the GopBReference field's value. func (s *H265Settings) SetGopBReference(v string) *H265Settings { s.GopBReference = &v return s } // SetGopClosedCadence sets the GopClosedCadence field's value. func (s *H265Settings) SetGopClosedCadence(v int64) *H265Settings { s.GopClosedCadence = &v return s } // SetGopSize sets the GopSize field's value. func (s *H265Settings) SetGopSize(v float64) *H265Settings { s.GopSize = &v return s } // SetGopSizeUnits sets the GopSizeUnits field's value. 
func (s *H265Settings) SetGopSizeUnits(v string) *H265Settings { s.GopSizeUnits = &v return s } // SetHrdBufferInitialFillPercentage sets the HrdBufferInitialFillPercentage field's value. func (s *H265Settings) SetHrdBufferInitialFillPercentage(v int64) *H265Settings { s.HrdBufferInitialFillPercentage = &v return s } // SetHrdBufferSize sets the HrdBufferSize field's value. func (s *H265Settings) SetHrdBufferSize(v int64) *H265Settings { s.HrdBufferSize = &v return s } // SetInterlaceMode sets the InterlaceMode field's value. func (s *H265Settings) SetInterlaceMode(v string) *H265Settings { s.InterlaceMode = &v return s } // SetMaxBitrate sets the MaxBitrate field's value. func (s *H265Settings) SetMaxBitrate(v int64) *H265Settings { s.MaxBitrate = &v return s } // SetMinIInterval sets the MinIInterval field's value. func (s *H265Settings) SetMinIInterval(v int64) *H265Settings { s.MinIInterval = &v return s } // SetNumberBFramesBetweenReferenceFrames sets the NumberBFramesBetweenReferenceFrames field's value. func (s *H265Settings) SetNumberBFramesBetweenReferenceFrames(v int64) *H265Settings { s.NumberBFramesBetweenReferenceFrames = &v return s } // SetNumberReferenceFrames sets the NumberReferenceFrames field's value. func (s *H265Settings) SetNumberReferenceFrames(v int64) *H265Settings { s.NumberReferenceFrames = &v return s } // SetParControl sets the ParControl field's value. func (s *H265Settings) SetParControl(v string) *H265Settings { s.ParControl = &v return s } // SetParDenominator sets the ParDenominator field's value. func (s *H265Settings) SetParDenominator(v int64) *H265Settings { s.ParDenominator = &v return s } // SetParNumerator sets the ParNumerator field's value. func (s *H265Settings) SetParNumerator(v int64) *H265Settings { s.ParNumerator = &v return s } // SetQualityTuningLevel sets the QualityTuningLevel field's value. 
func (s *H265Settings) SetQualityTuningLevel(v string) *H265Settings { s.QualityTuningLevel = &v return s } // SetQvbrSettings sets the QvbrSettings field's value. func (s *H265Settings) SetQvbrSettings(v *H265QvbrSettings) *H265Settings { s.QvbrSettings = v return s } // SetRateControlMode sets the RateControlMode field's value. func (s *H265Settings) SetRateControlMode(v string) *H265Settings { s.RateControlMode = &v return s } // SetSampleAdaptiveOffsetFilterMode sets the SampleAdaptiveOffsetFilterMode field's value. func (s *H265Settings) SetSampleAdaptiveOffsetFilterMode(v string) *H265Settings { s.SampleAdaptiveOffsetFilterMode = &v return s } // SetScanTypeConversionMode sets the ScanTypeConversionMode field's value. func (s *H265Settings) SetScanTypeConversionMode(v string) *H265Settings { s.ScanTypeConversionMode = &v return s } // SetSceneChangeDetect sets the SceneChangeDetect field's value. func (s *H265Settings) SetSceneChangeDetect(v string) *H265Settings { s.SceneChangeDetect = &v return s } // SetSlices sets the Slices field's value. func (s *H265Settings) SetSlices(v int64) *H265Settings { s.Slices = &v return s } // SetSlowPal sets the SlowPal field's value. func (s *H265Settings) SetSlowPal(v string) *H265Settings { s.SlowPal = &v return s } // SetSpatialAdaptiveQuantization sets the SpatialAdaptiveQuantization field's value. func (s *H265Settings) SetSpatialAdaptiveQuantization(v string) *H265Settings { s.SpatialAdaptiveQuantization = &v return s } // SetTelecine sets the Telecine field's value. func (s *H265Settings) SetTelecine(v string) *H265Settings { s.Telecine = &v return s } // SetTemporalAdaptiveQuantization sets the TemporalAdaptiveQuantization field's value. func (s *H265Settings) SetTemporalAdaptiveQuantization(v string) *H265Settings { s.TemporalAdaptiveQuantization = &v return s } // SetTemporalIds sets the TemporalIds field's value. 
func (s *H265Settings) SetTemporalIds(v string) *H265Settings {
	s.TemporalIds = &v
	return s
}

// SetTiles sets the Tiles field's value.
func (s *H265Settings) SetTiles(v string) *H265Settings {
	s.Tiles = &v
	return s
}

// SetUnregisteredSeiTimecode sets the UnregisteredSeiTimecode field's value.
func (s *H265Settings) SetUnregisteredSeiTimecode(v string) *H265Settings {
	s.UnregisteredSeiTimecode = &v
	return s
}

// SetWriteMp4PackagingType sets the WriteMp4PackagingType field's value.
func (s *H265Settings) SetWriteMp4PackagingType(v string) *H265Settings {
	s.WriteMp4PackagingType = &v
	return s
}

// Use these settings to specify static color calibration metadata, as defined
// by SMPTE ST 2086. These values don't affect the pixel values that are encoded
// in the video stream. They are intended to help the downstream video player
// display content in a way that reflects the intentions of the content creator.
type Hdr10Metadata struct {
	_ struct{} `type:"structure"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	BluePrimaryX *int64 `locationName:"bluePrimaryX" type:"integer"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	BluePrimaryY *int64 `locationName:"bluePrimaryY" type:"integer"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	GreenPrimaryX *int64 `locationName:"greenPrimaryX" type:"integer"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	GreenPrimaryY *int64 `locationName:"greenPrimaryY" type:"integer"`

	// Maximum light level among all samples in the coded video sequence, in units
	// of candelas per square meter. This setting doesn't have a default value;
	// you must specify a value that is suitable for the content.
	MaxContentLightLevel *int64 `locationName:"maxContentLightLevel" type:"integer"`

	// Maximum average light level of any frame in the coded video sequence, in
	// units of candelas per square meter. This setting doesn't have a default value;
	// you must specify a value that is suitable for the content.
	MaxFrameAverageLightLevel *int64 `locationName:"maxFrameAverageLightLevel" type:"integer"`

	// Nominal maximum mastering display luminance in units of 0.0001 candelas
	// per square meter.
	MaxLuminance *int64 `locationName:"maxLuminance" type:"integer"`

	// Nominal minimum mastering display luminance in units of 0.0001 candelas
	// per square meter.
	MinLuminance *int64 `locationName:"minLuminance" type:"integer"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	RedPrimaryX *int64 `locationName:"redPrimaryX" type:"integer"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	RedPrimaryY *int64 `locationName:"redPrimaryY" type:"integer"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	WhitePointX *int64 `locationName:"whitePointX" type:"integer"`

	// HDR Master Display Information must be provided by a color grader, using
	// color grading tools. Range is 0 to 50,000, each increment represents 0.00002
	// in CIE1931 color coordinate. Note that this setting is not for color correction.
	WhitePointY *int64 `locationName:"whitePointY" type:"integer"`
}

// String returns the string representation
func (s Hdr10Metadata) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Hdr10Metadata) GoString() string {
	return s.String()
}

// SetBluePrimaryX sets the BluePrimaryX field's value.
func (s *Hdr10Metadata) SetBluePrimaryX(v int64) *Hdr10Metadata {
	s.BluePrimaryX = &v
	return s
}

// SetBluePrimaryY sets the BluePrimaryY field's value.
func (s *Hdr10Metadata) SetBluePrimaryY(v int64) *Hdr10Metadata {
	s.BluePrimaryY = &v
	return s
}

// SetGreenPrimaryX sets the GreenPrimaryX field's value.
func (s *Hdr10Metadata) SetGreenPrimaryX(v int64) *Hdr10Metadata {
	s.GreenPrimaryX = &v
	return s
}

// SetGreenPrimaryY sets the GreenPrimaryY field's value.
func (s *Hdr10Metadata) SetGreenPrimaryY(v int64) *Hdr10Metadata {
	s.GreenPrimaryY = &v
	return s
}

// SetMaxContentLightLevel sets the MaxContentLightLevel field's value.
func (s *Hdr10Metadata) SetMaxContentLightLevel(v int64) *Hdr10Metadata {
	s.MaxContentLightLevel = &v
	return s
}

// SetMaxFrameAverageLightLevel sets the MaxFrameAverageLightLevel field's value.
func (s *Hdr10Metadata) SetMaxFrameAverageLightLevel(v int64) *Hdr10Metadata {
	s.MaxFrameAverageLightLevel = &v
	return s
}

// SetMaxLuminance sets the MaxLuminance field's value.
func (s *Hdr10Metadata) SetMaxLuminance(v int64) *Hdr10Metadata {
	s.MaxLuminance = &v
	return s
}

// SetMinLuminance sets the MinLuminance field's value.
func (s *Hdr10Metadata) SetMinLuminance(v int64) *Hdr10Metadata {
	s.MinLuminance = &v
	return s
}

// SetRedPrimaryX sets the RedPrimaryX field's value.
func (s *Hdr10Metadata) SetRedPrimaryX(v int64) *Hdr10Metadata {
	s.RedPrimaryX = &v
	return s
}

// SetRedPrimaryY sets the RedPrimaryY field's value.
func (s *Hdr10Metadata) SetRedPrimaryY(v int64) *Hdr10Metadata {
	s.RedPrimaryY = &v
	return s
}

// SetWhitePointX sets the WhitePointX field's value.
func (s *Hdr10Metadata) SetWhitePointX(v int64) *Hdr10Metadata {
	s.WhitePointX = &v
	return s
}

// SetWhitePointY sets the WhitePointY field's value.
func (s *Hdr10Metadata) SetWhitePointY(v int64) *Hdr10Metadata {
	s.WhitePointY = &v
	return s
}

// Setting for HDR10+ metadata insertion
type Hdr10Plus struct {
	_ struct{} `type:"structure"`

	// Specify the HDR10+ mastering display normalized peak luminance, in nits.
	// This is the normalized actual peak luminance of the mastering display, as
	// defined by ST 2094-40.
	MasteringMonitorNits *int64 `locationName:"masteringMonitorNits" type:"integer"`

	// Specify the HDR10+ target display nominal peak luminance, in nits. This is
	// the nominal maximum luminance of the target display as defined by ST 2094-40.
	TargetMonitorNits *int64 `locationName:"targetMonitorNits" type:"integer"`
}

// String returns the string representation
func (s Hdr10Plus) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Hdr10Plus) GoString() string {
	return s.String()
}

// SetMasteringMonitorNits sets the MasteringMonitorNits field's value.
func (s *Hdr10Plus) SetMasteringMonitorNits(v int64) *Hdr10Plus {
	s.MasteringMonitorNits = &v
	return s
}

// SetTargetMonitorNits sets the TargetMonitorNits field's value.
func (s *Hdr10Plus) SetTargetMonitorNits(v int64) *Hdr10Plus {
	s.TargetMonitorNits = &v
	return s
}

// Specify the details for each additional HLS manifest that you want the service
// to generate for this output group. Each manifest can reference a different
// subset of outputs in the group.
type HlsAdditionalManifest struct {
	_ struct{} `type:"structure"`

	// Specify a name modifier that the service adds to the name of this manifest
	// to make it different from the file names of the other main manifests in the
	// output group. For example, say that the default main manifest for your HLS
	// group is film-name.m3u8. If you enter "-no-premium" for this setting, then
	// the file name the service generates for this top-level manifest is film-name-no-premium.m3u8.
	// For HLS output groups, specify a manifestNameModifier that is different from
	// the nameModifier of the output. The service uses the output name modifier
	// to create unique names for the individual variant manifests.
	ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"`

	// Specify the outputs that you want this additional top-level manifest to reference.
	SelectedOutputs []*string `locationName:"selectedOutputs" type:"list"`
}

// String returns the string representation
func (s HlsAdditionalManifest) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s HlsAdditionalManifest) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *HlsAdditionalManifest) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "HlsAdditionalManifest"}
	// Enforce the min:"1" length constraint from the ManifestNameModifier tag.
	if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ManifestNameModifier", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetManifestNameModifier sets the ManifestNameModifier field's value.
func (s *HlsAdditionalManifest) SetManifestNameModifier(v string) *HlsAdditionalManifest {
	s.ManifestNameModifier = &v
	return s
}

// SetSelectedOutputs sets the SelectedOutputs field's value.
func (s *HlsAdditionalManifest) SetSelectedOutputs(v []*string) *HlsAdditionalManifest {
	s.SelectedOutputs = v
	return s
}

// Caption Language Mapping
type HlsCaptionLanguageMapping struct {
	_ struct{} `type:"structure"`

	// Caption channel.
	CaptionChannel *int64 `locationName:"captionChannel" type:"integer"`

	// Specify the language for this captions channel, using the ISO 639-2 or ISO
	// 639-3 three-letter language code
	CustomLanguageCode *string `locationName:"customLanguageCode" min:"3" type:"string"`

	// Specify the language, using the ISO 639-2 three-letter code listed at https://www.loc.gov/standards/iso639-2/php/code_list.php.
	LanguageCode *string `locationName:"languageCode" type:"string" enum:"LanguageCode"`

	// Caption language description.
	LanguageDescription *string `locationName:"languageDescription" type:"string"`
}

// String returns the string representation
func (s HlsCaptionLanguageMapping) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s HlsCaptionLanguageMapping) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *HlsCaptionLanguageMapping) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "HlsCaptionLanguageMapping"}
	// NOTE: -2.147483648e+09 is the int32 minimum (-2147483648) emitted by the
	// code generator as a float literal; the untyped constant converts exactly
	// in this int64 comparison.
	if s.CaptionChannel != nil && *s.CaptionChannel < -2.147483648e+09 {
		invalidParams.Add(request.NewErrParamMinValue("CaptionChannel", -2.147483648e+09))
	}
	// Enforce the min:"3" length constraint from the CustomLanguageCode tag.
	if s.CustomLanguageCode != nil && len(*s.CustomLanguageCode) < 3 {
		invalidParams.Add(request.NewErrParamMinLen("CustomLanguageCode", 3))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCaptionChannel sets the CaptionChannel field's value.
func (s *HlsCaptionLanguageMapping) SetCaptionChannel(v int64) *HlsCaptionLanguageMapping {
	s.CaptionChannel = &v
	return s
}

// SetCustomLanguageCode sets the CustomLanguageCode field's value.
func (s *HlsCaptionLanguageMapping) SetCustomLanguageCode(v string) *HlsCaptionLanguageMapping {
	s.CustomLanguageCode = &v
	return s
}

// SetLanguageCode sets the LanguageCode field's value.
func (s *HlsCaptionLanguageMapping) SetLanguageCode(v string) *HlsCaptionLanguageMapping {
	s.LanguageCode = &v
	return s
}

// SetLanguageDescription sets the LanguageDescription field's value.
func (s *HlsCaptionLanguageMapping) SetLanguageDescription(v string) *HlsCaptionLanguageMapping {
	s.LanguageDescription = &v
	return s
}

// Settings for HLS encryption
type HlsEncryptionSettings struct {
	_ struct{} `type:"structure"`

	// This is a 128-bit, 16-byte hex value represented by a 32-character text string.
	// If this parameter is not set then the Initialization Vector will follow the
	// segment number by default.
	ConstantInitializationVector *string `locationName:"constantInitializationVector" min:"32" type:"string"`

	// Encrypts the segments with the given encryption scheme. Leave blank to disable.
	// Selecting 'Disabled' in the web interface also disables encryption.
	EncryptionMethod *string `locationName:"encryptionMethod" type:"string" enum:"HlsEncryptionType"`

	// The Initialization Vector is a 128-bit number used in conjunction with the
	// key for encrypting blocks. If set to INCLUDE, Initialization Vector is listed
	// in the manifest. Otherwise Initialization Vector is not in the manifest.
	InitializationVectorInManifest *string `locationName:"initializationVectorInManifest" type:"string" enum:"HlsInitializationVectorInManifest"`

	// Enable this setting to insert the EXT-X-SESSION-KEY element into the master
	// playlist. This allows for offline Apple HLS FairPlay content protection.
	OfflineEncrypted *string `locationName:"offlineEncrypted" type:"string" enum:"HlsOfflineEncrypted"`

	// If your output group type is HLS, DASH, or Microsoft Smooth, use these settings
	// when doing DRM encryption with a SPEKE-compliant key provider. If your output
	// group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
	SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure"`

	// Use these settings to set up encryption with a static key provider.
	StaticKeyProvider *StaticKeyProvider `locationName:"staticKeyProvider" type:"structure"`

	// Specify whether your DRM encryption key is static or from a key provider
	// that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.
	Type *string `locationName:"type" type:"string" enum:"HlsKeyProviderType"`
}

// String returns the string representation
func (s HlsEncryptionSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s HlsEncryptionSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Only the constant IV's 32-character minimum is checked client-side; the
// nested key-provider structures have no modeled constraints of their own.
func (s *HlsEncryptionSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "HlsEncryptionSettings"}
	if s.ConstantInitializationVector != nil && len(*s.ConstantInitializationVector) < 32 {
		invalidParams.Add(request.NewErrParamMinLen("ConstantInitializationVector", 32))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetConstantInitializationVector sets the ConstantInitializationVector field's value.
func (s *HlsEncryptionSettings) SetConstantInitializationVector(v string) *HlsEncryptionSettings {
	s.ConstantInitializationVector = &v
	return s
}

// SetEncryptionMethod sets the EncryptionMethod field's value.
func (s *HlsEncryptionSettings) SetEncryptionMethod(v string) *HlsEncryptionSettings {
	s.EncryptionMethod = &v
	return s
}

// SetInitializationVectorInManifest sets the InitializationVectorInManifest field's value.
func (s *HlsEncryptionSettings) SetInitializationVectorInManifest(v string) *HlsEncryptionSettings {
	s.InitializationVectorInManifest = &v
	return s
}

// SetOfflineEncrypted sets the OfflineEncrypted field's value.
func (s *HlsEncryptionSettings) SetOfflineEncrypted(v string) *HlsEncryptionSettings {
	s.OfflineEncrypted = &v
	return s
}

// SetSpekeKeyProvider sets the SpekeKeyProvider field's value.
func (s *HlsEncryptionSettings) SetSpekeKeyProvider(v *SpekeKeyProvider) *HlsEncryptionSettings {
	s.SpekeKeyProvider = v
	return s
}

// SetStaticKeyProvider sets the StaticKeyProvider field's value.
func (s *HlsEncryptionSettings) SetStaticKeyProvider(v *StaticKeyProvider) *HlsEncryptionSettings {
	s.StaticKeyProvider = v
	return s
}

// SetType sets the Type field's value.
func (s *HlsEncryptionSettings) SetType(v string) *HlsEncryptionSettings {
	s.Type = &v
	return s
}

// Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set Type, under OutputGroupSettings, to
// HLS_GROUP_SETTINGS.
type HlsGroupSettings struct {
	_ struct{} `type:"structure"`

	// Choose one or more ad marker types to decorate your Apple HLS manifest. This
	// setting does not determine whether SCTE-35 markers appear in the outputs
	// themselves.
	AdMarkers []*string `locationName:"adMarkers" type:"list"`

	// By default, the service creates one top-level .m3u8 HLS manifest for each
	// HLS output group in your job. This default manifest references every output
	// in the output group. To create additional top-level manifests that reference
	// a subset of the outputs in the output group, specify a list of them here.
	AdditionalManifests []*HlsAdditionalManifest `locationName:"additionalManifests" type:"list"`

	// Ignore this setting unless you are using FairPlay DRM with Verimatrix and
	// you encounter playback issues. Keep the default value, Include (INCLUDE),
	// to output audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only
	// headers from your audio segments.
	AudioOnlyHeader *string `locationName:"audioOnlyHeader" type:"string" enum:"HlsAudioOnlyHeader"`

	// A partial URI prefix that will be prepended to each output in the media .m3u8
	// file. Can be used if base manifest is delivered from a different URL than
	// the main .m3u8 file.
	BaseUrl *string `locationName:"baseUrl" type:"string"`

	// Language to be used on Caption outputs
	CaptionLanguageMappings []*HlsCaptionLanguageMapping `locationName:"captionLanguageMappings" type:"list"`

	// Applies only to 608 Embedded output captions. Insert: Include CLOSED-CAPTIONS
	// lines in the manifest. Specify at least one language in the CC1 Language
	// Code field. One CLOSED-CAPTION line is added for each Language Code you specify.
	// Make sure to specify the languages in the order in which they appear in the
	// original source (if the source is embedded format) or the order of the caption
	// selectors (if the source is other than embedded). Otherwise, languages in
	// the manifest will not match up properly with the output captions. None: Include
	// CLOSED-CAPTIONS=NONE line in the manifest. Omit: Omit any CLOSED-CAPTIONS
	// line from the manifest.
	CaptionLanguageSetting *string `locationName:"captionLanguageSetting" type:"string" enum:"HlsCaptionLanguageSetting"`

	// Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no
	// tag. Otherwise, keep the default value Enabled (ENABLED) and control caching
	// in your video distribution set up. For example, use the Cache-Control http
	// header.
	ClientCache *string `locationName:"clientCache" type:"string" enum:"HlsClientCache"`

	// Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist
	// generation.
	CodecSpecification *string `locationName:"codecSpecification" type:"string" enum:"HlsCodecSpecification"`

	// Use Destination (Destination) to specify the S3 output location and the output
	// filename base. Destination accepts format identifiers. If you do not specify
	// the base filename in the URI, the service will use the filename of the input
	// file. If your job has multiple inputs, the service uses the filename of the
	// first input file.
	Destination *string `locationName:"destination" type:"string"`

	// Settings associated with the destination. Will vary based on the type of
	// destination
	DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"`

	// Indicates whether segments should be placed in subdirectories.
	DirectoryStructure *string `locationName:"directoryStructure" type:"string" enum:"HlsDirectoryStructure"`

	// DRM settings.
	Encryption *HlsEncryptionSettings `locationName:"encryption" type:"structure"`

	// Specify whether MediaConvert generates images for trick play. Keep the default
	// value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL)
	// to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME)
	// to generate tiled thumbnails and full-resolution images of single frames.
	// MediaConvert creates a child manifest for each set of images that you generate
	// and adds corresponding entries to the parent manifest. A common application
	// for these images is Roku trick mode. The thumbnails and full-frame images
	// that MediaConvert creates with this feature are compatible with this Roku
	// specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md
	ImageBasedTrickPlay *string `locationName:"imageBasedTrickPlay" type:"string" enum:"HlsImageBasedTrickPlay"`

	// When set to GZIP, compresses HLS playlist.
	ManifestCompression *string `locationName:"manifestCompression" type:"string" enum:"HlsManifestCompression"`

	// Indicates whether the output manifest should use floating point values for
	// segment duration.
	ManifestDurationFormat *string `locationName:"manifestDurationFormat" type:"string" enum:"HlsManifestDurationFormat"`

	// Keep this setting at the default value of 0, unless you are troubleshooting
	// a problem with how devices play back the end of your video asset. If you
	// know that player devices are hanging on the final segment of your video because
	// the length of your final segment is too short, use this setting to specify
	// a minimum final segment length, in seconds. Choose a value that is greater
	// than or equal to 1 and less than your segment length. When you specify a
	// value for this setting, the encoder will combine any final segment that is
	// shorter than the length that you specify with the previous segment. For example,
	// your segment length is 3 seconds and your final segment is .5 seconds without
	// a minimum final segment length; when you set the minimum final segment length
	// to 1, your final segment is 3.5 seconds.
	MinFinalSegmentLength *float64 `locationName:"minFinalSegmentLength" type:"double"`

	// When set, Minimum Segment Size is enforced by looking ahead and back within
	// the specified range for a nearby avail and extending the segment size if
	// needed.
	MinSegmentLength *int64 `locationName:"minSegmentLength" type:"integer"`

	// Indicates whether the .m3u8 manifest file should be generated for this HLS
	// output group.
	OutputSelection *string `locationName:"outputSelection" type:"string" enum:"HlsOutputSelection"`

	// Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files.
	// The value is calculated as follows: either the program date and time are
	// initialized using the input timecode source, or the time is initialized using
	// the input timecode source and the date is initialized using the timestamp_offset.
	ProgramDateTime *string `locationName:"programDateTime" type:"string" enum:"HlsProgramDateTime"`

	// Period of insertion of EXT-X-PROGRAM-DATE-TIME entry, in seconds.
	ProgramDateTimePeriod *int64 `locationName:"programDateTimePeriod" type:"integer"`

	// When set to SINGLE_FILE, emits program as a single media resource (.ts) file,
	// uses #EXT-X-BYTERANGE tags to index segment for playback.
	SegmentControl *string `locationName:"segmentControl" type:"string" enum:"HlsSegmentControl"`

	// Length of MPEG-2 Transport Stream segments to create (in seconds). Note that
	// segments will end on the next keyframe after this number of seconds, so actual
	// segment length may be longer.
	SegmentLength *int64 `locationName:"segmentLength" min:"1" type:"integer"`

	// Number of segments to write to a subdirectory before starting a new one.
	// directoryStructure must be SINGLE_DIRECTORY for this setting to have an effect.
	SegmentsPerSubdirectory *int64 `locationName:"segmentsPerSubdirectory" min:"1" type:"integer"`

	// Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag
	// of variant manifest.
	StreamInfResolution *string `locationName:"streamInfResolution" type:"string" enum:"HlsStreamInfResolution"`

	// Indicates ID3 frame that has the timecode.
	TimedMetadataId3Frame *string `locationName:"timedMetadataId3Frame" type:"string" enum:"HlsTimedMetadataId3Frame"`

	// Timed Metadata interval in seconds.
	TimedMetadataId3Period *int64 `locationName:"timedMetadataId3Period" type:"integer"`

	// Provides an extra millisecond delta offset to fine tune the timestamps.
	TimestampDeltaMilliseconds *int64 `locationName:"timestampDeltaMilliseconds" type:"integer"`
}

// String returns the string representation
func (s HlsGroupSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s HlsGroupSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It checks this struct's own min-value constraints (the -2.147483648e+09
// literals are exactly math.MinInt32, the wire type's lower bound) and then
// recurses into the validatable nested members, collecting every violation
// into a single ErrInvalidParams.
func (s *HlsGroupSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "HlsGroupSettings"}
	if s.SegmentLength != nil && *s.SegmentLength < 1 {
		invalidParams.Add(request.NewErrParamMinValue("SegmentLength", 1))
	}
	if s.SegmentsPerSubdirectory != nil && *s.SegmentsPerSubdirectory < 1 {
		invalidParams.Add(request.NewErrParamMinValue("SegmentsPerSubdirectory", 1))
	}
	if s.TimedMetadataId3Period != nil && *s.TimedMetadataId3Period < -2.147483648e+09 {
		invalidParams.Add(request.NewErrParamMinValue("TimedMetadataId3Period", -2.147483648e+09))
	}
	if s.TimestampDeltaMilliseconds != nil && *s.TimestampDeltaMilliseconds < -2.147483648e+09 {
		invalidParams.Add(request.NewErrParamMinValue("TimestampDeltaMilliseconds", -2.147483648e+09))
	}
	if s.AdditionalManifests != nil {
		for i, v := range s.AdditionalManifests {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.CaptionLanguageMappings != nil {
		for i, v := range s.CaptionLanguageMappings {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionLanguageMappings", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.Encryption != nil {
		if err := s.Encryption.Validate(); err != nil {
			invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAdMarkers sets the AdMarkers field's value.
// Fluent setters: each takes a plain value, stores its address in the struct
// field, and returns the receiver so calls can be chained.
func (s *HlsGroupSettings) SetAdMarkers(v []*string) *HlsGroupSettings {
	s.AdMarkers = v
	return s
}

// SetAdditionalManifests sets the AdditionalManifests field's value.
func (s *HlsGroupSettings) SetAdditionalManifests(v []*HlsAdditionalManifest) *HlsGroupSettings {
	s.AdditionalManifests = v
	return s
}

// SetAudioOnlyHeader sets the AudioOnlyHeader field's value.
func (s *HlsGroupSettings) SetAudioOnlyHeader(v string) *HlsGroupSettings {
	s.AudioOnlyHeader = &v
	return s
}

// SetBaseUrl sets the BaseUrl field's value.
func (s *HlsGroupSettings) SetBaseUrl(v string) *HlsGroupSettings {
	s.BaseUrl = &v
	return s
}

// SetCaptionLanguageMappings sets the CaptionLanguageMappings field's value.
func (s *HlsGroupSettings) SetCaptionLanguageMappings(v []*HlsCaptionLanguageMapping) *HlsGroupSettings {
	s.CaptionLanguageMappings = v
	return s
}

// SetCaptionLanguageSetting sets the CaptionLanguageSetting field's value.
func (s *HlsGroupSettings) SetCaptionLanguageSetting(v string) *HlsGroupSettings {
	s.CaptionLanguageSetting = &v
	return s
}

// SetClientCache sets the ClientCache field's value.
func (s *HlsGroupSettings) SetClientCache(v string) *HlsGroupSettings {
	s.ClientCache = &v
	return s
}

// SetCodecSpecification sets the CodecSpecification field's value.
func (s *HlsGroupSettings) SetCodecSpecification(v string) *HlsGroupSettings {
	s.CodecSpecification = &v
	return s
}

// SetDestination sets the Destination field's value.
func (s *HlsGroupSettings) SetDestination(v string) *HlsGroupSettings {
	s.Destination = &v
	return s
}

// SetDestinationSettings sets the DestinationSettings field's value.
func (s *HlsGroupSettings) SetDestinationSettings(v *DestinationSettings) *HlsGroupSettings {
	s.DestinationSettings = v
	return s
}

// SetDirectoryStructure sets the DirectoryStructure field's value.
func (s *HlsGroupSettings) SetDirectoryStructure(v string) *HlsGroupSettings {
	s.DirectoryStructure = &v
	return s
}

// SetEncryption sets the Encryption field's value.
func (s *HlsGroupSettings) SetEncryption(v *HlsEncryptionSettings) *HlsGroupSettings {
	s.Encryption = v
	return s
}

// SetImageBasedTrickPlay sets the ImageBasedTrickPlay field's value.
func (s *HlsGroupSettings) SetImageBasedTrickPlay(v string) *HlsGroupSettings {
	s.ImageBasedTrickPlay = &v
	return s
}

// SetManifestCompression sets the ManifestCompression field's value.
func (s *HlsGroupSettings) SetManifestCompression(v string) *HlsGroupSettings {
	s.ManifestCompression = &v
	return s
}

// SetManifestDurationFormat sets the ManifestDurationFormat field's value.
func (s *HlsGroupSettings) SetManifestDurationFormat(v string) *HlsGroupSettings {
	s.ManifestDurationFormat = &v
	return s
}

// SetMinFinalSegmentLength sets the MinFinalSegmentLength field's value.
func (s *HlsGroupSettings) SetMinFinalSegmentLength(v float64) *HlsGroupSettings {
	s.MinFinalSegmentLength = &v
	return s
}

// SetMinSegmentLength sets the MinSegmentLength field's value.
func (s *HlsGroupSettings) SetMinSegmentLength(v int64) *HlsGroupSettings {
	s.MinSegmentLength = &v
	return s
}

// SetOutputSelection sets the OutputSelection field's value.
func (s *HlsGroupSettings) SetOutputSelection(v string) *HlsGroupSettings {
	s.OutputSelection = &v
	return s
}

// SetProgramDateTime sets the ProgramDateTime field's value.
func (s *HlsGroupSettings) SetProgramDateTime(v string) *HlsGroupSettings {
	s.ProgramDateTime = &v
	return s
}

// SetProgramDateTimePeriod sets the ProgramDateTimePeriod field's value.
func (s *HlsGroupSettings) SetProgramDateTimePeriod(v int64) *HlsGroupSettings {
	s.ProgramDateTimePeriod = &v
	return s
}

// SetSegmentControl sets the SegmentControl field's value.
func (s *HlsGroupSettings) SetSegmentControl(v string) *HlsGroupSettings {
	s.SegmentControl = &v
	return s
}

// SetSegmentLength sets the SegmentLength field's value.
func (s *HlsGroupSettings) SetSegmentLength(v int64) *HlsGroupSettings {
	s.SegmentLength = &v
	return s
}

// SetSegmentsPerSubdirectory sets the SegmentsPerSubdirectory field's value.
func (s *HlsGroupSettings) SetSegmentsPerSubdirectory(v int64) *HlsGroupSettings {
	s.SegmentsPerSubdirectory = &v
	return s
}

// SetStreamInfResolution sets the StreamInfResolution field's value.
func (s *HlsGroupSettings) SetStreamInfResolution(v string) *HlsGroupSettings {
	s.StreamInfResolution = &v
	return s
}

// SetTimedMetadataId3Frame sets the TimedMetadataId3Frame field's value.
func (s *HlsGroupSettings) SetTimedMetadataId3Frame(v string) *HlsGroupSettings {
	s.TimedMetadataId3Frame = &v
	return s
}

// SetTimedMetadataId3Period sets the TimedMetadataId3Period field's value.
func (s *HlsGroupSettings) SetTimedMetadataId3Period(v int64) *HlsGroupSettings {
	s.TimedMetadataId3Period = &v
	return s
}

// SetTimestampDeltaMilliseconds sets the TimestampDeltaMilliseconds field's value.
func (s *HlsGroupSettings) SetTimestampDeltaMilliseconds(v int64) *HlsGroupSettings {
	s.TimestampDeltaMilliseconds = &v
	return s
}

// Settings specific to audio sources in an HLS alternate rendition group. Specify
// the properties (renditionGroupId, renditionName or renditionLanguageCode)
// to identify the unique audio track among the alternative rendition groups
// present in the HLS manifest. If no unique track is found, or multiple tracks
// match the properties provided, the job fails. If no properties in hlsRenditionGroupSettings
// are specified, the default audio track within the video segment is chosen.
// If there is no audio within video segment, the alternative audio with DEFAULT=YES
// is chosen instead.
type HlsRenditionGroupSettings struct {
	_ struct{} `type:"structure"`

	// Optional. Specify alternative group ID
	RenditionGroupId *string `locationName:"renditionGroupId" type:"string"`

	// Optional. Specify ISO 639-2 or ISO 639-3 code in the language property
	RenditionLanguageCode *string `locationName:"renditionLanguageCode" type:"string" enum:"LanguageCode"`

	// Optional. Specify media name
	RenditionName *string `locationName:"renditionName" type:"string"`
}

// String returns the string representation
func (s HlsRenditionGroupSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s HlsRenditionGroupSettings) GoString() string {
	return s.String()
}

// SetRenditionGroupId sets the RenditionGroupId field's value.
func (s *HlsRenditionGroupSettings) SetRenditionGroupId(v string) *HlsRenditionGroupSettings {
	s.RenditionGroupId = &v
	return s
}

// SetRenditionLanguageCode sets the RenditionLanguageCode field's value.
func (s *HlsRenditionGroupSettings) SetRenditionLanguageCode(v string) *HlsRenditionGroupSettings {
	s.RenditionLanguageCode = &v
	return s
}

// SetRenditionName sets the RenditionName field's value.
func (s *HlsRenditionGroupSettings) SetRenditionName(v string) *HlsRenditionGroupSettings {
	s.RenditionName = &v
	return s
}

// Settings for HLS output groups
type HlsSettings struct {
	_ struct{} `type:"structure"`

	// Specifies the group to which the audio rendition belongs.
	AudioGroupId *string `locationName:"audioGroupId" type:"string"`

	// Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream
	// (M2TS) to create a file in an MPEG2-TS container. Keep the default value
	// Automatic (AUTOMATIC) to create an audio-only file in a raw container. Regardless
	// of the value that you specify here, if this output has video, the service
	// will place the output into an MPEG2-TS container.
	AudioOnlyContainer *string `locationName:"audioOnlyContainer" type:"string" enum:"HlsAudioOnlyContainer"`

	// List all the audio groups that are used with the video output stream. Input
	// all the audio GROUP-IDs that are associated to the video, separate by ','.
	AudioRenditionSets *string `locationName:"audioRenditionSets" type:"string"`

	// Four types of audio-only tracks are supported: Audio-Only Variant Stream
	// The client can play back this audio-only stream instead of video in low-bandwidth
	// scenarios. Represented as an EXT-X-STREAM-INF in the HLS manifest. Alternate
	// Audio, Auto Select, Default Alternate rendition that the client should try
	// to play back by default. Represented as an EXT-X-MEDIA in the HLS manifest
	// with DEFAULT=YES, AUTOSELECT=YES Alternate Audio, Auto Select, Not Default
	// Alternate rendition that the client may try to play back by default. Represented
	// as an EXT-X-MEDIA in the HLS manifest with DEFAULT=NO, AUTOSELECT=YES Alternate
	// Audio, not Auto Select Alternate rendition that the client will not try to
	// play back by default. Represented as an EXT-X-MEDIA in the HLS manifest with
	// DEFAULT=NO, AUTOSELECT=NO
	AudioTrackType *string `locationName:"audioTrackType" type:"string" enum:"HlsAudioTrackType"`

	// Specify whether to flag this audio track as descriptive video service (DVS)
	// in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes
	// the parameter CHARACTERISTICS="public.accessibility.describes-video" in the
	// EXT-X-MEDIA entry for this track. When you keep the default choice, Don't
	// flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can
	// help with accessibility on Apple devices. For more information, see the Apple
	// documentation.
	DescriptiveVideoServiceFlag *string `locationName:"descriptiveVideoServiceFlag" type:"string" enum:"HlsDescriptiveVideoServiceFlag"`

	// Choose Include (INCLUDE) to have MediaConvert generate a child manifest that
	// lists only the I-frames for this rendition, in addition to your regular manifest
	// for this rendition. You might use this manifest as part of a workflow that
	// creates preview functions for your video. MediaConvert adds both the I-frame
	// only child manifest and the regular child manifest to the parent manifest.
	// When you don't need the I-frame only child manifest, keep the default value
	// Exclude (EXCLUDE).
	IFrameOnlyManifest *string `locationName:"iFrameOnlyManifest" type:"string" enum:"HlsIFrameOnlyManifest"`

	// Use this setting to add an identifying string to the filename of each segment.
	// The service adds this string between the name modifier and segment index
	// number. You can use format identifiers in the string. For more information,
	// see https://docs.aws.amazon.com/mediaconvert/latest/ug/using-variables-in-your-job-settings.html
	SegmentModifier *string `locationName:"segmentModifier" type:"string"`
}

// String returns the string representation
func (s HlsSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s HlsSettings) GoString() string {
	return s.String()
}

// SetAudioGroupId sets the AudioGroupId field's value.
func (s *HlsSettings) SetAudioGroupId(v string) *HlsSettings {
	s.AudioGroupId = &v
	return s
}

// SetAudioOnlyContainer sets the AudioOnlyContainer field's value.
func (s *HlsSettings) SetAudioOnlyContainer(v string) *HlsSettings {
	s.AudioOnlyContainer = &v
	return s
}

// SetAudioRenditionSets sets the AudioRenditionSets field's value.
func (s *HlsSettings) SetAudioRenditionSets(v string) *HlsSettings {
	s.AudioRenditionSets = &v
	return s
}

// SetAudioTrackType sets the AudioTrackType field's value.
func (s *HlsSettings) SetAudioTrackType(v string) *HlsSettings {
	s.AudioTrackType = &v
	return s
}

// SetDescriptiveVideoServiceFlag sets the DescriptiveVideoServiceFlag field's value.
func (s *HlsSettings) SetDescriptiveVideoServiceFlag(v string) *HlsSettings {
	s.DescriptiveVideoServiceFlag = &v
	return s
}

// SetIFrameOnlyManifest sets the IFrameOnlyManifest field's value.
func (s *HlsSettings) SetIFrameOnlyManifest(v string) *HlsSettings {
	s.IFrameOnlyManifest = &v
	return s
}

// SetSegmentModifier sets the SegmentModifier field's value.
func (s *HlsSettings) SetSegmentModifier(v string) *HlsSettings {
	s.SegmentModifier = &v
	return s
}

// Optional. Configuration for a destination queue to which the job can hop
// once a customer-defined minimum wait time has passed.
type HopDestination struct {
	_ struct{} `type:"structure"`

	// Optional. When you set up a job to use queue hopping, you can specify a different
	// relative priority for the job in the destination queue. If you don't specify,
	// the relative priority will remain the same as in the previous queue.
	Priority *int64 `locationName:"priority" type:"integer"`

	// Optional unless the job is submitted on the default queue. When you set up
	// a job to use queue hopping, you can specify a destination queue. This queue
	// cannot be the original queue to which the job is submitted. If the original
	// queue isn't the default queue and you don't specify the destination queue,
	// the job will move to the default queue.
	Queue *string `locationName:"queue" type:"string"`

	// Required for setting up a job to use queue hopping. Minimum wait time in
	// minutes until the job can hop to the destination queue. Valid range is 1
	// to 1440 minutes, inclusive.
	WaitMinutes *int64 `locationName:"waitMinutes" type:"integer"`
}

// String returns the string representation
func (s HopDestination) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s HopDestination) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *HopDestination) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "HopDestination"}
	// Only the model-declared minimum (-50) for Priority is checked here.
	// NOTE(review): the documented Priority maximum (50) and the WaitMinutes
	// 1-1440 range are not in the generated constraints — presumably enforced
	// server-side; confirm against the MediaConvert API model.
	if s.Priority != nil && *s.Priority < -50 {
		invalidParams.Add(request.NewErrParamMinValue("Priority", -50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetPriority sets the Priority field's value.
func (s *HopDestination) SetPriority(v int64) *HopDestination {
	s.Priority = &v
	return s
}

// SetQueue sets the Queue field's value.
func (s *HopDestination) SetQueue(v string) *HopDestination {
	s.Queue = &v
	return s
}

// SetWaitMinutes sets the WaitMinutes field's value.
func (s *HopDestination) SetWaitMinutes(v int64) *HopDestination {
	s.WaitMinutes = &v
	return s
}

// To insert ID3 tags in your output, specify two values. Use ID3 tag (Id3)
// to specify the base 64 encoded string and use Timecode (TimeCode) to specify
// the time when the tag should be inserted. To insert multiple ID3 tags in
// your output, create multiple instances of ID3 insertion (Id3Insertion).
type Id3Insertion struct {
	_ struct{} `type:"structure"`

	// Use ID3 tag (Id3) to provide a tag value in base64-encode format.
	Id3 *string `locationName:"id3" type:"string"`

	// Provide a Timecode (TimeCode) in HH:MM:SS:FF or HH:MM:SS;FF format.
	Timecode *string `locationName:"timecode" type:"string"`
}

// String returns the string representation
func (s Id3Insertion) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Id3Insertion) GoString() string {
	return s.String()
}

// SetId3 sets the Id3 field's value.
func (s *Id3Insertion) SetId3(v string) *Id3Insertion {
	s.Id3 = &v
	return s
}

// SetTimecode sets the Timecode field's value.
func (s *Id3Insertion) SetTimecode(v string) *Id3Insertion {
	s.Timecode = &v
	return s
}

// Use the image inserter feature to include a graphic overlay on your video.
// Enable or disable this feature for each input or output individually.
For // more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/graphic-overlay.html. // This setting is disabled by default. type ImageInserter struct { _ struct{} `type:"structure"` // Specify the images that you want to overlay on your video. The images must // be PNG or TGA files. InsertableImages []*InsertableImage `locationName:"insertableImages" type:"list"` } // String returns the string representation func (s ImageInserter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ImageInserter) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *ImageInserter) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ImageInserter"} if s.InsertableImages != nil { for i, v := range s.InsertableImages { if v == nil { continue } if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InsertableImages", i), err.(request.ErrInvalidParams)) } } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetInsertableImages sets the InsertableImages field's value. func (s *ImageInserter) SetInsertableImages(v []*InsertableImage) *ImageInserter { s.InsertableImages = v return s } // Settings related to IMSC captions. IMSC is a sidecar format that holds captions // in a file that is separate from the video container. Set up sidecar captions // in the same output group, but different output from your video. For more // information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. // When you work directly in your JSON job specification, include this object // and any required children when you set destinationType to IMSC. type ImscDestinationSettings struct { _ struct{} `type:"structure"` // Keep this setting enabled to have MediaConvert use the font style and position // information from the captions source in the output. 
This option is available // only when your input captions are IMSC, SMPTE-TT, or TTML. Disable this setting // for simplified output captions. StylePassthrough *string `locationName:"stylePassthrough" type:"string" enum:"ImscStylePassthrough"` } // String returns the string representation func (s ImscDestinationSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ImscDestinationSettings) GoString() string { return s.String() } // SetStylePassthrough sets the StylePassthrough field's value. func (s *ImscDestinationSettings) SetStylePassthrough(v string) *ImscDestinationSettings { s.StylePassthrough = &v return s } // Use inputs to define the source files used in your transcoding job. For more // information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/specify-input-settings.html. // You can use multiple video inputs to do input stitching. For more information, // see https://docs.aws.amazon.com/mediaconvert/latest/ug/assembling-multiple-inputs-and-input-clips.html type Input struct { _ struct{} `type:"structure"` // Use audio selector groups to combine multiple sidecar audio inputs so that // you can assign them to a single output audio tab (AudioDescription). Note // that, if you're working with embedded audio, it's simpler to assign multiple // input tracks into a single audio selector rather than use an audio selector // group. AudioSelectorGroups map[string]*AudioSelectorGroup `locationName:"audioSelectorGroups" type:"map"` // Use Audio selectors (AudioSelectors) to specify a track or set of tracks // from the input that you will use in your outputs. You can use multiple Audio // selectors per input. AudioSelectors map[string]*AudioSelector `locationName:"audioSelectors" type:"map"` // Use captions selectors to specify the captions data from your input that // you use in your outputs. You can use up to 20 captions selectors per input. 
CaptionSelectors map[string]*CaptionSelector `locationName:"captionSelectors" type:"map"` // Use Cropping selection (crop) to specify the video area that the service // will include in the output video frame. If you specify a value here, it will // override any value that you specify in the output setting Cropping selection // (crop). Crop *Rectangle `locationName:"crop" type:"structure"` // Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. // Default is disabled. Only manually controllable for MPEG2 and uncompressed // video inputs. DeblockFilter *string `locationName:"deblockFilter" type:"string" enum:"InputDeblockFilter"` // Settings for decrypting any input files that you encrypt before you upload // them to Amazon S3. MediaConvert can decrypt files only when you use AWS Key // Management Service (KMS) to encrypt the data key that you use to encrypt // your content. DecryptionSettings *InputDecryptionSettings `locationName:"decryptionSettings" type:"structure"` // Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default // is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video // inputs. DenoiseFilter *string `locationName:"denoiseFilter" type:"string" enum:"InputDenoiseFilter"` // Specify the source file for your transcoding job. You can use multiple inputs // in a single job. The service concatenates these inputs, in the order that // you specify them in the job, to create the outputs. If your input format // is IMF, specify your input by providing the path to your CPL. For example, // "s3://bucket/vf/cpl.xml". If the CPL is in an incomplete IMP, make sure to // use *Supplemental IMPs* (SupplementalImps) to specify any supplemental IMPs // that contain assets referenced by the CPL. FileInput *string `locationName:"fileInput" type:"string"` // Specify how the transcoding service applies the denoise and deblock filters. 
	// You must also enable the filters separately, with Denoise (InputDenoiseFilter)
	// and Deblock (InputDeblockFilter). * Auto - The transcoding service determines
	// whether to apply filtering, depending on input type and quality. * Disable
	// - The input is not filtered. This is true even if you use the API to enable
	// them in (InputDeblockFilter) and (InputDenoiseFilter). * Force - The input
	// is filtered regardless of input type.
	FilterEnable *string `locationName:"filterEnable" type:"string" enum:"InputFilterEnable"`

	// Use Filter strength (FilterStrength) to adjust the magnitude of the input
	// filter settings (Deblock and Denoise). The range is -5 to 5. Default is 0.
	FilterStrength *int64 `locationName:"filterStrength" type:"integer"`

	// Enable the image inserter feature to include a graphic overlay on your video.
	// Enable or disable this feature for each input individually. This setting
	// is disabled by default.
	ImageInserter *ImageInserter `locationName:"imageInserter" type:"structure"`

	// (InputClippings) contains sets of start and end times that together specify
	// a portion of the input to be used in the outputs. If you provide only a start
	// time, the clip will be the entire input from that point to the end. If you
	// provide only an end time, it will be the entire input up to that point. When
	// you specify more than one input clip, the transcoding service creates the
	// job outputs by stringing the clips together in the order you specify them.
	InputClippings []*InputClipping `locationName:"inputClippings" type:"list"`

	// When you have a progressive segmented frame (PsF) input, use this setting
	// to flag the input as PsF. MediaConvert doesn't automatically detect PsF.
	// Therefore, flagging your input as PsF results in better preservation of video
	// quality when you do deinterlacing and frame rate conversion. If you don't
	// specify, the default value is Auto (AUTO). Auto is the correct setting for
	// all inputs that are not PsF.
Don't set this value to PsF when your input // is interlaced. Doing so creates horizontal interlacing artifacts. InputScanType *string `locationName:"inputScanType" type:"string" enum:"InputScanType"` // Use Selection placement (position) to define the video area in your output // frame. The area outside of the rectangle that you specify here is black. // If you specify a value here, it will override any value that you specify // in the output setting Selection placement (position). If you specify a value // here, this will override any AFD values in your input, even if you set Respond // to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here, // this will ignore anything that you specify for the setting Scaling Behavior // (scalingBehavior). Position *Rectangle `locationName:"position" type:"structure"` // Use Program (programNumber) to select a specific program from within a multi-program // transport stream. Note that Quad 4K is not currently supported. Default is // the first program within the transport stream. If the program you specify // doesn't exist, the transcoding service will use this default. ProgramNumber *int64 `locationName:"programNumber" min:"1" type:"integer"` // Set PSI control (InputPsiControl) for transport stream inputs to specify // which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio // and video. * Use PSI - Scan only PSI data. PsiControl *string `locationName:"psiControl" type:"string" enum:"InputPsiControl"` // Provide a list of any necessary supplemental IMPs. You need supplemental // IMPs if the CPL that you're using for your input is in an incomplete IMP. // Specify either the supplemental IMP directories with a trailing slash or // the ASSETMAP.xml files. For example ["s3://bucket/ov/", "s3://bucket/vf2/ASSETMAP.xml"]. // You don't need to specify the IMP that contains your input CPL, because the // service automatically detects it. 
SupplementalImps []*string `locationName:"supplementalImps" type:"list"` // Use this Timecode source setting, located under the input settings (InputTimecodeSource), // to specify how the service counts input video frames. This input frame count // affects only the behavior of features that apply to a single input at a time, // such as input clipping and synchronizing some captions formats. Choose Embedded // (EMBEDDED) to use the timecodes in your input video. Choose Start at zero // (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) // to start the first frame at the timecode that you specify in the setting // Start timecode (timecodeStart). If you don't specify a value for Timecode // source, the service will use Embedded by default. For more information about // timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. TimecodeSource *string `locationName:"timecodeSource" type:"string" enum:"InputTimecodeSource"` // Specify the timecode that you want the service to use for this input's initial // frame. To use this setting, you must set the Timecode source setting, located // under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). // For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. TimecodeStart *string `locationName:"timecodeStart" min:"11" type:"string"` // Input video selectors contain the video settings for the input. Each of your // inputs can have up to one video selector. VideoSelector *VideoSelector `locationName:"videoSelector" type:"structure"` } // String returns the string representation func (s Input) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Input) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *Input) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Input"}
	// Scalar constraints come from the API model: FilterStrength >= -5,
	// ProgramNumber >= 1, TimecodeStart at least 11 characters (HH:MM:SS:FF).
	if s.FilterStrength != nil && *s.FilterStrength < -5 {
		invalidParams.Add(request.NewErrParamMinValue("FilterStrength", -5))
	}
	if s.ProgramNumber != nil && *s.ProgramNumber < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ProgramNumber", 1))
	}
	if s.TimecodeStart != nil && len(*s.TimecodeStart) < 11 {
		invalidParams.Add(request.NewErrParamMinLen("TimecodeStart", 11))
	}
	// Nested structures validate recursively; nil map entries are skipped
	// rather than reported, matching the generated-SDK convention.
	if s.AudioSelectors != nil {
		for i, v := range s.AudioSelectors {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AudioSelectors", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.CaptionSelectors != nil {
		for i, v := range s.CaptionSelectors {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionSelectors", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.Crop != nil {
		if err := s.Crop.Validate(); err != nil {
			invalidParams.AddNested("Crop", err.(request.ErrInvalidParams))
		}
	}
	if s.DecryptionSettings != nil {
		if err := s.DecryptionSettings.Validate(); err != nil {
			invalidParams.AddNested("DecryptionSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.ImageInserter != nil {
		if err := s.ImageInserter.Validate(); err != nil {
			invalidParams.AddNested("ImageInserter", err.(request.ErrInvalidParams))
		}
	}
	if s.Position != nil {
		if err := s.Position.Validate(); err != nil {
			invalidParams.AddNested("Position", err.(request.ErrInvalidParams))
		}
	}
	if s.VideoSelector != nil {
		if err := s.VideoSelector.Validate(); err != nil {
			invalidParams.AddNested("VideoSelector", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAudioSelectorGroups sets the AudioSelectorGroups field's value.
func (s *Input) SetAudioSelectorGroups(v map[string]*AudioSelectorGroup) *Input { s.AudioSelectorGroups = v return s } // SetAudioSelectors sets the AudioSelectors field's value. func (s *Input) SetAudioSelectors(v map[string]*AudioSelector) *Input { s.AudioSelectors = v return s } // SetCaptionSelectors sets the CaptionSelectors field's value. func (s *Input) SetCaptionSelectors(v map[string]*CaptionSelector) *Input { s.CaptionSelectors = v return s } // SetCrop sets the Crop field's value. func (s *Input) SetCrop(v *Rectangle) *Input { s.Crop = v return s } // SetDeblockFilter sets the DeblockFilter field's value. func (s *Input) SetDeblockFilter(v string) *Input { s.DeblockFilter = &v return s } // SetDecryptionSettings sets the DecryptionSettings field's value. func (s *Input) SetDecryptionSettings(v *InputDecryptionSettings) *Input { s.DecryptionSettings = v return s } // SetDenoiseFilter sets the DenoiseFilter field's value. func (s *Input) SetDenoiseFilter(v string) *Input { s.DenoiseFilter = &v return s } // SetFileInput sets the FileInput field's value. func (s *Input) SetFileInput(v string) *Input { s.FileInput = &v return s } // SetFilterEnable sets the FilterEnable field's value. func (s *Input) SetFilterEnable(v string) *Input { s.FilterEnable = &v return s } // SetFilterStrength sets the FilterStrength field's value. func (s *Input) SetFilterStrength(v int64) *Input { s.FilterStrength = &v return s } // SetImageInserter sets the ImageInserter field's value. func (s *Input) SetImageInserter(v *ImageInserter) *Input { s.ImageInserter = v return s } // SetInputClippings sets the InputClippings field's value. func (s *Input) SetInputClippings(v []*InputClipping) *Input { s.InputClippings = v return s } // SetInputScanType sets the InputScanType field's value. func (s *Input) SetInputScanType(v string) *Input { s.InputScanType = &v return s } // SetPosition sets the Position field's value. 
func (s *Input) SetPosition(v *Rectangle) *Input { s.Position = v return s } // SetProgramNumber sets the ProgramNumber field's value. func (s *Input) SetProgramNumber(v int64) *Input { s.ProgramNumber = &v return s } // SetPsiControl sets the PsiControl field's value. func (s *Input) SetPsiControl(v string) *Input { s.PsiControl = &v return s } // SetSupplementalImps sets the SupplementalImps field's value. func (s *Input) SetSupplementalImps(v []*string) *Input { s.SupplementalImps = v return s } // SetTimecodeSource sets the TimecodeSource field's value. func (s *Input) SetTimecodeSource(v string) *Input { s.TimecodeSource = &v return s } // SetTimecodeStart sets the TimecodeStart field's value. func (s *Input) SetTimecodeStart(v string) *Input { s.TimecodeStart = &v return s } // SetVideoSelector sets the VideoSelector field's value. func (s *Input) SetVideoSelector(v *VideoSelector) *Input { s.VideoSelector = v return s } // To transcode only portions of your input, include one input clip for each // part of your input that you want in your output. All input clips that you // specify will be included in every output of the job. For more information, // see https://docs.aws.amazon.com/mediaconvert/latest/ug/assembling-multiple-inputs-and-input-clips.html. type InputClipping struct { _ struct{} `type:"structure"` // Set End timecode (EndTimecode) to the end of the portion of the input you // are clipping. The frame corresponding to the End timecode value is included // in the clip. Start timecode or End timecode may be left blank, but not both. // Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the // minute, SS is the second, and FF is the frame number. When choosing this // value, take into account your setting for timecode source under input settings // (InputTimecodeSource). For example, if you have embedded timecodes that start // at 01:00:00:00 and you want your clip to end six minutes into the video, // use 01:06:00:00. 
EndTimecode *string `locationName:"endTimecode" type:"string"` // Set Start timecode (StartTimecode) to the beginning of the portion of the // input you are clipping. The frame corresponding to the Start timecode value // is included in the clip. Start timecode or End timecode may be left blank, // but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the // hour, MM is the minute, SS is the second, and FF is the frame number. When // choosing this value, take into account your setting for Input timecode source. // For example, if you have embedded timecodes that start at 01:00:00:00 and // you want your clip to begin five minutes into the video, use 01:05:00:00. StartTimecode *string `locationName:"startTimecode" type:"string"` } // String returns the string representation func (s InputClipping) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s InputClipping) GoString() string { return s.String() } // SetEndTimecode sets the EndTimecode field's value. func (s *InputClipping) SetEndTimecode(v string) *InputClipping { s.EndTimecode = &v return s } // SetStartTimecode sets the StartTimecode field's value. func (s *InputClipping) SetStartTimecode(v string) *InputClipping { s.StartTimecode = &v return s } // Settings for decrypting any input files that you encrypt before you upload // them to Amazon S3. MediaConvert can decrypt files only when you use AWS Key // Management Service (KMS) to encrypt the data key that you use to encrypt // your content. type InputDecryptionSettings struct { _ struct{} `type:"structure"` // Specify the encryption mode that you used to encrypt your input files. DecryptionMode *string `locationName:"decryptionMode" type:"string" enum:"DecryptionMode"` // Warning! Don't provide your encryption key in plaintext. Your job settings // could be intercepted, making your encrypted content vulnerable. 
	// Specify the
	// encrypted version of the data key that you used to encrypt your content.
	// The data key must be encrypted by AWS Key Management Service (KMS). The key
	// can be 128, 192, or 256 bits.
	EncryptedDecryptionKey *string `locationName:"encryptedDecryptionKey" min:"24" type:"string"`

	// Specify the initialization vector that you used when you encrypted your content
	// before uploading it to Amazon S3. You can use a 16-byte initialization vector
	// with any encryption mode. Or, you can use a 12-byte initialization vector
	// with GCM or CTR. MediaConvert accepts only initialization vectors that are
	// base64-encoded.
	InitializationVector *string `locationName:"initializationVector" min:"16" type:"string"`

	// Specify the AWS Region for AWS Key Management Service (KMS) that you used
	// to encrypt your data key, if that Region is different from the one you are
	// using for AWS Elemental MediaConvert.
	KmsKeyRegion *string `locationName:"kmsKeyRegion" min:"9" type:"string"`
}

// String returns the string representation
func (s InputDecryptionSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InputDecryptionSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *InputDecryptionSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "InputDecryptionSettings"}
	// NOTE: len(*string) counts bytes of the base64-encoded value, not the
	// decoded key/IV size; the minimums (24, 16, 9) are the model-declared
	// minimum string lengths.
	if s.EncryptedDecryptionKey != nil && len(*s.EncryptedDecryptionKey) < 24 {
		invalidParams.Add(request.NewErrParamMinLen("EncryptedDecryptionKey", 24))
	}
	if s.InitializationVector != nil && len(*s.InitializationVector) < 16 {
		invalidParams.Add(request.NewErrParamMinLen("InitializationVector", 16))
	}
	if s.KmsKeyRegion != nil && len(*s.KmsKeyRegion) < 9 {
		invalidParams.Add(request.NewErrParamMinLen("KmsKeyRegion", 9))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDecryptionMode sets the DecryptionMode field's value.
func (s *InputDecryptionSettings) SetDecryptionMode(v string) *InputDecryptionSettings { s.DecryptionMode = &v return s } // SetEncryptedDecryptionKey sets the EncryptedDecryptionKey field's value. func (s *InputDecryptionSettings) SetEncryptedDecryptionKey(v string) *InputDecryptionSettings { s.EncryptedDecryptionKey = &v return s } // SetInitializationVector sets the InitializationVector field's value. func (s *InputDecryptionSettings) SetInitializationVector(v string) *InputDecryptionSettings { s.InitializationVector = &v return s } // SetKmsKeyRegion sets the KmsKeyRegion field's value. func (s *InputDecryptionSettings) SetKmsKeyRegion(v string) *InputDecryptionSettings { s.KmsKeyRegion = &v return s } // Specified video input in a template. type InputTemplate struct { _ struct{} `type:"structure"` // Use audio selector groups to combine multiple sidecar audio inputs so that // you can assign them to a single output audio tab (AudioDescription). Note // that, if you're working with embedded audio, it's simpler to assign multiple // input tracks into a single audio selector rather than use an audio selector // group. AudioSelectorGroups map[string]*AudioSelectorGroup `locationName:"audioSelectorGroups" type:"map"` // Use Audio selectors (AudioSelectors) to specify a track or set of tracks // from the input that you will use in your outputs. You can use multiple Audio // selectors per input. AudioSelectors map[string]*AudioSelector `locationName:"audioSelectors" type:"map"` // Use captions selectors to specify the captions data from your input that // you use in your outputs. You can use up to 20 captions selectors per input. CaptionSelectors map[string]*CaptionSelector `locationName:"captionSelectors" type:"map"` // Use Cropping selection (crop) to specify the video area that the service // will include in the output video frame. If you specify a value here, it will // override any value that you specify in the output setting Cropping selection // (crop). 
	Crop *Rectangle `locationName:"crop" type:"structure"`

	// Enable Deblock (InputDeblockFilter) to produce smoother motion in the output.
	// Default is disabled. Only manually controllable for MPEG2 and uncompressed
	// video inputs.
	DeblockFilter *string `locationName:"deblockFilter" type:"string" enum:"InputDeblockFilter"`

	// Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default
	// is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video
	// inputs.
	DenoiseFilter *string `locationName:"denoiseFilter" type:"string" enum:"InputDenoiseFilter"`

	// Specify how the transcoding service applies the denoise and deblock filters.
	// You must also enable the filters separately, with Denoise (InputDenoiseFilter)
	// and Deblock (InputDeblockFilter). * Auto - The transcoding service determines
	// whether to apply filtering, depending on input type and quality. * Disable
	// - The input is not filtered. This is true even if you use the API to enable
	// them in (InputDeblockFilter) and (InputDenoiseFilter). * Force - The input
	// is filtered regardless of input type.
	FilterEnable *string `locationName:"filterEnable" type:"string" enum:"InputFilterEnable"`

	// Use Filter strength (FilterStrength) to adjust the magnitude of the input
	// filter settings (Deblock and Denoise). The range is -5 to 5. Default is 0.
	FilterStrength *int64 `locationName:"filterStrength" type:"integer"`

	// Enable the image inserter feature to include a graphic overlay on your video.
	// Enable or disable this feature for each input individually. This setting
	// is disabled by default.
	ImageInserter *ImageInserter `locationName:"imageInserter" type:"structure"`

	// (InputClippings) contains sets of start and end times that together specify
	// a portion of the input to be used in the outputs. If you provide only a start
	// time, the clip will be the entire input from that point to the end. If you
	// provide only an end time, it will be the entire input up to that point.
When // you specify more than one input clip, the transcoding service creates the // job outputs by stringing the clips together in the order you specify them. InputClippings []*InputClipping `locationName:"inputClippings" type:"list"` // When you have a progressive segmented frame (PsF) input, use this setting // to flag the input as PsF. MediaConvert doesn't automatically detect PsF. // Therefore, flagging your input as PsF results in better preservation of video // quality when you do deinterlacing and frame rate conversion. If you don't // specify, the default value is Auto (AUTO). Auto is the correct setting for // all inputs that are not PsF. Don't set this value to PsF when your input // is interlaced. Doing so creates horizontal interlacing artifacts. InputScanType *string `locationName:"inputScanType" type:"string" enum:"InputScanType"` // Use Selection placement (position) to define the video area in your output // frame. The area outside of the rectangle that you specify here is black. // If you specify a value here, it will override any value that you specify // in the output setting Selection placement (position). If you specify a value // here, this will override any AFD values in your input, even if you set Respond // to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here, // this will ignore anything that you specify for the setting Scaling Behavior // (scalingBehavior). Position *Rectangle `locationName:"position" type:"structure"` // Use Program (programNumber) to select a specific program from within a multi-program // transport stream. Note that Quad 4K is not currently supported. Default is // the first program within the transport stream. If the program you specify // doesn't exist, the transcoding service will use this default. ProgramNumber *int64 `locationName:"programNumber" min:"1" type:"integer"` // Set PSI control (InputPsiControl) for transport stream inputs to specify // which data the demux process to scans. 
* Ignore PSI - Scan all PIDs for audio // and video. * Use PSI - Scan only PSI data. PsiControl *string `locationName:"psiControl" type:"string" enum:"InputPsiControl"` // Use this Timecode source setting, located under the input settings (InputTimecodeSource), // to specify how the service counts input video frames. This input frame count // affects only the behavior of features that apply to a single input at a time, // such as input clipping and synchronizing some captions formats. Choose Embedded // (EMBEDDED) to use the timecodes in your input video. Choose Start at zero // (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) // to start the first frame at the timecode that you specify in the setting // Start timecode (timecodeStart). If you don't specify a value for Timecode // source, the service will use Embedded by default. For more information about // timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. TimecodeSource *string `locationName:"timecodeSource" type:"string" enum:"InputTimecodeSource"` // Specify the timecode that you want the service to use for this input's initial // frame. To use this setting, you must set the Timecode source setting, located // under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). // For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode. TimecodeStart *string `locationName:"timecodeStart" min:"11" type:"string"` // Input video selectors contain the video settings for the input. Each of your // inputs can have up to one video selector. 
	VideoSelector *VideoSelector `locationName:"videoSelector" type:"structure"`
}

// String returns the string representation
func (s InputTemplate) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InputTemplate) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *InputTemplate) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "InputTemplate"}
	// Same model-derived checks as Input.Validate, minus DecryptionSettings:
	// InputTemplate (used in job templates) has no decryption or file-input
	// fields.
	if s.FilterStrength != nil && *s.FilterStrength < -5 {
		invalidParams.Add(request.NewErrParamMinValue("FilterStrength", -5))
	}
	if s.ProgramNumber != nil && *s.ProgramNumber < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ProgramNumber", 1))
	}
	if s.TimecodeStart != nil && len(*s.TimecodeStart) < 11 {
		invalidParams.Add(request.NewErrParamMinLen("TimecodeStart", 11))
	}
	if s.AudioSelectors != nil {
		for i, v := range s.AudioSelectors {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AudioSelectors", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.CaptionSelectors != nil {
		for i, v := range s.CaptionSelectors {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionSelectors", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.Crop != nil {
		if err := s.Crop.Validate(); err != nil {
			invalidParams.AddNested("Crop", err.(request.ErrInvalidParams))
		}
	}
	if s.ImageInserter != nil {
		if err := s.ImageInserter.Validate(); err != nil {
			invalidParams.AddNested("ImageInserter", err.(request.ErrInvalidParams))
		}
	}
	if s.Position != nil {
		if err := s.Position.Validate(); err != nil {
			invalidParams.AddNested("Position", err.(request.ErrInvalidParams))
		}
	}
	if s.VideoSelector != nil {
		if err := s.VideoSelector.Validate(); err != nil {
			invalidParams.AddNested("VideoSelector", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

//
SetAudioSelectorGroups sets the AudioSelectorGroups field's value. func (s *InputTemplate) SetAudioSelectorGroups(v map[string]*AudioSelectorGroup) *InputTemplate { s.AudioSelectorGroups = v return s } // SetAudioSelectors sets the AudioSelectors field's value. func (s *InputTemplate) SetAudioSelectors(v map[string]*AudioSelector) *InputTemplate { s.AudioSelectors = v return s } // SetCaptionSelectors sets the CaptionSelectors field's value. func (s *InputTemplate) SetCaptionSelectors(v map[string]*CaptionSelector) *InputTemplate { s.CaptionSelectors = v return s } // SetCrop sets the Crop field's value. func (s *InputTemplate) SetCrop(v *Rectangle) *InputTemplate { s.Crop = v return s } // SetDeblockFilter sets the DeblockFilter field's value. func (s *InputTemplate) SetDeblockFilter(v string) *InputTemplate { s.DeblockFilter = &v return s } // SetDenoiseFilter sets the DenoiseFilter field's value. func (s *InputTemplate) SetDenoiseFilter(v string) *InputTemplate { s.DenoiseFilter = &v return s } // SetFilterEnable sets the FilterEnable field's value. func (s *InputTemplate) SetFilterEnable(v string) *InputTemplate { s.FilterEnable = &v return s } // SetFilterStrength sets the FilterStrength field's value. func (s *InputTemplate) SetFilterStrength(v int64) *InputTemplate { s.FilterStrength = &v return s } // SetImageInserter sets the ImageInserter field's value. func (s *InputTemplate) SetImageInserter(v *ImageInserter) *InputTemplate { s.ImageInserter = v return s } // SetInputClippings sets the InputClippings field's value. func (s *InputTemplate) SetInputClippings(v []*InputClipping) *InputTemplate { s.InputClippings = v return s } // SetInputScanType sets the InputScanType field's value. func (s *InputTemplate) SetInputScanType(v string) *InputTemplate { s.InputScanType = &v return s } // SetPosition sets the Position field's value. 
func (s *InputTemplate) SetPosition(v *Rectangle) *InputTemplate {
	s.Position = v
	return s
}

// SetProgramNumber sets the ProgramNumber field's value.
func (s *InputTemplate) SetProgramNumber(v int64) *InputTemplate {
	s.ProgramNumber = &v
	return s
}

// SetPsiControl sets the PsiControl field's value.
func (s *InputTemplate) SetPsiControl(v string) *InputTemplate {
	s.PsiControl = &v
	return s
}

// SetTimecodeSource sets the TimecodeSource field's value.
func (s *InputTemplate) SetTimecodeSource(v string) *InputTemplate {
	s.TimecodeSource = &v
	return s
}

// SetTimecodeStart sets the TimecodeStart field's value.
func (s *InputTemplate) SetTimecodeStart(v string) *InputTemplate {
	s.TimecodeStart = &v
	return s
}

// SetVideoSelector sets the VideoSelector field's value.
func (s *InputTemplate) SetVideoSelector(v *VideoSelector) *InputTemplate {
	s.VideoSelector = v
	return s
}

// These settings apply to a specific graphic overlay. You can include multiple
// overlays in your job.
type InsertableImage struct {
	_ struct{} `type:"structure"`

	// Specify the time, in milliseconds, for the image to remain on the output
	// video. This duration includes fade-in time but not fade-out time.
	Duration *int64 `locationName:"duration" type:"integer"`

	// Specify the length of time, in milliseconds, between the Start time that
	// you specify for the image insertion and the time that the image appears at
	// full opacity. Full opacity is the level that you specify for the opacity
	// setting. If you don't specify a value for Fade-in, the image will appear
	// abruptly at the overlay start time.
	FadeIn *int64 `locationName:"fadeIn" type:"integer"`

	// Specify the length of time, in milliseconds, between the end of the time
	// that you have specified for the image overlay Duration and when the overlaid
	// image has faded to total transparency. If you don't specify a value for Fade-out,
	// the image will disappear abruptly at the end of the inserted image duration.
	FadeOut *int64 `locationName:"fadeOut" type:"integer"`

	// Specify the height of the inserted image in pixels. If you specify a value
	// that's larger than the video resolution height, the service will crop your
	// overlaid image to fit. To use the native height of the image, keep this setting
	// blank.
	Height *int64 `locationName:"height" type:"integer"`

	// Specify the HTTP, HTTPS, or Amazon S3 location of the image that you want
	// to overlay on the video. Use a PNG or TGA file.
	ImageInserterInput *string `locationName:"imageInserterInput" min:"14" type:"string"`

	// Specify the distance, in pixels, between the inserted image and the left
	// edge of the video frame. Required for any image overlay that you specify.
	ImageX *int64 `locationName:"imageX" type:"integer"`

	// Specify the distance, in pixels, between the overlaid image and the top edge
	// of the video frame. Required for any image overlay that you specify.
	ImageY *int64 `locationName:"imageY" type:"integer"`

	// Specify how overlapping inserted images appear. Images with higher values
	// for Layer appear on top of images with lower values for Layer.
	Layer *int64 `locationName:"layer" type:"integer"`

	// Use Opacity (Opacity) to specify how much of the underlying video shows through
	// the inserted image. 0 is transparent and 100 is fully opaque. Default is
	// 50.
	Opacity *int64 `locationName:"opacity" type:"integer"`

	// Specify the timecode of the frame that you want the overlay to first appear
	// on. This must be in timecode (HH:MM:SS:FF or HH:MM:SS;FF) format. Remember
	// to take into account your timecode source settings.
	StartTime *string `locationName:"startTime" type:"string"`

	// Specify the width of the inserted image in pixels. If you specify a value
	// that's larger than the video resolution width, the service will crop your
	// overlaid image to fit. To use the native width of the image, keep this setting
	// blank.
	Width *int64 `locationName:"width" type:"integer"`
}

// String returns the string representation
func (s InsertableImage) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InsertableImage) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// The only constraint enforced client-side is the minimum length on
// ImageInserterInput, matching the min:"14" tag on the field.
func (s *InsertableImage) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "InsertableImage"}
	if s.ImageInserterInput != nil && len(*s.ImageInserterInput) < 14 {
		invalidParams.Add(request.NewErrParamMinLen("ImageInserterInput", 14))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDuration sets the Duration field's value.
func (s *InsertableImage) SetDuration(v int64) *InsertableImage {
	s.Duration = &v
	return s
}

// SetFadeIn sets the FadeIn field's value.
func (s *InsertableImage) SetFadeIn(v int64) *InsertableImage {
	s.FadeIn = &v
	return s
}

// SetFadeOut sets the FadeOut field's value.
func (s *InsertableImage) SetFadeOut(v int64) *InsertableImage {
	s.FadeOut = &v
	return s
}

// SetHeight sets the Height field's value.
func (s *InsertableImage) SetHeight(v int64) *InsertableImage {
	s.Height = &v
	return s
}

// SetImageInserterInput sets the ImageInserterInput field's value.
func (s *InsertableImage) SetImageInserterInput(v string) *InsertableImage {
	s.ImageInserterInput = &v
	return s
}

// SetImageX sets the ImageX field's value.
func (s *InsertableImage) SetImageX(v int64) *InsertableImage {
	s.ImageX = &v
	return s
}

// SetImageY sets the ImageY field's value.
func (s *InsertableImage) SetImageY(v int64) *InsertableImage {
	s.ImageY = &v
	return s
}

// SetLayer sets the Layer field's value.
func (s *InsertableImage) SetLayer(v int64) *InsertableImage {
	s.Layer = &v
	return s
}

// SetOpacity sets the Opacity field's value.
func (s *InsertableImage) SetOpacity(v int64) *InsertableImage {
	s.Opacity = &v
	return s
}

// SetStartTime sets the StartTime field's value.
func (s *InsertableImage) SetStartTime(v string) *InsertableImage {
	s.StartTime = &v
	return s
}

// SetWidth sets the Width field's value.
func (s *InsertableImage) SetWidth(v int64) *InsertableImage {
	s.Width = &v
	return s
}

// InternalServerErrorException is the service error returned for an internal
// server failure. It satisfies the awserr.Error and awserr.RequestFailure
// interfaces through the methods below.
type InternalServerErrorException struct {
	_            struct{}                  `type:"structure"`
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s InternalServerErrorException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InternalServerErrorException) GoString() string {
	return s.String()
}

// newErrorInternalServerErrorException constructs the error from the response
// metadata captured by the protocol unmarshaler.
func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error {
	return &InternalServerErrorException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *InternalServerErrorException) Code() string {
	return "InternalServerErrorException"
}

// Message returns the exception's message, or "" when no message was set.
func (s *InternalServerErrorException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *InternalServerErrorException) OrigErr() error {
	return nil
}

// Error formats the exception as "Code: Message".
func (s *InternalServerErrorException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *InternalServerErrorException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *InternalServerErrorException) RequestID() string {
	return s.RespMetadata.RequestID
}

// Each job converts an input file into an output file or files. For more information,
// see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
type Job struct {
	_ struct{} `type:"structure"`

	// Accelerated transcoding can significantly speed up jobs with long, visually
	// complex content.
	AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"`

	// Describes whether the current job is running with accelerated transcoding.
	// For jobs that have Acceleration (AccelerationMode) set to DISABLED, AccelerationStatus
	// is always NOT_APPLICABLE. For jobs that have Acceleration (AccelerationMode)
	// set to ENABLED or PREFERRED, AccelerationStatus is one of the other states.
	// AccelerationStatus is IN_PROGRESS initially, while the service determines
	// whether the input files and job settings are compatible with accelerated
	// transcoding. If they are, AccelerationStatus is ACCELERATED. If your input
	// files and job settings aren't compatible with accelerated transcoding, the
	// service either fails your job or runs it without accelerated transcoding,
	// depending on how you set Acceleration (AccelerationMode). When the service
	// runs your job without accelerated transcoding, AccelerationStatus is NOT_ACCELERATED.
	AccelerationStatus *string `locationName:"accelerationStatus" type:"string" enum:"AccelerationStatus"`

	// An identifier for this resource that is unique within all of AWS.
	Arn *string `locationName:"arn" type:"string"`

	// The tag type that AWS Billing and Cost Management will use to sort your AWS
	// Elemental MediaConvert costs on any billing report that you set up.
	BillingTagsSource *string `locationName:"billingTagsSource" type:"string" enum:"BillingTagsSource"`

	// The time, in Unix epoch format in seconds, when the job got created.
	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// A job's phase can be PROBING, TRANSCODING OR UPLOADING
	CurrentPhase *string `locationName:"currentPhase" type:"string" enum:"JobPhase"`

	// Error code for the job
	ErrorCode *int64 `locationName:"errorCode" type:"integer"`

	// Error message of Job
	ErrorMessage *string `locationName:"errorMessage" type:"string"`

	// Optional list of hop destinations.
	HopDestinations []*HopDestination `locationName:"hopDestinations" type:"list"`

	// A portion of the job's ARN, unique within your AWS Elemental MediaConvert
	// resources
	Id *string `locationName:"id" type:"string"`

	// An estimate of how far your job has progressed. This estimate is shown as
	// a percentage of the total time from when your job leaves its queue to when
	// your output files appear in your output Amazon S3 bucket. AWS Elemental MediaConvert
	// provides jobPercentComplete in CloudWatch STATUS_UPDATE events and in the
	// response to GetJob and ListJobs requests. The jobPercentComplete estimate
	// is reliable for the following input containers: Quicktime, Transport Stream,
	// MP4, and MXF. For some jobs, the service can't provide information about
	// job progress. In those cases, jobPercentComplete returns a null value.
	JobPercentComplete *int64 `locationName:"jobPercentComplete" type:"integer"`

	// The job template that the job is created from, if it is created from a job
	// template.
	JobTemplate *string `locationName:"jobTemplate" type:"string"`

	// Provides messages from the service about jobs that you have already successfully
	// submitted.
	Messages *JobMessages `locationName:"messages" type:"structure"`

	// List of output group details
	OutputGroupDetails []*OutputGroupDetail `locationName:"outputGroupDetails" type:"list"`

	// Relative priority on the job.
	Priority *int64 `locationName:"priority" type:"integer"`

	// When you create a job, you can specify a queue to send it to. If you don't
	// specify, the job will go to the default queue. For more about queues, see
	// the User Guide topic at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
	Queue *string `locationName:"queue" type:"string"`

	// The job's queue hopping history.
	QueueTransitions []*QueueTransition `locationName:"queueTransitions" type:"list"`

	// The number of times that the service automatically attempted to process your
	// job after encountering an error.
	RetryCount *int64 `locationName:"retryCount" type:"integer"`

	// The IAM role you use for creating this job. For details about permissions,
	// see the User Guide topic at the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html
	//
	// Role is a required field
	Role *string `locationName:"role" type:"string" required:"true"`

	// JobSettings contains all the transcode settings for a job.
	//
	// Settings is a required field
	Settings *JobSettings `locationName:"settings" type:"structure" required:"true"`

	// Enable this setting when you run a test job to estimate how many reserved
	// transcoding slots (RTS) you need. When this is enabled, MediaConvert runs
	// your job from an on-demand queue with similar performance to what you will
	// see with one RTS in a reserved queue. This setting is disabled by default.
	SimulateReservedQueue *string `locationName:"simulateReservedQueue" type:"string" enum:"SimulateReservedQueue"`

	// A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR.
	Status *string `locationName:"status" type:"string" enum:"JobStatus"`

	// Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch
	// Events. Set the interval, in seconds, between status updates. MediaConvert
	// sends an update at this interval from the time the service begins processing
	// your job to the time it completes the transcode or encounters an error.
	StatusUpdateInterval *string `locationName:"statusUpdateInterval" type:"string" enum:"StatusUpdateInterval"`

	// Information about when jobs are submitted, started, and finished is specified
	// in Unix epoch format in seconds.
	Timing *Timing `locationName:"timing" type:"structure"`

	// User-defined metadata that you want to associate with an MediaConvert job.
	// You specify metadata in key/value pairs.
	UserMetadata map[string]*string `locationName:"userMetadata" type:"map"`
}

// String returns the string representation
func (s Job) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Job) GoString() string {
	return s.String()
}

// SetAccelerationSettings sets the AccelerationSettings field's value.
func (s *Job) SetAccelerationSettings(v *AccelerationSettings) *Job {
	s.AccelerationSettings = v
	return s
}

// SetAccelerationStatus sets the AccelerationStatus field's value.
func (s *Job) SetAccelerationStatus(v string) *Job {
	s.AccelerationStatus = &v
	return s
}

// SetArn sets the Arn field's value.
func (s *Job) SetArn(v string) *Job {
	s.Arn = &v
	return s
}

// SetBillingTagsSource sets the BillingTagsSource field's value.
func (s *Job) SetBillingTagsSource(v string) *Job {
	s.BillingTagsSource = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
func (s *Job) SetCreatedAt(v time.Time) *Job {
	s.CreatedAt = &v
	return s
}

// SetCurrentPhase sets the CurrentPhase field's value.
func (s *Job) SetCurrentPhase(v string) *Job {
	s.CurrentPhase = &v
	return s
}

// SetErrorCode sets the ErrorCode field's value.
func (s *Job) SetErrorCode(v int64) *Job {
	s.ErrorCode = &v
	return s
}

// SetErrorMessage sets the ErrorMessage field's value.
func (s *Job) SetErrorMessage(v string) *Job {
	s.ErrorMessage = &v
	return s
}

// SetHopDestinations sets the HopDestinations field's value.
func (s *Job) SetHopDestinations(v []*HopDestination) *Job {
	s.HopDestinations = v
	return s
}

// SetId sets the Id field's value.
func (s *Job) SetId(v string) *Job {
	s.Id = &v
	return s
}

// SetJobPercentComplete sets the JobPercentComplete field's value.
func (s *Job) SetJobPercentComplete(v int64) *Job {
	s.JobPercentComplete = &v
	return s
}

// SetJobTemplate sets the JobTemplate field's value.
func (s *Job) SetJobTemplate(v string) *Job {
	s.JobTemplate = &v
	return s
}

// SetMessages sets the Messages field's value.
func (s *Job) SetMessages(v *JobMessages) *Job {
	s.Messages = v
	return s
}

// SetOutputGroupDetails sets the OutputGroupDetails field's value.
func (s *Job) SetOutputGroupDetails(v []*OutputGroupDetail) *Job {
	s.OutputGroupDetails = v
	return s
}

// SetPriority sets the Priority field's value.
func (s *Job) SetPriority(v int64) *Job {
	s.Priority = &v
	return s
}

// SetQueue sets the Queue field's value.
func (s *Job) SetQueue(v string) *Job {
	s.Queue = &v
	return s
}

// SetQueueTransitions sets the QueueTransitions field's value.
func (s *Job) SetQueueTransitions(v []*QueueTransition) *Job {
	s.QueueTransitions = v
	return s
}

// SetRetryCount sets the RetryCount field's value.
func (s *Job) SetRetryCount(v int64) *Job {
	s.RetryCount = &v
	return s
}

// SetRole sets the Role field's value.
func (s *Job) SetRole(v string) *Job {
	s.Role = &v
	return s
}

// SetSettings sets the Settings field's value.
func (s *Job) SetSettings(v *JobSettings) *Job {
	s.Settings = v
	return s
}

// SetSimulateReservedQueue sets the SimulateReservedQueue field's value.
func (s *Job) SetSimulateReservedQueue(v string) *Job {
	s.SimulateReservedQueue = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *Job) SetStatus(v string) *Job {
	s.Status = &v
	return s
}

// SetStatusUpdateInterval sets the StatusUpdateInterval field's value.
func (s *Job) SetStatusUpdateInterval(v string) *Job {
	s.StatusUpdateInterval = &v
	return s
}

// SetTiming sets the Timing field's value.
func (s *Job) SetTiming(v *Timing) *Job {
	s.Timing = v
	return s
}

// SetUserMetadata sets the UserMetadata field's value.
func (s *Job) SetUserMetadata(v map[string]*string) *Job {
	s.UserMetadata = v
	return s
}

// Provides messages from the service about jobs that you have already successfully
// submitted.
type JobMessages struct {
	_ struct{} `type:"structure"`

	// List of messages that are informational only and don't indicate a problem
	// with your job.
	Info []*string `locationName:"info" type:"list"`

	// List of messages that warn about conditions that might cause your job not
	// to run or to fail.
	Warning []*string `locationName:"warning" type:"list"`
}

// String returns the string representation
func (s JobMessages) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s JobMessages) GoString() string {
	return s.String()
}

// SetInfo sets the Info field's value.
func (s *JobMessages) SetInfo(v []*string) *JobMessages {
	s.Info = v
	return s
}

// SetWarning sets the Warning field's value.
func (s *JobMessages) SetWarning(v []*string) *JobMessages {
	s.Warning = v
	return s
}

// JobSettings contains all the transcode settings for a job.
type JobSettings struct {
	_ struct{} `type:"structure"`

	// When specified, this offset (in milliseconds) is added to the input Ad Avail
	// PTS time.
	AdAvailOffset *int64 `locationName:"adAvailOffset" type:"integer"`

	// Settings for ad avail blanking. Video can be blanked or overlaid with an
	// image, and audio muted during SCTE-35 triggered ad avails.
	AvailBlanking *AvailBlanking `locationName:"availBlanking" type:"structure"`

	// Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion,
	// you can ignore these settings.
	Esam *EsamSettings `locationName:"esam" type:"structure"`

	// Use Inputs (inputs) to define source file used in the transcode job. There
	// can be multiple inputs added in a job. These inputs will be concatenated together
	// to create the output.
	Inputs []*Input `locationName:"inputs" type:"list"`

	// Use these settings only when you use Kantar watermarking. Specify the values
	// that MediaConvert uses to generate and place Kantar watermarks in your output
	// audio. These settings apply to every output in your job. In addition to specifying
	// these values, you also need to store your Kantar credentials in AWS Secrets
	// Manager. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/kantar-watermarking.html.
	KantarWatermark *KantarWatermarkSettings `locationName:"kantarWatermark" type:"structure"`

	// Overlay motion graphics on top of your video. The motion graphics that you
	// specify here appear on all outputs in all output groups. For more information,
	// see https://docs.aws.amazon.com/mediaconvert/latest/ug/motion-graphic-overlay.html.
	MotionImageInserter *MotionImageInserter `locationName:"motionImageInserter" type:"structure"`

	// Settings for your Nielsen configuration. If you don't do Nielsen measurement
	// and analytics, ignore these settings. When you enable Nielsen configuration
	// (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs
	// in the job. To enable Nielsen configuration programmatically, include an
	// instance of nielsenConfiguration in your JSON job specification. Even if
	// you don't include any children of nielsenConfiguration, you still enable
	// the setting.
	NielsenConfiguration *NielsenConfiguration `locationName:"nielsenConfiguration" type:"structure"`

	// Ignore these settings unless you are using Nielsen non-linear watermarking.
	// Specify the values that MediaConvert uses to generate and place Nielsen watermarks
	// in your output audio. In addition to specifying these values, you also need
	// to set up your cloud TIC server. These settings apply to every output in
	// your job. The MediaConvert implementation is currently compatible with the
	// following Nielsen versions: Nielsen Watermark SDK Version 5.2.1 Nielsen NLM
	// Watermark Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0]
	NielsenNonLinearWatermark *NielsenNonLinearWatermarkSettings `locationName:"nielsenNonLinearWatermark" type:"structure"`

	// (OutputGroups) contains one group of settings for each set of outputs that
	// share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime,
	// MXF, and no container) are grouped in a single output group as well. Required
	// in (OutputGroups) is a group of settings that apply to the whole group. This
	// required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings).
	// Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings
	// * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings
	// * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS,
	// CmafGroupSettings
	OutputGroups []*OutputGroup `locationName:"outputGroups" type:"list"`

	// These settings control how the service handles timecodes throughout the job.
	// These settings don't affect input clipping.
	TimecodeConfig *TimecodeConfig `locationName:"timecodeConfig" type:"structure"`

	// Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags
	// in any HLS outputs. To include timed metadata, you must enable it here, enable
	// it in each output container, and specify tags and timecodes in ID3 insertion
	// (Id3Insertion) objects.
	TimedMetadataInsertion *TimedMetadataInsertion `locationName:"timedMetadataInsertion" type:"structure"`
}

// String returns the string representation
func (s JobSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s JobSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *JobSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "JobSettings"} if s.AdAvailOffset != nil && *s.AdAvailOffset < -1000 { invalidParams.Add(request.NewErrParamMinValue("AdAvailOffset", -1000)) } if s.AvailBlanking != nil { if err := s.AvailBlanking.Validate(); err != nil { invalidParams.AddNested("AvailBlanking", err.(request.ErrInvalidParams)) } } if s.Inputs != nil { for i, v := range s.Inputs { if v == nil { continue } if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Inputs", i), err.(request.ErrInvalidParams)) } } } if s.KantarWatermark != nil { if err := s.KantarWatermark.Validate(); err != nil { invalidParams.AddNested("KantarWatermark", err.(request.ErrInvalidParams)) } } if s.MotionImageInserter != nil { if err := s.MotionImageInserter.Validate(); err != nil { invalidParams.AddNested("MotionImageInserter", err.(request.ErrInvalidParams)) } } if s.NielsenNonLinearWatermark != nil { if err := s.NielsenNonLinearWatermark.Validate(); err != nil { invalidParams.AddNested("NielsenNonLinearWatermark", err.(request.ErrInvalidParams)) } } if s.OutputGroups != nil { for i, v := range s.OutputGroups { if v == nil { continue } if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputGroups", i), err.(request.ErrInvalidParams)) } } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAdAvailOffset sets the AdAvailOffset field's value. func (s *JobSettings) SetAdAvailOffset(v int64) *JobSettings { s.AdAvailOffset = &v return s } // SetAvailBlanking sets the AvailBlanking field's value. func (s *JobSettings) SetAvailBlanking(v *AvailBlanking) *JobSettings { s.AvailBlanking = v return s } // SetEsam sets the Esam field's value. func (s *JobSettings) SetEsam(v *EsamSettings) *JobSettings { s.Esam = v return s } // SetInputs sets the Inputs field's value. 
func (s *JobSettings) SetInputs(v []*Input) *JobSettings {
	s.Inputs = v
	return s
}

// SetKantarWatermark sets the KantarWatermark field's value.
func (s *JobSettings) SetKantarWatermark(v *KantarWatermarkSettings) *JobSettings {
	s.KantarWatermark = v
	return s
}

// SetMotionImageInserter sets the MotionImageInserter field's value.
func (s *JobSettings) SetMotionImageInserter(v *MotionImageInserter) *JobSettings {
	s.MotionImageInserter = v
	return s
}

// SetNielsenConfiguration sets the NielsenConfiguration field's value.
func (s *JobSettings) SetNielsenConfiguration(v *NielsenConfiguration) *JobSettings {
	s.NielsenConfiguration = v
	return s
}

// SetNielsenNonLinearWatermark sets the NielsenNonLinearWatermark field's value.
func (s *JobSettings) SetNielsenNonLinearWatermark(v *NielsenNonLinearWatermarkSettings) *JobSettings {
	s.NielsenNonLinearWatermark = v
	return s
}

// SetOutputGroups sets the OutputGroups field's value.
func (s *JobSettings) SetOutputGroups(v []*OutputGroup) *JobSettings {
	s.OutputGroups = v
	return s
}

// SetTimecodeConfig sets the TimecodeConfig field's value.
func (s *JobSettings) SetTimecodeConfig(v *TimecodeConfig) *JobSettings {
	s.TimecodeConfig = v
	return s
}

// SetTimedMetadataInsertion sets the TimedMetadataInsertion field's value.
func (s *JobSettings) SetTimedMetadataInsertion(v *TimedMetadataInsertion) *JobSettings {
	s.TimedMetadataInsertion = v
	return s
}

// A job template is a pre-made set of encoding instructions that you can use
// to quickly create a job.
type JobTemplate struct {
	_ struct{} `type:"structure"`

	// Accelerated transcoding can significantly speed up jobs with long, visually
	// complex content.
	AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"`

	// An identifier for this resource that is unique within all of AWS.
	Arn *string `locationName:"arn" type:"string"`

	// An optional category you create to organize your job templates.
	Category *string `locationName:"category" type:"string"`

	// The timestamp in epoch seconds for Job template creation.
	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// An optional description you create for each job template.
	Description *string `locationName:"description" type:"string"`

	// Optional list of hop destinations.
	HopDestinations []*HopDestination `locationName:"hopDestinations" type:"list"`

	// The timestamp in epoch seconds when the Job template was last updated.
	LastUpdated *time.Time `locationName:"lastUpdated" type:"timestamp" timestampFormat:"unixTimestamp"`

	// A name you create for each job template. Each name must be unique within
	// your account.
	//
	// Name is a required field
	Name *string `locationName:"name" type:"string" required:"true"`

	// Relative priority on the job.
	Priority *int64 `locationName:"priority" type:"integer"`

	// Optional. The queue that jobs created from this template are assigned to.
	// If you don't specify this, jobs will go to the default queue.
	Queue *string `locationName:"queue" type:"string"`

	// JobTemplateSettings contains all the transcode settings saved in the template
	// that will be applied to jobs created from it.
	//
	// Settings is a required field
	Settings *JobTemplateSettings `locationName:"settings" type:"structure" required:"true"`

	// Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch
	// Events. Set the interval, in seconds, between status updates. MediaConvert
	// sends an update at this interval from the time the service begins processing
	// your job to the time it completes the transcode or encounters an error.
	StatusUpdateInterval *string `locationName:"statusUpdateInterval" type:"string" enum:"StatusUpdateInterval"`

	// A job template can be of two types: system or custom. System or built-in
	// job templates can't be modified or deleted by the user.
	Type *string `locationName:"type" type:"string" enum:"Type"`
}

// String returns the string representation
func (s JobTemplate) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s JobTemplate) GoString() string {
	return s.String()
}

// SetAccelerationSettings sets the AccelerationSettings field's value.
func (s *JobTemplate) SetAccelerationSettings(v *AccelerationSettings) *JobTemplate {
	s.AccelerationSettings = v
	return s
}

// SetArn sets the Arn field's value.
func (s *JobTemplate) SetArn(v string) *JobTemplate {
	s.Arn = &v
	return s
}

// SetCategory sets the Category field's value.
func (s *JobTemplate) SetCategory(v string) *JobTemplate {
	s.Category = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
func (s *JobTemplate) SetCreatedAt(v time.Time) *JobTemplate {
	s.CreatedAt = &v
	return s
}

// SetDescription sets the Description field's value.
func (s *JobTemplate) SetDescription(v string) *JobTemplate {
	s.Description = &v
	return s
}

// SetHopDestinations sets the HopDestinations field's value.
func (s *JobTemplate) SetHopDestinations(v []*HopDestination) *JobTemplate {
	s.HopDestinations = v
	return s
}

// SetLastUpdated sets the LastUpdated field's value.
func (s *JobTemplate) SetLastUpdated(v time.Time) *JobTemplate {
	s.LastUpdated = &v
	return s
}

// SetName sets the Name field's value.
func (s *JobTemplate) SetName(v string) *JobTemplate {
	s.Name = &v
	return s
}

// SetPriority sets the Priority field's value.
func (s *JobTemplate) SetPriority(v int64) *JobTemplate {
	s.Priority = &v
	return s
}

// SetQueue sets the Queue field's value.
func (s *JobTemplate) SetQueue(v string) *JobTemplate {
	s.Queue = &v
	return s
}

// SetSettings sets the Settings field's value.
func (s *JobTemplate) SetSettings(v *JobTemplateSettings) *JobTemplate {
	s.Settings = v
	return s
}

// SetStatusUpdateInterval sets the StatusUpdateInterval field's value.
func (s *JobTemplate) SetStatusUpdateInterval(v string) *JobTemplate {
	// v is a copy; storing &v does not alias the caller's variable.
	s.StatusUpdateInterval = &v
	return s
}

// SetType sets the Type field's value.
func (s *JobTemplate) SetType(v string) *JobTemplate {
	s.Type = &v
	return s
}

// JobTemplateSettings contains all the transcode settings saved in the template
// that will be applied to jobs created from it.
//
// All fields are optional pointers (nil means "not set") so that the REST-JSON
// marshaler can distinguish unset fields from zero values.
type JobTemplateSettings struct {
	_ struct{} `type:"structure"`

	// When specified, this offset (in milliseconds) is added to the input Ad Avail
	// PTS time.
	AdAvailOffset *int64 `locationName:"adAvailOffset" type:"integer"`

	// Settings for ad avail blanking. Video can be blanked or overlaid with an
	// image, and audio muted during SCTE-35 triggered ad avails.
	AvailBlanking *AvailBlanking `locationName:"availBlanking" type:"structure"`

	// Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion,
	// you can ignore these settings.
	Esam *EsamSettings `locationName:"esam" type:"structure"`

	// Use Inputs (inputs) to define the source file used in the transcode job.
	// There can only be one input in a job template. Using the API, you can include
	// multiple inputs when referencing a job template.
	Inputs []*InputTemplate `locationName:"inputs" type:"list"`

	// Use these settings only when you use Kantar watermarking. Specify the values
	// that MediaConvert uses to generate and place Kantar watermarks in your output
	// audio. These settings apply to every output in your job. In addition to specifying
	// these values, you also need to store your Kantar credentials in AWS Secrets
	// Manager. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/kantar-watermarking.html.
	KantarWatermark *KantarWatermarkSettings `locationName:"kantarWatermark" type:"structure"`

	// Overlay motion graphics on top of your video. The motion graphics that you
	// specify here appear on all outputs in all output groups. For more information,
	// see https://docs.aws.amazon.com/mediaconvert/latest/ug/motion-graphic-overlay.html.
	MotionImageInserter *MotionImageInserter `locationName:"motionImageInserter" type:"structure"`

	// Settings for your Nielsen configuration. If you don't do Nielsen measurement
	// and analytics, ignore these settings. When you enable Nielsen configuration
	// (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs
	// in the job. To enable Nielsen configuration programmatically, include an
	// instance of nielsenConfiguration in your JSON job specification. Even if
	// you don't include any children of nielsenConfiguration, you still enable
	// the setting.
	NielsenConfiguration *NielsenConfiguration `locationName:"nielsenConfiguration" type:"structure"`

	// Ignore these settings unless you are using Nielsen non-linear watermarking.
	// Specify the values that MediaConvert uses to generate and place Nielsen watermarks
	// in your output audio. In addition to specifying these values, you also need
	// to set up your cloud TIC server. These settings apply to every output in
	// your job. The MediaConvert implementation is currently with the following
	// Nielsen versions: Nielsen Watermark SDK Version 5.2.1 Nielsen NLM Watermark
	// Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0]
	NielsenNonLinearWatermark *NielsenNonLinearWatermarkSettings `locationName:"nielsenNonLinearWatermark" type:"structure"`

	// (OutputGroups) contains one group of settings for each set of outputs that
	// share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime,
	// MXF, and no container) are grouped in a single output group as well. Required
	// in (OutputGroups) is a group of settings that apply to the whole group. This
	// required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings).
	// Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings
	// * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings
	// * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS,
	// CmafGroupSettings
	OutputGroups []*OutputGroup `locationName:"outputGroups" type:"list"`

	// These settings control how the service handles timecodes throughout the job.
	// These settings don't affect input clipping.
	TimecodeConfig *TimecodeConfig `locationName:"timecodeConfig" type:"structure"`

	// Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags
	// in any HLS outputs. To include timed metadata, you must enable it here, enable
	// it in each output container, and specify tags and timecodes in ID3 insertion
	// (Id3Insertion) objects.
	TimedMetadataInsertion *TimedMetadataInsertion `locationName:"timedMetadataInsertion" type:"structure"`
}

// String returns the string representation
func (s JobTemplateSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s JobTemplateSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *JobTemplateSettings) Validate() error {
	// Accumulate every constraint violation before returning so the caller
	// sees all problems at once rather than one at a time.
	invalidParams := request.ErrInvalidParams{Context: "JobTemplateSettings"}
	if s.AdAvailOffset != nil && *s.AdAvailOffset < -1000 {
		invalidParams.Add(request.NewErrParamMinValue("AdAvailOffset", -1000))
	}
	if s.AvailBlanking != nil {
		if err := s.AvailBlanking.Validate(); err != nil {
			invalidParams.AddNested("AvailBlanking", err.(request.ErrInvalidParams))
		}
	}
	if s.Inputs != nil {
		for i, v := range s.Inputs {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				// Nested errors are labeled with the element index, e.g. "Inputs[2]".
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Inputs", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.KantarWatermark != nil {
		if err := s.KantarWatermark.Validate(); err != nil {
			invalidParams.AddNested("KantarWatermark", err.(request.ErrInvalidParams))
		}
	}
	if s.MotionImageInserter != nil {
		if err := s.MotionImageInserter.Validate(); err != nil {
			invalidParams.AddNested("MotionImageInserter", err.(request.ErrInvalidParams))
		}
	}
	if s.NielsenNonLinearWatermark != nil {
		if err := s.NielsenNonLinearWatermark.Validate(); err != nil {
			invalidParams.AddNested("NielsenNonLinearWatermark", err.(request.ErrInvalidParams))
		}
	}
	if s.OutputGroups != nil {
		for i, v := range s.OutputGroups {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OutputGroups", i), err.(request.ErrInvalidParams))
			}
		}
	}

	// Returns nil when no constraints were violated.
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAdAvailOffset sets the AdAvailOffset field's value.
func (s *JobTemplateSettings) SetAdAvailOffset(v int64) *JobTemplateSettings {
	s.AdAvailOffset = &v
	return s
}

// SetAvailBlanking sets the AvailBlanking field's value.
func (s *JobTemplateSettings) SetAvailBlanking(v *AvailBlanking) *JobTemplateSettings {
	s.AvailBlanking = v
	return s
}

// SetEsam sets the Esam field's value.
func (s *JobTemplateSettings) SetEsam(v *EsamSettings) *JobTemplateSettings {
	s.Esam = v
	return s
}

// SetInputs sets the Inputs field's value.
// Each setter stores its argument and returns the receiver so calls can be
// chained in the builder style used throughout the generated SDK.
func (s *JobTemplateSettings) SetInputs(v []*InputTemplate) *JobTemplateSettings {
	s.Inputs = v
	return s
}

// SetKantarWatermark sets the KantarWatermark field's value.
func (s *JobTemplateSettings) SetKantarWatermark(v *KantarWatermarkSettings) *JobTemplateSettings {
	s.KantarWatermark = v
	return s
}

// SetMotionImageInserter sets the MotionImageInserter field's value.
func (s *JobTemplateSettings) SetMotionImageInserter(v *MotionImageInserter) *JobTemplateSettings {
	s.MotionImageInserter = v
	return s
}

// SetNielsenConfiguration sets the NielsenConfiguration field's value.
func (s *JobTemplateSettings) SetNielsenConfiguration(v *NielsenConfiguration) *JobTemplateSettings {
	s.NielsenConfiguration = v
	return s
}

// SetNielsenNonLinearWatermark sets the NielsenNonLinearWatermark field's value.
func (s *JobTemplateSettings) SetNielsenNonLinearWatermark(v *NielsenNonLinearWatermarkSettings) *JobTemplateSettings {
	s.NielsenNonLinearWatermark = v
	return s
}

// SetOutputGroups sets the OutputGroups field's value.
func (s *JobTemplateSettings) SetOutputGroups(v []*OutputGroup) *JobTemplateSettings {
	s.OutputGroups = v
	return s
}

// SetTimecodeConfig sets the TimecodeConfig field's value.
func (s *JobTemplateSettings) SetTimecodeConfig(v *TimecodeConfig) *JobTemplateSettings {
	s.TimecodeConfig = v
	return s
}

// SetTimedMetadataInsertion sets the TimedMetadataInsertion field's value.
func (s *JobTemplateSettings) SetTimedMetadataInsertion(v *TimedMetadataInsertion) *JobTemplateSettings {
	s.TimedMetadataInsertion = v
	return s
}

// Use these settings only when you use Kantar watermarking. Specify the values
// that MediaConvert uses to generate and place Kantar watermarks in your output
// audio. These settings apply to every output in your job. In addition to specifying
// these values, you also need to store your Kantar credentials in AWS Secrets
// Manager. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/kantar-watermarking.html.
type KantarWatermarkSettings struct {
	_ struct{} `type:"structure"`

	// Provide an audio channel name from your Kantar audio license.
	ChannelName *string `locationName:"channelName" min:"1" type:"string"`

	// Specify a unique identifier for Kantar to use for this piece of content.
	ContentReference *string `locationName:"contentReference" min:"1" type:"string"`

	// Provide the name of the AWS Secrets Manager secret where your Kantar credentials
	// are stored. Note that your MediaConvert service role must provide access
	// to this secret. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/granting-permissions-for-mediaconvert-to-access-secrets-manager-secret.html.
	// For instructions on creating a secret, see https://docs.aws.amazon.com/secretsmanager/latest/userguide/tutorials_basic.html,
	// in the AWS Secrets Manager User Guide.
	CredentialsSecretName *string `locationName:"credentialsSecretName" min:"1" type:"string"`

	// Optional. Specify an offset, in whole seconds, from the start of your output
	// and the beginning of the watermarking. When you don't specify an offset,
	// Kantar defaults to zero.
	FileOffset *float64 `locationName:"fileOffset" type:"double"`

	// Provide your Kantar license ID number. You should get this number from Kantar.
	KantarLicenseId *int64 `locationName:"kantarLicenseId" type:"integer"`

	// Provide the HTTPS endpoint to the Kantar server. You should get this endpoint
	// from Kantar.
	KantarServerUrl *string `locationName:"kantarServerUrl" type:"string"`

	// Optional. Specify the Amazon S3 bucket where you want MediaConvert to store
	// your Kantar watermark XML logs. When you don't specify a bucket, MediaConvert
	// doesn't save these logs. Note that your MediaConvert service role must provide
	// access to this location. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html
	LogDestination *string `locationName:"logDestination" type:"string"`

	// You can optionally use this field to specify the first timestamp that Kantar
	// embeds during watermarking. Kantar suggests that you be very cautious when
	// using this Kantar feature, and that you use it only on channels that are
	// managed specifically for use with this feature by your Audience Measurement
	// Operator. For more information about this feature, contact Kantar technical
	// support.
	Metadata3 *string `locationName:"metadata3" min:"1" type:"string"`

	// Additional metadata that MediaConvert sends to Kantar. Maximum length is
	// 50 characters.
	Metadata4 *string `locationName:"metadata4" min:"1" type:"string"`

	// Additional metadata that MediaConvert sends to Kantar. Maximum length is
	// 50 characters.
	Metadata5 *string `locationName:"metadata5" min:"1" type:"string"`

	// Additional metadata that MediaConvert sends to Kantar. Maximum length is
	// 50 characters.
	Metadata6 *string `locationName:"metadata6" min:"1" type:"string"`

	// Additional metadata that MediaConvert sends to Kantar. Maximum length is
	// 50 characters.
	Metadata7 *string `locationName:"metadata7" min:"1" type:"string"`

	// Additional metadata that MediaConvert sends to Kantar. Maximum length is
	// 50 characters.
	Metadata8 *string `locationName:"metadata8" min:"1" type:"string"`
}

// String returns the string representation
func (s KantarWatermarkSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s KantarWatermarkSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *KantarWatermarkSettings) Validate() error {
	// Each check mirrors the min:"1" constraint declared in the corresponding
	// struct tag; nil fields are unset and therefore skipped.
	invalidParams := request.ErrInvalidParams{Context: "KantarWatermarkSettings"}
	if s.ChannelName != nil && len(*s.ChannelName) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ChannelName", 1))
	}
	if s.ContentReference != nil && len(*s.ContentReference) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ContentReference", 1))
	}
	if s.CredentialsSecretName != nil && len(*s.CredentialsSecretName) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("CredentialsSecretName", 1))
	}
	if s.Metadata3 != nil && len(*s.Metadata3) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Metadata3", 1))
	}
	if s.Metadata4 != nil && len(*s.Metadata4) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Metadata4", 1))
	}
	if s.Metadata5 != nil && len(*s.Metadata5) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Metadata5", 1))
	}
	if s.Metadata6 != nil && len(*s.Metadata6) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Metadata6", 1))
	}
	if s.Metadata7 != nil && len(*s.Metadata7) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Metadata7", 1))
	}
	if s.Metadata8 != nil && len(*s.Metadata8) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Metadata8", 1))
	}

	// Returns nil when no constraints were violated.
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetChannelName sets the ChannelName field's value.
func (s *KantarWatermarkSettings) SetChannelName(v string) *KantarWatermarkSettings {
	s.ChannelName = &v
	return s
}

// SetContentReference sets the ContentReference field's value.
func (s *KantarWatermarkSettings) SetContentReference(v string) *KantarWatermarkSettings {
	s.ContentReference = &v
	return s
}

// SetCredentialsSecretName sets the CredentialsSecretName field's value.
func (s *KantarWatermarkSettings) SetCredentialsSecretName(v string) *KantarWatermarkSettings {
	s.CredentialsSecretName = &v
	return s
}

// SetFileOffset sets the FileOffset field's value.
// Each setter stores the address of its (copied) argument and returns the
// receiver so calls can be chained in the builder style used by the SDK.
func (s *KantarWatermarkSettings) SetFileOffset(v float64) *KantarWatermarkSettings {
	s.FileOffset = &v
	return s
}

// SetKantarLicenseId sets the KantarLicenseId field's value.
func (s *KantarWatermarkSettings) SetKantarLicenseId(v int64) *KantarWatermarkSettings {
	s.KantarLicenseId = &v
	return s
}

// SetKantarServerUrl sets the KantarServerUrl field's value.
func (s *KantarWatermarkSettings) SetKantarServerUrl(v string) *KantarWatermarkSettings {
	s.KantarServerUrl = &v
	return s
}

// SetLogDestination sets the LogDestination field's value.
func (s *KantarWatermarkSettings) SetLogDestination(v string) *KantarWatermarkSettings {
	s.LogDestination = &v
	return s
}

// SetMetadata3 sets the Metadata3 field's value.
func (s *KantarWatermarkSettings) SetMetadata3(v string) *KantarWatermarkSettings {
	s.Metadata3 = &v
	return s
}

// SetMetadata4 sets the Metadata4 field's value.
func (s *KantarWatermarkSettings) SetMetadata4(v string) *KantarWatermarkSettings {
	s.Metadata4 = &v
	return s
}

// SetMetadata5 sets the Metadata5 field's value.
func (s *KantarWatermarkSettings) SetMetadata5(v string) *KantarWatermarkSettings {
	s.Metadata5 = &v
	return s
}

// SetMetadata6 sets the Metadata6 field's value.
func (s *KantarWatermarkSettings) SetMetadata6(v string) *KantarWatermarkSettings {
	s.Metadata6 = &v
	return s
}

// SetMetadata7 sets the Metadata7 field's value.
func (s *KantarWatermarkSettings) SetMetadata7(v string) *KantarWatermarkSettings {
	s.Metadata7 = &v
	return s
}

// SetMetadata8 sets the Metadata8 field's value.
func (s *KantarWatermarkSettings) SetMetadata8(v string) *KantarWatermarkSettings {
	s.Metadata8 = &v
	return s
}

// You can send list job templates requests with an empty body. Optionally,
// you can filter the response by category by specifying it in your request
// body. You can also optionally specify the maximum number, up to twenty, of
// job templates to be returned.
// All fields are optional and are sent as query-string parameters (see the
// location:"querystring" struct tags).
type ListJobTemplatesInput struct {
	_ struct{} `type:"structure"`

	// Optionally, specify a job template category to limit responses to only job
	// templates from that category.
	Category *string `location:"querystring" locationName:"category" type:"string"`

	// Optional. When you request a list of job templates, you can choose to list
	// them alphabetically by NAME or chronologically by CREATION_DATE. If you don't
	// specify, the service will list them by name.
	ListBy *string `location:"querystring" locationName:"listBy" type:"string" enum:"JobTemplateListBy"`

	// Optional. Number of job templates, up to twenty, that will be returned at
	// one time.
	MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`

	// Use this string, provided with the response to a previous request, to request
	// the next batch of job templates.
	NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`

	// Optional. When you request lists of resources, you can specify whether they
	// are sorted in ASCENDING or DESCENDING order. Default varies by resource.
	Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"`
}

// String returns the string representation
func (s ListJobTemplatesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListJobTemplatesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListJobTemplatesInput) Validate() error {
	// Enforces the min:"1" constraint on MaxResults; all other fields are
	// unconstrained. Returns nil when the input is valid.
	invalidParams := request.ErrInvalidParams{Context: "ListJobTemplatesInput"}
	if s.MaxResults != nil && *s.MaxResults < 1 {
		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCategory sets the Category field's value.
func (s *ListJobTemplatesInput) SetCategory(v string) *ListJobTemplatesInput {
	s.Category = &v
	return s
}

// SetListBy sets the ListBy field's value.
func (s *ListJobTemplatesInput) SetListBy(v string) *ListJobTemplatesInput {
	s.ListBy = &v
	return s
}

// SetMaxResults sets the MaxResults field's value.
func (s *ListJobTemplatesInput) SetMaxResults(v int64) *ListJobTemplatesInput {
	s.MaxResults = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *ListJobTemplatesInput) SetNextToken(v string) *ListJobTemplatesInput {
	s.NextToken = &v
	return s
}

// SetOrder sets the Order field's value.
func (s *ListJobTemplatesInput) SetOrder(v string) *ListJobTemplatesInput {
	s.Order = &v
	return s
}

// Successful list job templates requests return a JSON array of job templates.
// If you don't specify how they are ordered, you will receive them in alphabetical
// order by name.
type ListJobTemplatesOutput struct {
	_ struct{} `type:"structure"`

	// List of Job templates.
	JobTemplates []*JobTemplate `locationName:"jobTemplates" type:"list"`

	// Use this string to request the next batch of job templates.
	// A nil NextToken in the response means there are no further pages.
	NextToken *string `locationName:"nextToken" type:"string"`
}

// String returns the string representation
func (s ListJobTemplatesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListJobTemplatesOutput) GoString() string {
	return s.String()
}

// SetJobTemplates sets the JobTemplates field's value.
func (s *ListJobTemplatesOutput) SetJobTemplates(v []*JobTemplate) *ListJobTemplatesOutput {
	s.JobTemplates = v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *ListJobTemplatesOutput) SetNextToken(v string) *ListJobTemplatesOutput {
	s.NextToken = &v
	return s
}

// You can send list jobs requests with an empty body. Optionally, you can filter
// the response by queue and/or job status by specifying them in your request
// body. You can also optionally specify the maximum number, up to twenty, of
// jobs to be returned.
type ListJobsInput struct {
	_ struct{} `type:"structure"`

	// Optional. Number of jobs, up to twenty, that will be returned at one time.
	MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`

	// Optional. Use this string, provided with the response to a previous request,
	// to request the next batch of jobs.
	NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`

	// Optional. When you request lists of resources, you can specify whether they
	// are sorted in ASCENDING or DESCENDING order. Default varies by resource.
	Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"`

	// Optional. Provide a queue name to get back only jobs from that queue.
	Queue *string `location:"querystring" locationName:"queue" type:"string"`

	// Optional. A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED,
	// or ERROR.
	Status *string `location:"querystring" locationName:"status" type:"string" enum:"JobStatus"`
}

// String returns the string representation
func (s ListJobsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListJobsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListJobsInput) Validate() error {
	// Enforces the min:"1" constraint on MaxResults; all other fields are
	// unconstrained. Returns nil when the input is valid.
	invalidParams := request.ErrInvalidParams{Context: "ListJobsInput"}
	if s.MaxResults != nil && *s.MaxResults < 1 {
		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetMaxResults sets the MaxResults field's value.
func (s *ListJobsInput) SetMaxResults(v int64) *ListJobsInput {
	s.MaxResults = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *ListJobsInput) SetNextToken(v string) *ListJobsInput {
	s.NextToken = &v
	return s
}

// SetOrder sets the Order field's value.
func (s *ListJobsInput) SetOrder(v string) *ListJobsInput {
	s.Order = &v
	return s
}

// SetQueue sets the Queue field's value.
func (s *ListJobsInput) SetQueue(v string) *ListJobsInput {
	s.Queue = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *ListJobsInput) SetStatus(v string) *ListJobsInput {
	s.Status = &v
	return s
}

// Successful list jobs requests return a JSON array of jobs. If you don't specify
// how they are ordered, you will receive the most recently created first.
type ListJobsOutput struct {
	_ struct{} `type:"structure"`

	// List of jobs
	Jobs []*Job `locationName:"jobs" type:"list"`

	// Use this string to request the next batch of jobs.
	// A nil NextToken in the response means there are no further pages.
	NextToken *string `locationName:"nextToken" type:"string"`
}

// String returns the string representation
func (s ListJobsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListJobsOutput) GoString() string {
	return s.String()
}

// SetJobs sets the Jobs field's value.
func (s *ListJobsOutput) SetJobs(v []*Job) *ListJobsOutput {
	s.Jobs = v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput {
	s.NextToken = &v
	return s
}

// You can send list presets requests with an empty body. Optionally, you can
// filter the response by category by specifying it in your request body. You
// can also optionally specify the maximum number, up to twenty, of queues to
// be returned.
type ListPresetsInput struct {
	_ struct{} `type:"structure"`

	// Optionally, specify a preset category to limit responses to only presets
	// from that category.
	Category *string `location:"querystring" locationName:"category" type:"string"`

	// Optional. When you request a list of presets, you can choose to list them
	// alphabetically by NAME or chronologically by CREATION_DATE. If you don't
	// specify, the service will list them by name.
	ListBy *string `location:"querystring" locationName:"listBy" type:"string" enum:"PresetListBy"`

	// Optional. Number of presets, up to twenty, that will be returned at one time
	MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`

	// Use this string, provided with the response to a previous request, to request
	// the next batch of presets.
	NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`

	// Optional. When you request lists of resources, you can specify whether they
	// are sorted in ASCENDING or DESCENDING order. Default varies by resource.
	Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"`
}

// String returns the string representation
func (s ListPresetsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListPresetsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListPresetsInput) Validate() error {
	// Enforces the min:"1" constraint on MaxResults; all other fields are
	// unconstrained. Returns nil when the input is valid.
	invalidParams := request.ErrInvalidParams{Context: "ListPresetsInput"}
	if s.MaxResults != nil && *s.MaxResults < 1 {
		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCategory sets the Category field's value.
func (s *ListPresetsInput) SetCategory(v string) *ListPresetsInput {
	s.Category = &v
	return s
}

// SetListBy sets the ListBy field's value.
func (s *ListPresetsInput) SetListBy(v string) *ListPresetsInput {
	s.ListBy = &v
	return s
}

// SetMaxResults sets the MaxResults field's value.
func (s *ListPresetsInput) SetMaxResults(v int64) *ListPresetsInput {
	s.MaxResults = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *ListPresetsInput) SetNextToken(v string) *ListPresetsInput {
	s.NextToken = &v
	return s
}

// SetOrder sets the Order field's value.
func (s *ListPresetsInput) SetOrder(v string) *ListPresetsInput {
	s.Order = &v
	return s
}

// Successful list presets requests return a JSON array of presets. If you don't
// specify how they are ordered, you will receive them alphabetically by name.
type ListPresetsOutput struct {
	_ struct{} `type:"structure"`

	// Use this string to request the next batch of presets.
	// A nil NextToken in the response means there are no further pages.
	NextToken *string `locationName:"nextToken" type:"string"`

	// List of presets
	Presets []*Preset `locationName:"presets" type:"list"`
}

// String returns the string representation
func (s ListPresetsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListPresetsOutput) GoString() string {
	return s.String()
}

// SetNextToken sets the NextToken field's value.
func (s *ListPresetsOutput) SetNextToken(v string) *ListPresetsOutput {
	s.NextToken = &v
	return s
}

// SetPresets sets the Presets field's value.
func (s *ListPresetsOutput) SetPresets(v []*Preset) *ListPresetsOutput {
	s.Presets = v
	return s
}

// You can send list queues requests with an empty body. You can optionally
// specify the maximum number, up to twenty, of queues to be returned.
type ListQueuesInput struct {
	_ struct{} `type:"structure"`

	// Optional. When you request a list of queues, you can choose to list them
	// alphabetically by NAME or chronologically by CREATION_DATE. If you don't
	// specify, the service will list them by creation date.
	ListBy *string `location:"querystring" locationName:"listBy" type:"string" enum:"QueueListBy"`

	// Optional. Number of queues, up to twenty, that will be returned at one time.
	MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`

	// Use this string, provided with the response to a previous request, to request
	// the next batch of queues.
	NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`

	// Optional. When you request lists of resources, you can specify whether they
	// are sorted in ASCENDING or DESCENDING order. Default varies by resource.
	Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"`
}

// String returns the string representation
func (s ListQueuesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListQueuesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListQueuesInput) Validate() error {
	// Enforces the min:"1" constraint on MaxResults; all other fields are
	// unconstrained. Returns nil when the input is valid.
	invalidParams := request.ErrInvalidParams{Context: "ListQueuesInput"}
	if s.MaxResults != nil && *s.MaxResults < 1 {
		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetListBy sets the ListBy field's value.
func (s *ListQueuesInput) SetListBy(v string) *ListQueuesInput {
	s.ListBy = &v
	return s
}

// SetMaxResults sets the MaxResults field's value.
func (s *ListQueuesInput) SetMaxResults(v int64) *ListQueuesInput {
	s.MaxResults = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *ListQueuesInput) SetNextToken(v string) *ListQueuesInput {
	s.NextToken = &v
	return s
}

// SetOrder sets the Order field's value.
func (s *ListQueuesInput) SetOrder(v string) *ListQueuesInput {
	s.Order = &v
	return s
}

// Successful list queues requests return a JSON array of queues. If you don't
// specify how they are ordered, you will receive them alphabetically by name.
type ListQueuesOutput struct {
	_ struct{} `type:"structure"`

	// Use this string to request the next batch of queues.
	// A nil NextToken in the response means there are no further pages.
	NextToken *string `locationName:"nextToken" type:"string"`

	// List of queues.
	Queues []*Queue `locationName:"queues" type:"list"`
}

// String returns the string representation
func (s ListQueuesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListQueuesOutput) GoString() string {
	return s.String()
}

// SetNextToken sets the NextToken field's value.
func (s *ListQueuesOutput) SetNextToken(v string) *ListQueuesOutput {
	s.NextToken = &v
	return s
}

// SetQueues sets the Queues field's value.
func (s *ListQueuesOutput) SetQueues(v []*Queue) *ListQueuesOutput {
	s.Queues = v
	return s
}

// List the tags for your AWS Elemental MediaConvert resource by sending a request
// with the Amazon Resource Name (ARN) of the resource. To get the ARN, send
// a GET request with the resource name.
type ListTagsForResourceInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the resource that you want to list tags
	// for. To get the ARN, send a GET request with the resource name.
	//
	// Arn is a required field
	Arn *string `location:"uri" locationName:"arn" type:"string" required:"true"`
}

// String returns the string representation
func (s ListTagsForResourceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListTagsForResourceInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListTagsForResourceInput) Validate() error {
	// Arn is both required and, because it is a URI path component, must be
	// non-empty. Returns nil when the input is valid.
	invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"}
	if s.Arn == nil {
		invalidParams.Add(request.NewErrParamRequired("Arn"))
	}
	if s.Arn != nil && len(*s.Arn) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Arn", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetArn sets the Arn field's value.
func (s *ListTagsForResourceInput) SetArn(v string) *ListTagsForResourceInput {
	s.Arn = &v
	return s
}

// A successful request to list the tags for a resource returns a JSON map of
// tags.
type ListTagsForResourceOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) and tags for an AWS Elemental MediaConvert
	// resource.
	ResourceTags *ResourceTags `locationName:"resourceTags" type:"structure"`
}

// String returns the string representation
func (s ListTagsForResourceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListTagsForResourceOutput) GoString() string {
	return s.String()
}

// SetResourceTags sets the ResourceTags field's value.
func (s *ListTagsForResourceOutput) SetResourceTags(v *ResourceTags) *ListTagsForResourceOutput {
	s.ResourceTags = v
	return s
}

// Settings for SCTE-35 signals from ESAM. Include this in your job settings
// to put SCTE-35 markers in your HLS and transport stream outputs at the insertion
// points that you specify in an ESAM XML document. Provide the document in
// the setting SCC XML (sccXml).
type M2tsScte35Esam struct {
	_ struct{} `type:"structure"`

	// Packet Identifier (PID) of the SCTE-35 stream in the transport stream generated
	// by ESAM.
	Scte35EsamPid *int64 `locationName:"scte35EsamPid" min:"32" type:"integer"`
}

// String returns the string representation
func (s M2tsScte35Esam) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s M2tsScte35Esam) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *M2tsScte35Esam) Validate() error { invalidParams := request.ErrInvalidParams{Context: "M2tsScte35Esam"} if s.Scte35EsamPid != nil && *s.Scte35EsamPid < 32 { invalidParams.Add(request.NewErrParamMinValue("Scte35EsamPid", 32)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetScte35EsamPid sets the Scte35EsamPid field's value. func (s *M2tsScte35Esam) SetScte35EsamPid(v int64) *M2tsScte35Esam { s.Scte35EsamPid = &v return s } // MPEG-2 TS container settings. These apply to outputs in a File output group // when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). // In these assets, data is organized by the program map table (PMT). Each transport // stream program contains subsets of data, including audio, video, and metadata. // Each of these subsets of data has a numerical label called a packet identifier // (PID). Each transport stream program corresponds to one MediaConvert output. // The PMT lists the types of data in a program along with their PID. Downstream // systems and players use the program map table to look up the PID for each // type of data it accesses and then uses the PIDs to locate specific data within // the asset. type M2tsSettings struct { _ struct{} `type:"structure"` // Selects between the DVB and ATSC buffer models for Dolby Digital audio. AudioBufferModel *string `locationName:"audioBufferModel" type:"string" enum:"M2tsAudioBufferModel"` // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences // between video and audio. For this situation, choose Match video duration // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default // codec duration (DEFAULT_CODEC_DURATION). 
When you choose Match video duration, // MediaConvert pads the output audio streams with silence or trims them to // ensure that the total duration of each audio stream is at least as long as // the total duration of the video stream. After padding or trimming, the audio // stream duration is no more than one frame longer than the video stream. MediaConvert // applies audio padding or trimming only to the end of the last segment of // the output. For unsegmented outputs, MediaConvert adds padding only to the // end of the file. When you keep the default value, any minor discrepancies // between audio and video duration will depend on your output audio codec. AudioDuration *string `locationName:"audioDuration" type:"string" enum:"M2tsAudioDuration"` // The number of audio frames to insert for each PES packet. AudioFramesPerPes *int64 `locationName:"audioFramesPerPes" type:"integer"` // Specify the packet identifiers (PIDs) for any elementary audio streams you // include in this output. Specify multiple PIDs as a JSON array. Default is // the range 482-492. AudioPids []*int64 `locationName:"audioPids" type:"list"` // Specify the output bitrate of the transport stream in bits per second. Setting // to 0 lets the muxer automatically determine the appropriate bitrate. Other // common values are 3750000, 7500000, and 15000000. Bitrate *int64 `locationName:"bitrate" type:"integer"` // Controls what buffer model to use for accurate interleaving. If set to MULTIPLEX, // use multiplex buffer model. If set to NONE, this can lead to lower latency, // but low-memory devices may not be able to play back the stream without interruptions. BufferModel *string `locationName:"bufferModel" type:"string" enum:"M2tsBufferModel"` // Use these settings to insert a DVB Network Information Table (NIT) in the // transport stream of this output. 
When you work directly in your JSON job // specification, include this object only when your job has a transport stream // output and the container settings contain the object M2tsSettings. DvbNitSettings *DvbNitSettings `locationName:"dvbNitSettings" type:"structure"` // Use these settings to insert a DVB Service Description Table (SDT) in the // transport stream of this output. When you work directly in your JSON job // specification, include this object only when your job has a transport stream // output and the container settings contain the object M2tsSettings. DvbSdtSettings *DvbSdtSettings `locationName:"dvbSdtSettings" type:"structure"` // Specify the packet identifiers (PIDs) for DVB subtitle data included in this // output. Specify multiple PIDs as a JSON array. Default is the range 460-479. DvbSubPids []*int64 `locationName:"dvbSubPids" type:"list"` // Use these settings to insert a DVB Time and Date Table (TDT) in the transport // stream of this output. When you work directly in your JSON job specification, // include this object only when your job has a transport stream output and // the container settings contain the object M2tsSettings. DvbTdtSettings *DvbTdtSettings `locationName:"dvbTdtSettings" type:"structure"` // Specify the packet identifier (PID) for DVB teletext data you include in // this output. Default is 499. DvbTeletextPid *int64 `locationName:"dvbTeletextPid" min:"32" type:"integer"` // When set to VIDEO_AND_FIXED_INTERVALS, audio EBP markers will be added to // partitions 3 and 4. The interval between these additional markers will be // fixed, and will be slightly shorter than the video EBP marker interval. When // set to VIDEO_INTERVAL, these additional markers will not be inserted. Only // applicable when EBP segmentation markers are is selected (segmentationMarkers // is EBP or EBP_LEGACY). 
EbpAudioInterval *string `locationName:"ebpAudioInterval" type:"string" enum:"M2tsEbpAudioInterval"` // Selects which PIDs to place EBP markers on. They can either be placed only // on the video PID, or on both the video PID and all audio PIDs. Only applicable // when EBP segmentation markers are is selected (segmentationMarkers is EBP // or EBP_LEGACY). EbpPlacement *string `locationName:"ebpPlacement" type:"string" enum:"M2tsEbpPlacement"` // Controls whether to include the ES Rate field in the PES header. EsRateInPes *string `locationName:"esRateInPes" type:"string" enum:"M2tsEsRateInPes"` // Keep the default value (DEFAULT) unless you know that your audio EBP markers // are incorrectly appearing before your video EBP markers. To correct this // problem, set this value to Force (FORCE). ForceTsVideoEbpOrder *string `locationName:"forceTsVideoEbpOrder" type:"string" enum:"M2tsForceTsVideoEbpOrder"` // The length, in seconds, of each fragment. Only used with EBP markers. FragmentTime *float64 `locationName:"fragmentTime" type:"double"` // Specify the maximum time, in milliseconds, between Program Clock References // (PCRs) inserted into the transport stream. MaxPcrInterval *int64 `locationName:"maxPcrInterval" type:"integer"` // When set, enforces that Encoder Boundary Points do not come within the specified // time interval of each other by looking ahead at input video. If another EBP // is going to come in within the specified time interval, the current EBP is // not emitted, and the segment is "stretched" to the next marker. The lookahead // value does not add latency to the system. The Live Event must be configured // elsewhere to create sufficient latency to make the lookahead accurate. MinEbpInterval *int64 `locationName:"minEbpInterval" type:"integer"` // If INSERT, Nielsen inaudible tones for media tracking will be detected in // the input audio and an equivalent ID3 tag will be inserted in the output. 
NielsenId3 *string `locationName:"nielsenId3" type:"string" enum:"M2tsNielsenId3"` // Value in bits per second of extra null packets to insert into the transport // stream. This can be used if a downstream encryption system requires periodic // null packets. NullPacketBitrate *float64 `locationName:"nullPacketBitrate" type:"double"` // The number of milliseconds between instances of this table in the output // transport stream. PatInterval *int64 `locationName:"patInterval" type:"integer"` // When set to PCR_EVERY_PES_PACKET, a Program Clock Reference value is inserted // for every Packetized Elementary Stream (PES) header. This is effective only // when the PCR PID is the same as the video or audio elementary stream. PcrControl *string `locationName:"pcrControl" type:"string" enum:"M2tsPcrControl"` // Specify the packet identifier (PID) for the program clock reference (PCR) // in this output. If you do not specify a value, the service will use the value // for Video PID (VideoPid). PcrPid *int64 `locationName:"pcrPid" min:"32" type:"integer"` // Specify the number of milliseconds between instances of the program map table // (PMT) in the output transport stream. PmtInterval *int64 `locationName:"pmtInterval" type:"integer"` // Specify the packet identifier (PID) for the program map table (PMT) itself. // Default is 480. PmtPid *int64 `locationName:"pmtPid" min:"32" type:"integer"` // Specify the packet identifier (PID) of the private metadata stream. Default // is 503. PrivateMetadataPid *int64 `locationName:"privateMetadataPid" min:"32" type:"integer"` // Use Program number (programNumber) to specify the program number used in // the program map table (PMT) for this output. Default is 1. Program numbers // and program map tables are parts of MPEG-2 transport stream containers, used // for organizing data. ProgramNumber *int64 `locationName:"programNumber" type:"integer"` // When set to CBR, inserts null packets into transport stream to fill specified // bitrate. 
When set to VBR, the bitrate setting acts as the maximum bitrate, // but the output will not be padded up to that bitrate. RateMode *string `locationName:"rateMode" type:"string" enum:"M2tsRateMode"` // Include this in your job settings to put SCTE-35 markers in your HLS and // transport stream outputs at the insertion points that you specify in an ESAM // XML document. Provide the document in the setting SCC XML (sccXml). Scte35Esam *M2tsScte35Esam `locationName:"scte35Esam" type:"structure"` // Specify the packet identifier (PID) of the SCTE-35 stream in the transport // stream. Scte35Pid *int64 `locationName:"scte35Pid" min:"32" type:"integer"` // For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if // you want SCTE-35 markers that appear in your input to also appear in this // output. Choose None (NONE) if you don't want SCTE-35 markers in this output. // For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). Also // provide the ESAM XML as a string in the setting Signal processing notification // XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam). Scte35Source *string `locationName:"scte35Source" type:"string" enum:"M2tsScte35Source"` // Inserts segmentation markers at each segmentation_time period. rai_segstart // sets the Random Access Indicator bit in the adaptation field. rai_adapt sets // the RAI bit and adds the current timecode in the private data bytes. psi_segstart // inserts PAT and PMT tables at the start of segments. ebp adds Encoder Boundary // Point information to the adaptation field as per OpenCable specification // OC-SP-EBP-I01-130118. ebp_legacy adds Encoder Boundary Point information // to the adaptation field using a legacy proprietary format. SegmentationMarkers *string `locationName:"segmentationMarkers" type:"string" enum:"M2tsSegmentationMarkers"` // The segmentation style parameter controls how segmentation markers are inserted // into the transport stream. 
With avails, it is possible that segments may // be truncated, which can influence where future segmentation markers are inserted. // When a segmentation style of "reset_cadence" is selected and a segment is // truncated due to an avail, we will reset the segmentation cadence. This means // the subsequent segment will have a duration of of $segmentation_time seconds. // When a segmentation style of "maintain_cadence" is selected and a segment // is truncated due to an avail, we will not reset the segmentation cadence. // This means the subsequent segment will likely be truncated as well. However, // all segments after that will have a duration of $segmentation_time seconds. // Note that EBP lookahead is a slight exception to this rule. SegmentationStyle *string `locationName:"segmentationStyle" type:"string" enum:"M2tsSegmentationStyle"` // Specify the length, in seconds, of each segment. Required unless markers // is set to _none_. SegmentationTime *float64 `locationName:"segmentationTime" type:"double"` // Specify the packet identifier (PID) for timed metadata in this output. Default // is 502. TimedMetadataPid *int64 `locationName:"timedMetadataPid" min:"32" type:"integer"` // Specify the ID for the transport stream itself in the program map table for // this output. Transport stream IDs and program map tables are parts of MPEG-2 // transport stream containers, used for organizing data. TransportStreamId *int64 `locationName:"transportStreamId" type:"integer"` // Specify the packet identifier (PID) of the elementary video stream in the // transport stream. VideoPid *int64 `locationName:"videoPid" min:"32" type:"integer"` } // String returns the string representation func (s M2tsSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s M2tsSettings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *M2tsSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "M2tsSettings"} if s.DvbTeletextPid != nil && *s.DvbTeletextPid < 32 { invalidParams.Add(request.NewErrParamMinValue("DvbTeletextPid", 32)) } if s.PcrPid != nil && *s.PcrPid < 32 { invalidParams.Add(request.NewErrParamMinValue("PcrPid", 32)) } if s.PmtPid != nil && *s.PmtPid < 32 { invalidParams.Add(request.NewErrParamMinValue("PmtPid", 32)) } if s.PrivateMetadataPid != nil && *s.PrivateMetadataPid < 32 { invalidParams.Add(request.NewErrParamMinValue("PrivateMetadataPid", 32)) } if s.Scte35Pid != nil && *s.Scte35Pid < 32 { invalidParams.Add(request.NewErrParamMinValue("Scte35Pid", 32)) } if s.TimedMetadataPid != nil && *s.TimedMetadataPid < 32 { invalidParams.Add(request.NewErrParamMinValue("TimedMetadataPid", 32)) } if s.VideoPid != nil && *s.VideoPid < 32 { invalidParams.Add(request.NewErrParamMinValue("VideoPid", 32)) } if s.DvbNitSettings != nil { if err := s.DvbNitSettings.Validate(); err != nil { invalidParams.AddNested("DvbNitSettings", err.(request.ErrInvalidParams)) } } if s.DvbSdtSettings != nil { if err := s.DvbSdtSettings.Validate(); err != nil { invalidParams.AddNested("DvbSdtSettings", err.(request.ErrInvalidParams)) } } if s.DvbTdtSettings != nil { if err := s.DvbTdtSettings.Validate(); err != nil { invalidParams.AddNested("DvbTdtSettings", err.(request.ErrInvalidParams)) } } if s.Scte35Esam != nil { if err := s.Scte35Esam.Validate(); err != nil { invalidParams.AddNested("Scte35Esam", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAudioBufferModel sets the AudioBufferModel field's value. func (s *M2tsSettings) SetAudioBufferModel(v string) *M2tsSettings { s.AudioBufferModel = &v return s } // SetAudioDuration sets the AudioDuration field's value. 
func (s *M2tsSettings) SetAudioDuration(v string) *M2tsSettings { s.AudioDuration = &v return s } // SetAudioFramesPerPes sets the AudioFramesPerPes field's value. func (s *M2tsSettings) SetAudioFramesPerPes(v int64) *M2tsSettings { s.AudioFramesPerPes = &v return s } // SetAudioPids sets the AudioPids field's value. func (s *M2tsSettings) SetAudioPids(v []*int64) *M2tsSettings { s.AudioPids = v return s } // SetBitrate sets the Bitrate field's value. func (s *M2tsSettings) SetBitrate(v int64) *M2tsSettings { s.Bitrate = &v return s } // SetBufferModel sets the BufferModel field's value. func (s *M2tsSettings) SetBufferModel(v string) *M2tsSettings { s.BufferModel = &v return s } // SetDvbNitSettings sets the DvbNitSettings field's value. func (s *M2tsSettings) SetDvbNitSettings(v *DvbNitSettings) *M2tsSettings { s.DvbNitSettings = v return s } // SetDvbSdtSettings sets the DvbSdtSettings field's value. func (s *M2tsSettings) SetDvbSdtSettings(v *DvbSdtSettings) *M2tsSettings { s.DvbSdtSettings = v return s } // SetDvbSubPids sets the DvbSubPids field's value. func (s *M2tsSettings) SetDvbSubPids(v []*int64) *M2tsSettings { s.DvbSubPids = v return s } // SetDvbTdtSettings sets the DvbTdtSettings field's value. func (s *M2tsSettings) SetDvbTdtSettings(v *DvbTdtSettings) *M2tsSettings { s.DvbTdtSettings = v return s } // SetDvbTeletextPid sets the DvbTeletextPid field's value. func (s *M2tsSettings) SetDvbTeletextPid(v int64) *M2tsSettings { s.DvbTeletextPid = &v return s } // SetEbpAudioInterval sets the EbpAudioInterval field's value. func (s *M2tsSettings) SetEbpAudioInterval(v string) *M2tsSettings { s.EbpAudioInterval = &v return s } // SetEbpPlacement sets the EbpPlacement field's value. func (s *M2tsSettings) SetEbpPlacement(v string) *M2tsSettings { s.EbpPlacement = &v return s } // SetEsRateInPes sets the EsRateInPes field's value. 
func (s *M2tsSettings) SetEsRateInPes(v string) *M2tsSettings { s.EsRateInPes = &v return s } // SetForceTsVideoEbpOrder sets the ForceTsVideoEbpOrder field's value. func (s *M2tsSettings) SetForceTsVideoEbpOrder(v string) *M2tsSettings { s.ForceTsVideoEbpOrder = &v return s } // SetFragmentTime sets the FragmentTime field's value. func (s *M2tsSettings) SetFragmentTime(v float64) *M2tsSettings { s.FragmentTime = &v return s } // SetMaxPcrInterval sets the MaxPcrInterval field's value. func (s *M2tsSettings) SetMaxPcrInterval(v int64) *M2tsSettings { s.MaxPcrInterval = &v return s } // SetMinEbpInterval sets the MinEbpInterval field's value. func (s *M2tsSettings) SetMinEbpInterval(v int64) *M2tsSettings { s.MinEbpInterval = &v return s } // SetNielsenId3 sets the NielsenId3 field's value. func (s *M2tsSettings) SetNielsenId3(v string) *M2tsSettings { s.NielsenId3 = &v return s } // SetNullPacketBitrate sets the NullPacketBitrate field's value. func (s *M2tsSettings) SetNullPacketBitrate(v float64) *M2tsSettings { s.NullPacketBitrate = &v return s } // SetPatInterval sets the PatInterval field's value. func (s *M2tsSettings) SetPatInterval(v int64) *M2tsSettings { s.PatInterval = &v return s } // SetPcrControl sets the PcrControl field's value. func (s *M2tsSettings) SetPcrControl(v string) *M2tsSettings { s.PcrControl = &v return s } // SetPcrPid sets the PcrPid field's value. func (s *M2tsSettings) SetPcrPid(v int64) *M2tsSettings { s.PcrPid = &v return s } // SetPmtInterval sets the PmtInterval field's value. func (s *M2tsSettings) SetPmtInterval(v int64) *M2tsSettings { s.PmtInterval = &v return s } // SetPmtPid sets the PmtPid field's value. func (s *M2tsSettings) SetPmtPid(v int64) *M2tsSettings { s.PmtPid = &v return s } // SetPrivateMetadataPid sets the PrivateMetadataPid field's value. func (s *M2tsSettings) SetPrivateMetadataPid(v int64) *M2tsSettings { s.PrivateMetadataPid = &v return s } // SetProgramNumber sets the ProgramNumber field's value. 
func (s *M2tsSettings) SetProgramNumber(v int64) *M2tsSettings { s.ProgramNumber = &v return s } // SetRateMode sets the RateMode field's value. func (s *M2tsSettings) SetRateMode(v string) *M2tsSettings { s.RateMode = &v return s } // SetScte35Esam sets the Scte35Esam field's value. func (s *M2tsSettings) SetScte35Esam(v *M2tsScte35Esam) *M2tsSettings { s.Scte35Esam = v return s } // SetScte35Pid sets the Scte35Pid field's value. func (s *M2tsSettings) SetScte35Pid(v int64) *M2tsSettings { s.Scte35Pid = &v return s } // SetScte35Source sets the Scte35Source field's value. func (s *M2tsSettings) SetScte35Source(v string) *M2tsSettings { s.Scte35Source = &v return s } // SetSegmentationMarkers sets the SegmentationMarkers field's value. func (s *M2tsSettings) SetSegmentationMarkers(v string) *M2tsSettings { s.SegmentationMarkers = &v return s } // SetSegmentationStyle sets the SegmentationStyle field's value. func (s *M2tsSettings) SetSegmentationStyle(v string) *M2tsSettings { s.SegmentationStyle = &v return s } // SetSegmentationTime sets the SegmentationTime field's value. func (s *M2tsSettings) SetSegmentationTime(v float64) *M2tsSettings { s.SegmentationTime = &v return s } // SetTimedMetadataPid sets the TimedMetadataPid field's value. func (s *M2tsSettings) SetTimedMetadataPid(v int64) *M2tsSettings { s.TimedMetadataPid = &v return s } // SetTransportStreamId sets the TransportStreamId field's value. func (s *M2tsSettings) SetTransportStreamId(v int64) *M2tsSettings { s.TransportStreamId = &v return s } // SetVideoPid sets the VideoPid field's value. func (s *M2tsSettings) SetVideoPid(v int64) *M2tsSettings { s.VideoPid = &v return s } // These settings relate to the MPEG-2 transport stream (MPEG2-TS) container // for the MPEG2-TS segments in your HLS outputs. 
type M3u8Settings struct { _ struct{} `type:"structure"` // Specify this setting only when your output will be consumed by a downstream // repackaging workflow that is sensitive to very small duration differences // between video and audio. For this situation, choose Match video duration // (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default // codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, // MediaConvert pads the output audio streams with silence or trims them to // ensure that the total duration of each audio stream is at least as long as // the total duration of the video stream. After padding or trimming, the audio // stream duration is no more than one frame longer than the video stream. MediaConvert // applies audio padding or trimming only to the end of the last segment of // the output. For unsegmented outputs, MediaConvert adds padding only to the // end of the file. When you keep the default value, any minor discrepancies // between audio and video duration will depend on your output audio codec. AudioDuration *string `locationName:"audioDuration" type:"string" enum:"M3u8AudioDuration"` // The number of audio frames to insert for each PES packet. AudioFramesPerPes *int64 `locationName:"audioFramesPerPes" type:"integer"` // Packet Identifier (PID) of the elementary audio stream(s) in the transport // stream. Multiple values are accepted, and can be entered in ranges and/or // by comma separation. AudioPids []*int64 `locationName:"audioPids" type:"list"` // Specify the maximum time, in milliseconds, between Program Clock References // (PCRs) inserted into the transport stream. MaxPcrInterval *int64 `locationName:"maxPcrInterval" type:"integer"` // If INSERT, Nielsen inaudible tones for media tracking will be detected in // the input audio and an equivalent ID3 tag will be inserted in the output. 
NielsenId3 *string `locationName:"nielsenId3" type:"string" enum:"M3u8NielsenId3"` // The number of milliseconds between instances of this table in the output // transport stream. PatInterval *int64 `locationName:"patInterval" type:"integer"` // When set to PCR_EVERY_PES_PACKET a Program Clock Reference value is inserted // for every Packetized Elementary Stream (PES) header. This parameter is effective // only when the PCR PID is the same as the video or audio elementary stream. PcrControl *string `locationName:"pcrControl" type:"string" enum:"M3u8PcrControl"` // Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport // stream. When no value is given, the encoder will assign the same value as // the Video PID. PcrPid *int64 `locationName:"pcrPid" min:"32" type:"integer"` // The number of milliseconds between instances of this table in the output // transport stream. PmtInterval *int64 `locationName:"pmtInterval" type:"integer"` // Packet Identifier (PID) for the Program Map Table (PMT) in the transport // stream. PmtPid *int64 `locationName:"pmtPid" min:"32" type:"integer"` // Packet Identifier (PID) of the private metadata stream in the transport stream. PrivateMetadataPid *int64 `locationName:"privateMetadataPid" min:"32" type:"integer"` // The value of the program number field in the Program Map Table. ProgramNumber *int64 `locationName:"programNumber" type:"integer"` // Packet Identifier (PID) of the SCTE-35 stream in the transport stream. Scte35Pid *int64 `locationName:"scte35Pid" min:"32" type:"integer"` // For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if // you want SCTE-35 markers that appear in your input to also appear in this // output. Choose None (NONE) if you don't want SCTE-35 markers in this output. // For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you // don't want manifest conditioning. 
Choose Passthrough (PASSTHROUGH) and choose // Ad markers (adMarkers) if you do want manifest conditioning. In both cases, // also provide the ESAM XML as a string in the setting Signal processing notification // XML (sccXml). Scte35Source *string `locationName:"scte35Source" type:"string" enum:"M3u8Scte35Source"` // Applies only to HLS outputs. Use this setting to specify whether the service // inserts the ID3 timed metadata from the input in this output. TimedMetadata *string `locationName:"timedMetadata" type:"string" enum:"TimedMetadata"` // Packet Identifier (PID) of the timed metadata stream in the transport stream. TimedMetadataPid *int64 `locationName:"timedMetadataPid" min:"32" type:"integer"` // The value of the transport stream ID field in the Program Map Table. TransportStreamId *int64 `locationName:"transportStreamId" type:"integer"` // Packet Identifier (PID) of the elementary video stream in the transport stream. VideoPid *int64 `locationName:"videoPid" min:"32" type:"integer"` } // String returns the string representation func (s M3u8Settings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s M3u8Settings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *M3u8Settings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "M3u8Settings"} if s.PcrPid != nil && *s.PcrPid < 32 { invalidParams.Add(request.NewErrParamMinValue("PcrPid", 32)) } if s.PmtPid != nil && *s.PmtPid < 32 { invalidParams.Add(request.NewErrParamMinValue("PmtPid", 32)) } if s.PrivateMetadataPid != nil && *s.PrivateMetadataPid < 32 { invalidParams.Add(request.NewErrParamMinValue("PrivateMetadataPid", 32)) } if s.Scte35Pid != nil && *s.Scte35Pid < 32 { invalidParams.Add(request.NewErrParamMinValue("Scte35Pid", 32)) } if s.TimedMetadataPid != nil && *s.TimedMetadataPid < 32 { invalidParams.Add(request.NewErrParamMinValue("TimedMetadataPid", 32)) } if s.VideoPid != nil && *s.VideoPid < 32 { invalidParams.Add(request.NewErrParamMinValue("VideoPid", 32)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAudioDuration sets the AudioDuration field's value. func (s *M3u8Settings) SetAudioDuration(v string) *M3u8Settings { s.AudioDuration = &v return s } // SetAudioFramesPerPes sets the AudioFramesPerPes field's value. func (s *M3u8Settings) SetAudioFramesPerPes(v int64) *M3u8Settings { s.AudioFramesPerPes = &v return s } // SetAudioPids sets the AudioPids field's value. func (s *M3u8Settings) SetAudioPids(v []*int64) *M3u8Settings { s.AudioPids = v return s } // SetMaxPcrInterval sets the MaxPcrInterval field's value. func (s *M3u8Settings) SetMaxPcrInterval(v int64) *M3u8Settings { s.MaxPcrInterval = &v return s } // SetNielsenId3 sets the NielsenId3 field's value. func (s *M3u8Settings) SetNielsenId3(v string) *M3u8Settings { s.NielsenId3 = &v return s } // SetPatInterval sets the PatInterval field's value. func (s *M3u8Settings) SetPatInterval(v int64) *M3u8Settings { s.PatInterval = &v return s } // SetPcrControl sets the PcrControl field's value. func (s *M3u8Settings) SetPcrControl(v string) *M3u8Settings { s.PcrControl = &v return s } // SetPcrPid sets the PcrPid field's value. 
func (s *M3u8Settings) SetPcrPid(v int64) *M3u8Settings { s.PcrPid = &v return s } // SetPmtInterval sets the PmtInterval field's value. func (s *M3u8Settings) SetPmtInterval(v int64) *M3u8Settings { s.PmtInterval = &v return s } // SetPmtPid sets the PmtPid field's value. func (s *M3u8Settings) SetPmtPid(v int64) *M3u8Settings { s.PmtPid = &v return s } // SetPrivateMetadataPid sets the PrivateMetadataPid field's value. func (s *M3u8Settings) SetPrivateMetadataPid(v int64) *M3u8Settings { s.PrivateMetadataPid = &v return s } // SetProgramNumber sets the ProgramNumber field's value. func (s *M3u8Settings) SetProgramNumber(v int64) *M3u8Settings { s.ProgramNumber = &v return s } // SetScte35Pid sets the Scte35Pid field's value. func (s *M3u8Settings) SetScte35Pid(v int64) *M3u8Settings { s.Scte35Pid = &v return s } // SetScte35Source sets the Scte35Source field's value. func (s *M3u8Settings) SetScte35Source(v string) *M3u8Settings { s.Scte35Source = &v return s } // SetTimedMetadata sets the TimedMetadata field's value. func (s *M3u8Settings) SetTimedMetadata(v string) *M3u8Settings { s.TimedMetadata = &v return s } // SetTimedMetadataPid sets the TimedMetadataPid field's value. func (s *M3u8Settings) SetTimedMetadataPid(v int64) *M3u8Settings { s.TimedMetadataPid = &v return s } // SetTransportStreamId sets the TransportStreamId field's value. func (s *M3u8Settings) SetTransportStreamId(v int64) *M3u8Settings { s.TransportStreamId = &v return s } // SetVideoPid sets the VideoPid field's value. func (s *M3u8Settings) SetVideoPid(v int64) *M3u8Settings { s.VideoPid = &v return s } // Overlay motion graphics on top of your video. The motion graphics that you // specify here appear on all outputs in all output groups. For more information, // see https://docs.aws.amazon.com/mediaconvert/latest/ug/motion-graphic-overlay.html. type MotionImageInserter struct { _ struct{} `type:"structure"` // If your motion graphic asset is a .mov file, keep this setting unspecified. 
// If your motion graphic asset is a series of .png files, specify the frame // rate of the overlay in frames per second, as a fraction. For example, specify // 24 fps as 24/1. Make sure that the number of images in your series matches // the frame rate and your intended overlay duration. For example, if you want // a 30-second overlay at 30 fps, you should have 900 .png images. This overlay // frame rate doesn't need to match the frame rate of the underlying video. Framerate *MotionImageInsertionFramerate `locationName:"framerate" type:"structure"` // Specify the .mov file or series of .png files that you want to overlay on // your video. For .png files, provide the file name of the first file in the // series. Make sure that the names of the .png files end with sequential numbers // that specify the order that they are played in. For example, overlay_000.png, // overlay_001.png, overlay_002.png, and so on. The sequence must start at zero, // and each image file name must have the same number of digits. Pad your initial // file names with enough zeros to complete the sequence. For example, if the // first image is overlay_0.png, there can be only 10 images in the sequence, // with the last image being overlay_9.png. But if the first image is overlay_00.png, // there can be 100 images in the sequence. Input *string `locationName:"input" min:"14" type:"string"` // Choose the type of motion graphic asset that you are providing for your overlay. // You can choose either a .mov file or a series of .png files. InsertionMode *string `locationName:"insertionMode" type:"string" enum:"MotionImageInsertionMode"` // Use Offset to specify the placement of your motion graphic overlay on the // video frame. Specify in pixels, from the upper-left corner of the frame. // If you don't specify an offset, the service scales your overlay to the full // size of the frame. 
Otherwise, the service inserts the overlay at its native // resolution and scales the size up or down with any video scaling. Offset *MotionImageInsertionOffset `locationName:"offset" type:"structure"` // Specify whether your motion graphic overlay repeats on a loop or plays only // once. Playback *string `locationName:"playback" type:"string" enum:"MotionImagePlayback"` // Specify when the motion overlay begins. Use timecode format (HH:MM:SS:FF // or HH:MM:SS;FF). Make sure that the timecode you provide here takes into // account how you have set up your timecode configuration under both job settings // and input settings. The simplest way to do that is to set both to start at // 0. If you need to set up your job to follow timecodes embedded in your source // that don't start at zero, make sure that you specify a start time that is // after the first embedded timecode. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/setting-up-timecode.html // Find job-wide and input timecode configuration settings in your JSON job // settings specification at settings>timecodeConfig>source and settings>inputs>timecodeSource. StartTime *string `locationName:"startTime" min:"11" type:"string"` } // String returns the string representation func (s MotionImageInserter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s MotionImageInserter) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *MotionImageInserter) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "MotionImageInserter"}
	if s.Input != nil && len(*s.Input) < 14 {
		invalidParams.Add(request.NewErrParamMinLen("Input", 14))
	}
	if s.StartTime != nil && len(*s.StartTime) < 11 {
		invalidParams.Add(request.NewErrParamMinLen("StartTime", 11))
	}
	if s.Framerate != nil {
		if err := s.Framerate.Validate(); err != nil {
			invalidParams.AddNested("Framerate", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetFramerate sets the Framerate field's value.
func (s *MotionImageInserter) SetFramerate(v *MotionImageInsertionFramerate) *MotionImageInserter {
	s.Framerate = v
	return s
}

// SetInput sets the Input field's value.
func (s *MotionImageInserter) SetInput(v string) *MotionImageInserter {
	s.Input = &v
	return s
}

// SetInsertionMode sets the InsertionMode field's value.
func (s *MotionImageInserter) SetInsertionMode(v string) *MotionImageInserter {
	s.InsertionMode = &v
	return s
}

// SetOffset sets the Offset field's value.
func (s *MotionImageInserter) SetOffset(v *MotionImageInsertionOffset) *MotionImageInserter {
	s.Offset = v
	return s
}

// SetPlayback sets the Playback field's value.
func (s *MotionImageInserter) SetPlayback(v string) *MotionImageInserter {
	s.Playback = &v
	return s
}

// SetStartTime sets the StartTime field's value.
func (s *MotionImageInserter) SetStartTime(v string) *MotionImageInserter {
	s.StartTime = &v
	return s
}

// For motion overlays that don't have a built-in frame rate, specify the frame
// rate of the overlay in frames per second, as a fraction. For example, specify
// 24 fps as 24/1. The overlay frame rate doesn't need to match the frame rate
// of the underlying video.
type MotionImageInsertionFramerate struct {
	_ struct{} `type:"structure"`

	// The bottom of the fraction that expresses your overlay frame rate. For example,
	// if your frame rate is 24 fps, set this value to 1.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// The top of the fraction that expresses your overlay frame rate. For example,
	// if your frame rate is 24 fps, set this value to 24.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`
}

// String returns the string representation
func (s MotionImageInsertionFramerate) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s MotionImageInsertionFramerate) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *MotionImageInsertionFramerate) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "MotionImageInsertionFramerate"}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetFramerateDenominator sets the FramerateDenominator field's value.
func (s *MotionImageInsertionFramerate) SetFramerateDenominator(v int64) *MotionImageInsertionFramerate {
	s.FramerateDenominator = &v
	return s
}

// SetFramerateNumerator sets the FramerateNumerator field's value.
func (s *MotionImageInsertionFramerate) SetFramerateNumerator(v int64) *MotionImageInsertionFramerate {
	s.FramerateNumerator = &v
	return s
}

// Specify the offset between the upper-left corner of the video frame and the
// top left corner of the overlay.
type MotionImageInsertionOffset struct {
	_ struct{} `type:"structure"`

	// Set the distance, in pixels, between the overlay and the left edge of the
	// video frame.
	ImageX *int64 `locationName:"imageX" type:"integer"`

	// Set the distance, in pixels, between the overlay and the top edge of the
	// video frame.
	ImageY *int64 `locationName:"imageY" type:"integer"`
}

// String returns the string representation
func (s MotionImageInsertionOffset) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s MotionImageInsertionOffset) GoString() string {
	return s.String()
}

// SetImageX sets the ImageX field's value.
func (s *MotionImageInsertionOffset) SetImageX(v int64) *MotionImageInsertionOffset {
	s.ImageX = &v
	return s
}

// SetImageY sets the ImageY field's value.
func (s *MotionImageInsertionOffset) SetImageY(v int64) *MotionImageInsertionOffset {
	s.ImageY = &v
	return s
}

// These settings relate to your QuickTime MOV output container.
type MovSettings struct {
	_ struct{} `type:"structure"`

	// When enabled, include 'clap' atom if appropriate for the video output settings.
	ClapAtom *string `locationName:"clapAtom" type:"string" enum:"MovClapAtom"`

	// When enabled, file composition times will start at zero, composition times
	// in the 'ctts' (composition time to sample) box for B-frames will be negative,
	// and a 'cslg' (composition shift least greatest) box will be included per
	// 14496-1 amendment 1. This improves compatibility with Apple players and tools.
	CslgAtom *string `locationName:"cslgAtom" type:"string" enum:"MovCslgAtom"`

	// When set to XDCAM, writes MPEG2 video streams into the QuickTime file using
	// XDCAM fourcc codes. This increases compatibility with Apple editors and players,
	// but may decrease compatibility with other players. Only applicable when the
	// video codec is MPEG2.
	Mpeg2FourCCControl *string `locationName:"mpeg2FourCCControl" type:"string" enum:"MovMpeg2FourCCControl"`

	// To make this output compatible with Omneon, keep the default value, OMNEON.
	// Unless you need Omneon compatibility, set this value to NONE. When you keep
	// the default value, OMNEON, MediaConvert increases the length of the edit
	// list atom. This might cause file rejections when a recipient of the output
	// file doesn't expect this extra padding.
	PaddingControl *string `locationName:"paddingControl" type:"string" enum:"MovPaddingControl"`

	// Always keep the default value (SELF_CONTAINED) for this setting.
	Reference *string `locationName:"reference" type:"string" enum:"MovReference"`
}

// String returns the string representation
func (s MovSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s MovSettings) GoString() string {
	return s.String()
}

// SetClapAtom sets the ClapAtom field's value.
func (s *MovSettings) SetClapAtom(v string) *MovSettings {
	s.ClapAtom = &v
	return s
}

// SetCslgAtom sets the CslgAtom field's value.
func (s *MovSettings) SetCslgAtom(v string) *MovSettings {
	s.CslgAtom = &v
	return s
}

// SetMpeg2FourCCControl sets the Mpeg2FourCCControl field's value.
func (s *MovSettings) SetMpeg2FourCCControl(v string) *MovSettings {
	s.Mpeg2FourCCControl = &v
	return s
}

// SetPaddingControl sets the PaddingControl field's value.
func (s *MovSettings) SetPaddingControl(v string) *MovSettings {
	s.PaddingControl = &v
	return s
}

// SetReference sets the Reference field's value.
func (s *MovSettings) SetReference(v string) *MovSettings {
	s.Reference = &v
	return s
}

// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value MP2.
type Mp2Settings struct {
	_ struct{} `type:"structure"`

	// Specify the average bitrate in bits per second.
	Bitrate *int64 `locationName:"bitrate" min:"32000" type:"integer"`

	// Set Channels to specify the number of channels in this output audio track.
	// Choosing Mono in the console will give you 1 output channel; choosing Stereo
	// will give you 2. In the API, valid values are 1 and 2.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// Sample rate in Hz.
	SampleRate *int64 `locationName:"sampleRate" min:"32000" type:"integer"`
}

// String returns the string representation
func (s Mp2Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Mp2Settings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Mp2Settings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Mp2Settings"}
	if s.Bitrate != nil && *s.Bitrate < 32000 {
		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 32000))
	}
	if s.Channels != nil && *s.Channels < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Channels", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 32000 {
		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 32000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBitrate sets the Bitrate field's value.
func (s *Mp2Settings) SetBitrate(v int64) *Mp2Settings {
	s.Bitrate = &v
	return s
}

// SetChannels sets the Channels field's value.
func (s *Mp2Settings) SetChannels(v int64) *Mp2Settings {
	s.Channels = &v
	return s
}

// SetSampleRate sets the SampleRate field's value.
func (s *Mp2Settings) SetSampleRate(v int64) *Mp2Settings {
	s.SampleRate = &v
	return s
}

// Required when you set Codec, under AudioDescriptions>CodecSettings, to the
// value MP3.
type Mp3Settings struct {
	_ struct{} `type:"structure"`

	// Specify the average bitrate in bits per second.
	Bitrate *int64 `locationName:"bitrate" min:"16000" type:"integer"`

	// Specify the number of channels in this output audio track. Choosing Mono
	// on the console gives you 1 output channel; choosing Stereo gives you 2. In
	// the API, valid values are 1 and 2.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// Specify whether the service encodes this MP3 audio output with a constant
	// bitrate (CBR) or a variable bitrate (VBR).
	RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Mp3RateControlMode"`

	// Sample rate in Hz.
	SampleRate *int64 `locationName:"sampleRate" min:"22050" type:"integer"`

	// Required when you set Bitrate control mode (rateControlMode) to VBR. Specify
	// the audio quality of this MP3 output from 0 (highest quality) to 9 (lowest
	// quality).
	VbrQuality *int64 `locationName:"vbrQuality" type:"integer"`
}

// String returns the string representation
func (s Mp3Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Mp3Settings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Mp3Settings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Mp3Settings"}
	if s.Bitrate != nil && *s.Bitrate < 16000 {
		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 16000))
	}
	if s.Channels != nil && *s.Channels < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Channels", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 22050 {
		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 22050))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBitrate sets the Bitrate field's value.
func (s *Mp3Settings) SetBitrate(v int64) *Mp3Settings {
	s.Bitrate = &v
	return s
}

// SetChannels sets the Channels field's value.
func (s *Mp3Settings) SetChannels(v int64) *Mp3Settings {
	s.Channels = &v
	return s
}

// SetRateControlMode sets the RateControlMode field's value.
func (s *Mp3Settings) SetRateControlMode(v string) *Mp3Settings {
	s.RateControlMode = &v
	return s
}

// SetSampleRate sets the SampleRate field's value.
func (s *Mp3Settings) SetSampleRate(v int64) *Mp3Settings {
	s.SampleRate = &v
	return s
}

// SetVbrQuality sets the VbrQuality field's value.
func (s *Mp3Settings) SetVbrQuality(v int64) *Mp3Settings {
	s.VbrQuality = &v
	return s
}

// These settings relate to your MP4 output container. You can create audio
// only outputs with this container. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/supported-codecs-containers-audio-only.html#output-codecs-and-containers-supported-for-audio-only.
type Mp4Settings struct {
	_ struct{} `type:"structure"`

	// Specify this setting only when your output will be consumed by a downstream
	// repackaging workflow that is sensitive to very small duration differences
	// between video and audio. For this situation, choose Match video duration
	// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
	// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
	// MediaConvert pads the output audio streams with silence or trims them to
	// ensure that the total duration of each audio stream is at least as long as
	// the total duration of the video stream. After padding or trimming, the audio
	// stream duration is no more than one frame longer than the video stream. MediaConvert
	// applies audio padding or trimming only to the end of the last segment of
	// the output. For unsegmented outputs, MediaConvert adds padding only to the
	// end of the file. When you keep the default value, any minor discrepancies
	// between audio and video duration will depend on your output audio codec.
	AudioDuration *string `locationName:"audioDuration" type:"string" enum:"CmfcAudioDuration"`

	// When enabled, file composition times will start at zero, composition times
	// in the 'ctts' (composition time to sample) box for B-frames will be negative,
	// and a 'cslg' (composition shift least greatest) box will be included per
	// 14496-1 amendment 1. This improves compatibility with Apple players and tools.
	CslgAtom *string `locationName:"cslgAtom" type:"string" enum:"Mp4CslgAtom"`

	// Ignore this setting unless compliance to the CTTS box version specification
	// matters in your workflow. Specify a value of 1 to set your CTTS box version
	// to 1 and make your output compliant with the specification. When you specify
	// a value of 1, you must also set CSLG atom (cslgAtom) to the value INCLUDE.
	// Keep the default value 0 to set your CTTS box version to 0. This can provide
	// backward compatibility for some players and packagers.
	CttsVersion *int64 `locationName:"cttsVersion" type:"integer"`

	// Inserts a free-space box immediately after the moov box.
	FreeSpaceBox *string `locationName:"freeSpaceBox" type:"string" enum:"Mp4FreeSpaceBox"`

	// If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning
	// of the archive as required for progressive downloading. Otherwise it is placed
	// normally at the end.
	MoovPlacement *string `locationName:"moovPlacement" type:"string" enum:"Mp4MoovPlacement"`

	// Overrides the "Major Brand" field in the output file. Usually not necessary
	// to specify.
	Mp4MajorBrand *string `locationName:"mp4MajorBrand" type:"string"`
}

// String returns the string representation
func (s Mp4Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Mp4Settings) GoString() string {
	return s.String()
}

// SetAudioDuration sets the AudioDuration field's value.
func (s *Mp4Settings) SetAudioDuration(v string) *Mp4Settings {
	s.AudioDuration = &v
	return s
}

// SetCslgAtom sets the CslgAtom field's value.
func (s *Mp4Settings) SetCslgAtom(v string) *Mp4Settings {
	s.CslgAtom = &v
	return s
}

// SetCttsVersion sets the CttsVersion field's value.
func (s *Mp4Settings) SetCttsVersion(v int64) *Mp4Settings {
	s.CttsVersion = &v
	return s
}

// SetFreeSpaceBox sets the FreeSpaceBox field's value.
func (s *Mp4Settings) SetFreeSpaceBox(v string) *Mp4Settings {
	s.FreeSpaceBox = &v
	return s
}

// SetMoovPlacement sets the MoovPlacement field's value.
func (s *Mp4Settings) SetMoovPlacement(v string) *Mp4Settings {
	s.MoovPlacement = &v
	return s
}

// SetMp4MajorBrand sets the Mp4MajorBrand field's value.
func (s *Mp4Settings) SetMp4MajorBrand(v string) *Mp4Settings {
	s.Mp4MajorBrand = &v
	return s
}

// These settings relate to the fragmented MP4 container for the segments in
// your DASH outputs.
type MpdSettings struct {
	_ struct{} `type:"structure"`

	// Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH
	// manifest with <Accessibility> elements for embedded 608 captions. This markup
	// isn't generally required, but some video players require it to discover and
	// play embedded 608 captions. Keep the default value, Exclude (EXCLUDE), to
	// leave these elements out. When you enable this setting, this is the markup
	// that MediaConvert includes in your manifest: <Accessibility
	// schemeIdUri="urn:scte:dash:cc:cea-608:2015" value="CC1=eng"/>
	AccessibilityCaptionHints *string `locationName:"accessibilityCaptionHints" type:"string" enum:"MpdAccessibilityCaptionHints"`

	// Specify this setting only when your output will be consumed by a downstream
	// repackaging workflow that is sensitive to very small duration differences
	// between video and audio. For this situation, choose Match video duration
	// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
	// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
	// MediaConvert pads the output audio streams with silence or trims them to
	// ensure that the total duration of each audio stream is at least as long as
	// the total duration of the video stream. After padding or trimming, the audio
	// stream duration is no more than one frame longer than the video stream. MediaConvert
	// applies audio padding or trimming only to the end of the last segment of
	// the output. For unsegmented outputs, MediaConvert adds padding only to the
	// end of the file. When you keep the default value, any minor discrepancies
	// between audio and video duration will depend on your output audio codec.
	AudioDuration *string `locationName:"audioDuration" type:"string" enum:"MpdAudioDuration"`

	// Use this setting only in DASH output groups that include sidecar TTML or
	// IMSC captions. You specify sidecar captions in a separate output from your
	// audio and video. Choose Raw (RAW) for captions in a single XML file in a
	// raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in
	// XML format contained within fragmented MP4 files. This set of fragmented
	// MP4 files is separate from your video and audio fragmented MP4 files.
	CaptionContainerType *string `locationName:"captionContainerType" type:"string" enum:"MpdCaptionContainerType"`

	// Use this setting only when you specify SCTE-35 markers from ESAM. Choose
	// INSERT to put SCTE-35 markers in this output at the insertion points that
	// you specify in an ESAM XML document. Provide the document in the setting
	// SCC XML (sccXml).
	Scte35Esam *string `locationName:"scte35Esam" type:"string" enum:"MpdScte35Esam"`

	// Ignore this setting unless you have SCTE-35 markers in your input video file.
	// Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear
	// in your input to also appear in this output. Choose None (NONE) if you don't
	// want those SCTE-35 markers in this output.
	Scte35Source *string `locationName:"scte35Source" type:"string" enum:"MpdScte35Source"`
}

// String returns the string representation
func (s MpdSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s MpdSettings) GoString() string {
	return s.String()
}

// SetAccessibilityCaptionHints sets the AccessibilityCaptionHints field's value.
func (s *MpdSettings) SetAccessibilityCaptionHints(v string) *MpdSettings {
	s.AccessibilityCaptionHints = &v
	return s
}

// SetAudioDuration sets the AudioDuration field's value.
func (s *MpdSettings) SetAudioDuration(v string) *MpdSettings {
	s.AudioDuration = &v
	return s
}

// SetCaptionContainerType sets the CaptionContainerType field's value.
func (s *MpdSettings) SetCaptionContainerType(v string) *MpdSettings {
	s.CaptionContainerType = &v
	return s
}

// SetScte35Esam sets the Scte35Esam field's value.
func (s *MpdSettings) SetScte35Esam(v string) *MpdSettings {
	s.Scte35Esam = &v
	return s
}

// SetScte35Source sets the Scte35Source field's value.
func (s *MpdSettings) SetScte35Source(v string) *MpdSettings {
	s.Scte35Source = &v
	return s
}

// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value MPEG2.
type Mpeg2Settings struct {
	_ struct{} `type:"structure"`

	// Specify the strength of any adaptive quantization filters that you enable.
	// The value that you choose here applies to the following settings: Spatial
	// adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive
	// quantization (temporalAdaptiveQuantization).
	AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"Mpeg2AdaptiveQuantization"`

	// Specify the average bitrate in bits per second. Required for VBR and CBR.
	// For MS Smooth outputs, bitrates must be unique when rounded down to the nearest
	// multiple of 1000.
	Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"`

	// Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output.
	CodecLevel *string `locationName:"codecLevel" type:"string" enum:"Mpeg2CodecLevel"`

	// Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output.
	CodecProfile *string `locationName:"codecProfile" type:"string" enum:"Mpeg2CodecProfile"`

	// Choose Adaptive to improve subjective video quality for high-motion content.
	// This will cause the service to use fewer B-frames (which infer information
	// based on other frames) for high-motion portions of the video and more B-frames
	// for low-motion portions. The maximum number of B-frames is limited by the
	// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).
	DynamicSubGop *string `locationName:"dynamicSubGop" type:"string" enum:"Mpeg2DynamicSubGop"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Mpeg2FramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Mpeg2FramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"24" type:"integer"`

	// Frequency of closed GOPs. In streaming applications, it is recommended that
	// this be set to 1 so a decoder joining mid-stream will receive an IDR frame
	// as quickly as possible. Setting this value to 0 will break output segmenting.
	GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"`

	// GOP Length (keyframe interval) in frames or seconds. Must be greater than
	// zero.
	GopSize *float64 `locationName:"gopSize" type:"double"`

	// Indicates if the GOP Size in MPEG2 is specified in frames or seconds. If
	// seconds the system will convert the GOP Size into a frame count at run time.
	GopSizeUnits *string `locationName:"gopSizeUnits" type:"string" enum:"Mpeg2GopSizeUnits"`

	// Percentage of the buffer that should initially be filled (HRD buffer model).
	HrdBufferInitialFillPercentage *int64 `locationName:"hrdBufferInitialFillPercentage" type:"integer"`

	// Size of buffer (HRD buffer model) in bits. For example, enter five megabits
	// as 5000000.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Choose the scan line type for the output. Keep the default value, Progressive
	// (PROGRESSIVE) to create a progressive output, regardless of the scan type
	// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
	// to create an output that's interlaced with the same field polarity throughout.
	// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
	// to produce outputs with the same field polarity as the source. For jobs that
	// have multiple inputs, the output field polarity might change over the course
	// of the output. Follow behavior depends on the input scan type. If the source
	// is interlaced, the output will be interlaced with the same polarity as the
	// source. If the source is progressive, the output will be interlaced with
	// top field bottom field first, depending on which of the Follow options you
	// choose.
	InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"Mpeg2InterlaceMode"`

	// Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision
	// for intra-block DC coefficients. If you choose the value auto, the service
	// will automatically select the precision based on the per-frame compression
	// ratio.
	IntraDcPrecision *string `locationName:"intraDcPrecision" type:"string" enum:"Mpeg2IntraDcPrecision"`

	// Maximum bitrate in bits/second. For example, enter five megabits per second
	// as 5000000.
	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`

	// Enforces separation between repeated (cadence) I-frames and I-frames inserted
	// by Scene Change Detection. If a scene change I-frame is within I-interval
	// frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene
	// change I-frame. GOP stretch requires enabling lookahead as well as setting
	// I-interval. The normal cadence resumes for the next GOP. This setting is
	// only used when Scene Change Detect is enabled. Note: Maximum GOP stretch
	// = GOP size + Min-I-interval - 1
	MinIInterval *int64 `locationName:"minIInterval" type:"integer"`

	// Number of B-frames between reference frames.
	NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"`

	// Optional. Specify how the service determines the pixel aspect ratio (PAR)
	// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
	// uses the PAR from your input video for your output. To specify a different
	// PAR in the console, choose any value other than Follow source. To specify
	// a different PAR by editing the JSON job specification, choose SPECIFIED.
	// When you choose SPECIFIED for this setting, you must also specify values
	// for the parNumerator and parDenominator settings.
	ParControl *string `locationName:"parControl" type:"string" enum:"Mpeg2ParControl"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parDenominator is 33.
	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parNumerator is 40.
	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, single-pass encoding.
	QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Mpeg2QualityTuningLevel"`

	// Use Rate control mode (Mpeg2RateControlMode) to specify whether the bitrate
	// is variable (vbr) or constant (cbr).
	RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Mpeg2RateControlMode"`

	// Use this setting for interlaced outputs, when your output frame rate is half
	// of your input frame rate. In this situation, choose Optimized interlacing
	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
	// case, each progressive frame from the input corresponds to an interlaced
	// field in the output. Keep the default value, Basic interlacing (INTERLACED),
	// for all other output frame rates. With basic interlacing, MediaConvert performs
	// any frame rate conversion first and then interlaces the frames. When you
	// choose Optimized interlacing and you set your output frame rate to a value
	// that isn't suitable for optimized interlacing, MediaConvert automatically
	// falls back to basic interlacing. Required settings: To use optimized interlacing,
	// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
	// use optimized interlacing for hard telecine outputs. You must also set Interlace
	// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
	ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"Mpeg2ScanTypeConversionMode"`

	// Enable this setting to insert I-frames at scene changes that the service
	// automatically detects. This improves video quality and is enabled by default.
	SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"Mpeg2SceneChangeDetect"`

	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
	// second (fps). Enable slow PAL to create a 25 fps output. When you enable
	// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
	// your audio to keep it synchronized with the video. Note that enabling this
	// setting will slightly reduce the duration of your video. Required settings:
	// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
	// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
	// 1.
	SlowPal *string `locationName:"slowPal" type:"string" enum:"Mpeg2SlowPal"`

	// Ignore this setting unless you need to comply with a specification that requires
	// a specific value. If you don't have a specification requirement, we recommend
	// that you adjust the softness of your output by using a lower value for the
	// setting Sharpness (sharpness) or by enabling a noise reducer filter (noiseReducerFilter).
	// The Softness (softness) setting specifies the quantization matrices that
	// the encoder uses. Keep the default value, 0, to use the AWS Elemental default
	// matrices. Choose a value from 17 to 128 to use planar interpolation. Increasing
	// values from 17 to 128 result in increasing reduction of high-frequency data.
	// The value 128 results in the softest video.
	Softness *int64 `locationName:"softness" type:"integer"`

	// Keep the default value, Enabled (ENABLED), to adjust quantization within
	// each frame based on spatial variation of content complexity. When you enable
	// this feature, the encoder uses fewer bits on areas that can sustain more
	// distortion with no noticeable visual degradation and uses more bits on areas
	// where any small distortion will be noticeable. For example, complex textured
	// blocks are encoded with fewer bits and smooth textured blocks are encoded
	// with more bits. Enabling this feature will almost always improve your video
	// quality. Note, though, that this feature doesn't take into account where
	// the viewer's attention is likely to be. If viewers are likely to be focusing
	// their attention on a part of the screen with a lot of complex texture, you
	// might choose to disable this feature. Related setting: When you enable spatial
	// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization)
	// depending on your content. For homogeneous content, such as cartoons and
	// video games, set it to Low. For content with a wider variety of textures,
	// set it to High or Higher.
	SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"Mpeg2SpatialAdaptiveQuantization"`

	// Specify whether this output's video uses the D10 syntax. Keep the default
	// value to not use the syntax. Related settings: When you choose D10 (D_10)
	// for your MXF profile (profile), you must also set this value to D10 (D_10).
	Syntax *string `locationName:"syntax" type:"string" enum:"Mpeg2Syntax"`

	// When you do frame rate conversion from 23.976 frames per second (fps) to
	// 29.97 fps, and your output scan type is interlaced, you can optionally enable
	// hard or soft telecine to create a smoother picture. Hard telecine (HARD)
	// produces a 29.97i output. Soft telecine (SOFT) produces an output with a
	// 23.976 output that signals to the video player device to do the conversion
	// during play back. When you keep the default value, None (NONE), MediaConvert
	// does a standard frame rate conversion to 29.97 without doing anything with
	// the field polarity to create a smoother picture.
	Telecine *string `locationName:"telecine" type:"string" enum:"Mpeg2Telecine"`

	// Keep the default value, Enabled (ENABLED), to adjust quantization within
	// each frame based on temporal variation of content complexity. When you enable
	// this feature, the encoder uses fewer bits on areas of the frame that aren't
	// moving and uses more bits on complex objects with sharp edges that move a
	// lot. For example, this feature improves the readability of text tickers on
	// newscasts and scoreboards on sports matches. Enabling this feature will almost
	// always improve your video quality. Note, though, that this feature doesn't
	// take into account where the viewer's attention is likely to be. If viewers
	// are likely to be focusing their attention on a part of the screen that doesn't
	// have moving objects with sharp edges, such as sports athletes' faces, you
	// might choose to disable this feature. Related setting: When you enable temporal
	// quantization, adjust the strength of the filter with the setting Adaptive
	// quantization (adaptiveQuantization).
	TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"Mpeg2TemporalAdaptiveQuantization"`
}

// String returns the string representation
func (s Mpeg2Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Mpeg2Settings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Mpeg2Settings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "Mpeg2Settings"} if s.Bitrate != nil && *s.Bitrate < 1000 { invalidParams.Add(request.NewErrParamMinValue("Bitrate", 1000)) } if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1)) } if s.FramerateNumerator != nil && *s.FramerateNumerator < 24 { invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 24)) } if s.MaxBitrate != nil && *s.MaxBitrate < 1000 { invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000)) } if s.ParDenominator != nil && *s.ParDenominator < 1 { invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1)) } if s.ParNumerator != nil && *s.ParNumerator < 1 { invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAdaptiveQuantization sets the AdaptiveQuantization field's value. func (s *Mpeg2Settings) SetAdaptiveQuantization(v string) *Mpeg2Settings { s.AdaptiveQuantization = &v return s } // SetBitrate sets the Bitrate field's value. func (s *Mpeg2Settings) SetBitrate(v int64) *Mpeg2Settings { s.Bitrate = &v return s } // SetCodecLevel sets the CodecLevel field's value. func (s *Mpeg2Settings) SetCodecLevel(v string) *Mpeg2Settings { s.CodecLevel = &v return s } // SetCodecProfile sets the CodecProfile field's value. func (s *Mpeg2Settings) SetCodecProfile(v string) *Mpeg2Settings { s.CodecProfile = &v return s } // SetDynamicSubGop sets the DynamicSubGop field's value. func (s *Mpeg2Settings) SetDynamicSubGop(v string) *Mpeg2Settings { s.DynamicSubGop = &v return s } // SetFramerateControl sets the FramerateControl field's value. func (s *Mpeg2Settings) SetFramerateControl(v string) *Mpeg2Settings { s.FramerateControl = &v return s } // SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value. 
func (s *Mpeg2Settings) SetFramerateConversionAlgorithm(v string) *Mpeg2Settings { s.FramerateConversionAlgorithm = &v return s } // SetFramerateDenominator sets the FramerateDenominator field's value. func (s *Mpeg2Settings) SetFramerateDenominator(v int64) *Mpeg2Settings { s.FramerateDenominator = &v return s } // SetFramerateNumerator sets the FramerateNumerator field's value. func (s *Mpeg2Settings) SetFramerateNumerator(v int64) *Mpeg2Settings { s.FramerateNumerator = &v return s } // SetGopClosedCadence sets the GopClosedCadence field's value. func (s *Mpeg2Settings) SetGopClosedCadence(v int64) *Mpeg2Settings { s.GopClosedCadence = &v return s } // SetGopSize sets the GopSize field's value. func (s *Mpeg2Settings) SetGopSize(v float64) *Mpeg2Settings { s.GopSize = &v return s } // SetGopSizeUnits sets the GopSizeUnits field's value. func (s *Mpeg2Settings) SetGopSizeUnits(v string) *Mpeg2Settings { s.GopSizeUnits = &v return s } // SetHrdBufferInitialFillPercentage sets the HrdBufferInitialFillPercentage field's value. func (s *Mpeg2Settings) SetHrdBufferInitialFillPercentage(v int64) *Mpeg2Settings { s.HrdBufferInitialFillPercentage = &v return s } // SetHrdBufferSize sets the HrdBufferSize field's value. func (s *Mpeg2Settings) SetHrdBufferSize(v int64) *Mpeg2Settings { s.HrdBufferSize = &v return s } // SetInterlaceMode sets the InterlaceMode field's value. func (s *Mpeg2Settings) SetInterlaceMode(v string) *Mpeg2Settings { s.InterlaceMode = &v return s } // SetIntraDcPrecision sets the IntraDcPrecision field's value. func (s *Mpeg2Settings) SetIntraDcPrecision(v string) *Mpeg2Settings { s.IntraDcPrecision = &v return s } // SetMaxBitrate sets the MaxBitrate field's value. func (s *Mpeg2Settings) SetMaxBitrate(v int64) *Mpeg2Settings { s.MaxBitrate = &v return s } // SetMinIInterval sets the MinIInterval field's value. 
func (s *Mpeg2Settings) SetMinIInterval(v int64) *Mpeg2Settings { s.MinIInterval = &v return s } // SetNumberBFramesBetweenReferenceFrames sets the NumberBFramesBetweenReferenceFrames field's value. func (s *Mpeg2Settings) SetNumberBFramesBetweenReferenceFrames(v int64) *Mpeg2Settings { s.NumberBFramesBetweenReferenceFrames = &v return s } // SetParControl sets the ParControl field's value. func (s *Mpeg2Settings) SetParControl(v string) *Mpeg2Settings { s.ParControl = &v return s } // SetParDenominator sets the ParDenominator field's value. func (s *Mpeg2Settings) SetParDenominator(v int64) *Mpeg2Settings { s.ParDenominator = &v return s } // SetParNumerator sets the ParNumerator field's value. func (s *Mpeg2Settings) SetParNumerator(v int64) *Mpeg2Settings { s.ParNumerator = &v return s } // SetQualityTuningLevel sets the QualityTuningLevel field's value. func (s *Mpeg2Settings) SetQualityTuningLevel(v string) *Mpeg2Settings { s.QualityTuningLevel = &v return s } // SetRateControlMode sets the RateControlMode field's value. func (s *Mpeg2Settings) SetRateControlMode(v string) *Mpeg2Settings { s.RateControlMode = &v return s } // SetScanTypeConversionMode sets the ScanTypeConversionMode field's value. func (s *Mpeg2Settings) SetScanTypeConversionMode(v string) *Mpeg2Settings { s.ScanTypeConversionMode = &v return s } // SetSceneChangeDetect sets the SceneChangeDetect field's value. func (s *Mpeg2Settings) SetSceneChangeDetect(v string) *Mpeg2Settings { s.SceneChangeDetect = &v return s } // SetSlowPal sets the SlowPal field's value. func (s *Mpeg2Settings) SetSlowPal(v string) *Mpeg2Settings { s.SlowPal = &v return s } // SetSoftness sets the Softness field's value. func (s *Mpeg2Settings) SetSoftness(v int64) *Mpeg2Settings { s.Softness = &v return s } // SetSpatialAdaptiveQuantization sets the SpatialAdaptiveQuantization field's value. 
func (s *Mpeg2Settings) SetSpatialAdaptiveQuantization(v string) *Mpeg2Settings { s.SpatialAdaptiveQuantization = &v return s } // SetSyntax sets the Syntax field's value. func (s *Mpeg2Settings) SetSyntax(v string) *Mpeg2Settings { s.Syntax = &v return s } // SetTelecine sets the Telecine field's value. func (s *Mpeg2Settings) SetTelecine(v string) *Mpeg2Settings { s.Telecine = &v return s } // SetTemporalAdaptiveQuantization sets the TemporalAdaptiveQuantization field's value. func (s *Mpeg2Settings) SetTemporalAdaptiveQuantization(v string) *Mpeg2Settings { s.TemporalAdaptiveQuantization = &v return s } // Specify the details for each additional Microsoft Smooth Streaming manifest // that you want the service to generate for this output group. Each manifest // can reference a different subset of outputs in the group. type MsSmoothAdditionalManifest struct { _ struct{} `type:"structure"` // Specify a name modifier that the service adds to the name of this manifest // to make it different from the file names of the other main manifests in the // output group. For example, say that the default main manifest for your Microsoft // Smooth group is film-name.ismv. If you enter "-no-premium" for this setting, // then the file name the service generates for this top-level manifest is film-name-no-premium.ismv. ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"` // Specify the outputs that you want this additional top-level manifest to reference. SelectedOutputs []*string `locationName:"selectedOutputs" type:"list"` } // String returns the string representation func (s MsSmoothAdditionalManifest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s MsSmoothAdditionalManifest) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *MsSmoothAdditionalManifest) Validate() error { invalidParams := request.ErrInvalidParams{Context: "MsSmoothAdditionalManifest"} if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 { invalidParams.Add(request.NewErrParamMinLen("ManifestNameModifier", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetManifestNameModifier sets the ManifestNameModifier field's value. func (s *MsSmoothAdditionalManifest) SetManifestNameModifier(v string) *MsSmoothAdditionalManifest { s.ManifestNameModifier = &v return s } // SetSelectedOutputs sets the SelectedOutputs field's value. func (s *MsSmoothAdditionalManifest) SetSelectedOutputs(v []*string) *MsSmoothAdditionalManifest { s.SelectedOutputs = v return s } // If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify // the value SpekeKeyProvider. type MsSmoothEncryptionSettings struct { _ struct{} `type:"structure"` // If your output group type is HLS, DASH, or Microsoft Smooth, use these settings // when doing DRM encryption with a SPEKE-compliant key provider. If your output // group type is CMAF, use the SpekeKeyProviderCmaf settings instead. SpekeKeyProvider *SpekeKeyProvider `locationName:"spekeKeyProvider" type:"structure"` } // String returns the string representation func (s MsSmoothEncryptionSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s MsSmoothEncryptionSettings) GoString() string { return s.String() } // SetSpekeKeyProvider sets the SpekeKeyProvider field's value. func (s *MsSmoothEncryptionSettings) SetSpekeKeyProvider(v *SpekeKeyProvider) *MsSmoothEncryptionSettings { s.SpekeKeyProvider = v return s } // Settings related to your Microsoft Smooth Streaming output package. For more // information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. 
// When you work directly in your JSON job specification, include this object // and any required children when you set Type, under OutputGroupSettings, to // MS_SMOOTH_GROUP_SETTINGS. type MsSmoothGroupSettings struct { _ struct{} `type:"structure"` // By default, the service creates one .ism Microsoft Smooth Streaming manifest // for each Microsoft Smooth Streaming output group in your job. This default // manifest references every output in the output group. To create additional // manifests that reference a subset of the outputs in the output group, specify // a list of them here. AdditionalManifests []*MsSmoothAdditionalManifest `locationName:"additionalManifests" type:"list"` // COMBINE_DUPLICATE_STREAMS combines identical audio encoding settings across // a Microsoft Smooth output group into a single audio stream. AudioDeduplication *string `locationName:"audioDeduplication" type:"string" enum:"MsSmoothAudioDeduplication"` // Use Destination (Destination) to specify the S3 output location and the output // filename base. Destination accepts format identifiers. If you do not specify // the base filename in the URI, the service will use the filename of the input // file. If your job has multiple inputs, the service uses the filename of the // first input file. Destination *string `locationName:"destination" type:"string"` // Settings associated with the destination. Will vary based on the type of // destination DestinationSettings *DestinationSettings `locationName:"destinationSettings" type:"structure"` // If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify // the value SpekeKeyProvider. Encryption *MsSmoothEncryptionSettings `locationName:"encryption" type:"structure"` // Use Fragment length (FragmentLength) to specify the mp4 fragment sizes in // seconds. Fragment length must be compatible with GOP size and frame rate. 
FragmentLength *int64 `locationName:"fragmentLength" min:"1" type:"integer"` // Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding // format for the server and client manifest. Valid options are utf8 and utf16. ManifestEncoding *string `locationName:"manifestEncoding" type:"string" enum:"MsSmoothManifestEncoding"` } // String returns the string representation func (s MsSmoothGroupSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s MsSmoothGroupSettings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *MsSmoothGroupSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "MsSmoothGroupSettings"} if s.FragmentLength != nil && *s.FragmentLength < 1 { invalidParams.Add(request.NewErrParamMinValue("FragmentLength", 1)) } if s.AdditionalManifests != nil { for i, v := range s.AdditionalManifests { if v == nil { continue } if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(request.ErrInvalidParams)) } } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAdditionalManifests sets the AdditionalManifests field's value. func (s *MsSmoothGroupSettings) SetAdditionalManifests(v []*MsSmoothAdditionalManifest) *MsSmoothGroupSettings { s.AdditionalManifests = v return s } // SetAudioDeduplication sets the AudioDeduplication field's value. func (s *MsSmoothGroupSettings) SetAudioDeduplication(v string) *MsSmoothGroupSettings { s.AudioDeduplication = &v return s } // SetDestination sets the Destination field's value. func (s *MsSmoothGroupSettings) SetDestination(v string) *MsSmoothGroupSettings { s.Destination = &v return s } // SetDestinationSettings sets the DestinationSettings field's value. 
func (s *MsSmoothGroupSettings) SetDestinationSettings(v *DestinationSettings) *MsSmoothGroupSettings { s.DestinationSettings = v return s } // SetEncryption sets the Encryption field's value. func (s *MsSmoothGroupSettings) SetEncryption(v *MsSmoothEncryptionSettings) *MsSmoothGroupSettings { s.Encryption = v return s } // SetFragmentLength sets the FragmentLength field's value. func (s *MsSmoothGroupSettings) SetFragmentLength(v int64) *MsSmoothGroupSettings { s.FragmentLength = &v return s } // SetManifestEncoding sets the ManifestEncoding field's value. func (s *MsSmoothGroupSettings) SetManifestEncoding(v string) *MsSmoothGroupSettings { s.ManifestEncoding = &v return s } // These settings relate to your MXF output container. type MxfSettings struct { _ struct{} `type:"structure"` // Optional. When you have AFD signaling set up in your output video stream, // use this setting to choose whether to also include it in the MXF wrapper. // Choose Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper. // Choose Copy from video stream (COPY_FROM_VIDEO) to copy the AFD values from // the video stream for this output to the MXF wrapper. Regardless of which // option you choose, the AFD values remain in the video stream. Related settings: // To set up your output to include or exclude AFD values, see AfdSignaling, // under VideoDescription. On the console, find AFD signaling under the output's // video encoding settings. AfdSignaling *string `locationName:"afdSignaling" type:"string" enum:"MxfAfdSignaling"` // Specify the MXF profile, also called shim, for this output. When you choose // Auto, MediaConvert chooses a profile based on the video codec and resolution. // For a list of codecs supported with each MXF profile, see https://docs.aws.amazon.com/mediaconvert/latest/ug/codecs-supported-with-each-mxf-profile.html. 
// For more information about the automatic selection behavior, see https://docs.aws.amazon.com/mediaconvert/latest/ug/default-automatic-selection-of-mxf-profiles.html. Profile *string `locationName:"profile" type:"string" enum:"MxfProfile"` // Specify the XAVC profile settings for MXF outputs when you set your MXF profile // to XAVC. XavcProfileSettings *MxfXavcProfileSettings `locationName:"xavcProfileSettings" type:"structure"` } // String returns the string representation func (s MxfSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s MxfSettings) GoString() string { return s.String() } // SetAfdSignaling sets the AfdSignaling field's value. func (s *MxfSettings) SetAfdSignaling(v string) *MxfSettings { s.AfdSignaling = &v return s } // SetProfile sets the Profile field's value. func (s *MxfSettings) SetProfile(v string) *MxfSettings { s.Profile = &v return s } // SetXavcProfileSettings sets the XavcProfileSettings field's value. func (s *MxfSettings) SetXavcProfileSettings(v *MxfXavcProfileSettings) *MxfSettings { s.XavcProfileSettings = v return s } // Specify the XAVC profile settings for MXF outputs when you set your MXF profile // to XAVC. type MxfXavcProfileSettings struct { _ struct{} `type:"structure"` // To create an output that complies with the XAVC file format guidelines for // interoperability, keep the default value, Drop frames for compliance (DROP_FRAMES_FOR_COMPLIANCE). // To include all frames from your input in this output, keep the default setting, // Allow any duration (ALLOW_ANY_DURATION). The number of frames that MediaConvert // excludes when you set this to Drop frames for compliance depends on the output // frame rate and duration. 
DurationMode *string `locationName:"durationMode" type:"string" enum:"MxfXavcDurationMode"` // Specify a value for this setting only for outputs that you set up with one // of these two XAVC profiles: XAVC HD Intra CBG (XAVC_HD_INTRA_CBG) or XAVC // 4K Intra CBG (XAVC_4K_INTRA_CBG). Specify the amount of space in each frame // that the service reserves for ancillary data, such as teletext captions. // The default value for this setting is 1492 bytes per frame. This should be // sufficient to prevent overflow unless you have multiple pages of teletext // captions data. If you have a large amount of teletext data, specify a larger // number. MaxAncDataSize *int64 `locationName:"maxAncDataSize" type:"integer"` } // String returns the string representation func (s MxfXavcProfileSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s MxfXavcProfileSettings) GoString() string { return s.String() } // SetDurationMode sets the DurationMode field's value. func (s *MxfXavcProfileSettings) SetDurationMode(v string) *MxfXavcProfileSettings { s.DurationMode = &v return s } // SetMaxAncDataSize sets the MaxAncDataSize field's value. func (s *MxfXavcProfileSettings) SetMaxAncDataSize(v int64) *MxfXavcProfileSettings { s.MaxAncDataSize = &v return s } // For forensic video watermarking, MediaConvert supports Nagra NexGuard File // Marker watermarking. MediaConvert supports both PreRelease Content (NGPR/G2) // and OTT Streaming workflows. type NexGuardFileMarkerSettings struct { _ struct{} `type:"structure"` // Use the base64 license string that Nagra provides you. Enter it directly // in your JSON job specification or in the console. Required when you include // Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in // your job. License *string `locationName:"license" min:"1" type:"string"` // Specify the payload ID that you want associated with this output. 
Valid values // vary depending on your Nagra NexGuard forensic watermarking workflow. Required // when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) // in your job. For PreRelease Content (NGPR/G2), specify an integer from 1 // through 4,194,303. You must generate a unique ID for each asset you watermark, // and keep a record of which ID you have assigned to each asset. Neither Nagra // nor MediaConvert keep track of the relationship between output files and // your IDs. For OTT Streaming, create two adaptive bitrate (ABR) stacks for // each asset. Do this by setting up two output groups. For one output group, // set the value of Payload ID (payload) to 0 in every output. For the other // output group, set Payload ID (payload) to 1 in every output. Payload *int64 `locationName:"payload" type:"integer"` // Enter one of the watermarking preset strings that Nagra provides you. Required // when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) // in your job. Preset *string `locationName:"preset" min:"1" type:"string"` // Optional. Ignore this setting unless Nagra support directs you to specify // a value. When you don't specify a value here, the Nagra NexGuard library // uses its default value. Strength *string `locationName:"strength" type:"string" enum:"WatermarkingStrength"` } // String returns the string representation func (s NexGuardFileMarkerSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s NexGuardFileMarkerSettings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *NexGuardFileMarkerSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "NexGuardFileMarkerSettings"} if s.License != nil && len(*s.License) < 1 { invalidParams.Add(request.NewErrParamMinLen("License", 1)) } if s.Preset != nil && len(*s.Preset) < 1 { invalidParams.Add(request.NewErrParamMinLen("Preset", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetLicense sets the License field's value. func (s *NexGuardFileMarkerSettings) SetLicense(v string) *NexGuardFileMarkerSettings { s.License = &v return s } // SetPayload sets the Payload field's value. func (s *NexGuardFileMarkerSettings) SetPayload(v int64) *NexGuardFileMarkerSettings { s.Payload = &v return s } // SetPreset sets the Preset field's value. func (s *NexGuardFileMarkerSettings) SetPreset(v string) *NexGuardFileMarkerSettings { s.Preset = &v return s } // SetStrength sets the Strength field's value. func (s *NexGuardFileMarkerSettings) SetStrength(v string) *NexGuardFileMarkerSettings { s.Strength = &v return s } // Settings for your Nielsen configuration. If you don't do Nielsen measurement // and analytics, ignore these settings. When you enable Nielsen configuration // (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs // in the job. To enable Nielsen configuration programmatically, include an // instance of nielsenConfiguration in your JSON job specification. Even if // you don't include any children of nielsenConfiguration, you still enable // the setting. type NielsenConfiguration struct { _ struct{} `type:"structure"` // Nielsen has discontinued the use of breakout code functionality. If you must // include this property, set the value to zero. BreakoutCode *int64 `locationName:"breakoutCode" type:"integer"` // Use Distributor ID (DistributorID) to specify the distributor ID that is // assigned to your organization by Neilsen. 
DistributorId *string `locationName:"distributorId" type:"string"` } // String returns the string representation func (s NielsenConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s NielsenConfiguration) GoString() string { return s.String() } // SetBreakoutCode sets the BreakoutCode field's value. func (s *NielsenConfiguration) SetBreakoutCode(v int64) *NielsenConfiguration { s.BreakoutCode = &v return s } // SetDistributorId sets the DistributorId field's value. func (s *NielsenConfiguration) SetDistributorId(v string) *NielsenConfiguration { s.DistributorId = &v return s } // Ignore these settings unless you are using Nielsen non-linear watermarking. // Specify the values that MediaConvert uses to generate and place Nielsen watermarks // in your output audio. In addition to specifying these values, you also need // to set up your cloud TIC server. These settings apply to every output in // your job. The MediaConvert implementation is currently with the following // Nielsen versions: Nielsen Watermark SDK Version 5.2.1 Nielsen NLM Watermark // Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0] type NielsenNonLinearWatermarkSettings struct { _ struct{} `type:"structure"` // Choose the type of Nielsen watermarks that you want in your outputs. When // you choose NAES 2 and NW (NAES2_AND_NW), you must provide a value for the // setting SID (sourceId). When you choose CBET (CBET), you must provide a value // for the setting CSID (cbetSourceId). When you choose NAES 2, NW, and CBET // (NAES2_AND_NW_AND_CBET), you must provide values for both of these settings. ActiveWatermarkProcess *string `locationName:"activeWatermarkProcess" type:"string" enum:"NielsenActiveWatermarkProcessType"` // Optional. Use this setting when you want the service to include an ADI file // in the Nielsen metadata .zip file. To provide an ADI file, store it in Amazon // S3 and provide a URL to it here. 
The URL should be in the following format: // S3://bucket/path/ADI-file. For more information about the metadata .zip file, // see the setting Metadata destination (metadataDestination). AdiFilename *string `locationName:"adiFilename" type:"string"` // Use the asset ID that you provide to Nielsen to uniquely identify this asset. // Required for all Nielsen non-linear watermarking. AssetId *string `locationName:"assetId" min:"1" type:"string"` // Use the asset name that you provide to Nielsen for this asset. Required for // all Nielsen non-linear watermarking. AssetName *string `locationName:"assetName" min:"1" type:"string"` // Use the CSID that Nielsen provides to you. This CBET source ID should be // unique to your Nielsen account but common to all of your output assets that // have CBET watermarking. Required when you choose a value for the setting // Watermark types (ActiveWatermarkProcess) that includes CBET. CbetSourceId *string `locationName:"cbetSourceId" type:"string"` // Optional. If this asset uses an episode ID with Nielsen, provide it here. EpisodeId *string `locationName:"episodeId" min:"1" type:"string"` // Specify the Amazon S3 location where you want MediaConvert to save your Nielsen // non-linear metadata .zip file. This Amazon S3 bucket must be in the same // Region as the one where you do your MediaConvert transcoding. If you want // to include an ADI file in this .zip file, use the setting ADI file (adiFilename) // to specify it. MediaConvert delivers the Nielsen metadata .zip files only // to your metadata destination Amazon S3 bucket. It doesn't deliver the .zip // files to Nielsen. You are responsible for delivering the metadata .zip files // to Nielsen. MetadataDestination *string `locationName:"metadataDestination" type:"string"` // Use the SID that Nielsen provides to you. This source ID should be unique // to your Nielsen account but common to all of your output assets. Required // for all Nielsen non-linear watermarking. 
This ID should be unique to your // Nielsen account but common to all of your output assets. Required for all // Nielsen non-linear watermarking. SourceId *int64 `locationName:"sourceId" type:"integer"` // Required. Specify whether your source content already contains Nielsen non-linear // watermarks. When you set this value to Watermarked (WATERMARKED), the service // fails the job. Nielsen requires that you add non-linear watermarking to only // clean content that doesn't already have non-linear Nielsen watermarks. SourceWatermarkStatus *string `locationName:"sourceWatermarkStatus" type:"string" enum:"NielsenSourceWatermarkStatusType"` // Specify the endpoint for the TIC server that you have deployed and configured // in the AWS Cloud. Required for all Nielsen non-linear watermarking. MediaConvert // can't connect directly to a TIC server. Instead, you must use API Gateway // to provide a RESTful interface between MediaConvert and a TIC server that // you deploy in your AWS account. For more information on deploying a TIC server // in your AWS account and the required API Gateway, contact Nielsen support. TicServerUrl *string `locationName:"ticServerUrl" type:"string"` // To create assets that have the same TIC values in each audio track, keep // the default value Share TICs (SAME_TICS_PER_TRACK). To create assets that // have unique TIC values for each audio track, choose Use unique TICs (RESERVE_UNIQUE_TICS_PER_TRACK). UniqueTicPerAudioTrack *string `locationName:"uniqueTicPerAudioTrack" type:"string" enum:"NielsenUniqueTicPerAudioTrackType"` } // String returns the string representation func (s NielsenNonLinearWatermarkSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s NielsenNonLinearWatermarkSettings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *NielsenNonLinearWatermarkSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "NielsenNonLinearWatermarkSettings"} if s.AssetId != nil && len(*s.AssetId) < 1 { invalidParams.Add(request.NewErrParamMinLen("AssetId", 1)) } if s.AssetName != nil && len(*s.AssetName) < 1 { invalidParams.Add(request.NewErrParamMinLen("AssetName", 1)) } if s.EpisodeId != nil && len(*s.EpisodeId) < 1 { invalidParams.Add(request.NewErrParamMinLen("EpisodeId", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetActiveWatermarkProcess sets the ActiveWatermarkProcess field's value. func (s *NielsenNonLinearWatermarkSettings) SetActiveWatermarkProcess(v string) *NielsenNonLinearWatermarkSettings { s.ActiveWatermarkProcess = &v return s } // SetAdiFilename sets the AdiFilename field's value. func (s *NielsenNonLinearWatermarkSettings) SetAdiFilename(v string) *NielsenNonLinearWatermarkSettings { s.AdiFilename = &v return s } // SetAssetId sets the AssetId field's value. func (s *NielsenNonLinearWatermarkSettings) SetAssetId(v string) *NielsenNonLinearWatermarkSettings { s.AssetId = &v return s } // SetAssetName sets the AssetName field's value. func (s *NielsenNonLinearWatermarkSettings) SetAssetName(v string) *NielsenNonLinearWatermarkSettings { s.AssetName = &v return s } // SetCbetSourceId sets the CbetSourceId field's value. func (s *NielsenNonLinearWatermarkSettings) SetCbetSourceId(v string) *NielsenNonLinearWatermarkSettings { s.CbetSourceId = &v return s } // SetEpisodeId sets the EpisodeId field's value. func (s *NielsenNonLinearWatermarkSettings) SetEpisodeId(v string) *NielsenNonLinearWatermarkSettings { s.EpisodeId = &v return s } // SetMetadataDestination sets the MetadataDestination field's value. func (s *NielsenNonLinearWatermarkSettings) SetMetadataDestination(v string) *NielsenNonLinearWatermarkSettings { s.MetadataDestination = &v return s } // SetSourceId sets the SourceId field's value. 
func (s *NielsenNonLinearWatermarkSettings) SetSourceId(v int64) *NielsenNonLinearWatermarkSettings {
	s.SourceId = &v
	return s
}

// SetSourceWatermarkStatus sets the SourceWatermarkStatus field's value.
func (s *NielsenNonLinearWatermarkSettings) SetSourceWatermarkStatus(v string) *NielsenNonLinearWatermarkSettings {
	s.SourceWatermarkStatus = &v
	return s
}

// SetTicServerUrl sets the TicServerUrl field's value.
func (s *NielsenNonLinearWatermarkSettings) SetTicServerUrl(v string) *NielsenNonLinearWatermarkSettings {
	s.TicServerUrl = &v
	return s
}

// SetUniqueTicPerAudioTrack sets the UniqueTicPerAudioTrack field's value.
func (s *NielsenNonLinearWatermarkSettings) SetUniqueTicPerAudioTrack(v string) *NielsenNonLinearWatermarkSettings {
	s.UniqueTicPerAudioTrack = &v
	return s
}

// Enable the Noise reducer (NoiseReducer) feature to remove noise from your
// video output if necessary. Enable or disable this feature for each output
// individually. This setting is disabled by default. When you enable Noise
// reducer (NoiseReducer), you must also select a value for Noise reducer filter
// (NoiseReducerFilter).
type NoiseReducer struct {
	_ struct{} `type:"structure"`

	// Use Noise reducer filter (NoiseReducerFilter) to select one of the following
	// spatial image filtering functions. To use this setting, you must also enable
	// Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing
	// noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution
	// filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain
	// filtering based on JND principles. * Temporal optimizes video quality for
	// complex motion.
	Filter *string `locationName:"filter" type:"string" enum:"NoiseReducerFilter"`

	// Settings for a noise reducer filter
	FilterSettings *NoiseReducerFilterSettings `locationName:"filterSettings" type:"structure"`

	// Noise reducer filter settings for spatial filter.
	SpatialFilterSettings *NoiseReducerSpatialFilterSettings `locationName:"spatialFilterSettings" type:"structure"`

	// Noise reducer filter settings for temporal filter.
	TemporalFilterSettings *NoiseReducerTemporalFilterSettings `locationName:"temporalFilterSettings" type:"structure"`
}

// String returns the string representation
func (s NoiseReducer) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s NoiseReducer) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *NoiseReducer) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "NoiseReducer"}
	// Validation is delegated to the nested settings structs; any violations
	// they report are re-rooted under this struct's field name.
	if s.SpatialFilterSettings != nil {
		if err := s.SpatialFilterSettings.Validate(); err != nil {
			invalidParams.AddNested("SpatialFilterSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.TemporalFilterSettings != nil {
		if err := s.TemporalFilterSettings.Validate(); err != nil {
			invalidParams.AddNested("TemporalFilterSettings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetFilter sets the Filter field's value.
func (s *NoiseReducer) SetFilter(v string) *NoiseReducer {
	s.Filter = &v
	return s
}

// SetFilterSettings sets the FilterSettings field's value.
func (s *NoiseReducer) SetFilterSettings(v *NoiseReducerFilterSettings) *NoiseReducer {
	s.FilterSettings = v
	return s
}

// SetSpatialFilterSettings sets the SpatialFilterSettings field's value.
func (s *NoiseReducer) SetSpatialFilterSettings(v *NoiseReducerSpatialFilterSettings) *NoiseReducer {
	s.SpatialFilterSettings = v
	return s
}

// SetTemporalFilterSettings sets the TemporalFilterSettings field's value.
func (s *NoiseReducer) SetTemporalFilterSettings(v *NoiseReducerTemporalFilterSettings) *NoiseReducer {
	s.TemporalFilterSettings = v
	return s
}

// Settings for a noise reducer filter
type NoiseReducerFilterSettings struct {
	_ struct{} `type:"structure"`

	// Relative strength of noise reducing filter. Higher values produce stronger
	// filtering.
	Strength *int64 `locationName:"strength" type:"integer"`
}

// String returns the string representation
func (s NoiseReducerFilterSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s NoiseReducerFilterSettings) GoString() string {
	return s.String()
}

// SetStrength sets the Strength field's value.
// It returns s to allow chained calls.
func (s *NoiseReducerFilterSettings) SetStrength(v int64) *NoiseReducerFilterSettings {
	s.Strength = &v
	return s
}

// Noise reducer filter settings for spatial filter.
type NoiseReducerSpatialFilterSettings struct {
	_ struct{} `type:"structure"`

	// Specify strength of post noise reduction sharpening filter, with 0 disabling
	// the filter and 3 enabling it at maximum strength.
	PostFilterSharpenStrength *int64 `locationName:"postFilterSharpenStrength" type:"integer"`

	// The speed of the filter, from -2 (lower speed) to 3 (higher speed), with
	// 0 being the nominal value.
	Speed *int64 `locationName:"speed" type:"integer"`

	// Relative strength of noise reducing filter. Higher values produce stronger
	// filtering.
	Strength *int64 `locationName:"strength" type:"integer"`
}

// String returns the string representation
func (s NoiseReducerSpatialFilterSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s NoiseReducerSpatialFilterSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *NoiseReducerSpatialFilterSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "NoiseReducerSpatialFilterSettings"}
	// Only the lower bound of Speed (-2) is enforced client-side; other
	// range checks are left to the service.
	if s.Speed != nil && *s.Speed < -2 {
		invalidParams.Add(request.NewErrParamMinValue("Speed", -2))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetPostFilterSharpenStrength sets the PostFilterSharpenStrength field's value.
func (s *NoiseReducerSpatialFilterSettings) SetPostFilterSharpenStrength(v int64) *NoiseReducerSpatialFilterSettings {
	s.PostFilterSharpenStrength = &v
	return s
}

// SetSpeed sets the Speed field's value.
func (s *NoiseReducerSpatialFilterSettings) SetSpeed(v int64) *NoiseReducerSpatialFilterSettings {
	s.Speed = &v
	return s
}

// SetStrength sets the Strength field's value.
func (s *NoiseReducerSpatialFilterSettings) SetStrength(v int64) *NoiseReducerSpatialFilterSettings {
	s.Strength = &v
	return s
}

// Noise reducer filter settings for temporal filter.
type NoiseReducerTemporalFilterSettings struct {
	_ struct{} `type:"structure"`

	// Use Aggressive mode for content that has complex motion. Higher values produce
	// stronger temporal filtering. This filters highly complex scenes more aggressively
	// and creates better VQ for low bitrate outputs.
	AggressiveMode *int64 `locationName:"aggressiveMode" type:"integer"`

	// Optional. When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL),
	// you can use this setting to apply sharpening. The default behavior, Auto
	// (AUTO), allows the transcoder to determine whether to apply filtering, depending
	// on input type and quality. When you set Noise reducer to Temporal, your output
	// bandwidth is reduced. When Post temporal sharpening is also enabled, that
	// bandwidth reduction is smaller.
	PostTemporalSharpening *string `locationName:"postTemporalSharpening" type:"string" enum:"NoiseFilterPostTemporalSharpening"`

	// The speed of the filter (higher number is faster). Low setting reduces bit
	// rate at the cost of transcode time, high setting improves transcode time
	// at the cost of bit rate.
	Speed *int64 `locationName:"speed" type:"integer"`

	// Specify the strength of the noise reducing filter on this output. Higher
	// values produce stronger filtering. We recommend the following value ranges,
	// depending on the result that you want: * 0-2 for complexity reduction with
	// minimal sharpness loss * 2-8 for complexity reduction with image preservation
	// * 8-16 for a high level of complexity reduction
	Strength *int64 `locationName:"strength" type:"integer"`
}

// String returns the string representation
func (s NoiseReducerTemporalFilterSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s NoiseReducerTemporalFilterSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *NoiseReducerTemporalFilterSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "NoiseReducerTemporalFilterSettings"}
	// Note the temporal filter's minimum Speed is -1, unlike the spatial
	// filter's -2.
	if s.Speed != nil && *s.Speed < -1 {
		invalidParams.Add(request.NewErrParamMinValue("Speed", -1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAggressiveMode sets the AggressiveMode field's value.
func (s *NoiseReducerTemporalFilterSettings) SetAggressiveMode(v int64) *NoiseReducerTemporalFilterSettings {
	s.AggressiveMode = &v
	return s
}

// SetPostTemporalSharpening sets the PostTemporalSharpening field's value.
func (s *NoiseReducerTemporalFilterSettings) SetPostTemporalSharpening(v string) *NoiseReducerTemporalFilterSettings {
	s.PostTemporalSharpening = &v
	return s
}

// SetSpeed sets the Speed field's value.
func (s *NoiseReducerTemporalFilterSettings) SetSpeed(v int64) *NoiseReducerTemporalFilterSettings {
	s.Speed = &v
	return s
}

// SetStrength sets the Strength field's value.
func (s *NoiseReducerTemporalFilterSettings) SetStrength(v int64) *NoiseReducerTemporalFilterSettings {
	s.Strength = &v
	return s
}

// NotFoundException models the service's NotFoundException error response;
// it satisfies the awserr.Error interface via the methods below.
type NotFoundException struct {
	_ struct{} `type:"structure"`

	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s NotFoundException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s NotFoundException) GoString() string {
	return s.String()
}

// newErrorNotFoundException constructs the error from the response metadata;
// the message body is unmarshaled separately by the protocol layer.
func newErrorNotFoundException(v protocol.ResponseMetadata) error {
	return &NotFoundException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *NotFoundException) Code() string {
	return "NotFoundException"
}

// Message returns the exception's message.
func (s *NotFoundException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *NotFoundException) OrigErr() error {
	return nil
}

func (s *NotFoundException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// StatusCode returns the HTTP status code for the request's response error.
func (s *NotFoundException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *NotFoundException) RequestID() string {
	return s.RespMetadata.RequestID
}

// Required when you set Codec, under AudioDescriptions>CodecSettings, to the
// value OPUS.
type OpusSettings struct {
	_ struct{} `type:"structure"`

	// Optional. Specify the average bitrate in bits per second. Valid values are
	// multiples of 8000, from 32000 through 192000. The default value is 96000,
	// which we recommend for quality and bandwidth.
	Bitrate *int64 `locationName:"bitrate" min:"32000" type:"integer"`

	// Specify the number of channels in this output audio track. Choosing Mono
	// on the console gives you 1 output channel; choosing Stereo gives you 2. In
	// the API, valid values are 1 and 2.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// Optional. Sample rate in hz. Valid values are 16000, 24000, and 48000. The
	// default value is 48000.
	SampleRate *int64 `locationName:"sampleRate" min:"16000" type:"integer"`
}

// String returns the string representation
func (s OpusSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s OpusSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// The minimum values enforced here mirror the min constraints declared in
// the field tags above.
func (s *OpusSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "OpusSettings"}
	if s.Bitrate != nil && *s.Bitrate < 32000 {
		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 32000))
	}
	if s.Channels != nil && *s.Channels < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Channels", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 16000 {
		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 16000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBitrate sets the Bitrate field's value.
func (s *OpusSettings) SetBitrate(v int64) *OpusSettings {
	s.Bitrate = &v
	return s
}

// SetChannels sets the Channels field's value.
func (s *OpusSettings) SetChannels(v int64) *OpusSettings {
	s.Channels = &v
	return s
}

// SetSampleRate sets the SampleRate field's value.
func (s *OpusSettings) SetSampleRate(v int64) *OpusSettings {
	s.SampleRate = &v
	return s
}

// Each output in your job is a collection of settings that describes how you
// want MediaConvert to encode a single output file or stream. For more information,
// see https://docs.aws.amazon.com/mediaconvert/latest/ug/create-outputs.html.
type Output struct {
	_ struct{} `type:"structure"`

	// (AudioDescriptions) contains groups of audio encoding settings organized
	// by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions)
	// can contain multiple groups of encoding settings.
	AudioDescriptions []*AudioDescription `locationName:"audioDescriptions" type:"list"`

	// (CaptionDescriptions) contains groups of captions settings. For each output
	// that has captions, include one instance of (CaptionDescriptions). (CaptionDescriptions)
	// can contain multiple groups of captions settings.
	CaptionDescriptions []*CaptionDescription `locationName:"captionDescriptions" type:"list"`

	// Container specific settings.
	ContainerSettings *ContainerSettings `locationName:"containerSettings" type:"structure"`

	// Use Extension (Extension) to specify the file extension for outputs in File
	// output groups. If you do not specify a value, the service will use default
	// extensions by container type as follows * MPEG-2 transport stream, m2ts *
	// Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container,
	// webm * No Container, the service will use codec extensions (e.g. AAC, H265,
	// H265, AC3)
	Extension *string `locationName:"extension" type:"string"`

	// Use Name modifier (NameModifier) to have the service add a string to the
	// end of each output filename. You specify the base filename as part of your
	// destination URI. When you create multiple outputs in the same output group,
	// Name modifier (NameModifier) is required. Name modifier also accepts format
	// identifiers. For DASH ISO outputs, if you use the format identifiers $Number$
	// or $Time$ in one output, you must use them in the same way in all outputs
	// of the output group.
	NameModifier *string `locationName:"nameModifier" min:"1" type:"string"`

	// Specific settings for this type of output.
	OutputSettings *OutputSettings `locationName:"outputSettings" type:"structure"`

	// Use Preset (Preset) to specify a preset for your transcoding settings. Provide
	// the system or custom preset name. You can specify either Preset (Preset)
	// or Container settings (ContainerSettings), but not both.
	Preset *string `locationName:"preset" type:"string"`

	// VideoDescription contains a group of video encoding settings. The specific
	// video settings depend on the video codec that you choose for the property
	// codec. Include one instance of VideoDescription per output.
	VideoDescription *VideoDescription `locationName:"videoDescription" type:"structure"`
}

// String returns the string representation
func (s Output) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Output) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Output) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Output"}
	if s.NameModifier != nil && len(*s.NameModifier) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("NameModifier", 1))
	}
	// List elements may be nil (sparse JSON input); skip them rather than
	// dereference, and index the nested error by element position.
	if s.AudioDescriptions != nil {
		for i, v := range s.AudioDescriptions {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AudioDescriptions", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.CaptionDescriptions != nil {
		for i, v := range s.CaptionDescriptions {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionDescriptions", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.ContainerSettings != nil {
		if err := s.ContainerSettings.Validate(); err != nil {
			invalidParams.AddNested("ContainerSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.VideoDescription != nil {
		if err := s.VideoDescription.Validate(); err != nil {
			invalidParams.AddNested("VideoDescription", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAudioDescriptions sets the AudioDescriptions field's value.
func (s *Output) SetAudioDescriptions(v []*AudioDescription) *Output {
	s.AudioDescriptions = v
	return s
}

// SetCaptionDescriptions sets the CaptionDescriptions field's value.
func (s *Output) SetCaptionDescriptions(v []*CaptionDescription) *Output {
	s.CaptionDescriptions = v
	return s
}

// SetContainerSettings sets the ContainerSettings field's value.
func (s *Output) SetContainerSettings(v *ContainerSettings) *Output {
	s.ContainerSettings = v
	return s
}

// SetExtension sets the Extension field's value.
func (s *Output) SetExtension(v string) *Output {
	s.Extension = &v
	return s
}

// SetNameModifier sets the NameModifier field's value.
func (s *Output) SetNameModifier(v string) *Output {
	s.NameModifier = &v
	return s
}

// SetOutputSettings sets the OutputSettings field's value.
func (s *Output) SetOutputSettings(v *OutputSettings) *Output {
	s.OutputSettings = v
	return s
}

// SetPreset sets the Preset field's value.
func (s *Output) SetPreset(v string) *Output {
	s.Preset = &v
	return s
}

// SetVideoDescription sets the VideoDescription field's value.
func (s *Output) SetVideoDescription(v *VideoDescription) *Output {
	s.VideoDescription = v
	return s
}

// OutputChannel mapping settings.
type OutputChannelMapping struct {
	_ struct{} `type:"structure"`

	// Use this setting to specify your remix values when they are integers, such
	// as -10, 0, or 4.
	InputChannels []*int64 `locationName:"inputChannels" type:"list"`

	// Use this setting to specify your remix values when they have a decimal component,
	// such as -10.312, 0.08, or 4.9. MediaConvert rounds your remixing values to
	// the nearest thousandth.
	InputChannelsFineTune []*float64 `locationName:"inputChannelsFineTune" type:"list"`
}

// String returns the string representation
func (s OutputChannelMapping) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s OutputChannelMapping) GoString() string {
	return s.String()
}

// SetInputChannels sets the InputChannels field's value.
func (s *OutputChannelMapping) SetInputChannels(v []*int64) *OutputChannelMapping {
	s.InputChannels = v
	return s
}

// SetInputChannelsFineTune sets the InputChannelsFineTune field's value.
func (s *OutputChannelMapping) SetInputChannelsFineTune(v []*float64) *OutputChannelMapping {
	s.InputChannelsFineTune = v
	return s
}

// Details regarding output
type OutputDetail struct {
	_ struct{} `type:"structure"`

	// Duration in milliseconds
	DurationInMs *int64 `locationName:"durationInMs" type:"integer"`

	// Contains details about the output's video stream
	VideoDetails *VideoDetail `locationName:"videoDetails" type:"structure"`
}

// String returns the string representation
func (s OutputDetail) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s OutputDetail) GoString() string {
	return s.String()
}

// SetDurationInMs sets the DurationInMs field's value.
func (s *OutputDetail) SetDurationInMs(v int64) *OutputDetail {
	s.DurationInMs = &v
	return s
}

// SetVideoDetails sets the VideoDetails field's value.
func (s *OutputDetail) SetVideoDetails(v *VideoDetail) *OutputDetail {
	s.VideoDetails = v
	return s
}

// Group of outputs
type OutputGroup struct {
	_ struct{} `type:"structure"`

	// Use automated encoding to have MediaConvert choose your encoding settings
	// for you, based on characteristics of your input video.
	AutomatedEncodingSettings *AutomatedEncodingSettings `locationName:"automatedEncodingSettings" type:"structure"`

	// Use Custom Group Name (CustomName) to specify a name for the output group.
	// This value is displayed on the console and can make your job settings JSON
	// more human-readable. It does not affect your outputs. Use up to twelve characters
	// that are either letters, numbers, spaces, or underscores.
	CustomName *string `locationName:"customName" type:"string"`

	// Name of the output group
	Name *string `locationName:"name" type:"string"`

	// Output Group settings, including type
	OutputGroupSettings *OutputGroupSettings `locationName:"outputGroupSettings" type:"structure"`

	// This object holds groups of encoding settings, one group of settings per
	// output.
	Outputs []*Output `locationName:"outputs" type:"list"`
}

// String returns the string representation
func (s OutputGroup) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s OutputGroup) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *OutputGroup) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "OutputGroup"}
	if s.AutomatedEncodingSettings != nil {
		if err := s.AutomatedEncodingSettings.Validate(); err != nil {
			invalidParams.AddNested("AutomatedEncodingSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.OutputGroupSettings != nil {
		if err := s.OutputGroupSettings.Validate(); err != nil {
			invalidParams.AddNested("OutputGroupSettings", err.(request.ErrInvalidParams))
		}
	}
	// nil list elements are skipped; nested errors are indexed by position.
	if s.Outputs != nil {
		for i, v := range s.Outputs {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Outputs", i), err.(request.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAutomatedEncodingSettings sets the AutomatedEncodingSettings field's value.
func (s *OutputGroup) SetAutomatedEncodingSettings(v *AutomatedEncodingSettings) *OutputGroup {
	s.AutomatedEncodingSettings = v
	return s
}

// SetCustomName sets the CustomName field's value.
func (s *OutputGroup) SetCustomName(v string) *OutputGroup {
	s.CustomName = &v
	return s
}

// SetName sets the Name field's value.
func (s *OutputGroup) SetName(v string) *OutputGroup {
	s.Name = &v
	return s
}

// SetOutputGroupSettings sets the OutputGroupSettings field's value.
func (s *OutputGroup) SetOutputGroupSettings(v *OutputGroupSettings) *OutputGroup {
	s.OutputGroupSettings = v
	return s
}

// SetOutputs sets the Outputs field's value.
func (s *OutputGroup) SetOutputs(v []*Output) *OutputGroup {
	s.Outputs = v
	return s
}

// Contains details about the output groups specified in the job settings.
type OutputGroupDetail struct {
	_ struct{} `type:"structure"`

	// Details about the output
	OutputDetails []*OutputDetail `locationName:"outputDetails" type:"list"`
}

// String returns the string representation
func (s OutputGroupDetail) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s OutputGroupDetail) GoString() string {
	return s.String()
}

// SetOutputDetails sets the OutputDetails field's value.
func (s *OutputGroupDetail) SetOutputDetails(v []*OutputDetail) *OutputGroupDetail {
	s.OutputDetails = v
	return s
}

// Output Group settings, including type
type OutputGroupSettings struct {
	_ struct{} `type:"structure"`

	// Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
	// When you work directly in your JSON job specification, include this object
	// and any required children when you set Type, under OutputGroupSettings, to
	// CMAF_GROUP_SETTINGS.
	CmafGroupSettings *CmafGroupSettings `locationName:"cmafGroupSettings" type:"structure"`

	// Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
	// When you work directly in your JSON job specification, include this object
	// and any required children when you set Type, under OutputGroupSettings, to
	// DASH_ISO_GROUP_SETTINGS.
	DashIsoGroupSettings *DashIsoGroupSettings `locationName:"dashIsoGroupSettings" type:"structure"`

	// Settings related to your File output group. MediaConvert uses this group
	// of settings to generate a single standalone file, rather than a streaming
	// package. When you work directly in your JSON job specification, include this
	// object and any required children when you set Type, under OutputGroupSettings,
	// to FILE_GROUP_SETTINGS.
	FileGroupSettings *FileGroupSettings `locationName:"fileGroupSettings" type:"structure"`

	// Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
	// When you work directly in your JSON job specification, include this object
	// and any required children when you set Type, under OutputGroupSettings, to
	// HLS_GROUP_SETTINGS.
	HlsGroupSettings *HlsGroupSettings `locationName:"hlsGroupSettings" type:"structure"`

	// Settings related to your Microsoft Smooth Streaming output package. For more
	// information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
	// When you work directly in your JSON job specification, include this object
	// and any required children when you set Type, under OutputGroupSettings, to
	// MS_SMOOTH_GROUP_SETTINGS.
	MsSmoothGroupSettings *MsSmoothGroupSettings `locationName:"msSmoothGroupSettings" type:"structure"`

	// Type of output group (File group, Apple HLS, DASH ISO, Microsoft Smooth Streaming,
	// CMAF)
	Type *string `locationName:"type" type:"string" enum:"OutputGroupType"`
}

// String returns the string representation
func (s OutputGroupSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s OutputGroupSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *OutputGroupSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "OutputGroupSettings"}
	// Each set group-settings struct validates itself; note FileGroupSettings
	// has no Validate of its own and so is not checked here.
	if s.CmafGroupSettings != nil {
		if err := s.CmafGroupSettings.Validate(); err != nil {
			invalidParams.AddNested("CmafGroupSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.DashIsoGroupSettings != nil {
		if err := s.DashIsoGroupSettings.Validate(); err != nil {
			invalidParams.AddNested("DashIsoGroupSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.HlsGroupSettings != nil {
		if err := s.HlsGroupSettings.Validate(); err != nil {
			invalidParams.AddNested("HlsGroupSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.MsSmoothGroupSettings != nil {
		if err := s.MsSmoothGroupSettings.Validate(); err != nil {
			invalidParams.AddNested("MsSmoothGroupSettings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCmafGroupSettings sets the CmafGroupSettings field's value.
func (s *OutputGroupSettings) SetCmafGroupSettings(v *CmafGroupSettings) *OutputGroupSettings {
	s.CmafGroupSettings = v
	return s
}

// SetDashIsoGroupSettings sets the DashIsoGroupSettings field's value.
func (s *OutputGroupSettings) SetDashIsoGroupSettings(v *DashIsoGroupSettings) *OutputGroupSettings {
	s.DashIsoGroupSettings = v
	return s
}

// SetFileGroupSettings sets the FileGroupSettings field's value.
func (s *OutputGroupSettings) SetFileGroupSettings(v *FileGroupSettings) *OutputGroupSettings {
	s.FileGroupSettings = v
	return s
}

// SetHlsGroupSettings sets the HlsGroupSettings field's value.
func (s *OutputGroupSettings) SetHlsGroupSettings(v *HlsGroupSettings) *OutputGroupSettings {
	s.HlsGroupSettings = v
	return s
}

// SetMsSmoothGroupSettings sets the MsSmoothGroupSettings field's value.
func (s *OutputGroupSettings) SetMsSmoothGroupSettings(v *MsSmoothGroupSettings) *OutputGroupSettings {
	s.MsSmoothGroupSettings = v
	return s
}

// SetType sets the Type field's value.
func (s *OutputGroupSettings) SetType(v string) *OutputGroupSettings {
	s.Type = &v
	return s
}

// Specific settings for this type of output.
type OutputSettings struct {
	_ struct{} `type:"structure"`

	// Settings for HLS output groups
	HlsSettings *HlsSettings `locationName:"hlsSettings" type:"structure"`
}

// String returns the string representation
func (s OutputSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s OutputSettings) GoString() string {
	return s.String()
}

// SetHlsSettings sets the HlsSettings field's value.
// It returns s to allow chained calls.
func (s *OutputSettings) SetHlsSettings(v *HlsSettings) *OutputSettings {
	s.HlsSettings = v
	return s
}

// If you work with a third party video watermarking partner, use the group
// of settings that correspond with your watermarking partner to include watermarks
// in your output.
type PartnerWatermarking struct {
	_ struct{} `type:"structure"`

	// For forensic video watermarking, MediaConvert supports Nagra NexGuard File
	// Marker watermarking. MediaConvert supports both PreRelease Content (NGPR/G2)
	// and OTT Streaming workflows.
	NexguardFileMarkerSettings *NexGuardFileMarkerSettings `locationName:"nexguardFileMarkerSettings" type:"structure"`
}

// String returns the string representation
func (s PartnerWatermarking) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s PartnerWatermarking) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *PartnerWatermarking) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "PartnerWatermarking"}
	// The only validation is delegated to the nested NexGuard settings.
	if s.NexguardFileMarkerSettings != nil {
		if err := s.NexguardFileMarkerSettings.Validate(); err != nil {
			invalidParams.AddNested("NexguardFileMarkerSettings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetNexguardFileMarkerSettings sets the NexguardFileMarkerSettings field's value.
// It returns s to allow chained calls.
func (s *PartnerWatermarking) SetNexguardFileMarkerSettings(v *NexGuardFileMarkerSettings) *PartnerWatermarking {
	s.NexguardFileMarkerSettings = v
	return s
}

// A preset is a collection of preconfigured media conversion settings that
// you want MediaConvert to apply to the output during the conversion process.
type Preset struct {
	_ struct{} `type:"structure"`

	// An identifier for this resource that is unique within all of AWS.
	Arn *string `locationName:"arn" type:"string"`

	// An optional category you create to organize your presets.
	Category *string `locationName:"category" type:"string"`

	// The timestamp in epoch seconds for preset creation.
	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// An optional description you create for each preset.
	Description *string `locationName:"description" type:"string"`

	// The timestamp in epoch seconds when the preset was last updated.
	LastUpdated *time.Time `locationName:"lastUpdated" type:"timestamp" timestampFormat:"unixTimestamp"`

	// A name you create for each preset. Each name must be unique within your account.
	//
	// Name is a required field
	Name *string `locationName:"name" type:"string" required:"true"`

	// Settings for preset
	//
	// Settings is a required field
	Settings *PresetSettings `locationName:"settings" type:"structure" required:"true"`

	// A preset can be of two types: system or custom. System or built-in preset
	// can't be modified or deleted by the user.
	Type *string `locationName:"type" type:"string" enum:"Type"`
}

// String returns the string representation
func (s Preset) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Preset) GoString() string {
	return s.String()
}

// SetArn sets the Arn field's value.
func (s *Preset) SetArn(v string) *Preset {
	s.Arn = &v
	return s
}

// SetCategory sets the Category field's value.
func (s *Preset) SetCategory(v string) *Preset {
	s.Category = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
func (s *Preset) SetCreatedAt(v time.Time) *Preset {
	s.CreatedAt = &v
	return s
}

// SetDescription sets the Description field's value.
func (s *Preset) SetDescription(v string) *Preset {
	s.Description = &v
	return s
}

// SetLastUpdated sets the LastUpdated field's value.
func (s *Preset) SetLastUpdated(v time.Time) *Preset {
	s.LastUpdated = &v
	return s
}

// SetName sets the Name field's value.
func (s *Preset) SetName(v string) *Preset {
	s.Name = &v
	return s
}

// SetSettings sets the Settings field's value.
func (s *Preset) SetSettings(v *PresetSettings) *Preset {
	s.Settings = v
	return s
}

// SetType sets the Type field's value.
func (s *Preset) SetType(v string) *Preset {
	s.Type = &v
	return s
}

// Settings for preset
type PresetSettings struct {
	_ struct{} `type:"structure"`

	// (AudioDescriptions) contains groups of audio encoding settings organized
	// by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions)
	// can contain multiple groups of encoding settings.
	AudioDescriptions []*AudioDescription `locationName:"audioDescriptions" type:"list"`

	// This object holds groups of settings related to captions for one output.
	// For each output that has captions, include one instance of CaptionDescriptions.
	CaptionDescriptions []*CaptionDescriptionPreset `locationName:"captionDescriptions" type:"list"`

	// Container specific settings.
	ContainerSettings *ContainerSettings `locationName:"containerSettings" type:"structure"`

	// VideoDescription contains a group of video encoding settings. The specific
	// video settings depend on the video codec that you choose for the property
	// codec. Include one instance of VideoDescription per output.
	VideoDescription *VideoDescription `locationName:"videoDescription" type:"structure"`
}

// String returns the string representation
func (s PresetSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s PresetSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *PresetSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "PresetSettings"}
	// nil list elements are skipped; nested errors are indexed by position.
	if s.AudioDescriptions != nil {
		for i, v := range s.AudioDescriptions {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AudioDescriptions", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.CaptionDescriptions != nil {
		for i, v := range s.CaptionDescriptions {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CaptionDescriptions", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.ContainerSettings != nil {
		if err := s.ContainerSettings.Validate(); err != nil {
			invalidParams.AddNested("ContainerSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.VideoDescription != nil {
		if err := s.VideoDescription.Validate(); err != nil {
			invalidParams.AddNested("VideoDescription", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAudioDescriptions sets the AudioDescriptions field's value.
func (s *PresetSettings) SetAudioDescriptions(v []*AudioDescription) *PresetSettings {
	s.AudioDescriptions = v
	return s
}

// SetCaptionDescriptions sets the CaptionDescriptions field's value.
func (s *PresetSettings) SetCaptionDescriptions(v []*CaptionDescriptionPreset) *PresetSettings { s.CaptionDescriptions = v return s } // SetContainerSettings sets the ContainerSettings field's value. func (s *PresetSettings) SetContainerSettings(v *ContainerSettings) *PresetSettings { s.ContainerSettings = v return s } // SetVideoDescription sets the VideoDescription field's value. func (s *PresetSettings) SetVideoDescription(v *VideoDescription) *PresetSettings { s.VideoDescription = v return s } // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to // the value PRORES. type ProresSettings struct { _ struct{} `type:"structure"` // This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that // you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 // sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma // sampling. You must specify a value for this setting when your output codec // profile supports 4:4:4 chroma sampling. Related Settings: When you set Chroma // sampling to Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING), you must choose // an output codec profile that supports 4:4:4 chroma sampling. These values // for Profile (CodecProfile) support 4:4:4 chroma sampling: Apple ProRes 4444 // (APPLE_PRORES_4444) or Apple ProRes 4444 XQ (APPLE_PRORES_4444_XQ). When // you set Chroma sampling to Preserve 4:4:4 sampling, you must disable all // video preprocessors except for Nexguard file marker (PartnerWatermarking). // When you set Chroma sampling to Preserve 4:4:4 sampling and use framerate // conversion, you must set Frame rate conversion algorithm (FramerateConversionAlgorithm) // to Drop duplicate (DUPLICATE_DROP). ChromaSampling *string `locationName:"chromaSampling" type:"string" enum:"ProresChromaSampling"` // Use Profile (ProResCodecProfile) to specify the type of Apple ProRes codec // to use for this output. 
CodecProfile *string `locationName:"codecProfile" type:"string" enum:"ProresCodecProfile"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"ProresFramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"ProresFramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// Choose the scan line type for the output. Keep the default value, Progressive
	// (PROGRESSIVE) to create a progressive output, regardless of the scan type
	// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
	// to create an output that's interlaced with the same field polarity throughout.
	// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
	// to produce outputs with the same field polarity as the source. For jobs that
	// have multiple inputs, the output field polarity might change over the course
	// of the output. Follow behavior depends on the input scan type. If the source
	// is interlaced, the output will be interlaced with the same polarity as the
	// source. If the source is progressive, the output will be interlaced with
	// top field bottom field first, depending on which of the Follow options you
	// choose.
	InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"ProresInterlaceMode"`

	// Optional. Specify how the service determines the pixel aspect ratio (PAR)
	// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
	// uses the PAR from your input video for your output. To specify a different
	// PAR in the console, choose any value other than Follow source. To specify
	// a different PAR by editing the JSON job specification, choose SPECIFIED.
	// When you choose SPECIFIED for this setting, you must also specify values
	// for the parNumerator and parDenominator settings.
	ParControl *string `locationName:"parControl" type:"string" enum:"ProresParControl"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parDenominator is 33.
	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parNumerator is 40.
	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`

	// Use this setting for interlaced outputs, when your output frame rate is half
	// of your input frame rate. In this situation, choose Optimized interlacing
	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
	// case, each progressive frame from the input corresponds to an interlaced
	// field in the output. Keep the default value, Basic interlacing (INTERLACED),
	// for all other output frame rates. With basic interlacing, MediaConvert performs
	// any frame rate conversion first and then interlaces the frames. When you
	// choose Optimized interlacing and you set your output frame rate to a value
	// that isn't suitable for optimized interlacing, MediaConvert automatically
	// falls back to basic interlacing. Required settings: To use optimized interlacing,
	// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
	// use optimized interlacing for hard telecine outputs. You must also set Interlace
	// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
	ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"ProresScanTypeConversionMode"`

	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
	// second (fps). Enable slow PAL to create a 25 fps output. When you enable
	// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
	// your audio to keep it synchronized with the video. Note that enabling this
	// setting will slightly reduce the duration of your video. Required settings:
	// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
	// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
	// 1.
	SlowPal *string `locationName:"slowPal" type:"string" enum:"ProresSlowPal"`

	// When you do frame rate conversion from 23.976 frames per second (fps) to
	// 29.97 fps, and your output scan type is interlaced, you can optionally enable
	// hard telecine (HARD) to create a smoother picture.
// When you keep the default
	// value, None (NONE), MediaConvert does a standard frame rate conversion to
	// 29.97 without doing anything with the field polarity to create a smoother
	// picture.
	Telecine *string `locationName:"telecine" type:"string" enum:"ProresTelecine"`
}

// String returns the string representation
func (s ProresSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ProresSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
//
// NOTE(review): only the min:"1" constraints on the frame-rate and PAR
// fractions are checked client-side; enum string values are validated by the
// service, not here.
func (s *ProresSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ProresSettings"}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1))
	}
	if s.ParDenominator != nil && *s.ParDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1))
	}
	if s.ParNumerator != nil && *s.ParNumerator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetChromaSampling sets the ChromaSampling field's value.
func (s *ProresSettings) SetChromaSampling(v string) *ProresSettings {
	s.ChromaSampling = &v
	return s
}

// SetCodecProfile sets the CodecProfile field's value.
func (s *ProresSettings) SetCodecProfile(v string) *ProresSettings {
	s.CodecProfile = &v
	return s
}

// SetFramerateControl sets the FramerateControl field's value.
func (s *ProresSettings) SetFramerateControl(v string) *ProresSettings {
	s.FramerateControl = &v
	return s
}

// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value.
func (s *ProresSettings) SetFramerateConversionAlgorithm(v string) *ProresSettings {
	s.FramerateConversionAlgorithm = &v
	return s
}

// SetFramerateDenominator sets the FramerateDenominator field's value.
func (s *ProresSettings) SetFramerateDenominator(v int64) *ProresSettings {
	s.FramerateDenominator = &v
	return s
}

// SetFramerateNumerator sets the FramerateNumerator field's value.
func (s *ProresSettings) SetFramerateNumerator(v int64) *ProresSettings {
	s.FramerateNumerator = &v
	return s
}

// SetInterlaceMode sets the InterlaceMode field's value.
func (s *ProresSettings) SetInterlaceMode(v string) *ProresSettings {
	s.InterlaceMode = &v
	return s
}

// SetParControl sets the ParControl field's value.
func (s *ProresSettings) SetParControl(v string) *ProresSettings {
	s.ParControl = &v
	return s
}

// SetParDenominator sets the ParDenominator field's value.
func (s *ProresSettings) SetParDenominator(v int64) *ProresSettings {
	s.ParDenominator = &v
	return s
}

// SetParNumerator sets the ParNumerator field's value.
func (s *ProresSettings) SetParNumerator(v int64) *ProresSettings {
	s.ParNumerator = &v
	return s
}

// SetScanTypeConversionMode sets the ScanTypeConversionMode field's value.
func (s *ProresSettings) SetScanTypeConversionMode(v string) *ProresSettings {
	s.ScanTypeConversionMode = &v
	return s
}

// SetSlowPal sets the SlowPal field's value.
func (s *ProresSettings) SetSlowPal(v string) *ProresSettings {
	s.SlowPal = &v
	return s
}

// SetTelecine sets the Telecine field's value.
func (s *ProresSettings) SetTelecine(v string) *ProresSettings {
	s.Telecine = &v
	return s
}

// You can use queues to manage the resources that are available to your AWS
// account for running multiple transcoding jobs at the same time. If you don't
// specify a queue, the service sends all jobs through the default queue. For
// more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html.
type Queue struct {
	_ struct{} `type:"structure"`

	// An identifier for this resource that is unique within all of AWS.
	Arn *string `locationName:"arn" type:"string"`

	// The timestamp in epoch seconds for when you created the queue.
	CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// An optional description that you create for each queue.
	Description *string `locationName:"description" type:"string"`

	// The timestamp in epoch seconds for when you most recently updated the queue.
	LastUpdated *time.Time `locationName:"lastUpdated" type:"timestamp" timestampFormat:"unixTimestamp"`

	// A name that you create for each queue. Each name must be unique within your
	// account.
	//
	// Name is a required field
	Name *string `locationName:"name" type:"string" required:"true"`

	// Specifies whether the pricing plan for the queue is on-demand or reserved.
	// For on-demand, you pay per minute, billed in increments of .01 minute. For
	// reserved, you pay for the transcoding capacity of the entire queue, regardless
	// of how much or how little you use it. Reserved pricing requires a 12-month
	// commitment.
	PricingPlan *string `locationName:"pricingPlan" type:"string" enum:"PricingPlan"`

	// The estimated number of jobs with a PROGRESSING status.
	ProgressingJobsCount *int64 `locationName:"progressingJobsCount" type:"integer"`

	// Details about the pricing plan for your reserved queue. Required for reserved
	// queues and not applicable to on-demand queues.
	ReservationPlan *ReservationPlan `locationName:"reservationPlan" type:"structure"`

	// Queues can be ACTIVE or PAUSED. If you pause a queue, the service won't begin
	// processing jobs in that queue. Jobs that are running when you pause the queue
	// continue to run until they finish or result in an error.
	Status *string `locationName:"status" type:"string" enum:"QueueStatus"`

	// The estimated number of jobs with a SUBMITTED status.
	SubmittedJobsCount *int64 `locationName:"submittedJobsCount" type:"integer"`

	// Specifies whether this on-demand queue is system or custom. System queues
	// are built in. You can't modify or delete system queues. You can create and
	// modify custom queues.
	Type *string `locationName:"type" type:"string" enum:"Type"`
}

// String returns the string representation
func (s Queue) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Queue) GoString() string {
	return s.String()
}

// SetArn sets the Arn field's value.
func (s *Queue) SetArn(v string) *Queue {
	s.Arn = &v
	return s
}

// SetCreatedAt sets the CreatedAt field's value.
func (s *Queue) SetCreatedAt(v time.Time) *Queue {
	s.CreatedAt = &v
	return s
}

// SetDescription sets the Description field's value.
func (s *Queue) SetDescription(v string) *Queue {
	s.Description = &v
	return s
}

// SetLastUpdated sets the LastUpdated field's value.
func (s *Queue) SetLastUpdated(v time.Time) *Queue {
	s.LastUpdated = &v
	return s
}

// SetName sets the Name field's value.
func (s *Queue) SetName(v string) *Queue {
	s.Name = &v
	return s
}

// SetPricingPlan sets the PricingPlan field's value.
func (s *Queue) SetPricingPlan(v string) *Queue {
	s.PricingPlan = &v
	return s
}

// SetProgressingJobsCount sets the ProgressingJobsCount field's value.
func (s *Queue) SetProgressingJobsCount(v int64) *Queue {
	s.ProgressingJobsCount = &v
	return s
}

// SetReservationPlan sets the ReservationPlan field's value.
func (s *Queue) SetReservationPlan(v *ReservationPlan) *Queue {
	s.ReservationPlan = v
	return s
}

// SetStatus sets the Status field's value.
func (s *Queue) SetStatus(v string) *Queue {
	s.Status = &v
	return s
}

// SetSubmittedJobsCount sets the SubmittedJobsCount field's value.
func (s *Queue) SetSubmittedJobsCount(v int64) *Queue {
	s.SubmittedJobsCount = &v
	return s
}

// SetType sets the Type field's value.
func (s *Queue) SetType(v string) *Queue {
	s.Type = &v
	return s
}

// Description of the source and destination queues between which the job has
// moved, along with the timestamp of the move
type QueueTransition struct {
	_ struct{} `type:"structure"`

	// The queue that the job was on after the transition.
	DestinationQueue *string `locationName:"destinationQueue" type:"string"`

	// The queue that the job was on before the transition.
	SourceQueue *string `locationName:"sourceQueue" type:"string"`

	// The time, in Unix epoch format, that the job moved from the source queue
	// to the destination queue.
	Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"unixTimestamp"`
}

// String returns the string representation
func (s QueueTransition) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s QueueTransition) GoString() string {
	return s.String()
}

// SetDestinationQueue sets the DestinationQueue field's value.
func (s *QueueTransition) SetDestinationQueue(v string) *QueueTransition {
	s.DestinationQueue = &v
	return s
}

// SetSourceQueue sets the SourceQueue field's value.
func (s *QueueTransition) SetSourceQueue(v string) *QueueTransition {
	s.SourceQueue = &v
	return s
}

// SetTimestamp sets the Timestamp field's value.
func (s *QueueTransition) SetTimestamp(v time.Time) *QueueTransition {
	s.Timestamp = &v
	return s
}

// Use Rectangle to identify a specific area of the video frame.
type Rectangle struct {
	_ struct{} `type:"structure"`

	// Height of rectangle in pixels. Specify only even numbers.
	Height *int64 `locationName:"height" min:"2" type:"integer"`

	// Width of rectangle in pixels. Specify only even numbers.
	Width *int64 `locationName:"width" min:"2" type:"integer"`

	// The distance, in pixels, between the rectangle and the left edge of the video
	// frame. Specify only even numbers.
X *int64 `locationName:"x" type:"integer"`

	// The distance, in pixels, between the rectangle and the top edge of the video
	// frame. Specify only even numbers.
	Y *int64 `locationName:"y" type:"integer"`
}

// String returns the string representation
func (s Rectangle) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Rectangle) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
//
// NOTE(review): only the min:"2" constraints on Height and Width are checked;
// the "even numbers" requirement stated in the field docs is not enforced
// client-side — presumably the service rejects odd values. Verify against the
// service API if relying on this.
func (s *Rectangle) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Rectangle"}
	if s.Height != nil && *s.Height < 2 {
		invalidParams.Add(request.NewErrParamMinValue("Height", 2))
	}
	if s.Width != nil && *s.Width < 2 {
		invalidParams.Add(request.NewErrParamMinValue("Width", 2))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetHeight sets the Height field's value.
func (s *Rectangle) SetHeight(v int64) *Rectangle {
	s.Height = &v
	return s
}

// SetWidth sets the Width field's value.
func (s *Rectangle) SetWidth(v int64) *Rectangle {
	s.Width = &v
	return s
}

// SetX sets the X field's value.
func (s *Rectangle) SetX(v int64) *Rectangle {
	s.X = &v
	return s
}

// SetY sets the Y field's value.
func (s *Rectangle) SetY(v int64) *Rectangle {
	s.Y = &v
	return s
}

// Use Manual audio remixing (RemixSettings) to adjust audio levels for each
// audio channel in each output of your job. With audio remixing, you can output
// more or fewer audio channels than your input audio source provides.
type RemixSettings struct {
	_ struct{} `type:"structure"`

	// Channel mapping (ChannelMapping) contains the group of fields that hold the
	// remixing value for each channel, in dB. Specify remix values to indicate
	// how much of the content from your input audio channel you want in your output
	// audio channels. Each instance of the InputChannels or InputChannelsFineTune
	// array specifies these values for one output channel. Use one instance of
	// this array for each output channel. In the console, each array corresponds
	// to a column in the graphical depiction of the mapping matrix. The rows of
	// the graphical matrix correspond to input channels. Valid values are within
	// the range from -60 (mute) through 6. A setting of 0 passes the input channel
	// unchanged to the output channel (no attenuation or amplification). Use InputChannels
	// or InputChannelsFineTune to specify your remix values. Don't use both.
	ChannelMapping *ChannelMapping `locationName:"channelMapping" type:"structure"`

	// Specify the number of audio channels from your input that you want to use
	// in your output. With remixing, you might combine or split the data in these
	// channels, so the number of channels in your final output might be different.
	// If you are doing both input channel mapping and output channel mapping, the
	// number of output channels in your input mapping must be the same as the number
	// of input channels in your output mapping.
	ChannelsIn *int64 `locationName:"channelsIn" min:"1" type:"integer"`

	// Specify the number of channels in this output after remixing. Valid values:
	// 1, 2, 4, 6, 8... 64. (1 and even numbers to 64.) If you are doing both input
	// channel mapping and output channel mapping, the number of output channels
	// in your input mapping must be the same as the number of input channels in
	// your output mapping.
	ChannelsOut *int64 `locationName:"channelsOut" min:"1" type:"integer"`
}

// String returns the string representation
func (s RemixSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s RemixSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
//
// NOTE(review): checks only the min:"1" bounds on ChannelsIn/ChannelsOut;
// ChannelMapping is not validated recursively here.
func (s *RemixSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RemixSettings"}
	if s.ChannelsIn != nil && *s.ChannelsIn < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ChannelsIn", 1))
	}
	if s.ChannelsOut != nil && *s.ChannelsOut < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ChannelsOut", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetChannelMapping sets the ChannelMapping field's value.
func (s *RemixSettings) SetChannelMapping(v *ChannelMapping) *RemixSettings {
	s.ChannelMapping = v
	return s
}

// SetChannelsIn sets the ChannelsIn field's value.
func (s *RemixSettings) SetChannelsIn(v int64) *RemixSettings {
	s.ChannelsIn = &v
	return s
}

// SetChannelsOut sets the ChannelsOut field's value.
func (s *RemixSettings) SetChannelsOut(v int64) *RemixSettings {
	s.ChannelsOut = &v
	return s
}

// Details about the pricing plan for your reserved queue. Required for reserved
// queues and not applicable to on-demand queues.
type ReservationPlan struct {
	_ struct{} `type:"structure"`

	// The length of the term of your reserved queue pricing plan commitment.
	Commitment *string `locationName:"commitment" type:"string" enum:"Commitment"`

	// The timestamp in epoch seconds for when the current pricing plan term for
	// this reserved queue expires.
	ExpiresAt *time.Time `locationName:"expiresAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// The timestamp in epoch seconds for when you set up the current pricing plan
	// for this reserved queue.
	PurchasedAt *time.Time `locationName:"purchasedAt" type:"timestamp" timestampFormat:"unixTimestamp"`

	// Specifies whether the term of your reserved queue pricing plan is automatically
	// extended (AUTO_RENEW) or expires (EXPIRE) at the end of the term.
	RenewalType *string `locationName:"renewalType" type:"string" enum:"RenewalType"`

	// Specifies the number of reserved transcode slots (RTS) for this queue.
// The
	// number of RTS determines how many jobs the queue can process in parallel;
	// each RTS can process one job at a time. When you increase this number, you
	// extend your existing commitment with a new 12-month commitment for a larger
	// number of RTS. The new commitment begins when you purchase the additional
	// capacity. You can't decrease the number of RTS in your reserved queue.
	ReservedSlots *int64 `locationName:"reservedSlots" type:"integer"`

	// Specifies whether the pricing plan for your reserved queue is ACTIVE or EXPIRED.
	Status *string `locationName:"status" type:"string" enum:"ReservationPlanStatus"`
}

// String returns the string representation
func (s ReservationPlan) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReservationPlan) GoString() string {
	return s.String()
}

// SetCommitment sets the Commitment field's value.
func (s *ReservationPlan) SetCommitment(v string) *ReservationPlan {
	s.Commitment = &v
	return s
}

// SetExpiresAt sets the ExpiresAt field's value.
func (s *ReservationPlan) SetExpiresAt(v time.Time) *ReservationPlan {
	s.ExpiresAt = &v
	return s
}

// SetPurchasedAt sets the PurchasedAt field's value.
func (s *ReservationPlan) SetPurchasedAt(v time.Time) *ReservationPlan {
	s.PurchasedAt = &v
	return s
}

// SetRenewalType sets the RenewalType field's value.
func (s *ReservationPlan) SetRenewalType(v string) *ReservationPlan {
	s.RenewalType = &v
	return s
}

// SetReservedSlots sets the ReservedSlots field's value.
func (s *ReservationPlan) SetReservedSlots(v int64) *ReservationPlan {
	s.ReservedSlots = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *ReservationPlan) SetStatus(v string) *ReservationPlan {
	s.Status = &v
	return s
}

// Details about the pricing plan for your reserved queue. Required for reserved
// queues and not applicable to on-demand queues.
type ReservationPlanSettings struct {
	_ struct{} `type:"structure"`

	// The length of the term of your reserved queue pricing plan commitment.
	//
	// Commitment is a required field
	Commitment *string `locationName:"commitment" type:"string" required:"true" enum:"Commitment"`

	// Specifies whether the term of your reserved queue pricing plan is automatically
	// extended (AUTO_RENEW) or expires (EXPIRE) at the end of the term. When your
	// term is auto renewed, you extend your commitment by 12 months from the auto
	// renew date. You can cancel this commitment.
	//
	// RenewalType is a required field
	RenewalType *string `locationName:"renewalType" type:"string" required:"true" enum:"RenewalType"`

	// Specifies the number of reserved transcode slots (RTS) for this queue. The
	// number of RTS determines how many jobs the queue can process in parallel;
	// each RTS can process one job at a time. You can't decrease the number of
	// RTS in your reserved queue. You can increase the number of RTS by extending
	// your existing commitment with a new 12-month commitment for the larger number.
	// The new commitment begins when you purchase the additional capacity. You
	// can't cancel your commitment or revert to your original commitment after
	// you increase the capacity.
	//
	// ReservedSlots is a required field
	ReservedSlots *int64 `locationName:"reservedSlots" type:"integer" required:"true"`
}

// String returns the string representation
func (s ReservationPlanSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ReservationPlanSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
//
// NOTE(review): enforces only presence of the three required fields; value
// ranges and enum membership are left to the service.
func (s *ReservationPlanSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ReservationPlanSettings"}
	if s.Commitment == nil {
		invalidParams.Add(request.NewErrParamRequired("Commitment"))
	}
	if s.RenewalType == nil {
		invalidParams.Add(request.NewErrParamRequired("RenewalType"))
	}
	if s.ReservedSlots == nil {
		invalidParams.Add(request.NewErrParamRequired("ReservedSlots"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCommitment sets the Commitment field's value.
func (s *ReservationPlanSettings) SetCommitment(v string) *ReservationPlanSettings {
	s.Commitment = &v
	return s
}

// SetRenewalType sets the RenewalType field's value.
func (s *ReservationPlanSettings) SetRenewalType(v string) *ReservationPlanSettings {
	s.RenewalType = &v
	return s
}

// SetReservedSlots sets the ReservedSlots field's value.
func (s *ReservationPlanSettings) SetReservedSlots(v int64) *ReservationPlanSettings {
	s.ReservedSlots = &v
	return s
}

// The Amazon Resource Name (ARN) and tags for an AWS Elemental MediaConvert
// resource.
type ResourceTags struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the resource.
	Arn *string `locationName:"arn" type:"string"`

	// The tags for the resource.
	Tags map[string]*string `locationName:"tags" type:"map"`
}

// String returns the string representation
func (s ResourceTags) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ResourceTags) GoString() string {
	return s.String()
}

// SetArn sets the Arn field's value.
func (s *ResourceTags) SetArn(v string) *ResourceTags {
	s.Arn = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *ResourceTags) SetTags(v map[string]*string) *ResourceTags {
	s.Tags = v
	return s
}

// Optional. Have MediaConvert automatically apply Amazon S3 access control
// for the outputs in this output group.
When you don't use this setting, S3 // automatically applies the default access control list PRIVATE. type S3DestinationAccessControl struct { _ struct{} `type:"structure"` // Choose an Amazon S3 canned ACL for MediaConvert to apply to this output. CannedAcl *string `locationName:"cannedAcl" type:"string" enum:"S3ObjectCannedAcl"` } // String returns the string representation func (s S3DestinationAccessControl) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s S3DestinationAccessControl) GoString() string { return s.String() } // SetCannedAcl sets the CannedAcl field's value. func (s *S3DestinationAccessControl) SetCannedAcl(v string) *S3DestinationAccessControl { s.CannedAcl = &v return s } // Settings associated with S3 destination type S3DestinationSettings struct { _ struct{} `type:"structure"` // Optional. Have MediaConvert automatically apply Amazon S3 access control // for the outputs in this output group. When you don't use this setting, S3 // automatically applies the default access control list PRIVATE. AccessControl *S3DestinationAccessControl `locationName:"accessControl" type:"structure"` // Settings for how your job outputs are encrypted as they are uploaded to Amazon // S3. Encryption *S3EncryptionSettings `locationName:"encryption" type:"structure"` } // String returns the string representation func (s S3DestinationSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s S3DestinationSettings) GoString() string { return s.String() } // SetAccessControl sets the AccessControl field's value. func (s *S3DestinationSettings) SetAccessControl(v *S3DestinationAccessControl) *S3DestinationSettings { s.AccessControl = v return s } // SetEncryption sets the Encryption field's value. 
func (s *S3DestinationSettings) SetEncryption(v *S3EncryptionSettings) *S3DestinationSettings {
	s.Encryption = v
	return s
}

// Settings for how your job outputs are encrypted as they are uploaded to Amazon
// S3.
type S3EncryptionSettings struct {
	_ struct{} `type:"structure"`

	// Specify how you want your data keys managed. AWS uses data keys to encrypt
	// your content. AWS also encrypts the data keys themselves, using a customer
	// master key (CMK), and then stores the encrypted data keys alongside your
	// encrypted content. Use this setting to specify which AWS service manages
	// the CMK. For simplest set up, choose Amazon S3 (SERVER_SIDE_ENCRYPTION_S3).
	// If you want your master key to be managed by AWS Key Management Service (KMS),
	// choose AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). By default, when you choose
	// AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with
	// Amazon S3 to encrypt your data keys. You can optionally choose to specify
	// a different, customer managed CMK. Do so by specifying the Amazon Resource
	// Name (ARN) of the key for the setting KMS ARN (kmsKeyArn).
	EncryptionType *string `locationName:"encryptionType" type:"string" enum:"S3ServerSideEncryptionType"`

	// Optionally, specify the customer master key (CMK) that you want to use to
	// encrypt the data key that AWS uses to encrypt your output content. Enter
	// the Amazon Resource Name (ARN) of the CMK. To use this setting, you must
	// also set Server-side encryption (S3ServerSideEncryptionType) to AWS KMS (SERVER_SIDE_ENCRYPTION_KMS).
	// If you set Server-side encryption to AWS KMS but don't specify a CMK here,
	// AWS uses the AWS managed CMK associated with Amazon S3.
	KmsKeyArn *string `locationName:"kmsKeyArn" type:"string"`
}

// String returns the string representation
func (s S3EncryptionSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s S3EncryptionSettings) GoString() string {
	return s.String()
}

// SetEncryptionType sets the EncryptionType field's value.
func (s *S3EncryptionSettings) SetEncryptionType(v string) *S3EncryptionSettings {
	s.EncryptionType = &v
	return s
}

// SetKmsKeyArn sets the KmsKeyArn field's value.
func (s *S3EncryptionSettings) SetKmsKeyArn(v string) *S3EncryptionSettings {
	s.KmsKeyArn = &v
	return s
}

// Settings related to SCC captions. SCC is a sidecar format that holds captions
// in a file that is separate from the video container. Set up sidecar captions
// in the same output group, but different output from your video. For more
// information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set destinationType to SCC.
type SccDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Set Framerate (SccDestinationFramerate) to make sure that the captions and
	// the video are synchronized in the output. Specify a frame rate that matches
	// the frame rate of the associated video. If the video frame rate is 29.97,
	// choose 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has
	// video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97
	// non-dropframe (FRAMERATE_29_97_NON_DROPFRAME).
	Framerate *string `locationName:"framerate" type:"string" enum:"SccDestinationFramerate"`
}

// String returns the string representation
func (s SccDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s SccDestinationSettings) GoString() string {
	return s.String()
}

// SetFramerate sets the Framerate field's value.
func (s *SccDestinationSettings) SetFramerate(v string) *SccDestinationSettings { s.Framerate = &v return s } // If your output group type is HLS, DASH, or Microsoft Smooth, use these settings // when doing DRM encryption with a SPEKE-compliant key provider. If your output // group type is CMAF, use the SpekeKeyProviderCmaf settings instead. type SpekeKeyProvider struct { _ struct{} `type:"structure"` // If you want your key provider to encrypt the content keys that it provides // to MediaConvert, set up a certificate with a master key using AWS Certificate // Manager. Specify the certificate's Amazon Resource Name (ARN) here. CertificateArn *string `locationName:"certificateArn" type:"string"` // Specify the resource ID that your SPEKE-compliant key provider uses to identify // this content. ResourceId *string `locationName:"resourceId" type:"string"` // Relates to SPEKE implementation. DRM system identifiers. DASH output groups // support a max of two system ids. Other group types support one system id. // See https://dashif.org/identifiers/content_protection/ for more details. SystemIds []*string `locationName:"systemIds" type:"list"` // Specify the URL to the key server that your SPEKE-compliant DRM key provider // uses to provide keys for encrypting your content. Url *string `locationName:"url" type:"string"` } // String returns the string representation func (s SpekeKeyProvider) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s SpekeKeyProvider) GoString() string { return s.String() } // SetCertificateArn sets the CertificateArn field's value. func (s *SpekeKeyProvider) SetCertificateArn(v string) *SpekeKeyProvider { s.CertificateArn = &v return s } // SetResourceId sets the ResourceId field's value. func (s *SpekeKeyProvider) SetResourceId(v string) *SpekeKeyProvider { s.ResourceId = &v return s } // SetSystemIds sets the SystemIds field's value. 
func (s *SpekeKeyProvider) SetSystemIds(v []*string) *SpekeKeyProvider {
	s.SystemIds = v
	return s
}

// SetUrl sets the Url field's value.
func (s *SpekeKeyProvider) SetUrl(v string) *SpekeKeyProvider {
	s.Url = &v
	return s
}

// If your output group type is CMAF, use these settings when doing DRM encryption
// with a SPEKE-compliant key provider. If your output group type is HLS, DASH,
// or Microsoft Smooth, use the SpekeKeyProvider settings instead.
type SpekeKeyProviderCmaf struct {
	_ struct{} `type:"structure"`

	// If you want your key provider to encrypt the content keys that it provides
	// to MediaConvert, set up a certificate with a master key using AWS Certificate
	// Manager. Specify the certificate's Amazon Resource Name (ARN) here.
	CertificateArn *string `locationName:"certificateArn" type:"string"`

	// Specify the DRM system IDs that you want signaled in the DASH manifest that
	// MediaConvert creates as part of this CMAF package. The DASH manifest can
	// currently signal up to three system IDs. For more information, see https://dashif.org/identifiers/content_protection/.
	DashSignaledSystemIds []*string `locationName:"dashSignaledSystemIds" type:"list"`

	// Specify the DRM system ID that you want signaled in the HLS manifest that
	// MediaConvert creates as part of this CMAF package. The HLS manifest can currently
	// signal only one system ID. For more information, see https://dashif.org/identifiers/content_protection/.
	HlsSignaledSystemIds []*string `locationName:"hlsSignaledSystemIds" type:"list"`

	// Specify the resource ID that your SPEKE-compliant key provider uses to identify
	// this content.
	ResourceId *string `locationName:"resourceId" type:"string"`

	// Specify the URL to the key server that your SPEKE-compliant DRM key provider
	// uses to provide keys for encrypting your content.
	Url *string `locationName:"url" type:"string"`
}

// String returns the string representation
func (s SpekeKeyProviderCmaf) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s SpekeKeyProviderCmaf) GoString() string {
	return s.String()
}

// SetCertificateArn sets the CertificateArn field's value.
func (s *SpekeKeyProviderCmaf) SetCertificateArn(v string) *SpekeKeyProviderCmaf {
	s.CertificateArn = &v
	return s
}

// SetDashSignaledSystemIds sets the DashSignaledSystemIds field's value.
func (s *SpekeKeyProviderCmaf) SetDashSignaledSystemIds(v []*string) *SpekeKeyProviderCmaf {
	s.DashSignaledSystemIds = v
	return s
}

// SetHlsSignaledSystemIds sets the HlsSignaledSystemIds field's value.
func (s *SpekeKeyProviderCmaf) SetHlsSignaledSystemIds(v []*string) *SpekeKeyProviderCmaf {
	s.HlsSignaledSystemIds = v
	return s
}

// SetResourceId sets the ResourceId field's value.
func (s *SpekeKeyProviderCmaf) SetResourceId(v string) *SpekeKeyProviderCmaf {
	s.ResourceId = &v
	return s
}

// SetUrl sets the Url field's value.
func (s *SpekeKeyProviderCmaf) SetUrl(v string) *SpekeKeyProviderCmaf {
	s.Url = &v
	return s
}

// Use these settings to set up encryption with a static key provider.
type StaticKeyProvider struct {
	_ struct{} `type:"structure"`

	// Relates to DRM implementation. Sets the value of the KEYFORMAT attribute.
	// Must be 'identity' or a reverse DNS string. May be omitted to indicate an
	// implicit value of 'identity'.
	KeyFormat *string `locationName:"keyFormat" type:"string"`

	// Relates to DRM implementation. Either a single positive integer version value
	// or a slash delimited list of version values (1/2/3).
	KeyFormatVersions *string `locationName:"keyFormatVersions" type:"string"`

	// Relates to DRM implementation. Use a 32-character hexidecimal string to specify
	// Key Value (StaticKeyValue).
	StaticKeyValue *string `locationName:"staticKeyValue" type:"string"`

	// Relates to DRM implementation. The location of the license server used for
	// protecting content.
	Url *string `locationName:"url" type:"string"`
}

// String returns the string representation
func (s StaticKeyProvider) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StaticKeyProvider) GoString() string {
	return s.String()
}

// SetKeyFormat sets the KeyFormat field's value.
func (s *StaticKeyProvider) SetKeyFormat(v string) *StaticKeyProvider {
	s.KeyFormat = &v
	return s
}

// SetKeyFormatVersions sets the KeyFormatVersions field's value.
func (s *StaticKeyProvider) SetKeyFormatVersions(v string) *StaticKeyProvider {
	s.KeyFormatVersions = &v
	return s
}

// SetStaticKeyValue sets the StaticKeyValue field's value.
func (s *StaticKeyProvider) SetStaticKeyValue(v string) *StaticKeyProvider {
	s.StaticKeyValue = &v
	return s
}

// SetUrl sets the Url field's value.
func (s *StaticKeyProvider) SetUrl(v string) *StaticKeyProvider {
	s.Url = &v
	return s
}

// To add tags to a queue, preset, or job template, send a request with the
// Amazon Resource Name (ARN) of the resource and the tags that you want to
// add.
type TagResourceInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the resource that you want to tag. To get
	// the ARN, send a GET request with the resource name.
	//
	// Arn is a required field
	Arn *string `locationName:"arn" type:"string" required:"true"`

	// The tags that you want to add to the resource. You can tag resources with
	// a key-value pair or with only a key.
	//
	// Tags is a required field
	Tags map[string]*string `locationName:"tags" type:"map" required:"true"`
}

// String returns the string representation
func (s TagResourceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TagResourceInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *TagResourceInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"}
	if s.Arn == nil {
		invalidParams.Add(request.NewErrParamRequired("Arn"))
	}
	if s.Tags == nil {
		invalidParams.Add(request.NewErrParamRequired("Tags"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetArn sets the Arn field's value.
func (s *TagResourceInput) SetArn(v string) *TagResourceInput {
	s.Arn = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput {
	s.Tags = v
	return s
}

// A successful request to add tags to a resource returns an OK message.
type TagResourceOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s TagResourceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TagResourceOutput) GoString() string {
	return s.String()
}

// Settings related to teletext captions. Set up teletext captions in the same
// output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set destinationType to TELETEXT.
type TeletextDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Set pageNumber to the Teletext page number for the destination captions for
	// this output. This value must be a three-digit hexadecimal string; strings
	// ending in -FF are invalid. If you are passing through the entire set of Teletext
	// data, do not use this field.
	PageNumber *string `locationName:"pageNumber" min:"3" type:"string"`

	// Specify the page types for this Teletext page. If you don't specify a value
	// here, the service sets the page type to the default value Subtitle (PAGE_TYPE_SUBTITLE).
	// If you pass through the entire set of Teletext data, don't use this field.
	// When you pass through a set of Teletext pages, your output has the same page
	// types as your input.
	PageTypes []*string `locationName:"pageTypes" type:"list"`
}

// String returns the string representation
func (s TeletextDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TeletextDestinationSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *TeletextDestinationSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "TeletextDestinationSettings"}
	// Enforces the min:"3" constraint on the three-digit hex page number.
	if s.PageNumber != nil && len(*s.PageNumber) < 3 {
		invalidParams.Add(request.NewErrParamMinLen("PageNumber", 3))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetPageNumber sets the PageNumber field's value.
func (s *TeletextDestinationSettings) SetPageNumber(v string) *TeletextDestinationSettings {
	s.PageNumber = &v
	return s
}

// SetPageTypes sets the PageTypes field's value.
func (s *TeletextDestinationSettings) SetPageTypes(v []*string) *TeletextDestinationSettings {
	s.PageTypes = v
	return s
}

// Settings specific to Teletext caption sources, including Page number.
type TeletextSourceSettings struct {
	_ struct{} `type:"structure"`

	// Use Page Number (PageNumber) to specify the three-digit hexadecimal page
	// number that will be used for Teletext captions. Do not use this setting if
	// you are passing through teletext from the input source to output.
	PageNumber *string `locationName:"pageNumber" min:"3" type:"string"`
}

// String returns the string representation
func (s TeletextSourceSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TeletextSourceSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *TeletextSourceSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "TeletextSourceSettings"} if s.PageNumber != nil && len(*s.PageNumber) < 3 { invalidParams.Add(request.NewErrParamMinLen("PageNumber", 3)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetPageNumber sets the PageNumber field's value. func (s *TeletextSourceSettings) SetPageNumber(v string) *TeletextSourceSettings { s.PageNumber = &v return s } // Settings for burning the output timecode and specified prefix into the output. type TimecodeBurnin struct { _ struct{} `type:"structure"` // Use Font Size (FontSize) to set the font size of any burned-in timecode. // Valid values are 10, 16, 32, 48. FontSize *int64 `locationName:"fontSize" min:"10" type:"integer"` // Use Position (Position) under under Timecode burn-in (TimecodeBurnIn) to // specify the location the burned-in timecode on output video. Position *string `locationName:"position" type:"string" enum:"TimecodeBurninPosition"` // Use Prefix (Prefix) to place ASCII characters before any burned-in timecode. // For example, a prefix of "EZ-" will result in the timecode "EZ-00:00:00:00". // Provide either the characters themselves or the ASCII code equivalents. The // supported range of characters is 0x20 through 0x7e. This includes letters, // numbers, and all special characters represented on a standard English keyboard. Prefix *string `locationName:"prefix" type:"string"` } // String returns the string representation func (s TimecodeBurnin) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s TimecodeBurnin) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *TimecodeBurnin) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "TimecodeBurnin"}
	// Enforces the min:"10" constraint; 10 is the smallest valid font size.
	if s.FontSize != nil && *s.FontSize < 10 {
		invalidParams.Add(request.NewErrParamMinValue("FontSize", 10))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetFontSize sets the FontSize field's value.
func (s *TimecodeBurnin) SetFontSize(v int64) *TimecodeBurnin {
	s.FontSize = &v
	return s
}

// SetPosition sets the Position field's value.
func (s *TimecodeBurnin) SetPosition(v string) *TimecodeBurnin {
	s.Position = &v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *TimecodeBurnin) SetPrefix(v string) *TimecodeBurnin {
	s.Prefix = &v
	return s
}

// These settings control how the service handles timecodes throughout the job.
// These settings don't affect input clipping.
type TimecodeConfig struct {
	_ struct{} `type:"structure"`

	// If you use an editing platform that relies on an anchor timecode, use Anchor
	// Timecode (Anchor) to specify a timecode that will match the input video frame
	// to the output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF)
	// or (HH:MM:SS;FF). This setting ignores frame rate conversion. System behavior
	// for Anchor Timecode varies depending on your setting for Source (TimecodeSource).
	// * If Source (TimecodeSource) is set to Specified Start (SPECIFIEDSTART),
	// the first input frame is the specified value in Start Timecode (Start). Anchor
	// Timecode (Anchor) and Start Timecode (Start) are used calculate output timecode.
	// * If Source (TimecodeSource) is set to Start at 0 (ZEROBASED) the first frame
	// is 00:00:00:00. * If Source (TimecodeSource) is set to Embedded (EMBEDDED),
	// the first frame is the timecode value on the first input frame of the input.
	Anchor *string `locationName:"anchor" type:"string"`

	// Use Source (TimecodeSource) to set how timecodes are handled within this
	// job. To make sure that your video, audio, captions, and markers are synchronized
	// and that time-based features, such as image inserter, work correctly, choose
	// the Timecode source option that matches your assets. All timecodes are in
	// a 24-hour format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) -
	// Use the timecode that is in the input video. If no embedded timecode is in
	// the source, the service will use Start at 0 (ZEROBASED) instead. * Start
	// at 0 (ZEROBASED) - Set the timecode of the initial frame to 00:00:00:00.
	// * Specified Start (SPECIFIEDSTART) - Set the timecode of the initial frame
	// to a value other than zero. You use Start timecode (Start) to provide this
	// value.
	Source *string `locationName:"source" type:"string" enum:"TimecodeSource"`

	// Only use when you set Source (TimecodeSource) to Specified start (SPECIFIEDSTART).
	// Use Start timecode (Start) to specify the timecode for the initial frame.
	// Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF).
	Start *string `locationName:"start" type:"string"`

	// Only applies to outputs that support program-date-time stamp. Use Timestamp
	// offset (TimestampOffset) to overwrite the timecode date without affecting
	// the time and frame number. Provide the new date as a string in the format
	// "yyyy-mm-dd". To use Time stamp offset, you must also enable Insert program-date-time
	// (InsertProgramDateTime) in the output settings. For example, if the date
	// part of your timecodes is 2002-1-25 and you want to change it to one year
	// later, set Timestamp offset (TimestampOffset) to 2003-1-25.
	TimestampOffset *string `locationName:"timestampOffset" type:"string"`
}

// String returns the string representation
func (s TimecodeConfig) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TimecodeConfig) GoString() string {
	return s.String()
}

// SetAnchor sets the Anchor field's value.
func (s *TimecodeConfig) SetAnchor(v string) *TimecodeConfig {
	s.Anchor = &v
	return s
}

// SetSource sets the Source field's value.
func (s *TimecodeConfig) SetSource(v string) *TimecodeConfig {
	s.Source = &v
	return s
}

// SetStart sets the Start field's value.
func (s *TimecodeConfig) SetStart(v string) *TimecodeConfig {
	s.Start = &v
	return s
}

// SetTimestampOffset sets the TimestampOffset field's value.
func (s *TimecodeConfig) SetTimestampOffset(v string) *TimecodeConfig {
	s.TimestampOffset = &v
	return s
}

// Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags
// in any HLS outputs. To include timed metadata, you must enable it here, enable
// it in each output container, and specify tags and timecodes in ID3 insertion
// (Id3Insertion) objects.
type TimedMetadataInsertion struct {
	_ struct{} `type:"structure"`

	// Id3Insertions contains the array of Id3Insertion instances.
	Id3Insertions []*Id3Insertion `locationName:"id3Insertions" type:"list"`
}

// String returns the string representation
func (s TimedMetadataInsertion) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TimedMetadataInsertion) GoString() string {
	return s.String()
}

// SetId3Insertions sets the Id3Insertions field's value.
func (s *TimedMetadataInsertion) SetId3Insertions(v []*Id3Insertion) *TimedMetadataInsertion {
	s.Id3Insertions = v
	return s
}

// Information about when jobs are submitted, started, and finished is specified
// in Unix epoch format in seconds.
type Timing struct {
	_ struct{} `type:"structure"`

	// The time, in Unix epoch format, that the transcoding job finished.
	FinishTime *time.Time `locationName:"finishTime" type:"timestamp" timestampFormat:"unixTimestamp"`

	// The time, in Unix epoch format, that transcoding for the job began.
	StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"unixTimestamp"`

	// The time, in Unix epoch format, that you submitted the job.
	SubmitTime *time.Time `locationName:"submitTime" type:"timestamp" timestampFormat:"unixTimestamp"`
}

// String returns the string representation
func (s Timing) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Timing) GoString() string {
	return s.String()
}

// SetFinishTime sets the FinishTime field's value.
func (s *Timing) SetFinishTime(v time.Time) *Timing {
	s.FinishTime = &v
	return s
}

// SetStartTime sets the StartTime field's value.
func (s *Timing) SetStartTime(v time.Time) *Timing {
	s.StartTime = &v
	return s
}

// SetSubmitTime sets the SubmitTime field's value.
func (s *Timing) SetSubmitTime(v time.Time) *Timing {
	s.SubmitTime = &v
	return s
}

// TooManyRequestsException is one of the modeled error types that MediaConvert
// API operations can return (see the Returned Error Types list on each
// operation). It satisfies the awserr.Error interface via its Code, Message,
// and OrigErr methods.
type TooManyRequestsException struct {
	_ struct{} `type:"structure"`
	// RespMetadata carries the HTTP response metadata (status code and request
	// ID) for the error response; see StatusCode and RequestID below.
	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

	// Message_ holds the error message returned by the service, if any.
	Message_ *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s TooManyRequestsException) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TooManyRequestsException) GoString() string {
	return s.String()
}

// newErrorTooManyRequestsException wraps the given response metadata in a
// TooManyRequestsException error value.
func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error {
	return &TooManyRequestsException{
		RespMetadata: v,
	}
}

// Code returns the exception type name.
func (s *TooManyRequestsException) Code() string {
	return "TooManyRequestsException"
}

// Message returns the exception's message.
func (s *TooManyRequestsException) Message() string {
	if s.Message_ != nil {
		return *s.Message_
	}
	return ""
}

// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *TooManyRequestsException) OrigErr() error {
	return nil
}

// Error formats the exception as "Code: Message", satisfying the error
// interface.
func (s *TooManyRequestsException) Error() string {
	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}

// Status code returns the HTTP status code for the request's response error.
func (s *TooManyRequestsException) StatusCode() int {
	return s.RespMetadata.StatusCode
}

// RequestID returns the service's response RequestID for request.
func (s *TooManyRequestsException) RequestID() string {
	return s.RespMetadata.RequestID
}

// Settings specific to caption sources that are specified by track number.
// Currently, this is only IMSC captions in an IMF package. If your caption
// source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead
// of TrackSourceSettings.
type TrackSourceSettings struct {
	_ struct{} `type:"structure"`

	// Use this setting to select a single captions track from a source. Track numbers
	// correspond to the order in the captions source file. For IMF sources, track
	// numbering is based on the order that the captions appear in the CPL. For
	// example, use 1 to select the captions asset that is listed first in the CPL.
	// To include more than one captions track in your job outputs, create multiple
	// input captions selectors. Specify one track per selector.
	TrackNumber *int64 `locationName:"trackNumber" min:"1" type:"integer"`
}

// String returns the string representation
func (s TrackSourceSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TrackSourceSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *TrackSourceSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "TrackSourceSettings"}
	// Track numbers are 1-based; enforce the min:"1" constraint.
	if s.TrackNumber != nil && *s.TrackNumber < 1 {
		invalidParams.Add(request.NewErrParamMinValue("TrackNumber", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetTrackNumber sets the TrackNumber field's value.
func (s *TrackSourceSettings) SetTrackNumber(v int64) *TrackSourceSettings {
	s.TrackNumber = &v
	return s
}

// Settings related to TTML captions. TTML is a sidecar format that holds captions
// in a file that is separate from the video container. Set up sidecar captions
// in the same output group, but different output from your video. For more
// information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.
// When you work directly in your JSON job specification, include this object
// and any required children when you set destinationType to TTML.
type TtmlDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Pass through style and position information from a TTML-like input source
	// (TTML, IMSC, SMPTE-TT) to the TTML output.
	StylePassthrough *string `locationName:"stylePassthrough" type:"string" enum:"TtmlStylePassthrough"`
}

// String returns the string representation
func (s TtmlDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s TtmlDestinationSettings) GoString() string {
	return s.String()
}

// SetStylePassthrough sets the StylePassthrough field's value.
func (s *TtmlDestinationSettings) SetStylePassthrough(v string) *TtmlDestinationSettings {
	s.StylePassthrough = &v
	return s
}

// To remove tags from a resource, send a request with the Amazon Resource Name
// (ARN) of the resource and the keys of the tags that you want to remove.
type UntagResourceInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the resource that you want to remove tags
	// from. To get the ARN, send a GET request with the resource name.
	//
	// Arn is a required field
	Arn *string `location:"uri" locationName:"arn" type:"string" required:"true"`

	// The keys of the tags that you want to remove from the resource.
	TagKeys []*string `locationName:"tagKeys" type:"list"`
}

// String returns the string representation
func (s UntagResourceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UntagResourceInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *UntagResourceInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"}
	if s.Arn == nil {
		invalidParams.Add(request.NewErrParamRequired("Arn"))
	}
	// Arn is a URI path parameter (location:"uri"), so it must be non-empty.
	if s.Arn != nil && len(*s.Arn) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Arn", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetArn sets the Arn field's value.
func (s *UntagResourceInput) SetArn(v string) *UntagResourceInput {
	s.Arn = &v
	return s
}

// SetTagKeys sets the TagKeys field's value.
func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput {
	s.TagKeys = v
	return s
}

// A successful request to remove tags from a resource returns an OK message.
type UntagResourceOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s UntagResourceOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UntagResourceOutput) GoString() string {
	return s.String()
}

// Modify a job template by sending a request with the job template name and
// any of the following that you wish to change: description, category, and
// queue.
type UpdateJobTemplateInput struct {
	_ struct{} `type:"structure"`

	// Accelerated transcoding can significantly speed up jobs with long, visually
	// complex content. Outputs that use this feature incur pro-tier pricing. For
	// information about feature limitations, see the AWS Elemental MediaConvert
	// User Guide.
AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"` // The new category for the job template, if you are changing it. Category *string `locationName:"category" type:"string"` // The new description for the job template, if you are changing it. Description *string `locationName:"description" type:"string"` // Optional list of hop destinations. HopDestinations []*HopDestination `locationName:"hopDestinations" type:"list"` // The name of the job template you are modifying // // Name is a required field Name *string `location:"uri" locationName:"name" type:"string" required:"true"` // Specify the relative priority for this job. In any given queue, the service // begins processing the job with the highest value first. When more than one // job has the same priority, the service begins processing the job that you // submitted first. If you don't specify a priority, the service uses the default // value 0. Priority *int64 `locationName:"priority" type:"integer"` // The new queue for the job template, if you are changing it. Queue *string `locationName:"queue" type:"string"` // JobTemplateSettings contains all the transcode settings saved in the template // that will be applied to jobs created from it. Settings *JobTemplateSettings `locationName:"settings" type:"structure"` // Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch // Events. Set the interval, in seconds, between status updates. MediaConvert // sends an update at this interval from the time the service begins processing // your job to the time it completes the transcode or encounters an error. 
	StatusUpdateInterval *string `locationName:"statusUpdateInterval" type:"string" enum:"StatusUpdateInterval"`
}

// String returns the string representation
func (s UpdateJobTemplateInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateJobTemplateInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// This is client-side validation against the generated constraints only; the
// service may enforce additional rules at request time.
func (s *UpdateJobTemplateInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateJobTemplateInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}
	// Priority may be negative; the modeled minimum is -50.
	if s.Priority != nil && *s.Priority < -50 {
		invalidParams.Add(request.NewErrParamMinValue("Priority", -50))
	}
	if s.AccelerationSettings != nil {
		if err := s.AccelerationSettings.Validate(); err != nil {
			invalidParams.AddNested("AccelerationSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.HopDestinations != nil {
		for i, v := range s.HopDestinations {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "HopDestinations", i), err.(request.ErrInvalidParams))
			}
		}
	}
	if s.Settings != nil {
		if err := s.Settings.Validate(); err != nil {
			invalidParams.AddNested("Settings", err.(request.ErrInvalidParams))
		}
	}
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAccelerationSettings sets the AccelerationSettings field's value.
func (s *UpdateJobTemplateInput) SetAccelerationSettings(v *AccelerationSettings) *UpdateJobTemplateInput {
	s.AccelerationSettings = v
	return s
}

// SetCategory sets the Category field's value.
func (s *UpdateJobTemplateInput) SetCategory(v string) *UpdateJobTemplateInput {
	s.Category = &v
	return s
}

// SetDescription sets the Description field's value.
func (s *UpdateJobTemplateInput) SetDescription(v string) *UpdateJobTemplateInput {
	s.Description = &v
	return s
}

// SetHopDestinations sets the HopDestinations field's value.
func (s *UpdateJobTemplateInput) SetHopDestinations(v []*HopDestination) *UpdateJobTemplateInput {
	s.HopDestinations = v
	return s
}

// SetName sets the Name field's value.
func (s *UpdateJobTemplateInput) SetName(v string) *UpdateJobTemplateInput {
	s.Name = &v
	return s
}

// SetPriority sets the Priority field's value.
func (s *UpdateJobTemplateInput) SetPriority(v int64) *UpdateJobTemplateInput {
	s.Priority = &v
	return s
}

// SetQueue sets the Queue field's value.
func (s *UpdateJobTemplateInput) SetQueue(v string) *UpdateJobTemplateInput {
	s.Queue = &v
	return s
}

// SetSettings sets the Settings field's value.
func (s *UpdateJobTemplateInput) SetSettings(v *JobTemplateSettings) *UpdateJobTemplateInput {
	s.Settings = v
	return s
}

// SetStatusUpdateInterval sets the StatusUpdateInterval field's value.
func (s *UpdateJobTemplateInput) SetStatusUpdateInterval(v string) *UpdateJobTemplateInput {
	s.StatusUpdateInterval = &v
	return s
}

// Successful update job template requests will return the new job template
// JSON.
type UpdateJobTemplateOutput struct {
	_ struct{} `type:"structure"`

	// A job template is a pre-made set of encoding instructions that you can use
	// to quickly create a job.
	JobTemplate *JobTemplate `locationName:"jobTemplate" type:"structure"`
}

// String returns the string representation
func (s UpdateJobTemplateOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateJobTemplateOutput) GoString() string {
	return s.String()
}

// SetJobTemplate sets the JobTemplate field's value.
func (s *UpdateJobTemplateOutput) SetJobTemplate(v *JobTemplate) *UpdateJobTemplateOutput {
	s.JobTemplate = v
	return s
}

// Modify a preset by sending a request with the preset name and any of the
// following that you wish to change: description, category, and transcoding
// settings.
type UpdatePresetInput struct {
	_ struct{} `type:"structure"`

	// The new category for the preset, if you are changing it.
	Category *string `locationName:"category" type:"string"`

	// The new description for the preset, if you are changing it.
	Description *string `locationName:"description" type:"string"`

	// The name of the preset you are modifying.
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`

	// Settings for preset
	Settings *PresetSettings `locationName:"settings" type:"structure"`
}

// String returns the string representation
func (s UpdatePresetInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdatePresetInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// This is client-side validation only; the service may enforce additional
// rules at request time.
func (s *UpdatePresetInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdatePresetInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}
	if s.Settings != nil {
		if err := s.Settings.Validate(); err != nil {
			invalidParams.AddNested("Settings", err.(request.ErrInvalidParams))
		}
	}
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCategory sets the Category field's value.
func (s *UpdatePresetInput) SetCategory(v string) *UpdatePresetInput {
	s.Category = &v
	return s
}

// SetDescription sets the Description field's value.
func (s *UpdatePresetInput) SetDescription(v string) *UpdatePresetInput {
	s.Description = &v
	return s
}

// SetName sets the Name field's value.
func (s *UpdatePresetInput) SetName(v string) *UpdatePresetInput {
	s.Name = &v
	return s
}

// SetSettings sets the Settings field's value.
func (s *UpdatePresetInput) SetSettings(v *PresetSettings) *UpdatePresetInput {
	s.Settings = v
	return s
}

// Successful update preset requests will return the new preset JSON.
type UpdatePresetOutput struct {
	_ struct{} `type:"structure"`

	// A preset is a collection of preconfigured media conversion settings that
	// you want MediaConvert to apply to the output during the conversion process.
	Preset *Preset `locationName:"preset" type:"structure"`
}

// String returns the string representation
func (s UpdatePresetOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdatePresetOutput) GoString() string {
	return s.String()
}

// SetPreset sets the Preset field's value.
func (s *UpdatePresetOutput) SetPreset(v *Preset) *UpdatePresetOutput {
	s.Preset = v
	return s
}

// Modify a queue by sending a request with the queue name and any changes to
// the queue.
type UpdateQueueInput struct {
	_ struct{} `type:"structure"`

	// The new description for the queue, if you are changing it.
	Description *string `locationName:"description" type:"string"`

	// The name of the queue that you are modifying.
	//
	// Name is a required field
	Name *string `location:"uri" locationName:"name" type:"string" required:"true"`

	// The new details of your pricing plan for your reserved queue. When you set
	// up a new pricing plan to replace an expired one, you enter into another 12-month
	// commitment. When you add capacity to your queue by increasing the number
	// of RTS, you extend the term of your commitment to 12 months from when you
	// add capacity. After you make these commitments, you can't cancel them.
	ReservationPlanSettings *ReservationPlanSettings `locationName:"reservationPlanSettings" type:"structure"`

	// Pause or activate a queue by changing its status between ACTIVE and PAUSED.
	// If you pause a queue, jobs in that queue won't begin. Jobs that are running
	// when you pause the queue continue to run until they finish or result in an
	// error.
	Status *string `locationName:"status" type:"string" enum:"QueueStatus"`
}

// String returns the string representation
func (s UpdateQueueInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateQueueInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// This is client-side validation only; the service may enforce additional
// rules at request time.
func (s *UpdateQueueInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateQueueInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}
	if s.ReservationPlanSettings != nil {
		if err := s.ReservationPlanSettings.Validate(); err != nil {
			invalidParams.AddNested("ReservationPlanSettings", err.(request.ErrInvalidParams))
		}
	}
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDescription sets the Description field's value.
func (s *UpdateQueueInput) SetDescription(v string) *UpdateQueueInput {
	s.Description = &v
	return s
}

// SetName sets the Name field's value.
func (s *UpdateQueueInput) SetName(v string) *UpdateQueueInput {
	s.Name = &v
	return s
}

// SetReservationPlanSettings sets the ReservationPlanSettings field's value.
func (s *UpdateQueueInput) SetReservationPlanSettings(v *ReservationPlanSettings) *UpdateQueueInput {
	s.ReservationPlanSettings = v
	return s
}

// SetStatus sets the Status field's value.
func (s *UpdateQueueInput) SetStatus(v string) *UpdateQueueInput {
	s.Status = &v
	return s
}

// Successful update queue requests return the new queue information in JSON
// format.
type UpdateQueueOutput struct {
	_ struct{} `type:"structure"`

	// You can use queues to manage the resources that are available to your AWS
	// account for running multiple transcoding jobs at the same time. If you don't
	// specify a queue, the service sends all jobs through the default queue. For
	// more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html.
	Queue *Queue `locationName:"queue" type:"structure"`
}

// String returns the string representation
func (s UpdateQueueOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateQueueOutput) GoString() string {
	return s.String()
}

// SetQueue sets the Queue field's value.
func (s *UpdateQueueOutput) SetQueue(v *Queue) *UpdateQueueOutput {
	s.Queue = v
	return s
}

// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value VC3
type Vc3Settings struct {
	_ struct{} `type:"structure"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Vc3FramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Vc3FramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976. Minimum: 1.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976. Minimum: 24.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"24" type:"integer"`

	// Optional. Choose the scan line type for this output. If you don't specify
	// a value, MediaConvert will create a progressive output.
	InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"Vc3InterlaceMode"`

	// Use this setting for interlaced outputs, when your output frame rate is half
	// of your input frame rate. In this situation, choose Optimized interlacing
	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
	// case, each progressive frame from the input corresponds to an interlaced
	// field in the output. Keep the default value, Basic interlacing (INTERLACED),
	// for all other output frame rates. With basic interlacing, MediaConvert performs
	// any frame rate conversion first and then interlaces the frames. When you
	// choose Optimized interlacing and you set your output frame rate to a value
	// that isn't suitable for optimized interlacing, MediaConvert automatically
	// falls back to basic interlacing. Required settings: To use optimized interlacing,
	// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
	// use optimized interlacing for hard telecine outputs. You must also set Interlace
	// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
	ScanTypeConversionMode *string `locationName:"scanTypeConversionMode" type:"string" enum:"Vc3ScanTypeConversionMode"`

	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
	// second (fps). Enable slow PAL to create a 25 fps output by relabeling the
	// video frames and resampling your audio. Note that enabling this setting will
	// slightly reduce the duration of your video. Related settings: You must also
	// set Framerate to 25. In your JSON job specification, set (framerateControl)
	// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
	// 1.
	SlowPal *string `locationName:"slowPal" type:"string" enum:"Vc3SlowPal"`

	// When you do frame rate conversion from 23.976 frames per second (fps) to
	// 29.97 fps, and your output scan type is interlaced, you can optionally enable
	// hard telecine (HARD) to create a smoother picture. When you keep the default
	// value, None (NONE), MediaConvert does a standard frame rate conversion to
	// 29.97 without doing anything with the field polarity to create a smoother
	// picture.
	Telecine *string `locationName:"telecine" type:"string" enum:"Vc3Telecine"`

	// Specify the VC3 class to choose the quality characteristics for this output.
	// VC3 class, together with the settings Framerate (framerateNumerator and framerateDenominator)
	// and Resolution (height and width), determine your output bitrate. For example,
	// say that your video resolution is 1920x1080 and your framerate is 29.97.
	// Then Class 145 (CLASS_145) gives you an output with a bitrate of approximately
	// 145 Mbps and Class 220 (CLASS_220) gives you and output with a bitrate of
	// approximately 220 Mbps. VC3 class also specifies the color bit depth of your
	// output.
	Vc3Class *string `locationName:"vc3Class" type:"string" enum:"Vc3Class"`
}

// String returns the string representation
func (s Vc3Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Vc3Settings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Vc3Settings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "Vc3Settings"} if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1)) } if s.FramerateNumerator != nil && *s.FramerateNumerator < 24 { invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 24)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetFramerateControl sets the FramerateControl field's value. func (s *Vc3Settings) SetFramerateControl(v string) *Vc3Settings { s.FramerateControl = &v return s } // SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value. func (s *Vc3Settings) SetFramerateConversionAlgorithm(v string) *Vc3Settings { s.FramerateConversionAlgorithm = &v return s } // SetFramerateDenominator sets the FramerateDenominator field's value. func (s *Vc3Settings) SetFramerateDenominator(v int64) *Vc3Settings { s.FramerateDenominator = &v return s } // SetFramerateNumerator sets the FramerateNumerator field's value. func (s *Vc3Settings) SetFramerateNumerator(v int64) *Vc3Settings { s.FramerateNumerator = &v return s } // SetInterlaceMode sets the InterlaceMode field's value. func (s *Vc3Settings) SetInterlaceMode(v string) *Vc3Settings { s.InterlaceMode = &v return s } // SetScanTypeConversionMode sets the ScanTypeConversionMode field's value. func (s *Vc3Settings) SetScanTypeConversionMode(v string) *Vc3Settings { s.ScanTypeConversionMode = &v return s } // SetSlowPal sets the SlowPal field's value. func (s *Vc3Settings) SetSlowPal(v string) *Vc3Settings { s.SlowPal = &v return s } // SetTelecine sets the Telecine field's value. func (s *Vc3Settings) SetTelecine(v string) *Vc3Settings { s.Telecine = &v return s } // SetVc3Class sets the Vc3Class field's value. 
func (s *Vc3Settings) SetVc3Class(v string) *Vc3Settings {
	s.Vc3Class = &v
	return s
}

// Video codec settings, (CodecSettings) under (VideoDescription), contains
// the group of settings related to video encoding. The settings in this group
// vary depending on the value that you choose for Video codec (Codec). For
// each codec enum that you choose, define the corresponding settings object.
// The following lists the codec enum, settings object pairs. * AV1, Av1Settings
// * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264,
// H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings
// * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings
type VideoCodecSettings struct {
	_ struct{} `type:"structure"`

	// Required when you set Codec, under VideoDescription>CodecSettings to the
	// value AV1.
	Av1Settings *Av1Settings `locationName:"av1Settings" type:"structure"`

	// Required when you choose AVC-Intra for your output video codec. For more
	// information about the AVC-Intra settings, see the relevant specification.
	// For detailed information about SD and HD in AVC-Intra, see https://ieeexplore.ieee.org/document/7290936.
	// For information about 4K/2K in AVC-Intra, see https://pro-av.panasonic.net/en/avc-ultra/AVC-ULTRAoverview.pdf.
	AvcIntraSettings *AvcIntraSettings `locationName:"avcIntraSettings" type:"structure"`

	// Specifies the video codec. This must be equal to one of the enum values defined
	// by the object VideoCodec.
	Codec *string `locationName:"codec" type:"string" enum:"VideoCodec"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value FRAME_CAPTURE.
	FrameCaptureSettings *FrameCaptureSettings `locationName:"frameCaptureSettings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value H_264.
	H264Settings *H264Settings `locationName:"h264Settings" type:"structure"`

	// Settings for H265 codec
	H265Settings *H265Settings `locationName:"h265Settings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value MPEG2.
	Mpeg2Settings *Mpeg2Settings `locationName:"mpeg2Settings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value PRORES.
	ProresSettings *ProresSettings `locationName:"proresSettings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value VC3
	Vc3Settings *Vc3Settings `locationName:"vc3Settings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value VP8.
	Vp8Settings *Vp8Settings `locationName:"vp8Settings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value VP9.
	Vp9Settings *Vp9Settings `locationName:"vp9Settings" type:"structure"`

	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
	// the value XAVC.
	XavcSettings *XavcSettings `locationName:"xavcSettings" type:"structure"`
}

// String returns the string representation
func (s VideoCodecSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s VideoCodecSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *VideoCodecSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "VideoCodecSettings"} if s.Av1Settings != nil { if err := s.Av1Settings.Validate(); err != nil { invalidParams.AddNested("Av1Settings", err.(request.ErrInvalidParams)) } } if s.AvcIntraSettings != nil { if err := s.AvcIntraSettings.Validate(); err != nil { invalidParams.AddNested("AvcIntraSettings", err.(request.ErrInvalidParams)) } } if s.FrameCaptureSettings != nil { if err := s.FrameCaptureSettings.Validate(); err != nil { invalidParams.AddNested("FrameCaptureSettings", err.(request.ErrInvalidParams)) } } if s.H264Settings != nil { if err := s.H264Settings.Validate(); err != nil { invalidParams.AddNested("H264Settings", err.(request.ErrInvalidParams)) } } if s.H265Settings != nil { if err := s.H265Settings.Validate(); err != nil { invalidParams.AddNested("H265Settings", err.(request.ErrInvalidParams)) } } if s.Mpeg2Settings != nil { if err := s.Mpeg2Settings.Validate(); err != nil { invalidParams.AddNested("Mpeg2Settings", err.(request.ErrInvalidParams)) } } if s.ProresSettings != nil { if err := s.ProresSettings.Validate(); err != nil { invalidParams.AddNested("ProresSettings", err.(request.ErrInvalidParams)) } } if s.Vc3Settings != nil { if err := s.Vc3Settings.Validate(); err != nil { invalidParams.AddNested("Vc3Settings", err.(request.ErrInvalidParams)) } } if s.Vp8Settings != nil { if err := s.Vp8Settings.Validate(); err != nil { invalidParams.AddNested("Vp8Settings", err.(request.ErrInvalidParams)) } } if s.Vp9Settings != nil { if err := s.Vp9Settings.Validate(); err != nil { invalidParams.AddNested("Vp9Settings", err.(request.ErrInvalidParams)) } } if s.XavcSettings != nil { if err := s.XavcSettings.Validate(); err != nil { invalidParams.AddNested("XavcSettings", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAv1Settings sets the Av1Settings field's value. 
func (s *VideoCodecSettings) SetAv1Settings(v *Av1Settings) *VideoCodecSettings { s.Av1Settings = v return s } // SetAvcIntraSettings sets the AvcIntraSettings field's value. func (s *VideoCodecSettings) SetAvcIntraSettings(v *AvcIntraSettings) *VideoCodecSettings { s.AvcIntraSettings = v return s } // SetCodec sets the Codec field's value. func (s *VideoCodecSettings) SetCodec(v string) *VideoCodecSettings { s.Codec = &v return s } // SetFrameCaptureSettings sets the FrameCaptureSettings field's value. func (s *VideoCodecSettings) SetFrameCaptureSettings(v *FrameCaptureSettings) *VideoCodecSettings { s.FrameCaptureSettings = v return s } // SetH264Settings sets the H264Settings field's value. func (s *VideoCodecSettings) SetH264Settings(v *H264Settings) *VideoCodecSettings { s.H264Settings = v return s } // SetH265Settings sets the H265Settings field's value. func (s *VideoCodecSettings) SetH265Settings(v *H265Settings) *VideoCodecSettings { s.H265Settings = v return s } // SetMpeg2Settings sets the Mpeg2Settings field's value. func (s *VideoCodecSettings) SetMpeg2Settings(v *Mpeg2Settings) *VideoCodecSettings { s.Mpeg2Settings = v return s } // SetProresSettings sets the ProresSettings field's value. func (s *VideoCodecSettings) SetProresSettings(v *ProresSettings) *VideoCodecSettings { s.ProresSettings = v return s } // SetVc3Settings sets the Vc3Settings field's value. func (s *VideoCodecSettings) SetVc3Settings(v *Vc3Settings) *VideoCodecSettings { s.Vc3Settings = v return s } // SetVp8Settings sets the Vp8Settings field's value. func (s *VideoCodecSettings) SetVp8Settings(v *Vp8Settings) *VideoCodecSettings { s.Vp8Settings = v return s } // SetVp9Settings sets the Vp9Settings field's value. func (s *VideoCodecSettings) SetVp9Settings(v *Vp9Settings) *VideoCodecSettings { s.Vp9Settings = v return s } // SetXavcSettings sets the XavcSettings field's value. 
func (s *VideoCodecSettings) SetXavcSettings(v *XavcSettings) *VideoCodecSettings {
	s.XavcSettings = v
	return s
}

// Settings related to video encoding of your output. The specific video settings
// depend on the video codec that you choose. When you work directly in your
// JSON job specification, include one instance of Video description (VideoDescription)
// per output.
type VideoDescription struct {
	_ struct{} `type:"structure"`

	// This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert
	// AFD signaling (AfdSignaling) to specify whether the service includes AFD
	// values in the output video data and what those values are. * Choose None
	// to remove all AFD values from this output. * Choose Fixed to ignore input
	// AFD values and instead encode the value specified in the job. * Choose Auto
	// to calculate output AFD values based on the input AFD scaler data.
	AfdSignaling *string `locationName:"afdSignaling" type:"string" enum:"AfdSignaling"`

	// The anti-alias filter is automatically applied to all outputs. The service
	// no longer accepts the value DISABLED for AntiAlias. If you specify that in
	// your job, the service will ignore the setting.
	AntiAlias *string `locationName:"antiAlias" type:"string" enum:"AntiAlias"`

	// Video codec settings, (CodecSettings) under (VideoDescription), contains
	// the group of settings related to video encoding. The settings in this group
	// vary depending on the value that you choose for Video codec (Codec). For
	// each codec enum that you choose, define the corresponding settings object.
	// The following lists the codec enum, settings object pairs. * AV1, Av1Settings
	// * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264,
	// H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings
	// * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings
	CodecSettings *VideoCodecSettings `locationName:"codecSettings" type:"structure"`

	// Choose Insert (INSERT) for this setting to include color metadata in this
	// output. Choose Ignore (IGNORE) to exclude color metadata from this output.
	// If you don't specify a value, the service sets this to Insert by default.
	ColorMetadata *string `locationName:"colorMetadata" type:"string" enum:"ColorMetadata"`

	// Use Cropping selection (crop) to specify the video area that the service
	// will include in the output video frame.
	Crop *Rectangle `locationName:"crop" type:"structure"`

	// Applies only to 29.97 fps outputs. When this feature is enabled, the service
	// will use drop-frame timecode on outputs. If it is not possible to use drop-frame
	// timecode, the system will fall back to non-drop-frame. This setting is enabled
	// by default when Timecode insertion (TimecodeInsertion) is enabled.
	DropFrameTimecode *string `locationName:"dropFrameTimecode" type:"string" enum:"DropFrameTimecode"`

	// Applies only if you set AFD Signaling(AfdSignaling) to Fixed (FIXED). Use
	// Fixed (FixedAfd) to specify a four-bit AFD value which the service will write
	// on all frames of this video output.
	FixedAfd *int64 `locationName:"fixedAfd" type:"integer"`

	// Use the Height (Height) setting to define the video resolution height for
	// this output. Specify in pixels. If you don't provide a value here, the service
	// will use the input height. Minimum: 32.
	Height *int64 `locationName:"height" min:"32" type:"integer"`

	// Use Selection placement (position) to define the video area in your output
	// frame. The area outside of the rectangle that you specify here is black.
	Position *Rectangle `locationName:"position" type:"structure"`

	// Use Respond to AFD (RespondToAfd) to specify how the service changes the
	// video itself in response to AFD values in the input. * Choose Respond to
	// clip the input video frame according to the AFD value, input display aspect
	// ratio, and output display aspect ratio. * Choose Passthrough to include the
	// input AFD values. Do not choose this when AfdSignaling is set to (NONE).
	// A preferred implementation of this workflow is to set RespondToAfd to (NONE)
	// and set AfdSignaling to (AUTO). * Choose None to remove all input AFD values
	// from this output.
	RespondToAfd *string `locationName:"respondToAfd" type:"string" enum:"RespondToAfd"`

	// Specify how the service handles outputs that have a different aspect ratio
	// from the input aspect ratio. Choose Stretch to output (STRETCH_TO_OUTPUT)
	// to have the service stretch your video image to fit. Keep the setting Default
	// (DEFAULT) to have the service letterbox your video instead. This setting
	// overrides any value that you specify for the setting Selection placement
	// (position) in this output.
	ScalingBehavior *string `locationName:"scalingBehavior" type:"string" enum:"ScalingBehavior"`

	// Use Sharpness (Sharpness) setting to specify the strength of anti-aliasing.
	// This setting changes the width of the anti-alias filter kernel used for scaling.
	// Sharpness only applies if your output resolution is different from your input
	// resolution. 0 is the softest setting, 100 the sharpest, and 50 recommended
	// for most content.
	Sharpness *int64 `locationName:"sharpness" type:"integer"`

	// Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode
	// insertion when the input frame rate is identical to the output frame rate.
	// To include timecodes in this output, set Timecode insertion (VideoTimecodeInsertion)
	// to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED.
	// When the service inserts timecodes in an output, by default, it uses any
	// embedded timecodes from the input. If none are present, the service will
	// set the timecode for the first output frame to zero. To change this default
	// behavior, adjust the settings under Timecode configuration (TimecodeConfig).
	// In the console, these settings are located under Job > Job settings > Timecode
	// configuration. Note - Timecode source under input settings (InputTimecodeSource)
	// does not affect the timecodes that are inserted in the output. Source under
	// Job settings > Timecode configuration (TimecodeSource) does.
	TimecodeInsertion *string `locationName:"timecodeInsertion" type:"string" enum:"VideoTimecodeInsertion"`

	// Find additional transcoding features under Preprocessors (VideoPreprocessors).
	// Enable the features at each output individually. These features are disabled
	// by default.
	VideoPreprocessors *VideoPreprocessor `locationName:"videoPreprocessors" type:"structure"`

	// Use Width (Width) to define the video resolution width, in pixels, for this
	// output. If you don't provide a value here, the service will use the input
	// width. Minimum: 32.
	Width *int64 `locationName:"width" min:"32" type:"integer"`
}

// String returns the string representation
func (s VideoDescription) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s VideoDescription) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *VideoDescription) Validate() error {
	// Accumulate every violation so the caller sees all problems in one error
	// rather than failing on the first.
	invalidParams := request.ErrInvalidParams{Context: "VideoDescription"}
	if s.Height != nil && *s.Height < 32 {
		invalidParams.Add(request.NewErrParamMinValue("Height", 32))
	}
	if s.Width != nil && *s.Width < 32 {
		invalidParams.Add(request.NewErrParamMinValue("Width", 32))
	}
	// Nested structures validate themselves; their failures are recorded
	// under this type's context via AddNested.
	if s.CodecSettings != nil {
		if err := s.CodecSettings.Validate(); err != nil {
			invalidParams.AddNested("CodecSettings", err.(request.ErrInvalidParams))
		}
	}
	if s.Crop != nil {
		if err := s.Crop.Validate(); err != nil {
			invalidParams.AddNested("Crop", err.(request.ErrInvalidParams))
		}
	}
	if s.Position != nil {
		if err := s.Position.Validate(); err != nil {
			invalidParams.AddNested("Position", err.(request.ErrInvalidParams))
		}
	}
	if s.VideoPreprocessors != nil {
		if err := s.VideoPreprocessors.Validate(); err != nil {
			invalidParams.AddNested("VideoPreprocessors", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// The Set* methods below return the receiver to allow call chaining.

// SetAfdSignaling sets the AfdSignaling field's value.
func (s *VideoDescription) SetAfdSignaling(v string) *VideoDescription {
	s.AfdSignaling = &v
	return s
}

// SetAntiAlias sets the AntiAlias field's value.
func (s *VideoDescription) SetAntiAlias(v string) *VideoDescription {
	s.AntiAlias = &v
	return s
}

// SetCodecSettings sets the CodecSettings field's value.
func (s *VideoDescription) SetCodecSettings(v *VideoCodecSettings) *VideoDescription {
	s.CodecSettings = v
	return s
}

// SetColorMetadata sets the ColorMetadata field's value.
func (s *VideoDescription) SetColorMetadata(v string) *VideoDescription {
	s.ColorMetadata = &v
	return s
}

// SetCrop sets the Crop field's value.
func (s *VideoDescription) SetCrop(v *Rectangle) *VideoDescription {
	s.Crop = v
	return s
}

// SetDropFrameTimecode sets the DropFrameTimecode field's value.
func (s *VideoDescription) SetDropFrameTimecode(v string) *VideoDescription {
	s.DropFrameTimecode = &v
	return s
}

// SetFixedAfd sets the FixedAfd field's value.
func (s *VideoDescription) SetFixedAfd(v int64) *VideoDescription {
	s.FixedAfd = &v
	return s
}

// SetHeight sets the Height field's value.
func (s *VideoDescription) SetHeight(v int64) *VideoDescription {
	s.Height = &v
	return s
}

// SetPosition sets the Position field's value.
func (s *VideoDescription) SetPosition(v *Rectangle) *VideoDescription {
	s.Position = v
	return s
}

// SetRespondToAfd sets the RespondToAfd field's value.
func (s *VideoDescription) SetRespondToAfd(v string) *VideoDescription {
	s.RespondToAfd = &v
	return s
}

// SetScalingBehavior sets the ScalingBehavior field's value.
func (s *VideoDescription) SetScalingBehavior(v string) *VideoDescription {
	s.ScalingBehavior = &v
	return s
}

// SetSharpness sets the Sharpness field's value.
func (s *VideoDescription) SetSharpness(v int64) *VideoDescription {
	s.Sharpness = &v
	return s
}

// SetTimecodeInsertion sets the TimecodeInsertion field's value.
func (s *VideoDescription) SetTimecodeInsertion(v string) *VideoDescription {
	s.TimecodeInsertion = &v
	return s
}

// SetVideoPreprocessors sets the VideoPreprocessors field's value.
func (s *VideoDescription) SetVideoPreprocessors(v *VideoPreprocessor) *VideoDescription {
	s.VideoPreprocessors = v
	return s
}

// SetWidth sets the Width field's value.
func (s *VideoDescription) SetWidth(v int64) *VideoDescription {
	s.Width = &v
	return s
}

// Contains details about the output's video stream
type VideoDetail struct {
	_ struct{} `type:"structure"`

	// Height in pixels for the output
	HeightInPx *int64 `locationName:"heightInPx" type:"integer"`

	// Width in pixels for the output
	WidthInPx *int64 `locationName:"widthInPx" type:"integer"`
}

// String returns the string representation
func (s VideoDetail) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s VideoDetail) GoString() string {
	return s.String()
}

// SetHeightInPx sets the HeightInPx field's value.
func (s *VideoDetail) SetHeightInPx(v int64) *VideoDetail {
	s.HeightInPx = &v
	return s
}

// SetWidthInPx sets the WidthInPx field's value.
func (s *VideoDetail) SetWidthInPx(v int64) *VideoDetail {
	s.WidthInPx = &v
	return s
}

// Find additional transcoding features under Preprocessors (VideoPreprocessors).
// Enable the features at each output individually. These features are disabled
// by default.
type VideoPreprocessor struct {
	_ struct{} `type:"structure"`

	// Use these settings to convert the color space or to modify properties such
	// as hue and contrast for this output. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/converting-the-color-space.html.
	ColorCorrector *ColorCorrector `locationName:"colorCorrector" type:"structure"`

	// Use the deinterlacer to produce smoother motion and a clearer picture. For
	// more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-scan-type.html.
	Deinterlacer *Deinterlacer `locationName:"deinterlacer" type:"structure"`

	// Enable Dolby Vision feature to produce Dolby Vision compatible video output.
	DolbyVision *DolbyVision `locationName:"dolbyVision" type:"structure"`

	// Enable HDR10+ analysis and metadata injection. Compatible with HEVC only.
	Hdr10Plus *Hdr10Plus `locationName:"hdr10Plus" type:"structure"`

	// Enable the Image inserter (ImageInserter) feature to include a graphic overlay
	// on your video. Enable or disable this feature for each output individually.
	// This setting is disabled by default.
	ImageInserter *ImageInserter `locationName:"imageInserter" type:"structure"`

	// Enable the Noise reducer (NoiseReducer) feature to remove noise from your
	// video output if necessary. Enable or disable this feature for each output
	// individually. This setting is disabled by default.
	NoiseReducer *NoiseReducer `locationName:"noiseReducer" type:"structure"`

	// If you work with a third party video watermarking partner, use the group
	// of settings that correspond with your watermarking partner to include watermarks
	// in your output.
	PartnerWatermarking *PartnerWatermarking `locationName:"partnerWatermarking" type:"structure"`

	// Settings for burning the output timecode and specified prefix into the output.
	TimecodeBurnin *TimecodeBurnin `locationName:"timecodeBurnin" type:"structure"`
}

// String returns the string representation
func (s VideoPreprocessor) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s VideoPreprocessor) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *VideoPreprocessor) Validate() error {
	// This type has no scalar constraints of its own; it only delegates to the
	// nested preprocessor settings that define a Validate method.
	invalidParams := request.ErrInvalidParams{Context: "VideoPreprocessor"}
	if s.ColorCorrector != nil {
		if err := s.ColorCorrector.Validate(); err != nil {
			invalidParams.AddNested("ColorCorrector", err.(request.ErrInvalidParams))
		}
	}
	if s.ImageInserter != nil {
		if err := s.ImageInserter.Validate(); err != nil {
			invalidParams.AddNested("ImageInserter", err.(request.ErrInvalidParams))
		}
	}
	if s.NoiseReducer != nil {
		if err := s.NoiseReducer.Validate(); err != nil {
			invalidParams.AddNested("NoiseReducer", err.(request.ErrInvalidParams))
		}
	}
	if s.PartnerWatermarking != nil {
		if err := s.PartnerWatermarking.Validate(); err != nil {
			invalidParams.AddNested("PartnerWatermarking", err.(request.ErrInvalidParams))
		}
	}
	if s.TimecodeBurnin != nil {
		if err := s.TimecodeBurnin.Validate(); err != nil {
			invalidParams.AddNested("TimecodeBurnin", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetColorCorrector sets the ColorCorrector field's value.
func (s *VideoPreprocessor) SetColorCorrector(v *ColorCorrector) *VideoPreprocessor {
	s.ColorCorrector = v
	return s
}

// SetDeinterlacer sets the Deinterlacer field's value.
func (s *VideoPreprocessor) SetDeinterlacer(v *Deinterlacer) *VideoPreprocessor {
	s.Deinterlacer = v
	return s
}

// SetDolbyVision sets the DolbyVision field's value.
func (s *VideoPreprocessor) SetDolbyVision(v *DolbyVision) *VideoPreprocessor {
	s.DolbyVision = v
	return s
}

// SetHdr10Plus sets the Hdr10Plus field's value.
func (s *VideoPreprocessor) SetHdr10Plus(v *Hdr10Plus) *VideoPreprocessor {
	s.Hdr10Plus = v
	return s
}

// SetImageInserter sets the ImageInserter field's value.
func (s *VideoPreprocessor) SetImageInserter(v *ImageInserter) *VideoPreprocessor {
	s.ImageInserter = v
	return s
}

// SetNoiseReducer sets the NoiseReducer field's value.
func (s *VideoPreprocessor) SetNoiseReducer(v *NoiseReducer) *VideoPreprocessor {
	s.NoiseReducer = v
	return s
}

// SetPartnerWatermarking sets the PartnerWatermarking field's value.
func (s *VideoPreprocessor) SetPartnerWatermarking(v *PartnerWatermarking) *VideoPreprocessor {
	s.PartnerWatermarking = v
	return s
}

// SetTimecodeBurnin sets the TimecodeBurnin field's value.
func (s *VideoPreprocessor) SetTimecodeBurnin(v *TimecodeBurnin) *VideoPreprocessor {
	s.TimecodeBurnin = v
	return s
}

// Input video selectors contain the video settings for the input. Each of your
// inputs can have up to one video selector.
type VideoSelector struct {
	_ struct{} `type:"structure"`

	// Ignore this setting unless this input is a QuickTime animation with an alpha
	// channel. Use this setting to create separate Key and Fill outputs. In each
	// output, specify which part of the input MediaConvert uses. Leave this setting
	// at the default value DISCARD to delete the alpha channel and preserve the
	// video. Set it to REMAP_TO_LUMA to delete the video and map the alpha channel
	// to the luma channel of your outputs.
	AlphaBehavior *string `locationName:"alphaBehavior" type:"string" enum:"AlphaBehavior"`

	// If your input video has accurate color space metadata, or if you don't know
	// about color space, leave this set to the default value Follow (FOLLOW). The
	// service will automatically detect your input color space. If your input video
	// has metadata indicating the wrong color space, specify the accurate color
	// space here. If your input video is HDR 10 and the SMPTE ST 2086 Mastering
	// Display Color Volume static metadata isn't present in your video stream,
	// or if that metadata is present but not accurate, choose Force HDR 10 (FORCE_HDR10)
	// here and specify correct values in the input HDR 10 metadata (Hdr10Metadata)
	// settings. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.
	ColorSpace *string `locationName:"colorSpace" type:"string" enum:"ColorSpace"`

	// There are two sources for color metadata, the input file and the job input
	// settings Color space (ColorSpace) and HDR master display information settings(Hdr10Metadata).
	// The Color space usage setting determines which takes precedence. Choose Force
	// (FORCE) to use color metadata from the input job settings. If you don't specify
	// values for those settings, the service defaults to using metadata from your
	// input. FALLBACK - Choose Fallback (FALLBACK) to use color metadata from the
	// source when it is present. If there's no color metadata in your input file,
	// the service defaults to using values you specify in the input settings.
	ColorSpaceUsage *string `locationName:"colorSpaceUsage" type:"string" enum:"ColorSpaceUsage"`

	// Use these settings to provide HDR 10 metadata that is missing or inaccurate
	// in your input video. Appropriate values vary depending on the input video
	// and must be provided by a color grader. The color grader generates these
	// values during the HDR 10 mastering process. The valid range for each of these
	// settings is 0 to 50,000. Each increment represents 0.00002 in CIE1931 color
	// coordinate. Related settings - When you specify these values, you must also
	// set Color space (ColorSpace) to HDR 10 (HDR10). To specify whether the
	// values you specify here take precedence over the values in the metadata of
	// your input file, set Color space usage (ColorSpaceUsage). To specify whether
	// color metadata is included in an output, set Color metadata (ColorMetadata).
	// For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.
	Hdr10Metadata *Hdr10Metadata `locationName:"hdr10Metadata" type:"structure"`

	// Use PID (Pid) to select specific video data from an input file. Specify this
	// value as an integer; the system automatically converts it to the hexadecimal
	// value. For example, 257 selects PID 0x101. A PID, or packet identifier, is
	// an identifier for a set of data in an MPEG-2 transport stream container.
	Pid *int64 `locationName:"pid" min:"1" type:"integer"`

	// Selects a specific program from within a multi-program transport stream.
	// Note that Quad 4K is not currently supported.
	ProgramNumber *int64 `locationName:"programNumber" type:"integer"`

	// Use Rotate (InputRotate) to specify how the service rotates your video. You
	// can choose automatic rotation or specify a rotation. You can specify a clockwise
	// rotation of 0, 90, 180, or 270 degrees. If your input video container is
	// .mov or .mp4 and your input has rotation metadata, you can choose Automatic
	// to have the service rotate your video according to the rotation specified
	// in the metadata. The rotation must be within one degree of 90, 180, or 270
	// degrees. If the rotation metadata specifies any other rotation, the service
	// will default to no rotation. By default, the service does no rotation, even
	// if your input video has rotation metadata. The service doesn't pass through
	// rotation metadata.
	Rotate *string `locationName:"rotate" type:"string" enum:"InputRotate"`

	// Use this setting when your input video codec is AVC-Intra. Ignore this setting
	// for all other inputs. If the sample range metadata in your input video is
	// accurate, or if you don't know about sample range, keep the default value,
	// Follow (FOLLOW), for this setting. When you do, the service automatically
	// detects your input sample range. If your input video has metadata indicating
	// the wrong sample range, specify the accurate sample range here. When you
	// do, MediaConvert ignores any sample range information in the input metadata.
	// Regardless of whether MediaConvert uses the input sample range or the sample
	// range that you specify, MediaConvert uses the sample range for transcoding
	// and also writes it to the output metadata.
	SampleRange *string `locationName:"sampleRange" type:"string" enum:"InputSampleRange"`
}

// String returns the string representation
func (s VideoSelector) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s VideoSelector) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *VideoSelector) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "VideoSelector"}
	if s.Pid != nil && *s.Pid < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Pid", 1))
	}
	// NOTE: -2.147483648e+09 equals math.MinInt32; the untyped constant
	// converts exactly to int64 in this comparison.
	if s.ProgramNumber != nil && *s.ProgramNumber < -2.147483648e+09 {
		invalidParams.Add(request.NewErrParamMinValue("ProgramNumber", -2.147483648e+09))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAlphaBehavior sets the AlphaBehavior field's value.
func (s *VideoSelector) SetAlphaBehavior(v string) *VideoSelector {
	s.AlphaBehavior = &v
	return s
}

// SetColorSpace sets the ColorSpace field's value.
func (s *VideoSelector) SetColorSpace(v string) *VideoSelector {
	s.ColorSpace = &v
	return s
}

// SetColorSpaceUsage sets the ColorSpaceUsage field's value.
func (s *VideoSelector) SetColorSpaceUsage(v string) *VideoSelector {
	s.ColorSpaceUsage = &v
	return s
}

// SetHdr10Metadata sets the Hdr10Metadata field's value.
func (s *VideoSelector) SetHdr10Metadata(v *Hdr10Metadata) *VideoSelector {
	s.Hdr10Metadata = v
	return s
}

// SetPid sets the Pid field's value.
func (s *VideoSelector) SetPid(v int64) *VideoSelector {
	s.Pid = &v
	return s
}

// SetProgramNumber sets the ProgramNumber field's value.
func (s *VideoSelector) SetProgramNumber(v int64) *VideoSelector {
	s.ProgramNumber = &v
	return s
}

// SetRotate sets the Rotate field's value.
func (s *VideoSelector) SetRotate(v string) *VideoSelector {
	s.Rotate = &v
	return s
}

// SetSampleRange sets the SampleRange field's value.
func (s *VideoSelector) SetSampleRange(v string) *VideoSelector {
	s.SampleRange = &v
	return s
}

// Required when you set Codec, under AudioDescriptions>CodecSettings, to the
// value Vorbis.
type VorbisSettings struct {
	_ struct{} `type:"structure"`

	// Optional. Specify the number of channels in this output audio track. Choosing
	// Mono on the console gives you 1 output channel; choosing Stereo gives you
	// 2. In the API, valid values are 1 and 2. The default value is 2.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// Optional. Specify the audio sample rate in Hz. Valid values are 22050, 32000,
	// 44100, and 48000. The default value is 48000.
	SampleRate *int64 `locationName:"sampleRate" min:"22050" type:"integer"`

	// Optional. Specify the variable audio quality of this Vorbis output from -1
	// (lowest quality, ~45 kbit/s) to 10 (highest quality, ~500 kbit/s). The default
	// value is 4 (~128 kbit/s). Values 5 and 6 are approximately 160 and 192 kbit/s,
	// respectively.
	VbrQuality *int64 `locationName:"vbrQuality" type:"integer"`
}

// String returns the string representation
func (s VorbisSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s VorbisSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *VorbisSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "VorbisSettings"}
	if s.Channels != nil && *s.Channels < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Channels", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 22050 {
		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 22050))
	}
	// VbrQuality legitimately ranges down to -1 (see the field's doc comment),
	// hence the minimum of -1 rather than 0.
	if s.VbrQuality != nil && *s.VbrQuality < -1 {
		invalidParams.Add(request.NewErrParamMinValue("VbrQuality", -1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetChannels sets the Channels field's value.
func (s *VorbisSettings) SetChannels(v int64) *VorbisSettings {
	s.Channels = &v
	return s
}

// SetSampleRate sets the SampleRate field's value.
func (s *VorbisSettings) SetSampleRate(v int64) *VorbisSettings {
	s.SampleRate = &v
	return s
}

// SetVbrQuality sets the VbrQuality field's value.
func (s *VorbisSettings) SetVbrQuality(v int64) *VorbisSettings {
	s.VbrQuality = &v
	return s
}

// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value VP8.
type Vp8Settings struct {
	_ struct{} `type:"structure"`

	// Target bitrate in bits/second. For example, enter five megabits per second
	// as 5000000.
	Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Vp8FramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Vp8FramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// GOP Length (keyframe interval) in frames. Must be greater than zero.
	GopSize *float64 `locationName:"gopSize" type:"double"`

	// Optional. Size of buffer (HRD buffer model) in bits. For example, enter five
	// megabits as 5000000.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Ignore this setting unless you set qualityTuningLevel to MULTI_PASS. Optional.
	// Specify the maximum bitrate in bits/second. For example, enter five megabits
	// per second as 5000000. The default behavior uses twice the target bitrate
	// as the maximum bitrate.
	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`

	// Optional. Specify how the service determines the pixel aspect ratio (PAR)
	// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
	// uses the PAR from your input video for your output. To specify a different
	// PAR in the console, choose any value other than Follow source. To specify
	// a different PAR by editing the JSON job specification, choose SPECIFIED.
	// When you choose SPECIFIED for this setting, you must also specify values
	// for the parNumerator and parDenominator settings.
	ParControl *string `locationName:"parControl" type:"string" enum:"Vp8ParControl"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parDenominator is 33.
	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parNumerator is 40.
	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, multi-pass encoding.
	QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Vp8QualityTuningLevel"`

	// With the VP8 codec, you can use only the variable bitrate (VBR) rate control
	// mode.
	RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Vp8RateControlMode"`
}

// String returns the string representation
func (s Vp8Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Vp8Settings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Vp8Settings) Validate() error {
	// Each check mirrors a min:"..." tag on the corresponding struct field.
	invalidParams := request.ErrInvalidParams{Context: "Vp8Settings"}
	if s.Bitrate != nil && *s.Bitrate < 1000 {
		invalidParams.Add(request.NewErrParamMinValue("Bitrate", 1000))
	}
	if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1))
	}
	if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1))
	}
	if s.MaxBitrate != nil && *s.MaxBitrate < 1000 {
		invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000))
	}
	if s.ParDenominator != nil && *s.ParDenominator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1))
	}
	if s.ParNumerator != nil && *s.ParNumerator < 1 {
		invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBitrate sets the Bitrate field's value.
func (s *Vp8Settings) SetBitrate(v int64) *Vp8Settings {
	s.Bitrate = &v
	return s
}

// SetFramerateControl sets the FramerateControl field's value.
func (s *Vp8Settings) SetFramerateControl(v string) *Vp8Settings {
	s.FramerateControl = &v
	return s
}

// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value.
func (s *Vp8Settings) SetFramerateConversionAlgorithm(v string) *Vp8Settings {
	s.FramerateConversionAlgorithm = &v
	return s
}

// SetFramerateDenominator sets the FramerateDenominator field's value.
func (s *Vp8Settings) SetFramerateDenominator(v int64) *Vp8Settings {
	s.FramerateDenominator = &v
	return s
}

// SetFramerateNumerator sets the FramerateNumerator field's value.
func (s *Vp8Settings) SetFramerateNumerator(v int64) *Vp8Settings {
	s.FramerateNumerator = &v
	return s
}

// SetGopSize sets the GopSize field's value.
func (s *Vp8Settings) SetGopSize(v float64) *Vp8Settings {
	s.GopSize = &v
	return s
}

// SetHrdBufferSize sets the HrdBufferSize field's value.
func (s *Vp8Settings) SetHrdBufferSize(v int64) *Vp8Settings {
	s.HrdBufferSize = &v
	return s
}

// SetMaxBitrate sets the MaxBitrate field's value.
func (s *Vp8Settings) SetMaxBitrate(v int64) *Vp8Settings {
	s.MaxBitrate = &v
	return s
}

// SetParControl sets the ParControl field's value.
func (s *Vp8Settings) SetParControl(v string) *Vp8Settings {
	s.ParControl = &v
	return s
}

// SetParDenominator sets the ParDenominator field's value.
func (s *Vp8Settings) SetParDenominator(v int64) *Vp8Settings {
	s.ParDenominator = &v
	return s
}

// SetParNumerator sets the ParNumerator field's value.
func (s *Vp8Settings) SetParNumerator(v int64) *Vp8Settings {
	s.ParNumerator = &v
	return s
}

// SetQualityTuningLevel sets the QualityTuningLevel field's value.
func (s *Vp8Settings) SetQualityTuningLevel(v string) *Vp8Settings {
	s.QualityTuningLevel = &v
	return s
}

// SetRateControlMode sets the RateControlMode field's value.
func (s *Vp8Settings) SetRateControlMode(v string) *Vp8Settings {
	s.RateControlMode = &v
	return s
}

// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value VP9.
type Vp9Settings struct {
	_ struct{} `type:"structure"`

	// Target bitrate in bits/second. For example, enter five megabits per second
	// as 5000000.
	Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"`

	// If you are using the console, use the Framerate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list or choose Custom. The framerates shown
	// in the dropdown list are decimal approximations of fractions. If you choose
	// Custom, specify your frame rate as a fraction. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Vp9FramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Vp9FramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Framerate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"`

	// GOP Length (keyframe interval) in frames. Must be greater than zero.
	// Stored as a double to allow fractional GOP lengths.
	GopSize *float64 `locationName:"gopSize" type:"double"`

	// Size of buffer (HRD buffer model) in bits. For example, enter five megabits
	// as 5000000.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Ignore this setting unless you set qualityTuningLevel to MULTI_PASS. Optional.
	// Specify the maximum bitrate in bits/second. For example, enter five megabits
	// per second as 5000000. The default behavior uses twice the target bitrate
	// as the maximum bitrate.
	MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"`

	// Optional. Specify how the service determines the pixel aspect ratio for this
	// output. The default behavior is to use the same pixel aspect ratio as your
	// input video.
	ParControl *string `locationName:"parControl" type:"string" enum:"Vp9ParControl"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parDenominator is 33.
	ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"`

	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
	// console, this corresponds to any value other than Follow source. When you
	// specify an output pixel aspect ratio (PAR) that is different from your input
	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
	// widescreen, you would specify the ratio 40:33. In this example, the value
	// for parNumerator is 40.
	ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, multi-pass encoding.
	QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Vp9QualityTuningLevel"`

	// With the VP9 codec, you can use only the variable bitrate (VBR) rate control
	// mode.
	RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Vp9RateControlMode"`
}

// String returns the string representation
func (s Vp9Settings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Vp9Settings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Vp9Settings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "Vp9Settings"} if s.Bitrate != nil && *s.Bitrate < 1000 { invalidParams.Add(request.NewErrParamMinValue("Bitrate", 1000)) } if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1)) } if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 { invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1)) } if s.MaxBitrate != nil && *s.MaxBitrate < 1000 { invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000)) } if s.ParDenominator != nil && *s.ParDenominator < 1 { invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1)) } if s.ParNumerator != nil && *s.ParNumerator < 1 { invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetBitrate sets the Bitrate field's value. func (s *Vp9Settings) SetBitrate(v int64) *Vp9Settings { s.Bitrate = &v return s } // SetFramerateControl sets the FramerateControl field's value. func (s *Vp9Settings) SetFramerateControl(v string) *Vp9Settings { s.FramerateControl = &v return s } // SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value. func (s *Vp9Settings) SetFramerateConversionAlgorithm(v string) *Vp9Settings { s.FramerateConversionAlgorithm = &v return s } // SetFramerateDenominator sets the FramerateDenominator field's value. func (s *Vp9Settings) SetFramerateDenominator(v int64) *Vp9Settings { s.FramerateDenominator = &v return s } // SetFramerateNumerator sets the FramerateNumerator field's value. func (s *Vp9Settings) SetFramerateNumerator(v int64) *Vp9Settings { s.FramerateNumerator = &v return s } // SetGopSize sets the GopSize field's value. func (s *Vp9Settings) SetGopSize(v float64) *Vp9Settings { s.GopSize = &v return s } // SetHrdBufferSize sets the HrdBufferSize field's value. 
func (s *Vp9Settings) SetHrdBufferSize(v int64) *Vp9Settings {
	s.HrdBufferSize = &v
	return s
}

// SetMaxBitrate sets the MaxBitrate field's value.
func (s *Vp9Settings) SetMaxBitrate(v int64) *Vp9Settings {
	s.MaxBitrate = &v
	return s
}

// SetParControl sets the ParControl field's value.
func (s *Vp9Settings) SetParControl(v string) *Vp9Settings {
	s.ParControl = &v
	return s
}

// SetParDenominator sets the ParDenominator field's value.
func (s *Vp9Settings) SetParDenominator(v int64) *Vp9Settings {
	s.ParDenominator = &v
	return s
}

// SetParNumerator sets the ParNumerator field's value.
func (s *Vp9Settings) SetParNumerator(v int64) *Vp9Settings {
	s.ParNumerator = &v
	return s
}

// SetQualityTuningLevel sets the QualityTuningLevel field's value.
func (s *Vp9Settings) SetQualityTuningLevel(v string) *Vp9Settings {
	s.QualityTuningLevel = &v
	return s
}

// SetRateControlMode sets the RateControlMode field's value.
func (s *Vp9Settings) SetRateControlMode(v string) *Vp9Settings {
	s.RateControlMode = &v
	return s
}

// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to
// the value WAV.
type WavSettings struct {
	_ struct{} `type:"structure"`

	// Specify Bit depth (BitDepth), in bits per sample, to choose the encoding
	// quality for this audio track.
	BitDepth *int64 `locationName:"bitDepth" min:"16" type:"integer"`

	// Specify the number of channels in this output audio track. Valid values are
	// 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64.
	Channels *int64 `locationName:"channels" min:"1" type:"integer"`

	// The service defaults to using RIFF for WAV outputs. If your output audio
	// is likely to exceed 4 GB in file size, or if you otherwise need the extended
	// support of the RF64 format, set your output WAV file format to RF64.
	Format *string `locationName:"format" type:"string" enum:"WavFormat"`

	// Sample rate in Hz.
	SampleRate *int64 `locationName:"sampleRate" min:"8000" type:"integer"`
}

// String returns the string representation
func (s WavSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s WavSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// The minimum-value checks below mirror the min constraints declared in the
// corresponding struct field tags of WavSettings.
func (s *WavSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "WavSettings"}
	if s.BitDepth != nil && *s.BitDepth < 16 {
		invalidParams.Add(request.NewErrParamMinValue("BitDepth", 16))
	}
	if s.Channels != nil && *s.Channels < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Channels", 1))
	}
	if s.SampleRate != nil && *s.SampleRate < 8000 {
		invalidParams.Add(request.NewErrParamMinValue("SampleRate", 8000))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBitDepth sets the BitDepth field's value.
func (s *WavSettings) SetBitDepth(v int64) *WavSettings {
	s.BitDepth = &v
	return s
}

// SetChannels sets the Channels field's value.
func (s *WavSettings) SetChannels(v int64) *WavSettings {
	s.Channels = &v
	return s
}

// SetFormat sets the Format field's value.
func (s *WavSettings) SetFormat(v string) *WavSettings {
	s.Format = &v
	return s
}

// SetSampleRate sets the SampleRate field's value.
func (s *WavSettings) SetSampleRate(v int64) *WavSettings {
	s.SampleRate = &v
	return s
}

// WEBVTT Destination Settings
type WebvttDestinationSettings struct {
	_ struct{} `type:"structure"`

	// Choose Enabled (ENABLED) to have MediaConvert use the font style, color,
	// and position information from the captions source in the input. Keep the
	// default value, Disabled (DISABLED), for simplified output captions.
	StylePassthrough *string `locationName:"stylePassthrough" type:"string" enum:"WebvttStylePassthrough"`
}

// String returns the string representation
func (s WebvttDestinationSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s WebvttDestinationSettings) GoString() string {
	return s.String()
}

// SetStylePassthrough sets the StylePassthrough field's value.
func (s *WebvttDestinationSettings) SetStylePassthrough(v string) *WebvttDestinationSettings {
	s.StylePassthrough = &v
	return s
}

// Settings specific to WebVTT sources in HLS alternative rendition group. Specify
// the properties (renditionGroupId, renditionName or renditionLanguageCode)
// to identify the unique subtitle track among the alternative rendition groups
// present in the HLS manifest. If no unique track is found, or multiple tracks
// match the specified properties, the job fails. If there is only one subtitle
// track in the rendition group, the settings can be left empty and the default
// subtitle track will be chosen. If your caption source is a sidecar file,
// use FileSourceSettings instead of WebvttHlsSourceSettings.
type WebvttHlsSourceSettings struct {
	_ struct{} `type:"structure"`

	// Optional. Specify alternative group ID
	RenditionGroupId *string `locationName:"renditionGroupId" type:"string"`

	// Optional. Specify ISO 639-2 or ISO 639-3 code in the language property
	RenditionLanguageCode *string `locationName:"renditionLanguageCode" type:"string" enum:"LanguageCode"`

	// Optional. Specify media name
	RenditionName *string `locationName:"renditionName" type:"string"`
}

// String returns the string representation
func (s WebvttHlsSourceSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s WebvttHlsSourceSettings) GoString() string {
	return s.String()
}

// SetRenditionGroupId sets the RenditionGroupId field's value.
func (s *WebvttHlsSourceSettings) SetRenditionGroupId(v string) *WebvttHlsSourceSettings {
	s.RenditionGroupId = &v
	return s
}

// SetRenditionLanguageCode sets the RenditionLanguageCode field's value.
func (s *WebvttHlsSourceSettings) SetRenditionLanguageCode(v string) *WebvttHlsSourceSettings {
	s.RenditionLanguageCode = &v
	return s
}

// SetRenditionName sets the RenditionName field's value.
func (s *WebvttHlsSourceSettings) SetRenditionName(v string) *WebvttHlsSourceSettings {
	s.RenditionName = &v
	return s
}

// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
// to the value XAVC_4K_INTRA_CBG.
type Xavc4kIntraCbgProfileSettings struct {
	_ struct{} `type:"structure"`

	// Specify the XAVC Intra 4k (CBG) Class to set the bitrate of your output.
	// Outputs of the same class have similar image quality over the operating points
	// that are valid for that class.
	XavcClass *string `locationName:"xavcClass" type:"string" enum:"Xavc4kIntraCbgProfileClass"`
}

// String returns the string representation
func (s Xavc4kIntraCbgProfileSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Xavc4kIntraCbgProfileSettings) GoString() string {
	return s.String()
}

// SetXavcClass sets the XavcClass field's value.
func (s *Xavc4kIntraCbgProfileSettings) SetXavcClass(v string) *Xavc4kIntraCbgProfileSettings {
	s.XavcClass = &v
	return s
}

// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
// to the value XAVC_4K_INTRA_VBR.
type Xavc4kIntraVbrProfileSettings struct {
	_ struct{} `type:"structure"`

	// Specify the XAVC Intra 4k (VBR) Class to set the bitrate of your output.
	// Outputs of the same class have similar image quality over the operating points
	// that are valid for that class.
	XavcClass *string `locationName:"xavcClass" type:"string" enum:"Xavc4kIntraVbrProfileClass"`
}

// String returns the string representation
func (s Xavc4kIntraVbrProfileSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Xavc4kIntraVbrProfileSettings) GoString() string {
	return s.String()
}

// SetXavcClass sets the XavcClass field's value.
func (s *Xavc4kIntraVbrProfileSettings) SetXavcClass(v string) *Xavc4kIntraVbrProfileSettings {
	s.XavcClass = &v
	return s
}

// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
// to the value XAVC_4K.
type Xavc4kProfileSettings struct {
	_ struct{} `type:"structure"`

	// Specify the XAVC 4k (Long GOP) Bitrate Class to set the bitrate of your output.
	// Outputs of the same class have similar image quality over the operating points
	// that are valid for that class.
	BitrateClass *string `locationName:"bitrateClass" type:"string" enum:"Xavc4kProfileBitrateClass"`

	// Specify the codec profile for this output. Choose High, 8-bit, 4:2:0 (HIGH)
	// or High, 10-bit, 4:2:2 (HIGH_422). These profiles are specified in ITU-T
	// H.264.
	CodecProfile *string `locationName:"codecProfile" type:"string" enum:"Xavc4kProfileCodecProfile"`

	// The best way to set up adaptive quantization is to keep the default value,
	// Auto (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization).
	// When you do so, MediaConvert automatically applies the best types of quantization
	// for your video content. Include this setting in your JSON job specification
	// only when you choose to change the default value for Adaptive quantization.
	// Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears
	// as a visual flicker that can arise when the encoder saves bits by copying
	// some macroblocks many times from frame to frame, and then refreshes them
	// at the I-frame. When you enable this setting, the encoder updates these macroblocks
	// slightly more often to smooth out the flicker. This setting is disabled by
	// default. Related setting: In addition to enabling this setting, you must
	// also set Adaptive quantization (adaptiveQuantization) to a value other than
	// Off (OFF) or Auto (AUTO). Use Adaptive quantization to adjust the degree
	// of smoothing that Flicker adaptive quantization provides.
	FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"XavcFlickerAdaptiveQuantization"`

	// Specify whether the encoder uses B-frames as reference frames for other pictures
	// in the same GOP. Choose Allow (ENABLED) to allow the encoder to use B-frames
	// as reference frames. Choose Don't allow (DISABLED) to prevent the encoder
	// from using B-frames as reference frames.
	GopBReference *string `locationName:"gopBReference" type:"string" enum:"XavcGopBReference"`

	// Frequency of closed GOPs. In streaming applications, it is recommended that
	// this be set to 1 so a decoder joining mid-stream will receive an IDR frame
	// as quickly as possible. Setting this value to 0 will break output segmenting.
	GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"`

	// Specify the size of the buffer that MediaConvert uses in the HRD buffer model
	// for this output. Specify this value in bits; for example, enter five megabits
	// as 5000000. When you don't set this value, or you set it to zero, MediaConvert
	// calculates the default by doubling the bitrate of this output point.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, single-pass encoding.
	QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Xavc4kProfileQualityTuningLevel"`

	// Number of slices per picture. Must be less than or equal to the number of
	// macroblock rows for progressive pictures, and less than or equal to half
	// the number of macroblock rows for interlaced pictures.
	Slices *int64 `locationName:"slices" min:"8" type:"integer"`
}

// String returns the string representation
func (s Xavc4kProfileSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Xavc4kProfileSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// The minimum-value check below mirrors the min constraint declared in the
// Slices field tag of Xavc4kProfileSettings.
func (s *Xavc4kProfileSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Xavc4kProfileSettings"}
	if s.Slices != nil && *s.Slices < 8 {
		invalidParams.Add(request.NewErrParamMinValue("Slices", 8))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBitrateClass sets the BitrateClass field's value.
func (s *Xavc4kProfileSettings) SetBitrateClass(v string) *Xavc4kProfileSettings {
	s.BitrateClass = &v
	return s
}

// SetCodecProfile sets the CodecProfile field's value.
func (s *Xavc4kProfileSettings) SetCodecProfile(v string) *Xavc4kProfileSettings {
	s.CodecProfile = &v
	return s
}

// SetFlickerAdaptiveQuantization sets the FlickerAdaptiveQuantization field's value.
func (s *Xavc4kProfileSettings) SetFlickerAdaptiveQuantization(v string) *Xavc4kProfileSettings {
	s.FlickerAdaptiveQuantization = &v
	return s
}

// SetGopBReference sets the GopBReference field's value.
func (s *Xavc4kProfileSettings) SetGopBReference(v string) *Xavc4kProfileSettings {
	s.GopBReference = &v
	return s
}

// SetGopClosedCadence sets the GopClosedCadence field's value.
func (s *Xavc4kProfileSettings) SetGopClosedCadence(v int64) *Xavc4kProfileSettings {
	s.GopClosedCadence = &v
	return s
}

// SetHrdBufferSize sets the HrdBufferSize field's value.
func (s *Xavc4kProfileSettings) SetHrdBufferSize(v int64) *Xavc4kProfileSettings {
	s.HrdBufferSize = &v
	return s
}

// SetQualityTuningLevel sets the QualityTuningLevel field's value.
func (s *Xavc4kProfileSettings) SetQualityTuningLevel(v string) *Xavc4kProfileSettings {
	s.QualityTuningLevel = &v
	return s
}

// SetSlices sets the Slices field's value.
func (s *Xavc4kProfileSettings) SetSlices(v int64) *Xavc4kProfileSettings {
	s.Slices = &v
	return s
}

// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
// to the value XAVC_HD_INTRA_CBG.
type XavcHdIntraCbgProfileSettings struct {
	_ struct{} `type:"structure"`

	// Specify the XAVC Intra HD (CBG) Class to set the bitrate of your output.
	// Outputs of the same class have similar image quality over the operating points
	// that are valid for that class.
	XavcClass *string `locationName:"xavcClass" type:"string" enum:"XavcHdIntraCbgProfileClass"`
}

// String returns the string representation
func (s XavcHdIntraCbgProfileSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s XavcHdIntraCbgProfileSettings) GoString() string {
	return s.String()
}

// SetXavcClass sets the XavcClass field's value.
func (s *XavcHdIntraCbgProfileSettings) SetXavcClass(v string) *XavcHdIntraCbgProfileSettings {
	s.XavcClass = &v
	return s
}

// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
// to the value XAVC_HD.
type XavcHdProfileSettings struct {
	_ struct{} `type:"structure"`

	// Specify the XAVC HD (Long GOP) Bitrate Class to set the bitrate of your output.
	// Outputs of the same class have similar image quality over the operating points
	// that are valid for that class.
	BitrateClass *string `locationName:"bitrateClass" type:"string" enum:"XavcHdProfileBitrateClass"`

	// The best way to set up adaptive quantization is to keep the default value,
	// Auto (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization).
	// When you do so, MediaConvert automatically applies the best types of quantization
	// for your video content. Include this setting in your JSON job specification
	// only when you choose to change the default value for Adaptive quantization.
	// Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears
	// as a visual flicker that can arise when the encoder saves bits by copying
	// some macroblocks many times from frame to frame, and then refreshes them
	// at the I-frame. When you enable this setting, the encoder updates these macroblocks
	// slightly more often to smooth out the flicker. This setting is disabled by
	// default. Related setting: In addition to enabling this setting, you must
	// also set Adaptive quantization (adaptiveQuantization) to a value other than
	// Off (OFF) or Auto (AUTO). Use Adaptive quantization to adjust the degree
	// of smoothing that Flicker adaptive quantization provides.
	FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"XavcFlickerAdaptiveQuantization"`

	// Specify whether the encoder uses B-frames as reference frames for other pictures
	// in the same GOP. Choose Allow (ENABLED) to allow the encoder to use B-frames
	// as reference frames. Choose Don't allow (DISABLED) to prevent the encoder
	// from using B-frames as reference frames.
	GopBReference *string `locationName:"gopBReference" type:"string" enum:"XavcGopBReference"`

	// Frequency of closed GOPs. In streaming applications, it is recommended that
	// this be set to 1 so a decoder joining mid-stream will receive an IDR frame
	// as quickly as possible. Setting this value to 0 will break output segmenting.
	GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"`

	// Specify the size of the buffer that MediaConvert uses in the HRD buffer model
	// for this output. Specify this value in bits; for example, enter five megabits
	// as 5000000. When you don't set this value, or you set it to zero, MediaConvert
	// calculates the default by doubling the bitrate of this output point.
	HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"`

	// Choose the scan line type for the output. Keep the default value, Progressive
	// (PROGRESSIVE) to create a progressive output, regardless of the scan type
	// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
	// to create an output that's interlaced with the same field polarity throughout.
	// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
	// to produce outputs with the same field polarity as the source. For jobs that
	// have multiple inputs, the output field polarity might change over the course
	// of the output. Follow behavior depends on the input scan type. If the source
	// is interlaced, the output will be interlaced with the same polarity as the
	// source. If the source is progressive, the output will be interlaced with
	// top field bottom field first, depending on which of the Follow options you
	// choose.
	InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"XavcInterlaceMode"`

	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
	// want to trade off encoding speed for output video quality. The default behavior
	// is faster, lower quality, single-pass encoding.
	QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"XavcHdProfileQualityTuningLevel"`

	// Number of slices per picture. Must be less than or equal to the number of
	// macroblock rows for progressive pictures, and less than or equal to half
	// the number of macroblock rows for interlaced pictures.
	Slices *int64 `locationName:"slices" min:"4" type:"integer"`

	// Ignore this setting unless you set Frame rate (framerateNumerator divided
	// by framerateDenominator) to 29.970. If your input framerate is 23.976, choose
	// Hard (HARD). Otherwise, keep the default value None (NONE). For more information,
	// see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html.
	Telecine *string `locationName:"telecine" type:"string" enum:"XavcHdProfileTelecine"`
}

// String returns the string representation
func (s XavcHdProfileSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s XavcHdProfileSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// The minimum-value check below mirrors the min constraint declared in the
// Slices field tag of XavcHdProfileSettings.
func (s *XavcHdProfileSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "XavcHdProfileSettings"}
	if s.Slices != nil && *s.Slices < 4 {
		invalidParams.Add(request.NewErrParamMinValue("Slices", 4))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBitrateClass sets the BitrateClass field's value.
func (s *XavcHdProfileSettings) SetBitrateClass(v string) *XavcHdProfileSettings {
	s.BitrateClass = &v
	return s
}

// SetFlickerAdaptiveQuantization sets the FlickerAdaptiveQuantization field's value.
func (s *XavcHdProfileSettings) SetFlickerAdaptiveQuantization(v string) *XavcHdProfileSettings {
	s.FlickerAdaptiveQuantization = &v
	return s
}

// SetGopBReference sets the GopBReference field's value.
func (s *XavcHdProfileSettings) SetGopBReference(v string) *XavcHdProfileSettings {
	s.GopBReference = &v
	return s
}

// SetGopClosedCadence sets the GopClosedCadence field's value.
func (s *XavcHdProfileSettings) SetGopClosedCadence(v int64) *XavcHdProfileSettings {
	s.GopClosedCadence = &v
	return s
}

// SetHrdBufferSize sets the HrdBufferSize field's value.
func (s *XavcHdProfileSettings) SetHrdBufferSize(v int64) *XavcHdProfileSettings {
	s.HrdBufferSize = &v
	return s
}

// SetInterlaceMode sets the InterlaceMode field's value.
func (s *XavcHdProfileSettings) SetInterlaceMode(v string) *XavcHdProfileSettings {
	s.InterlaceMode = &v
	return s
}

// SetQualityTuningLevel sets the QualityTuningLevel field's value.
func (s *XavcHdProfileSettings) SetQualityTuningLevel(v string) *XavcHdProfileSettings {
	s.QualityTuningLevel = &v
	return s
}

// SetSlices sets the Slices field's value.
func (s *XavcHdProfileSettings) SetSlices(v int64) *XavcHdProfileSettings {
	s.Slices = &v
	return s
}

// SetTelecine sets the Telecine field's value.
func (s *XavcHdProfileSettings) SetTelecine(v string) *XavcHdProfileSettings {
	s.Telecine = &v
	return s
}

// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to
// the value XAVC.
type XavcSettings struct {
	_ struct{} `type:"structure"`

	// Keep the default value, Auto (AUTO), for this setting to have MediaConvert
	// automatically apply the best types of quantization for your video content.
	// When you want to apply your quantization settings manually, you must set
	// Adaptive quantization (adaptiveQuantization) to a value other than Auto (AUTO).
	// Use this setting to specify the strength of any adaptive quantization filters
	// that you enable. If you don't want MediaConvert to do any adaptive quantization
	// in this transcode, set Adaptive quantization to Off (OFF). Related settings:
	// The value that you choose here applies to the following settings: Flicker
	// adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization
	// (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization).
	AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"XavcAdaptiveQuantization"`

	// Optional. Choose a specific entropy encoding mode only when you want to override
	// XAVC recommendations. If you choose the value auto, MediaConvert uses the
	// mode that the XAVC file format specifies given this output's operating point.
	EntropyEncoding *string `locationName:"entropyEncoding" type:"string" enum:"XavcEntropyEncoding"`

	// If you are using the console, use the Frame rate setting to specify the frame
	// rate for this output. If you want to keep the same frame rate as the input
	// video, choose Follow source. If you want to do frame rate conversion, choose
	// a frame rate from the dropdown list. The framerates shown in the dropdown
	// list are decimal approximations of fractions. If you are creating your transcoding
	// job specification as a JSON file without the console, use FramerateControl
	// to specify which value the service uses for the frame rate for this output.
	// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
	// from the input. Choose SPECIFIED if you want the service to use the frame
	// rate that you specify in the settings FramerateNumerator and FramerateDenominator.
	FramerateControl *string `locationName:"framerateControl" type:"string" enum:"XavcFramerateControl"`

	// Choose the method that you want MediaConvert to use when increasing or decreasing
	// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
	// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
	// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
	// smooth picture, but might introduce undesirable video artifacts. For complex
	// frame rate conversions, especially if your source video has already been
	// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
	// motion-compensated interpolation. FrameFormer chooses the best conversion
	// method frame by frame. Note that using FrameFormer increases the transcoding
	// time and incurs a significant add-on cost.
	FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"XavcFramerateConversionAlgorithm"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateDenominator to specify the denominator of this fraction. In this
	// example, use 1001 for the value of FramerateDenominator. When you use the
	// console for transcode jobs that use frame rate conversion, provide the value
	// as a decimal number for Frame rate. In this example, specify 23.976.
	FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"`

	// When you use the API for transcode jobs that use frame rate conversion, specify
	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
	// FramerateNumerator to specify the numerator of this fraction. In this example,
	// use 24000 for the value of FramerateNumerator. When you use the console for
	// transcode jobs that use frame rate conversion, provide the value as a decimal
	// number for Framerate. In this example, specify 23.976.
	FramerateNumerator *int64 `locationName:"framerateNumerator" min:"24" type:"integer"`

	// Specify the XAVC profile for this output. For more information, see the Sony
	// documentation at https://www.xavc-info.org/. Note that MediaConvert doesn't
	// support the interlaced video XAVC operating points for XAVC_HD_INTRA_CBG.
	// To create an interlaced XAVC output, choose the profile XAVC_HD.
	Profile *string `locationName:"profile" type:"string" enum:"XavcProfile"`

	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
	// second (fps). Enable slow PAL to create a 25 fps output by relabeling the
	// video frames and resampling your audio. Note that enabling this setting will
	// slightly reduce the duration of your video. Related settings: You must also
	// set Frame rate to 25. In your JSON job specification, set (framerateControl)
	// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
	// 1.
	SlowPal *string `locationName:"slowPal" type:"string" enum:"XavcSlowPal"`

	// Ignore this setting unless your downstream workflow requires that you specify
	// it explicitly. Otherwise, we recommend that you adjust the softness of your
	// output by using a lower value for the setting Sharpness (sharpness) or by
	// enabling a noise reducer filter (noiseReducerFilter). The Softness (softness)
	// setting specifies the quantization matrices that the encoder uses. Keep the
	// default value, 0, for flat quantization. Choose the value 1 or 16 to use
	// the default JVT softening quantization matrices from the H.264 specification.
	// Choose a value from 17 to 128 to use planar interpolation. Increasing values
	// from 17 to 128 result in increasing reduction of high-frequency data. The
	// value 128 results in the softest video.
	Softness *int64 `locationName:"softness" type:"integer"`

	// The best way to set up adaptive quantization is to keep the default value,
	// Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization).
	// When you do so, MediaConvert automatically applies the best types of quantization
	// for your video content. Include this setting in your JSON job specification
	// only when you choose to change the default value for Adaptive quantization.
	// For this setting, keep the default value, Enabled (ENABLED), to adjust quantization
	// within each frame based on spatial variation of content complexity. When
	// you enable this feature, the encoder uses fewer bits on areas that can sustain
	// more distortion with no noticeable visual degradation and uses more bits
	// on areas where any small distortion will be noticeable. For example, complex
	// textured blocks are encoded with fewer bits and smooth textured blocks are
	// encoded with more bits. Enabling this feature will almost always improve
	// your video quality. Note, though, that this feature doesn't take into account
	// where the viewer's attention is likely to be. If viewers are likely to be
	// focusing their attention on a part of the screen with a lot of complex texture,
	// you might choose to disable this feature. Related setting: When you enable
	// spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization)
	// depending on your content. For homogeneous content, such as cartoons and
	// video games, set it to Low. For content with a wider variety of textures,
	// set it to High or Higher.
	SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"XavcSpatialAdaptiveQuantization"`

	// The best way to set up adaptive quantization is to keep the default value,
	// Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization).
	// When you do so, MediaConvert automatically applies the best types of quantization
	// for your video content. Include this setting in your JSON job specification
	// only when you choose to change the default value for Adaptive quantization.
	// For this setting, keep the default value, Enabled (ENABLED), to adjust quantization
	// within each frame based on temporal variation of content complexity. When
	// you enable this feature, the encoder uses fewer bits on areas of the frame
	// that aren't moving and uses more bits on complex objects with sharp edges
	// that move a lot. For example, this feature improves the readability of text
	// tickers on newscasts and scoreboards on sports matches. Enabling this feature
	// will almost always improve your video quality. Note, though, that this feature
	// doesn't take into account where the viewer's attention is likely to be. If
	// viewers are likely to be focusing their attention on a part of the screen
	// that doesn't have moving objects with sharp edges, such as sports athletes'
	// faces, you might choose to disable this feature. Related setting: When you
	// enable temporal adaptive quantization, adjust the strength of the filter
	// with the setting Adaptive quantization (adaptiveQuantization).
	TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"XavcTemporalAdaptiveQuantization"`

	// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
	// to the value XAVC_4K_INTRA_CBG.
	Xavc4kIntraCbgProfileSettings *Xavc4kIntraCbgProfileSettings `locationName:"xavc4kIntraCbgProfileSettings" type:"structure"`

	// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
	// to the value XAVC_4K_INTRA_VBR.
	Xavc4kIntraVbrProfileSettings *Xavc4kIntraVbrProfileSettings `locationName:"xavc4kIntraVbrProfileSettings" type:"structure"`

	// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
	// to the value XAVC_4K.
	Xavc4kProfileSettings *Xavc4kProfileSettings `locationName:"xavc4kProfileSettings" type:"structure"`

	// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
	// to the value XAVC_HD_INTRA_CBG.
	XavcHdIntraCbgProfileSettings *XavcHdIntraCbgProfileSettings `locationName:"xavcHdIntraCbgProfileSettings" type:"structure"`

	// Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings)
	// to the value XAVC_HD.
	XavcHdProfileSettings *XavcHdProfileSettings `locationName:"xavcHdProfileSettings" type:"structure"`
}

// String returns the string representation
func (s XavcSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s XavcSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *XavcSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "XavcSettings"} if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1)) } if s.FramerateNumerator != nil && *s.FramerateNumerator < 24 { invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 24)) } if s.Xavc4kProfileSettings != nil { if err := s.Xavc4kProfileSettings.Validate(); err != nil { invalidParams.AddNested("Xavc4kProfileSettings", err.(request.ErrInvalidParams)) } } if s.XavcHdProfileSettings != nil { if err := s.XavcHdProfileSettings.Validate(); err != nil { invalidParams.AddNested("XavcHdProfileSettings", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAdaptiveQuantization sets the AdaptiveQuantization field's value. func (s *XavcSettings) SetAdaptiveQuantization(v string) *XavcSettings { s.AdaptiveQuantization = &v return s } // SetEntropyEncoding sets the EntropyEncoding field's value. func (s *XavcSettings) SetEntropyEncoding(v string) *XavcSettings { s.EntropyEncoding = &v return s } // SetFramerateControl sets the FramerateControl field's value. func (s *XavcSettings) SetFramerateControl(v string) *XavcSettings { s.FramerateControl = &v return s } // SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value. func (s *XavcSettings) SetFramerateConversionAlgorithm(v string) *XavcSettings { s.FramerateConversionAlgorithm = &v return s } // SetFramerateDenominator sets the FramerateDenominator field's value. func (s *XavcSettings) SetFramerateDenominator(v int64) *XavcSettings { s.FramerateDenominator = &v return s } // SetFramerateNumerator sets the FramerateNumerator field's value. func (s *XavcSettings) SetFramerateNumerator(v int64) *XavcSettings { s.FramerateNumerator = &v return s } // SetProfile sets the Profile field's value. 
func (s *XavcSettings) SetProfile(v string) *XavcSettings { s.Profile = &v return s } // SetSlowPal sets the SlowPal field's value. func (s *XavcSettings) SetSlowPal(v string) *XavcSettings { s.SlowPal = &v return s } // SetSoftness sets the Softness field's value. func (s *XavcSettings) SetSoftness(v int64) *XavcSettings { s.Softness = &v return s } // SetSpatialAdaptiveQuantization sets the SpatialAdaptiveQuantization field's value. func (s *XavcSettings) SetSpatialAdaptiveQuantization(v string) *XavcSettings { s.SpatialAdaptiveQuantization = &v return s } // SetTemporalAdaptiveQuantization sets the TemporalAdaptiveQuantization field's value. func (s *XavcSettings) SetTemporalAdaptiveQuantization(v string) *XavcSettings { s.TemporalAdaptiveQuantization = &v return s } // SetXavc4kIntraCbgProfileSettings sets the Xavc4kIntraCbgProfileSettings field's value. func (s *XavcSettings) SetXavc4kIntraCbgProfileSettings(v *Xavc4kIntraCbgProfileSettings) *XavcSettings { s.Xavc4kIntraCbgProfileSettings = v return s } // SetXavc4kIntraVbrProfileSettings sets the Xavc4kIntraVbrProfileSettings field's value. func (s *XavcSettings) SetXavc4kIntraVbrProfileSettings(v *Xavc4kIntraVbrProfileSettings) *XavcSettings { s.Xavc4kIntraVbrProfileSettings = v return s } // SetXavc4kProfileSettings sets the Xavc4kProfileSettings field's value. func (s *XavcSettings) SetXavc4kProfileSettings(v *Xavc4kProfileSettings) *XavcSettings { s.Xavc4kProfileSettings = v return s } // SetXavcHdIntraCbgProfileSettings sets the XavcHdIntraCbgProfileSettings field's value. func (s *XavcSettings) SetXavcHdIntraCbgProfileSettings(v *XavcHdIntraCbgProfileSettings) *XavcSettings { s.XavcHdIntraCbgProfileSettings = v return s } // SetXavcHdProfileSettings sets the XavcHdProfileSettings field's value. 
func (s *XavcSettings) SetXavcHdProfileSettings(v *XavcHdProfileSettings) *XavcSettings { s.XavcHdProfileSettings = v return s } // Choose BROADCASTER_MIXED_AD when the input contains pre-mixed main audio // + audio description (AD) as a stereo pair. The value for AudioType will be // set to 3, which signals to downstream systems that this stream contains "broadcaster // mixed AD". Note that the input received by the encoder must contain pre-mixed // audio; the encoder does not perform the mixing. When you choose BROADCASTER_MIXED_AD, // the encoder ignores any values you provide in AudioType and FollowInputAudioType. // Choose NORMAL when the input does not contain pre-mixed audio + audio description // (AD). In this case, the encoder will use any values you provide for AudioType // and FollowInputAudioType. const ( // AacAudioDescriptionBroadcasterMixBroadcasterMixedAd is a AacAudioDescriptionBroadcasterMix enum value AacAudioDescriptionBroadcasterMixBroadcasterMixedAd = "BROADCASTER_MIXED_AD" // AacAudioDescriptionBroadcasterMixNormal is a AacAudioDescriptionBroadcasterMix enum value AacAudioDescriptionBroadcasterMixNormal = "NORMAL" ) // AacAudioDescriptionBroadcasterMix_Values returns all elements of the AacAudioDescriptionBroadcasterMix enum func AacAudioDescriptionBroadcasterMix_Values() []string { return []string{ AacAudioDescriptionBroadcasterMixBroadcasterMixedAd, AacAudioDescriptionBroadcasterMixNormal, } } // AAC Profile. const ( // AacCodecProfileLc is a AacCodecProfile enum value AacCodecProfileLc = "LC" // AacCodecProfileHev1 is a AacCodecProfile enum value AacCodecProfileHev1 = "HEV1" // AacCodecProfileHev2 is a AacCodecProfile enum value AacCodecProfileHev2 = "HEV2" ) // AacCodecProfile_Values returns all elements of the AacCodecProfile enum func AacCodecProfile_Values() []string { return []string{ AacCodecProfileLc, AacCodecProfileHev1, AacCodecProfileHev2, } } // Mono (Audio Description), Mono, Stereo, or 5.1 channel layout. 
Valid values // depend on rate control mode and profile. "1.0 - Audio Description (Receiver // Mix)" setting receives a stereo description plus control track and emits // a mono AAC encode of the description track, with control data emitted in // the PES header as per ETSI TS 101 154 Annex E. const ( // AacCodingModeAdReceiverMix is a AacCodingMode enum value AacCodingModeAdReceiverMix = "AD_RECEIVER_MIX" // AacCodingModeCodingMode10 is a AacCodingMode enum value AacCodingModeCodingMode10 = "CODING_MODE_1_0" // AacCodingModeCodingMode11 is a AacCodingMode enum value AacCodingModeCodingMode11 = "CODING_MODE_1_1" // AacCodingModeCodingMode20 is a AacCodingMode enum value AacCodingModeCodingMode20 = "CODING_MODE_2_0" // AacCodingModeCodingMode51 is a AacCodingMode enum value AacCodingModeCodingMode51 = "CODING_MODE_5_1" ) // AacCodingMode_Values returns all elements of the AacCodingMode enum func AacCodingMode_Values() []string { return []string{ AacCodingModeAdReceiverMix, AacCodingModeCodingMode10, AacCodingModeCodingMode11, AacCodingModeCodingMode20, AacCodingModeCodingMode51, } } // Rate Control Mode. const ( // AacRateControlModeCbr is a AacRateControlMode enum value AacRateControlModeCbr = "CBR" // AacRateControlModeVbr is a AacRateControlMode enum value AacRateControlModeVbr = "VBR" ) // AacRateControlMode_Values returns all elements of the AacRateControlMode enum func AacRateControlMode_Values() []string { return []string{ AacRateControlModeCbr, AacRateControlModeVbr, } } // Enables LATM/LOAS AAC output. Note that if you use LATM/LOAS AAC in an output, // you must choose "No container" for the output container. 
const (
	// AacRawFormatLatmLoas is a AacRawFormat enum value
	AacRawFormatLatmLoas = "LATM_LOAS"

	// AacRawFormatNone is a AacRawFormat enum value
	AacRawFormatNone = "NONE"
)

// AacRawFormat_Values returns all elements of the AacRawFormat enum.
func AacRawFormat_Values() []string {
	values := []string{
		AacRawFormatLatmLoas,
		AacRawFormatNone,
	}
	return values
}

// Use MPEG-2 AAC instead of MPEG-4 AAC audio for raw or MPEG-2 Transport Stream
// containers.
const (
	// AacSpecificationMpeg2 is a AacSpecification enum value
	AacSpecificationMpeg2 = "MPEG2"

	// AacSpecificationMpeg4 is a AacSpecification enum value
	AacSpecificationMpeg4 = "MPEG4"
)

// AacSpecification_Values returns all elements of the AacSpecification enum.
func AacSpecification_Values() []string {
	values := []string{
		AacSpecificationMpeg2,
		AacSpecificationMpeg4,
	}
	return values
}

// VBR Quality Level - Only used if rate_control_mode is VBR.
const (
	// AacVbrQualityLow is a AacVbrQuality enum value
	AacVbrQualityLow = "LOW"

	// AacVbrQualityMediumLow is a AacVbrQuality enum value
	AacVbrQualityMediumLow = "MEDIUM_LOW"

	// AacVbrQualityMediumHigh is a AacVbrQuality enum value
	AacVbrQualityMediumHigh = "MEDIUM_HIGH"

	// AacVbrQualityHigh is a AacVbrQuality enum value
	AacVbrQualityHigh = "HIGH"
)

// AacVbrQuality_Values returns all elements of the AacVbrQuality enum.
func AacVbrQuality_Values() []string {
	values := []string{
		AacVbrQualityLow,
		AacVbrQualityMediumLow,
		AacVbrQualityMediumHigh,
		AacVbrQualityHigh,
	}
	return values
}

// Specify the bitstream mode for the AC-3 stream that the encoder emits. For
// more information about the AC3 bitstream mode, see ATSC A/52-2012 (Annex
// E).
const (
	// Ac3BitstreamModeCompleteMain is a Ac3BitstreamMode enum value
	Ac3BitstreamModeCompleteMain = "COMPLETE_MAIN"

	// Ac3BitstreamModeCommentary is a Ac3BitstreamMode enum value
	Ac3BitstreamModeCommentary = "COMMENTARY"

	// Ac3BitstreamModeDialogue is a Ac3BitstreamMode enum value
	Ac3BitstreamModeDialogue = "DIALOGUE"

	// Ac3BitstreamModeEmergency is a Ac3BitstreamMode enum value
	Ac3BitstreamModeEmergency = "EMERGENCY"

	// Ac3BitstreamModeHearingImpaired is a Ac3BitstreamMode enum value
	Ac3BitstreamModeHearingImpaired = "HEARING_IMPAIRED"

	// Ac3BitstreamModeMusicAndEffects is a Ac3BitstreamMode enum value
	Ac3BitstreamModeMusicAndEffects = "MUSIC_AND_EFFECTS"

	// Ac3BitstreamModeVisuallyImpaired is a Ac3BitstreamMode enum value
	Ac3BitstreamModeVisuallyImpaired = "VISUALLY_IMPAIRED"

	// Ac3BitstreamModeVoiceOver is a Ac3BitstreamMode enum value
	Ac3BitstreamModeVoiceOver = "VOICE_OVER"
)

// Ac3BitstreamMode_Values returns all elements of the Ac3BitstreamMode enum.
func Ac3BitstreamMode_Values() []string {
	values := []string{
		Ac3BitstreamModeCompleteMain,
		Ac3BitstreamModeCommentary,
		Ac3BitstreamModeDialogue,
		Ac3BitstreamModeEmergency,
		Ac3BitstreamModeHearingImpaired,
		Ac3BitstreamModeMusicAndEffects,
		Ac3BitstreamModeVisuallyImpaired,
		Ac3BitstreamModeVoiceOver,
	}
	return values
}

// Dolby Digital coding mode. Determines number of channels.
const (
	// Ac3CodingModeCodingMode10 is a Ac3CodingMode enum value
	Ac3CodingModeCodingMode10 = "CODING_MODE_1_0"

	// Ac3CodingModeCodingMode11 is a Ac3CodingMode enum value
	Ac3CodingModeCodingMode11 = "CODING_MODE_1_1"

	// Ac3CodingModeCodingMode20 is a Ac3CodingMode enum value
	Ac3CodingModeCodingMode20 = "CODING_MODE_2_0"

	// Ac3CodingModeCodingMode32Lfe is a Ac3CodingMode enum value
	Ac3CodingModeCodingMode32Lfe = "CODING_MODE_3_2_LFE"
)

// Ac3CodingMode_Values returns all elements of the Ac3CodingMode enum.
func Ac3CodingMode_Values() []string {
	values := []string{
		Ac3CodingModeCodingMode10,
		Ac3CodingModeCodingMode11,
		Ac3CodingModeCodingMode20,
		Ac3CodingModeCodingMode32Lfe,
	}
	return values
}

// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
// uses when encoding the metadata in the Dolby Digital stream for the line
// operating mode. Related setting: When you use this setting, MediaConvert
// ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile).
// For information about the Dolby Digital DRC operating modes and profiles,
// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
const (
	// Ac3DynamicRangeCompressionLineFilmStandard is a Ac3DynamicRangeCompressionLine enum value
	Ac3DynamicRangeCompressionLineFilmStandard = "FILM_STANDARD"

	// Ac3DynamicRangeCompressionLineFilmLight is a Ac3DynamicRangeCompressionLine enum value
	Ac3DynamicRangeCompressionLineFilmLight = "FILM_LIGHT"

	// Ac3DynamicRangeCompressionLineMusicStandard is a Ac3DynamicRangeCompressionLine enum value
	Ac3DynamicRangeCompressionLineMusicStandard = "MUSIC_STANDARD"

	// Ac3DynamicRangeCompressionLineMusicLight is a Ac3DynamicRangeCompressionLine enum value
	Ac3DynamicRangeCompressionLineMusicLight = "MUSIC_LIGHT"

	// Ac3DynamicRangeCompressionLineSpeech is a Ac3DynamicRangeCompressionLine enum value
	Ac3DynamicRangeCompressionLineSpeech = "SPEECH"

	// Ac3DynamicRangeCompressionLineNone is a Ac3DynamicRangeCompressionLine enum value
	Ac3DynamicRangeCompressionLineNone = "NONE"
)

// Ac3DynamicRangeCompressionLine_Values returns all elements of the Ac3DynamicRangeCompressionLine enum.
func Ac3DynamicRangeCompressionLine_Values() []string {
	values := []string{
		Ac3DynamicRangeCompressionLineFilmStandard,
		Ac3DynamicRangeCompressionLineFilmLight,
		Ac3DynamicRangeCompressionLineMusicStandard,
		Ac3DynamicRangeCompressionLineMusicLight,
		Ac3DynamicRangeCompressionLineSpeech,
		Ac3DynamicRangeCompressionLineNone,
	}
	return values
}

// When you want to add Dolby dynamic range compression (DRC) signaling to your
// output stream, we recommend that you use the mode-specific settings instead
// of Dynamic range compression profile (DynamicRangeCompressionProfile). The
// mode-specific settings are Dynamic range compression profile, line mode (dynamicRangeCompressionLine)
// and Dynamic range compression profile, RF mode (dynamicRangeCompressionRf).
// Note that when you specify values for all three settings, MediaConvert ignores
// the value of this setting in favor of the mode-specific settings. If you
// do use this setting instead of the mode-specific settings, choose None (NONE)
// to leave out DRC signaling. Keep the default Film standard (FILM_STANDARD)
// to set the profile to Dolby's film standard profile for all operating modes.
const (
	// Ac3DynamicRangeCompressionProfileFilmStandard is a Ac3DynamicRangeCompressionProfile enum value
	Ac3DynamicRangeCompressionProfileFilmStandard = "FILM_STANDARD"

	// Ac3DynamicRangeCompressionProfileNone is a Ac3DynamicRangeCompressionProfile enum value
	Ac3DynamicRangeCompressionProfileNone = "NONE"
)

// Ac3DynamicRangeCompressionProfile_Values returns all elements of the Ac3DynamicRangeCompressionProfile enum.
func Ac3DynamicRangeCompressionProfile_Values() []string {
	values := []string{
		Ac3DynamicRangeCompressionProfileFilmStandard,
		Ac3DynamicRangeCompressionProfileNone,
	}
	return values
}

// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
// uses when encoding the metadata in the Dolby Digital stream for the RF operating
// mode. Related setting: When you use this setting, MediaConvert ignores any
// value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile).
// For information about the Dolby Digital DRC operating modes and profiles,
// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
const (
	// Ac3DynamicRangeCompressionRfFilmStandard is a Ac3DynamicRangeCompressionRf enum value
	Ac3DynamicRangeCompressionRfFilmStandard = "FILM_STANDARD"

	// Ac3DynamicRangeCompressionRfFilmLight is a Ac3DynamicRangeCompressionRf enum value
	Ac3DynamicRangeCompressionRfFilmLight = "FILM_LIGHT"

	// Ac3DynamicRangeCompressionRfMusicStandard is a Ac3DynamicRangeCompressionRf enum value
	Ac3DynamicRangeCompressionRfMusicStandard = "MUSIC_STANDARD"

	// Ac3DynamicRangeCompressionRfMusicLight is a Ac3DynamicRangeCompressionRf enum value
	Ac3DynamicRangeCompressionRfMusicLight = "MUSIC_LIGHT"

	// Ac3DynamicRangeCompressionRfSpeech is a Ac3DynamicRangeCompressionRf enum value
	Ac3DynamicRangeCompressionRfSpeech = "SPEECH"

	// Ac3DynamicRangeCompressionRfNone is a Ac3DynamicRangeCompressionRf enum value
	Ac3DynamicRangeCompressionRfNone = "NONE"
)

// Ac3DynamicRangeCompressionRf_Values returns all elements of the Ac3DynamicRangeCompressionRf enum.
func Ac3DynamicRangeCompressionRf_Values() []string {
	values := []string{
		Ac3DynamicRangeCompressionRfFilmStandard,
		Ac3DynamicRangeCompressionRfFilmLight,
		Ac3DynamicRangeCompressionRfMusicStandard,
		Ac3DynamicRangeCompressionRfMusicLight,
		Ac3DynamicRangeCompressionRfSpeech,
		Ac3DynamicRangeCompressionRfNone,
	}
	return values
}

// Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only
// valid with 3_2_LFE coding mode.
const (
	// Ac3LfeFilterEnabled is a Ac3LfeFilter enum value
	Ac3LfeFilterEnabled = "ENABLED"

	// Ac3LfeFilterDisabled is a Ac3LfeFilter enum value
	Ac3LfeFilterDisabled = "DISABLED"
)

// Ac3LfeFilter_Values returns all elements of the Ac3LfeFilter enum.
func Ac3LfeFilter_Values() []string {
	values := []string{
		Ac3LfeFilterEnabled,
		Ac3LfeFilterDisabled,
	}
	return values
}

// When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+,
// or DolbyE decoder that supplied this audio data. If audio was not supplied
// from one of these streams, then the static metadata settings will be used.
const (
	// Ac3MetadataControlFollowInput is a Ac3MetadataControl enum value
	Ac3MetadataControlFollowInput = "FOLLOW_INPUT"

	// Ac3MetadataControlUseConfigured is a Ac3MetadataControl enum value
	Ac3MetadataControlUseConfigured = "USE_CONFIGURED"
)

// Ac3MetadataControl_Values returns all elements of the Ac3MetadataControl enum.
func Ac3MetadataControl_Values() []string {
	values := []string{
		Ac3MetadataControlFollowInput,
		Ac3MetadataControlUseConfigured,
	}
	return values
}

// Specify whether the service runs your job with accelerated transcoding. Choose
// DISABLED if you don't want accelerated transcoding. Choose ENABLED if you
// want your job to run with accelerated transcoding and to fail if your input
// files or your job settings aren't compatible with accelerated transcoding.
// Choose PREFERRED if you want your job to run with accelerated transcoding
// if the job is compatible with the feature and to run at standard speed if
// it's not.
const (
	// AccelerationModeDisabled is a AccelerationMode enum value
	AccelerationModeDisabled = "DISABLED"

	// AccelerationModeEnabled is a AccelerationMode enum value
	AccelerationModeEnabled = "ENABLED"

	// AccelerationModePreferred is a AccelerationMode enum value
	AccelerationModePreferred = "PREFERRED"
)

// AccelerationMode_Values returns all elements of the AccelerationMode enum.
func AccelerationMode_Values() []string {
	values := []string{
		AccelerationModeDisabled,
		AccelerationModeEnabled,
		AccelerationModePreferred,
	}
	return values
}

// Describes whether the current job is running with accelerated transcoding.
// For jobs that have Acceleration (AccelerationMode) set to DISABLED, AccelerationStatus
// is always NOT_APPLICABLE. For jobs that have Acceleration (AccelerationMode)
// set to ENABLED or PREFERRED, AccelerationStatus is one of the other states.
// AccelerationStatus is IN_PROGRESS initially, while the service determines
// whether the input files and job settings are compatible with accelerated
// transcoding. If they are, AccelerationStatus is ACCELERATED. If your input
// files and job settings aren't compatible with accelerated transcoding, the
// service either fails your job or runs it without accelerated transcoding,
// depending on how you set Acceleration (AccelerationMode). When the service
// runs your job without accelerated transcoding, AccelerationStatus is NOT_ACCELERATED.
const (
	// AccelerationStatusNotApplicable is a AccelerationStatus enum value
	AccelerationStatusNotApplicable = "NOT_APPLICABLE"

	// AccelerationStatusInProgress is a AccelerationStatus enum value
	AccelerationStatusInProgress = "IN_PROGRESS"

	// AccelerationStatusAccelerated is a AccelerationStatus enum value
	AccelerationStatusAccelerated = "ACCELERATED"

	// AccelerationStatusNotAccelerated is a AccelerationStatus enum value
	AccelerationStatusNotAccelerated = "NOT_ACCELERATED"
)

// AccelerationStatus_Values returns all elements of the AccelerationStatus enum.
func AccelerationStatus_Values() []string {
	values := []string{
		AccelerationStatusNotApplicable,
		AccelerationStatusInProgress,
		AccelerationStatusAccelerated,
		AccelerationStatusNotAccelerated,
	}
	return values
}

// This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert
// AFD signaling (AfdSignaling) to specify whether the service includes AFD
// values in the output video data and what those values are. * Choose None
// to remove all AFD values from this output. * Choose Fixed to ignore input
// AFD values and instead encode the value specified in the job. * Choose Auto
// to calculate output AFD values based on the input AFD scaler data.
const (
	// AfdSignalingNone is a AfdSignaling enum value
	AfdSignalingNone = "NONE"

	// AfdSignalingAuto is a AfdSignaling enum value
	AfdSignalingAuto = "AUTO"

	// AfdSignalingFixed is a AfdSignaling enum value
	AfdSignalingFixed = "FIXED"
)

// AfdSignaling_Values returns all elements of the AfdSignaling enum.
func AfdSignaling_Values() []string {
	values := []string{
		AfdSignalingNone,
		AfdSignalingAuto,
		AfdSignalingFixed,
	}
	return values
}

// Ignore this setting unless this input is a QuickTime animation with an alpha
// channel. Use this setting to create separate Key and Fill outputs. In each
// output, specify which part of the input MediaConvert uses. Leave this setting
// at the default value DISCARD to delete the alpha channel and preserve the
// video. Set it to REMAP_TO_LUMA to delete the video and map the alpha channel
// to the luma channel of your outputs.
const (
	// AlphaBehaviorDiscard is a AlphaBehavior enum value
	AlphaBehaviorDiscard = "DISCARD"

	// AlphaBehaviorRemapToLuma is a AlphaBehavior enum value
	AlphaBehaviorRemapToLuma = "REMAP_TO_LUMA"
)

// AlphaBehavior_Values returns all elements of the AlphaBehavior enum.
func AlphaBehavior_Values() []string {
	values := []string{
		AlphaBehaviorDiscard,
		AlphaBehaviorRemapToLuma,
	}
	return values
}

// Specify whether this set of input captions appears in your outputs in both
// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes
// the captions data in two ways: it passes the 608 data through using the 608
// compatibility bytes fields of the 708 wrapper, and it also translates the
// 608 data into 708.
const (
	// AncillaryConvert608To708Upconvert is a AncillaryConvert608To708 enum value
	AncillaryConvert608To708Upconvert = "UPCONVERT"

	// AncillaryConvert608To708Disabled is a AncillaryConvert608To708 enum value
	AncillaryConvert608To708Disabled = "DISABLED"
)

// AncillaryConvert608To708_Values returns all elements of the AncillaryConvert608To708 enum.
func AncillaryConvert608To708_Values() []string {
	values := []string{
		AncillaryConvert608To708Upconvert,
		AncillaryConvert608To708Disabled,
	}
	return values
}

// By default, the service terminates any unterminated captions at the end of
// each input. If you want the caption to continue onto your next input, disable
// this setting.
const (
	// AncillaryTerminateCaptionsEndOfInput is a AncillaryTerminateCaptions enum value
	AncillaryTerminateCaptionsEndOfInput = "END_OF_INPUT"

	// AncillaryTerminateCaptionsDisabled is a AncillaryTerminateCaptions enum value
	AncillaryTerminateCaptionsDisabled = "DISABLED"
)

// AncillaryTerminateCaptions_Values returns all elements of the AncillaryTerminateCaptions enum.
func AncillaryTerminateCaptions_Values() []string {
	values := []string{
		AncillaryTerminateCaptionsEndOfInput,
		AncillaryTerminateCaptionsDisabled,
	}
	return values
}

// The anti-alias filter is automatically applied to all outputs. The service
// no longer accepts the value DISABLED for AntiAlias. If you specify that in
// your job, the service will ignore the setting.
const (
	// AntiAliasDisabled is a AntiAlias enum value
	AntiAliasDisabled = "DISABLED"

	// AntiAliasEnabled is a AntiAlias enum value
	AntiAliasEnabled = "ENABLED"
)

// AntiAlias_Values returns all elements of the AntiAlias enum.
func AntiAlias_Values() []string {
	values := []string{
		AntiAliasDisabled,
		AntiAliasEnabled,
	}
	return values
}

// You can add a tag for this mono-channel audio track to mimic its placement
// in a multi-channel layout. For example, if this track is the left surround
// channel, choose Left surround (LS).
const (
	// AudioChannelTagL is a AudioChannelTag enum value
	AudioChannelTagL = "L"

	// AudioChannelTagR is a AudioChannelTag enum value
	AudioChannelTagR = "R"

	// AudioChannelTagC is a AudioChannelTag enum value
	AudioChannelTagC = "C"

	// AudioChannelTagLfe is a AudioChannelTag enum value
	AudioChannelTagLfe = "LFE"

	// AudioChannelTagLs is a AudioChannelTag enum value
	AudioChannelTagLs = "LS"

	// AudioChannelTagRs is a AudioChannelTag enum value
	AudioChannelTagRs = "RS"

	// AudioChannelTagLc is a AudioChannelTag enum value
	AudioChannelTagLc = "LC"

	// AudioChannelTagRc is a AudioChannelTag enum value
	AudioChannelTagRc = "RC"

	// AudioChannelTagCs is a AudioChannelTag enum value
	AudioChannelTagCs = "CS"

	// AudioChannelTagLsd is a AudioChannelTag enum value
	AudioChannelTagLsd = "LSD"

	// AudioChannelTagRsd is a AudioChannelTag enum value
	AudioChannelTagRsd = "RSD"

	// AudioChannelTagTcs is a AudioChannelTag enum value
	AudioChannelTagTcs = "TCS"

	// AudioChannelTagVhl is a AudioChannelTag enum value
	AudioChannelTagVhl = "VHL"

	// AudioChannelTagVhc is a AudioChannelTag enum value
	AudioChannelTagVhc = "VHC"

	// AudioChannelTagVhr is a AudioChannelTag enum value
	AudioChannelTagVhr = "VHR"
)

// AudioChannelTag_Values returns all elements of the AudioChannelTag enum.
func AudioChannelTag_Values() []string {
	values := []string{
		AudioChannelTagL,
		AudioChannelTagR,
		AudioChannelTagC,
		AudioChannelTagLfe,
		AudioChannelTagLs,
		AudioChannelTagRs,
		AudioChannelTagLc,
		AudioChannelTagRc,
		AudioChannelTagCs,
		AudioChannelTagLsd,
		AudioChannelTagRsd,
		AudioChannelTagTcs,
		AudioChannelTagVhl,
		AudioChannelTagVhc,
		AudioChannelTagVhr,
	}
	return values
}

// Choose the audio codec for this output. Note that the option Dolby Digital
// passthrough (PASSTHROUGH) applies only to Dolby Digital and Dolby Digital
// Plus audio inputs. Make sure that you choose a codec that's supported with
// your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio
// For audio-only outputs, make sure that both your input audio codec and your
// output audio codec are supported for audio-only workflows. For more information,
// see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only
// and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output
const (
	// AudioCodecAac is a AudioCodec enum value
	AudioCodecAac = "AAC"

	// AudioCodecMp2 is a AudioCodec enum value
	AudioCodecMp2 = "MP2"

	// AudioCodecMp3 is a AudioCodec enum value
	AudioCodecMp3 = "MP3"

	// AudioCodecWav is a AudioCodec enum value
	AudioCodecWav = "WAV"

	// AudioCodecAiff is a AudioCodec enum value
	AudioCodecAiff = "AIFF"

	// AudioCodecAc3 is a AudioCodec enum value
	AudioCodecAc3 = "AC3"

	// AudioCodecEac3 is a AudioCodec enum value
	AudioCodecEac3 = "EAC3"

	// AudioCodecEac3Atmos is a AudioCodec enum value
	AudioCodecEac3Atmos = "EAC3_ATMOS"

	// AudioCodecVorbis is a AudioCodec enum value
	AudioCodecVorbis = "VORBIS"

	// AudioCodecOpus is a AudioCodec enum value
	AudioCodecOpus = "OPUS"

	// AudioCodecPassthrough is a AudioCodec enum value
	AudioCodecPassthrough = "PASSTHROUGH"
)

// AudioCodec_Values returns all elements of the AudioCodec enum.
func AudioCodec_Values() []string {
	values := []string{
		AudioCodecAac,
		AudioCodecMp2,
		AudioCodecMp3,
		AudioCodecWav,
		AudioCodecAiff,
		AudioCodecAc3,
		AudioCodecEac3,
		AudioCodecEac3Atmos,
		AudioCodecVorbis,
		AudioCodecOpus,
		AudioCodecPassthrough,
	}
	return values
}

// Enable this setting on one audio selector to set it as the default for the
// job. The service uses this default for outputs where it can't find the specified
// input audio. If you don't set a default, those outputs have no audio.
const (
	// AudioDefaultSelectionDefault is a AudioDefaultSelection enum value
	AudioDefaultSelectionDefault = "DEFAULT"

	// AudioDefaultSelectionNotDefault is a AudioDefaultSelection enum value
	AudioDefaultSelectionNotDefault = "NOT_DEFAULT"
)

// AudioDefaultSelection_Values returns all elements of the AudioDefaultSelection enum.
func AudioDefaultSelection_Values() []string {
	values := []string{
		AudioDefaultSelectionDefault,
		AudioDefaultSelectionNotDefault,
	}
	return values
}

// Specify which source for language code takes precedence for this audio track.
// When you choose Follow input (FOLLOW_INPUT), the service uses the language
// code from the input track if it's present. If there's no language code on
// the input track, the service uses the code that you specify in the setting
// Language code (languageCode or customLanguageCode). When you choose Use configured
// (USE_CONFIGURED), the service uses the language code that you specify.
const (
	// AudioLanguageCodeControlFollowInput is a AudioLanguageCodeControl enum value
	AudioLanguageCodeControlFollowInput = "FOLLOW_INPUT"

	// AudioLanguageCodeControlUseConfigured is a AudioLanguageCodeControl enum value
	AudioLanguageCodeControlUseConfigured = "USE_CONFIGURED"
)

// AudioLanguageCodeControl_Values returns all elements of the AudioLanguageCodeControl enum.
func AudioLanguageCodeControl_Values() []string {
	values := []string{
		AudioLanguageCodeControlFollowInput,
		AudioLanguageCodeControlUseConfigured,
	}
	return values
}

// Choose one of the following audio normalization algorithms: ITU-R BS.1770-1:
// Ungated loudness. A measurement of ungated average loudness for an entire
// piece of content, suitable for measurement of short-form content under ATSC
// recommendation A/85. Supports up to 5.1 audio channels. ITU-R BS.1770-2:
// Gated loudness. A measurement of gated average loudness compliant with the
// requirements of EBU-R128. Supports up to 5.1 audio channels. ITU-R BS.1770-3:
// Modified peak. The same loudness measurement algorithm as 1770-2, with an
// updated true peak measurement. ITU-R BS.1770-4: Higher channel count. Allows
// for more audio channels than the other algorithms, including configurations
// such as 7.1.
const (
	// AudioNormalizationAlgorithmItuBs17701 is a AudioNormalizationAlgorithm enum value
	AudioNormalizationAlgorithmItuBs17701 = "ITU_BS_1770_1"

	// AudioNormalizationAlgorithmItuBs17702 is a AudioNormalizationAlgorithm enum value
	AudioNormalizationAlgorithmItuBs17702 = "ITU_BS_1770_2"

	// AudioNormalizationAlgorithmItuBs17703 is a AudioNormalizationAlgorithm enum value
	AudioNormalizationAlgorithmItuBs17703 = "ITU_BS_1770_3"

	// AudioNormalizationAlgorithmItuBs17704 is a AudioNormalizationAlgorithm enum value
	AudioNormalizationAlgorithmItuBs17704 = "ITU_BS_1770_4"
)

// AudioNormalizationAlgorithm_Values returns all elements of the AudioNormalizationAlgorithm enum.
func AudioNormalizationAlgorithm_Values() []string {
	values := []string{
		AudioNormalizationAlgorithmItuBs17701,
		AudioNormalizationAlgorithmItuBs17702,
		AudioNormalizationAlgorithmItuBs17703,
		AudioNormalizationAlgorithmItuBs17704,
	}
	return values
}

// When enabled the output audio is corrected using the chosen algorithm. If
// disabled, the audio will be measured but not adjusted.
const (
	// AudioNormalizationAlgorithmControlCorrectAudio is a AudioNormalizationAlgorithmControl enum value
	AudioNormalizationAlgorithmControlCorrectAudio = "CORRECT_AUDIO"

	// AudioNormalizationAlgorithmControlMeasureOnly is a AudioNormalizationAlgorithmControl enum value
	AudioNormalizationAlgorithmControlMeasureOnly = "MEASURE_ONLY"
)

// AudioNormalizationAlgorithmControl_Values returns all elements of the AudioNormalizationAlgorithmControl enum.
func AudioNormalizationAlgorithmControl_Values() []string {
	values := []string{
		AudioNormalizationAlgorithmControlCorrectAudio,
		AudioNormalizationAlgorithmControlMeasureOnly,
	}
	return values
}

// If set to LOG, log each output's audio track loudness to a CSV file.
const (
	// AudioNormalizationLoudnessLoggingLog is a AudioNormalizationLoudnessLogging enum value
	AudioNormalizationLoudnessLoggingLog = "LOG"

	// AudioNormalizationLoudnessLoggingDontLog is a AudioNormalizationLoudnessLogging enum value
	AudioNormalizationLoudnessLoggingDontLog = "DONT_LOG"
)

// AudioNormalizationLoudnessLogging_Values returns all elements of the AudioNormalizationLoudnessLogging enum.
func AudioNormalizationLoudnessLogging_Values() []string {
	values := []string{
		AudioNormalizationLoudnessLoggingLog,
		AudioNormalizationLoudnessLoggingDontLog,
	}
	return values
}

// If set to TRUE_PEAK, calculate and log the TruePeak for each output's audio
// track loudness.
const (
	// AudioNormalizationPeakCalculationTruePeak is a AudioNormalizationPeakCalculation enum value
	AudioNormalizationPeakCalculationTruePeak = "TRUE_PEAK"

	// AudioNormalizationPeakCalculationNone is a AudioNormalizationPeakCalculation enum value
	AudioNormalizationPeakCalculationNone = "NONE"
)

// AudioNormalizationPeakCalculation_Values returns all elements of the AudioNormalizationPeakCalculation enum.
func AudioNormalizationPeakCalculation_Values() []string {
	values := []string{
		AudioNormalizationPeakCalculationTruePeak,
		AudioNormalizationPeakCalculationNone,
	}
	return values
}

// Specifies the type of the audio selector.
const (
	// AudioSelectorTypePid is a AudioSelectorType enum value
	AudioSelectorTypePid = "PID"

	// AudioSelectorTypeTrack is a AudioSelectorType enum value
	AudioSelectorTypeTrack = "TRACK"

	// AudioSelectorTypeLanguageCode is a AudioSelectorType enum value
	AudioSelectorTypeLanguageCode = "LANGUAGE_CODE"

	// AudioSelectorTypeHlsRenditionGroup is a AudioSelectorType enum value
	AudioSelectorTypeHlsRenditionGroup = "HLS_RENDITION_GROUP"
)

// AudioSelectorType_Values returns all elements of the AudioSelectorType enum
func AudioSelectorType_Values() []string {
	return []string{
		AudioSelectorTypePid,
		AudioSelectorTypeTrack,
		AudioSelectorTypeLanguageCode,
		AudioSelectorTypeHlsRenditionGroup,
	}
}

// When set to FOLLOW_INPUT, if the input contains an ISO 639 audio_type,
// then that value is passed through to the output. If the input contains
// no ISO 639 audio_type, the value in Audio Type is included in the output.
// Otherwise the value in Audio Type is included in the output. Note that
// this field and audioType are both ignored if audioDescriptionBroadcasterMix
// is set to BROADCASTER_MIXED_AD.
const (
	// AudioTypeControlFollowInput is a AudioTypeControl enum value
	AudioTypeControlFollowInput = "FOLLOW_INPUT"

	// AudioTypeControlUseConfigured is a AudioTypeControl enum value
	AudioTypeControlUseConfigured = "USE_CONFIGURED"
)

// AudioTypeControl_Values returns all elements of the AudioTypeControl enum
func AudioTypeControl_Values() []string {
	return []string{
		AudioTypeControlFollowInput,
		AudioTypeControlUseConfigured,
	}
}

// Specify the strength of any adaptive quantization filters that you enable.
// The value that you choose here applies to Spatial adaptive quantization
// (spatialAdaptiveQuantization).
const (
	// Av1AdaptiveQuantizationOff is a Av1AdaptiveQuantization enum value
	Av1AdaptiveQuantizationOff = "OFF"

	// Av1AdaptiveQuantizationLow is a Av1AdaptiveQuantization enum value
	Av1AdaptiveQuantizationLow = "LOW"

	// Av1AdaptiveQuantizationMedium is a Av1AdaptiveQuantization enum value
	Av1AdaptiveQuantizationMedium = "MEDIUM"

	// Av1AdaptiveQuantizationHigh is a Av1AdaptiveQuantization enum value
	Av1AdaptiveQuantizationHigh = "HIGH"

	// Av1AdaptiveQuantizationHigher is a Av1AdaptiveQuantization enum value
	Av1AdaptiveQuantizationHigher = "HIGHER"

	// Av1AdaptiveQuantizationMax is a Av1AdaptiveQuantization enum value
	Av1AdaptiveQuantizationMax = "MAX"
)

// Av1AdaptiveQuantization_Values returns all elements of the Av1AdaptiveQuantization enum
func Av1AdaptiveQuantization_Values() []string {
	return []string{
		Av1AdaptiveQuantizationOff,
		Av1AdaptiveQuantizationLow,
		Av1AdaptiveQuantizationMedium,
		Av1AdaptiveQuantizationHigh,
		Av1AdaptiveQuantizationHigher,
		Av1AdaptiveQuantizationMax,
	}
}

// If you are using the console, use the Framerate setting to specify the
// frame rate for this output. If you want to keep the same frame rate as
// the input video, choose Follow source. If you want to do frame rate
// conversion, choose a frame rate from the dropdown list or choose Custom.
// The framerates shown in the dropdown list are decimal approximations of
// fractions. If you choose Custom, specify your frame rate as a fraction.
// If you are creating your transcoding job specification as a JSON file
// without the console, use FramerateControl to specify which value the
// service uses for the frame rate for this output. Choose
// INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from
// the input. Choose SPECIFIED if you want the service to use the frame rate
// you specify in the settings FramerateNumerator and FramerateDenominator.
const (
	// Av1FramerateControlInitializeFromSource is a Av1FramerateControl enum value
	Av1FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Av1FramerateControlSpecified is a Av1FramerateControl enum value
	Av1FramerateControlSpecified = "SPECIFIED"
)

// Av1FramerateControl_Values returns all elements of the Av1FramerateControl enum
func Av1FramerateControl_Values() []string {
	return []string{
		Av1FramerateControlInitializeFromSource,
		Av1FramerateControlSpecified,
	}
}

// Choose the method that you want MediaConvert to use when increasing or
// decreasing the frame rate. We recommend using drop duplicate
// (DUPLICATE_DROP) for numerically simple conversions, such as 60 fps to 30
// fps. For numerically complex conversions, you can use interpolate
// (INTERPOLATE) to avoid stutter. This results in a smooth picture, but
// might introduce undesirable video artifacts. For complex frame rate
// conversions, especially if your source video has already been converted
// from its original cadence, use FrameFormer (FRAMEFORMER) to do
// motion-compensated interpolation. FrameFormer chooses the best conversion
// method frame by frame. Note that using FrameFormer increases the
// transcoding time and incurs a significant add-on cost.
const (
	// Av1FramerateConversionAlgorithmDuplicateDrop is a Av1FramerateConversionAlgorithm enum value
	Av1FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// Av1FramerateConversionAlgorithmInterpolate is a Av1FramerateConversionAlgorithm enum value
	Av1FramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// Av1FramerateConversionAlgorithmFrameformer is a Av1FramerateConversionAlgorithm enum value
	Av1FramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// Av1FramerateConversionAlgorithm_Values returns all elements of the Av1FramerateConversionAlgorithm enum
func Av1FramerateConversionAlgorithm_Values() []string {
	return []string{
		Av1FramerateConversionAlgorithmDuplicateDrop,
		Av1FramerateConversionAlgorithmInterpolate,
		Av1FramerateConversionAlgorithmFrameformer,
	}
}

// With AV1 outputs, for rate control mode, MediaConvert supports only
// quality-defined variable bitrate (QVBR). You can't use CBR or VBR.
const (
	// Av1RateControlModeQvbr is a Av1RateControlMode enum value
	Av1RateControlModeQvbr = "QVBR"
)

// Av1RateControlMode_Values returns all elements of the Av1RateControlMode enum
func Av1RateControlMode_Values() []string {
	return []string{
		Av1RateControlModeQvbr,
	}
}

// Keep the default value, Enabled (ENABLED), to adjust quantization within
// each frame based on spatial variation of content complexity. When you
// enable this feature, the encoder uses fewer bits on areas that can
// sustain more distortion with no noticeable visual degradation and uses
// more bits on areas where any small distortion will be noticeable. For
// example, complex textured blocks are encoded with fewer bits and smooth
// textured blocks are encoded with more bits. Enabling this feature will
// almost always improve your video quality. Note, though, that this
// feature doesn't take into account where the viewer's attention is likely
// to be.
// If viewers are likely to be focusing their attention on a part of the
// screen with a lot of complex texture, you might choose to disable this
// feature. Related setting: When you enable spatial adaptive quantization,
// set the value for Adaptive quantization (adaptiveQuantization) depending
// on your content. For homogeneous content, such as cartoons and video
// games, set it to Low. For content with a wider variety of textures, set
// it to High or Higher.
const (
	// Av1SpatialAdaptiveQuantizationDisabled is a Av1SpatialAdaptiveQuantization enum value
	Av1SpatialAdaptiveQuantizationDisabled = "DISABLED"

	// Av1SpatialAdaptiveQuantizationEnabled is a Av1SpatialAdaptiveQuantization enum value
	Av1SpatialAdaptiveQuantizationEnabled = "ENABLED"
)

// Av1SpatialAdaptiveQuantization_Values returns all elements of the Av1SpatialAdaptiveQuantization enum
func Av1SpatialAdaptiveQuantization_Values() []string {
	return []string{
		Av1SpatialAdaptiveQuantizationDisabled,
		Av1SpatialAdaptiveQuantizationEnabled,
	}
}

// Specify the AVC-Intra class of your output. The AVC-Intra class selection
// determines the output video bit rate depending on the frame rate of the
// output. Outputs with higher class values have higher bitrates and
// improved image quality. Note that for Class 4K/2K, MediaConvert supports
// only 4:2:2 chroma subsampling.
const (
	// AvcIntraClassClass50 is a AvcIntraClass enum value
	AvcIntraClassClass50 = "CLASS_50"

	// AvcIntraClassClass100 is a AvcIntraClass enum value
	AvcIntraClassClass100 = "CLASS_100"

	// AvcIntraClassClass200 is a AvcIntraClass enum value
	AvcIntraClassClass200 = "CLASS_200"

	// AvcIntraClassClass4k2k is a AvcIntraClass enum value
	AvcIntraClassClass4k2k = "CLASS_4K_2K"
)

// AvcIntraClass_Values returns all elements of the AvcIntraClass enum
func AvcIntraClass_Values() []string {
	return []string{
		AvcIntraClassClass50,
		AvcIntraClassClass100,
		AvcIntraClassClass200,
		AvcIntraClassClass4k2k,
	}
}

// If you are using the console, use the Framerate setting to specify the
// frame rate for this output. If you want to keep the same frame rate as
// the input video, choose Follow source. If you want to do frame rate
// conversion, choose a frame rate from the dropdown list or choose Custom.
// The framerates shown in the dropdown list are decimal approximations of
// fractions. If you choose Custom, specify your frame rate as a fraction.
// If you are creating your transcoding job specification as a JSON file
// without the console, use FramerateControl to specify which value the
// service uses for the frame rate for this output. Choose
// INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from
// the input. Choose SPECIFIED if you want the service to use the frame rate
// you specify in the settings FramerateNumerator and FramerateDenominator.
const (
	// AvcIntraFramerateControlInitializeFromSource is a AvcIntraFramerateControl enum value
	AvcIntraFramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// AvcIntraFramerateControlSpecified is a AvcIntraFramerateControl enum value
	AvcIntraFramerateControlSpecified = "SPECIFIED"
)

// AvcIntraFramerateControl_Values returns all elements of the AvcIntraFramerateControl enum
func AvcIntraFramerateControl_Values() []string {
	return []string{
		AvcIntraFramerateControlInitializeFromSource,
		AvcIntraFramerateControlSpecified,
	}
}

// Choose the method that you want MediaConvert to use when increasing or
// decreasing the frame rate. We recommend using drop duplicate
// (DUPLICATE_DROP) for numerically simple conversions, such as 60 fps to 30
// fps. For numerically complex conversions, you can use interpolate
// (INTERPOLATE) to avoid stutter. This results in a smooth picture, but
// might introduce undesirable video artifacts. For complex frame rate
// conversions, especially if your source video has already been converted
// from its original cadence, use FrameFormer (FRAMEFORMER) to do
// motion-compensated interpolation. FrameFormer chooses the best conversion
// method frame by frame. Note that using FrameFormer increases the
// transcoding time and incurs a significant add-on cost.
const (
	// AvcIntraFramerateConversionAlgorithmDuplicateDrop is a AvcIntraFramerateConversionAlgorithm enum value
	AvcIntraFramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// AvcIntraFramerateConversionAlgorithmInterpolate is a AvcIntraFramerateConversionAlgorithm enum value
	AvcIntraFramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// AvcIntraFramerateConversionAlgorithmFrameformer is a AvcIntraFramerateConversionAlgorithm enum value
	AvcIntraFramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// AvcIntraFramerateConversionAlgorithm_Values returns all elements of the AvcIntraFramerateConversionAlgorithm enum
func AvcIntraFramerateConversionAlgorithm_Values() []string {
	return []string{
		AvcIntraFramerateConversionAlgorithmDuplicateDrop,
		AvcIntraFramerateConversionAlgorithmInterpolate,
		AvcIntraFramerateConversionAlgorithmFrameformer,
	}
}

// Choose the scan line type for the output. Keep the default value,
// Progressive (PROGRESSIVE) to create a progressive output, regardless of
// the scan type of your input. Use Top field first (TOP_FIELD) or Bottom
// field first (BOTTOM_FIELD) to create an output that's interlaced with the
// same field polarity throughout. Use Follow, default top
// (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to
// produce outputs with the same field polarity as the source. For jobs that
// have multiple inputs, the output field polarity might change over the
// course of the output. Follow behavior depends on the input scan type. If
// the source is interlaced, the output will be interlaced with the same
// polarity as the source. If the source is progressive, the output will be
// interlaced with top field bottom field first, depending on which of the
// Follow options you choose.
const (
	// AvcIntraInterlaceModeProgressive is a AvcIntraInterlaceMode enum value
	AvcIntraInterlaceModeProgressive = "PROGRESSIVE"

	// AvcIntraInterlaceModeTopField is a AvcIntraInterlaceMode enum value
	AvcIntraInterlaceModeTopField = "TOP_FIELD"

	// AvcIntraInterlaceModeBottomField is a AvcIntraInterlaceMode enum value
	AvcIntraInterlaceModeBottomField = "BOTTOM_FIELD"

	// AvcIntraInterlaceModeFollowTopField is a AvcIntraInterlaceMode enum value
	AvcIntraInterlaceModeFollowTopField = "FOLLOW_TOP_FIELD"

	// AvcIntraInterlaceModeFollowBottomField is a AvcIntraInterlaceMode enum value
	AvcIntraInterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD"
)

// AvcIntraInterlaceMode_Values returns all elements of the AvcIntraInterlaceMode enum
func AvcIntraInterlaceMode_Values() []string {
	return []string{
		AvcIntraInterlaceModeProgressive,
		AvcIntraInterlaceModeTopField,
		AvcIntraInterlaceModeBottomField,
		AvcIntraInterlaceModeFollowTopField,
		AvcIntraInterlaceModeFollowBottomField,
	}
}

// Use this setting for interlaced outputs, when your output frame rate is
// half of your input frame rate. In this situation, choose Optimized
// interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced
// output. In this case, each progressive frame from the input corresponds
// to an interlaced field in the output. Keep the default value, Basic
// interlacing (INTERLACED), for all other output frame rates. With basic
// interlacing, MediaConvert performs any frame rate conversion first and
// then interlaces the frames. When you choose Optimized interlacing and you
// set your output frame rate to a value that isn't suitable for optimized
// interlacing, MediaConvert automatically falls back to basic interlacing.
// Required settings: To use optimized interlacing, you must set Telecine
// (telecine) to None (NONE) or Soft (SOFT). You can't use optimized
// interlacing for hard telecine outputs.
// You must also set Interlace mode (interlaceMode) to a value other than
// Progressive (PROGRESSIVE).
const (
	// AvcIntraScanTypeConversionModeInterlaced is a AvcIntraScanTypeConversionMode enum value
	AvcIntraScanTypeConversionModeInterlaced = "INTERLACED"

	// AvcIntraScanTypeConversionModeInterlacedOptimize is a AvcIntraScanTypeConversionMode enum value
	AvcIntraScanTypeConversionModeInterlacedOptimize = "INTERLACED_OPTIMIZE"
)

// AvcIntraScanTypeConversionMode_Values returns all elements of the AvcIntraScanTypeConversionMode enum
func AvcIntraScanTypeConversionMode_Values() []string {
	return []string{
		AvcIntraScanTypeConversionModeInterlaced,
		AvcIntraScanTypeConversionModeInterlacedOptimize,
	}
}

// Ignore this setting unless your input frame rate is 23.976 or 24 frames
// per second (fps). Enable slow PAL to create a 25 fps output. When you
// enable slow PAL, MediaConvert relabels the video frames to 25 fps and
// resamples your audio to keep it synchronized with the video. Note that
// enabling this setting will slightly reduce the duration of your video.
// Required settings: You must also set Framerate to 25. In your JSON job
// specification, set (framerateControl) to (SPECIFIED),
// (framerateNumerator) to 25 and (framerateDenominator) to 1.
const (
	// AvcIntraSlowPalDisabled is a AvcIntraSlowPal enum value
	AvcIntraSlowPalDisabled = "DISABLED"

	// AvcIntraSlowPalEnabled is a AvcIntraSlowPal enum value
	AvcIntraSlowPalEnabled = "ENABLED"
)

// AvcIntraSlowPal_Values returns all elements of the AvcIntraSlowPal enum
func AvcIntraSlowPal_Values() []string {
	return []string{
		AvcIntraSlowPalDisabled,
		AvcIntraSlowPalEnabled,
	}
}

// When you do frame rate conversion from 23.976 frames per second (fps) to
// 29.97 fps, and your output scan type is interlaced, you can optionally
// enable hard telecine (HARD) to create a smoother picture.
// When you keep the default value, None (NONE), MediaConvert does a
// standard frame rate conversion to 29.97 without doing anything with the
// field polarity to create a smoother picture.
const (
	// AvcIntraTelecineNone is a AvcIntraTelecine enum value
	AvcIntraTelecineNone = "NONE"

	// AvcIntraTelecineHard is a AvcIntraTelecine enum value
	AvcIntraTelecineHard = "HARD"
)

// AvcIntraTelecine_Values returns all elements of the AvcIntraTelecine enum
func AvcIntraTelecine_Values() []string {
	return []string{
		AvcIntraTelecineNone,
		AvcIntraTelecineHard,
	}
}

// Optional. Use Quality tuning level (qualityTuningLevel) to choose how
// many transcoding passes MediaConvert does with your video. When you
// choose Multi-pass (MULTI_PASS), your video quality is better and your
// output bitrate is more accurate. That is, the actual bitrate of your
// output is closer to the target bitrate defined in the specification. When
// you choose Single-pass (SINGLE_PASS), your encoding time is faster. The
// default behavior is Single-pass (SINGLE_PASS).
const (
	// AvcIntraUhdQualityTuningLevelSinglePass is a AvcIntraUhdQualityTuningLevel enum value
	AvcIntraUhdQualityTuningLevelSinglePass = "SINGLE_PASS"

	// AvcIntraUhdQualityTuningLevelMultiPass is a AvcIntraUhdQualityTuningLevel enum value
	AvcIntraUhdQualityTuningLevelMultiPass = "MULTI_PASS"
)

// AvcIntraUhdQualityTuningLevel_Values returns all elements of the AvcIntraUhdQualityTuningLevel enum
func AvcIntraUhdQualityTuningLevel_Values() []string {
	return []string{
		AvcIntraUhdQualityTuningLevelSinglePass,
		AvcIntraUhdQualityTuningLevelMultiPass,
	}
}

// The tag type that AWS Billing and Cost Management will use to sort your
// AWS Elemental MediaConvert costs on any billing report that you set up.
const (
	// BillingTagsSourceQueue is a BillingTagsSource enum value
	BillingTagsSourceQueue = "QUEUE"

	// BillingTagsSourcePreset is a BillingTagsSource enum value
	BillingTagsSourcePreset = "PRESET"

	// BillingTagsSourceJobTemplate is a BillingTagsSource enum value
	BillingTagsSourceJobTemplate = "JOB_TEMPLATE"

	// BillingTagsSourceJob is a BillingTagsSource enum value
	BillingTagsSourceJob = "JOB"
)

// BillingTagsSource_Values returns all elements of the BillingTagsSource enum
func BillingTagsSource_Values() []string {
	return []string{
		BillingTagsSourceQueue,
		BillingTagsSourcePreset,
		BillingTagsSourceJobTemplate,
		BillingTagsSourceJob,
	}
}

// If no explicit x_position or y_position is provided, setting alignment to
// centered will place the captions at the bottom center of the output.
// Similarly, setting a left alignment will align captions to the bottom
// left of the output. If x and y positions are given in conjunction with
// the alignment parameter, the font will be justified (either left or
// centered) relative to those coordinates. This option is not valid for
// source captions that are STL, 608/embedded or teletext. These source
// settings are already pre-defined by the caption stream. All burn-in and
// DVB-Sub font settings must match.
const (
	// BurninSubtitleAlignmentCentered is a BurninSubtitleAlignment enum value
	BurninSubtitleAlignmentCentered = "CENTERED"

	// BurninSubtitleAlignmentLeft is a BurninSubtitleAlignment enum value
	BurninSubtitleAlignmentLeft = "LEFT"
)

// BurninSubtitleAlignment_Values returns all elements of the BurninSubtitleAlignment enum
func BurninSubtitleAlignment_Values() []string {
	return []string{
		BurninSubtitleAlignmentCentered,
		BurninSubtitleAlignmentLeft,
	}
}

// Specifies the color of the rectangle behind the captions. All burn-in and
// DVB-Sub font settings must match.
const (
	// BurninSubtitleBackgroundColorNone is a BurninSubtitleBackgroundColor enum value
	BurninSubtitleBackgroundColorNone = "NONE"

	// BurninSubtitleBackgroundColorBlack is a BurninSubtitleBackgroundColor enum value
	BurninSubtitleBackgroundColorBlack = "BLACK"

	// BurninSubtitleBackgroundColorWhite is a BurninSubtitleBackgroundColor enum value
	BurninSubtitleBackgroundColorWhite = "WHITE"
)

// BurninSubtitleBackgroundColor_Values returns all elements of the BurninSubtitleBackgroundColor enum
func BurninSubtitleBackgroundColor_Values() []string {
	return []string{
		BurninSubtitleBackgroundColorNone,
		BurninSubtitleBackgroundColorBlack,
		BurninSubtitleBackgroundColorWhite,
	}
}

// Specifies the color of the burned-in captions. This option is not valid
// for source captions that are STL, 608/embedded or teletext. These source
// settings are already pre-defined by the caption stream. All burn-in and
// DVB-Sub font settings must match.
const (
	// BurninSubtitleFontColorWhite is a BurninSubtitleFontColor enum value
	BurninSubtitleFontColorWhite = "WHITE"

	// BurninSubtitleFontColorBlack is a BurninSubtitleFontColor enum value
	BurninSubtitleFontColorBlack = "BLACK"

	// BurninSubtitleFontColorYellow is a BurninSubtitleFontColor enum value
	BurninSubtitleFontColorYellow = "YELLOW"

	// BurninSubtitleFontColorRed is a BurninSubtitleFontColor enum value
	BurninSubtitleFontColorRed = "RED"

	// BurninSubtitleFontColorGreen is a BurninSubtitleFontColor enum value
	BurninSubtitleFontColorGreen = "GREEN"

	// BurninSubtitleFontColorBlue is a BurninSubtitleFontColor enum value
	BurninSubtitleFontColorBlue = "BLUE"
)

// BurninSubtitleFontColor_Values returns all elements of the BurninSubtitleFontColor enum
func BurninSubtitleFontColor_Values() []string {
	return []string{
		BurninSubtitleFontColorWhite,
		BurninSubtitleFontColorBlack,
		BurninSubtitleFontColorYellow,
		BurninSubtitleFontColorRed,
		BurninSubtitleFontColorGreen,
		BurninSubtitleFontColorBlue,
	}
}

// Specifies font outline color.
// This option is not valid for source captions that are either 608/embedded
// or teletext. These source settings are already pre-defined by the caption
// stream. All burn-in and DVB-Sub font settings must match.
const (
	// BurninSubtitleOutlineColorBlack is a BurninSubtitleOutlineColor enum value
	BurninSubtitleOutlineColorBlack = "BLACK"

	// BurninSubtitleOutlineColorWhite is a BurninSubtitleOutlineColor enum value
	BurninSubtitleOutlineColorWhite = "WHITE"

	// BurninSubtitleOutlineColorYellow is a BurninSubtitleOutlineColor enum value
	BurninSubtitleOutlineColorYellow = "YELLOW"

	// BurninSubtitleOutlineColorRed is a BurninSubtitleOutlineColor enum value
	BurninSubtitleOutlineColorRed = "RED"

	// BurninSubtitleOutlineColorGreen is a BurninSubtitleOutlineColor enum value
	BurninSubtitleOutlineColorGreen = "GREEN"

	// BurninSubtitleOutlineColorBlue is a BurninSubtitleOutlineColor enum value
	BurninSubtitleOutlineColorBlue = "BLUE"
)

// BurninSubtitleOutlineColor_Values returns all elements of the BurninSubtitleOutlineColor enum
func BurninSubtitleOutlineColor_Values() []string {
	return []string{
		BurninSubtitleOutlineColorBlack,
		BurninSubtitleOutlineColorWhite,
		BurninSubtitleOutlineColorYellow,
		BurninSubtitleOutlineColorRed,
		BurninSubtitleOutlineColorGreen,
		BurninSubtitleOutlineColorBlue,
	}
}

// Specifies the color of the shadow cast by the captions. All burn-in and
// DVB-Sub font settings must match.
const (
	// BurninSubtitleShadowColorNone is a BurninSubtitleShadowColor enum value
	BurninSubtitleShadowColorNone = "NONE"

	// BurninSubtitleShadowColorBlack is a BurninSubtitleShadowColor enum value
	BurninSubtitleShadowColorBlack = "BLACK"

	// BurninSubtitleShadowColorWhite is a BurninSubtitleShadowColor enum value
	BurninSubtitleShadowColorWhite = "WHITE"
)

// BurninSubtitleShadowColor_Values returns all elements of the BurninSubtitleShadowColor enum
func BurninSubtitleShadowColor_Values() []string {
	return []string{
		BurninSubtitleShadowColorNone,
		BurninSubtitleShadowColorBlack,
		BurninSubtitleShadowColorWhite,
	}
}

// Only applies to jobs with input captions in Teletext or STL formats.
// Specify whether the spacing between letters in your captions is set by
// the captions grid or varies depending on letter width. Choose fixed grid
// to conform to the spacing specified in the captions file more accurately.
// Choose proportional to make the text easier to read if the captions are
// closed caption.
const (
	// BurninSubtitleTeletextSpacingFixedGrid is a BurninSubtitleTeletextSpacing enum value
	BurninSubtitleTeletextSpacingFixedGrid = "FIXED_GRID"

	// BurninSubtitleTeletextSpacingProportional is a BurninSubtitleTeletextSpacing enum value
	BurninSubtitleTeletextSpacingProportional = "PROPORTIONAL"
)

// BurninSubtitleTeletextSpacing_Values returns all elements of the BurninSubtitleTeletextSpacing enum
func BurninSubtitleTeletextSpacing_Values() []string {
	return []string{
		BurninSubtitleTeletextSpacingFixedGrid,
		BurninSubtitleTeletextSpacingProportional,
	}
}

// Specify the format for this set of captions on this output. The default
// format is embedded without SCTE-20. Note that your choice of video output
// container constrains your choice of output captions format. For more
// information, see
// https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html.
// If you are using SCTE-20 and you want to create an output that complies
// with the SCTE-43 spec, choose SCTE-20 plus embedded
// (SCTE20_PLUS_EMBEDDED). To create a non-compliant output where the
// embedded captions come first, choose Embedded plus SCTE-20
// (EMBEDDED_PLUS_SCTE20).
const (
	// CaptionDestinationTypeBurnIn is a CaptionDestinationType enum value
	CaptionDestinationTypeBurnIn = "BURN_IN"

	// CaptionDestinationTypeDvbSub is a CaptionDestinationType enum value
	CaptionDestinationTypeDvbSub = "DVB_SUB"

	// CaptionDestinationTypeEmbedded is a CaptionDestinationType enum value
	CaptionDestinationTypeEmbedded = "EMBEDDED"

	// CaptionDestinationTypeEmbeddedPlusScte20 is a CaptionDestinationType enum value
	CaptionDestinationTypeEmbeddedPlusScte20 = "EMBEDDED_PLUS_SCTE20"

	// CaptionDestinationTypeImsc is a CaptionDestinationType enum value
	CaptionDestinationTypeImsc = "IMSC"

	// CaptionDestinationTypeScte20PlusEmbedded is a CaptionDestinationType enum value
	CaptionDestinationTypeScte20PlusEmbedded = "SCTE20_PLUS_EMBEDDED"

	// CaptionDestinationTypeScc is a CaptionDestinationType enum value
	CaptionDestinationTypeScc = "SCC"

	// CaptionDestinationTypeSrt is a CaptionDestinationType enum value
	CaptionDestinationTypeSrt = "SRT"

	// CaptionDestinationTypeSmi is a CaptionDestinationType enum value
	CaptionDestinationTypeSmi = "SMI"

	// CaptionDestinationTypeTeletext is a CaptionDestinationType enum value
	CaptionDestinationTypeTeletext = "TELETEXT"

	// CaptionDestinationTypeTtml is a CaptionDestinationType enum value
	CaptionDestinationTypeTtml = "TTML"

	// CaptionDestinationTypeWebvtt is a CaptionDestinationType enum value
	CaptionDestinationTypeWebvtt = "WEBVTT"
)

// CaptionDestinationType_Values returns all elements of the CaptionDestinationType enum
func CaptionDestinationType_Values() []string {
	return []string{
		CaptionDestinationTypeBurnIn,
		CaptionDestinationTypeDvbSub,
		CaptionDestinationTypeEmbedded,
		CaptionDestinationTypeEmbeddedPlusScte20,
		CaptionDestinationTypeImsc,
		CaptionDestinationTypeScte20PlusEmbedded,
		CaptionDestinationTypeScc,
		CaptionDestinationTypeSrt,
		CaptionDestinationTypeSmi,
		CaptionDestinationTypeTeletext,
		CaptionDestinationTypeTtml,
		CaptionDestinationTypeWebvtt,
	}
}

// Use Source (SourceType) to identify the format of your input captions.
// The service cannot auto-detect caption format.
const (
	// CaptionSourceTypeAncillary is a CaptionSourceType enum value
	CaptionSourceTypeAncillary = "ANCILLARY"

	// CaptionSourceTypeDvbSub is a CaptionSourceType enum value
	CaptionSourceTypeDvbSub = "DVB_SUB"

	// CaptionSourceTypeEmbedded is a CaptionSourceType enum value
	CaptionSourceTypeEmbedded = "EMBEDDED"

	// CaptionSourceTypeScte20 is a CaptionSourceType enum value
	CaptionSourceTypeScte20 = "SCTE20"

	// CaptionSourceTypeScc is a CaptionSourceType enum value
	CaptionSourceTypeScc = "SCC"

	// CaptionSourceTypeTtml is a CaptionSourceType enum value
	CaptionSourceTypeTtml = "TTML"

	// CaptionSourceTypeStl is a CaptionSourceType enum value
	CaptionSourceTypeStl = "STL"

	// CaptionSourceTypeSrt is a CaptionSourceType enum value
	CaptionSourceTypeSrt = "SRT"

	// CaptionSourceTypeSmi is a CaptionSourceType enum value
	CaptionSourceTypeSmi = "SMI"

	// CaptionSourceTypeSmpteTt is a CaptionSourceType enum value
	CaptionSourceTypeSmpteTt = "SMPTE_TT"

	// CaptionSourceTypeTeletext is a CaptionSourceType enum value
	CaptionSourceTypeTeletext = "TELETEXT"

	// CaptionSourceTypeNullSource is a CaptionSourceType enum value
	CaptionSourceTypeNullSource = "NULL_SOURCE"

	// CaptionSourceTypeImsc is a CaptionSourceType enum value
	CaptionSourceTypeImsc = "IMSC"

	// CaptionSourceTypeWebvtt is a CaptionSourceType enum value
	CaptionSourceTypeWebvtt = "WEBVTT"
)

// CaptionSourceType_Values returns all elements of the CaptionSourceType enum
func CaptionSourceType_Values() []string {
	return []string{
		CaptionSourceTypeAncillary,
		CaptionSourceTypeDvbSub,
		CaptionSourceTypeEmbedded,
		CaptionSourceTypeScte20,
		CaptionSourceTypeScc,
		CaptionSourceTypeTtml,
		CaptionSourceTypeStl,
		CaptionSourceTypeSrt,
		CaptionSourceTypeSmi,
		CaptionSourceTypeSmpteTt,
		CaptionSourceTypeTeletext,
		CaptionSourceTypeNullSource,
		CaptionSourceTypeImsc,
		CaptionSourceTypeWebvtt,
	}
}

// Disable this setting only when your workflow requires the
// #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled
// (ENABLED) and control caching in your video distribution set up. For
// example, use the Cache-Control http header.
const (
	// CmafClientCacheDisabled is a CmafClientCache enum value
	CmafClientCacheDisabled = "DISABLED"

	// CmafClientCacheEnabled is a CmafClientCache enum value
	CmafClientCacheEnabled = "ENABLED"
)

// CmafClientCache_Values returns all elements of the CmafClientCache enum
func CmafClientCache_Values() []string {
	return []string{
		CmafClientCacheDisabled,
		CmafClientCacheEnabled,
	}
}

// Specification to use (RFC-6381 or the default RFC-4281) during m3u8
// playlist generation.
const (
	// CmafCodecSpecificationRfc6381 is a CmafCodecSpecification enum value
	CmafCodecSpecificationRfc6381 = "RFC_6381"

	// CmafCodecSpecificationRfc4281 is a CmafCodecSpecification enum value
	CmafCodecSpecificationRfc4281 = "RFC_4281"
)

// CmafCodecSpecification_Values returns all elements of the CmafCodecSpecification enum
func CmafCodecSpecification_Values() []string {
	return []string{
		CmafCodecSpecificationRfc6381,
		CmafCodecSpecificationRfc4281,
	}
}

// Specify the encryption scheme that you want the service to use when
// encrypting your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or
// AES_CTR (AES-CTR).
const (
	// CmafEncryptionTypeSampleAes is a CmafEncryptionType enum value
	CmafEncryptionTypeSampleAes = "SAMPLE_AES"
	// CmafEncryptionTypeAesCtr is a CmafEncryptionType enum value
	CmafEncryptionTypeAesCtr = "AES_CTR"
)

// CmafEncryptionType_Values returns all elements of the CmafEncryptionType enum.
func CmafEncryptionType_Values() []string {
	return []string{CmafEncryptionTypeSampleAes, CmafEncryptionTypeAesCtr}
}

// Specify whether MediaConvert generates images for trick play. Keep the default
// value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL)
// to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME)
// to generate tiled thumbnails and full-resolution images of single frames.
// When you enable Write HLS manifest (WriteHlsManifest), MediaConvert creates
// a child manifest for each set of images that you generate and adds corresponding
// entries to the parent manifest. When you enable Write DASH manifest (WriteDashManifest),
// MediaConvert adds an entry in the .mpd manifest for each set of images that
// you generate. A common application for these images is Roku trick mode. The
// thumbnails and full-frame images that MediaConvert creates with this feature
// are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md
const (
	// CmafImageBasedTrickPlayNone is a CmafImageBasedTrickPlay enum value
	CmafImageBasedTrickPlayNone = "NONE"
	// CmafImageBasedTrickPlayThumbnail is a CmafImageBasedTrickPlay enum value
	CmafImageBasedTrickPlayThumbnail = "THUMBNAIL"
	// CmafImageBasedTrickPlayThumbnailAndFullframe is a CmafImageBasedTrickPlay enum value
	CmafImageBasedTrickPlayThumbnailAndFullframe = "THUMBNAIL_AND_FULLFRAME"
)

// CmafImageBasedTrickPlay_Values returns all elements of the CmafImageBasedTrickPlay enum.
func CmafImageBasedTrickPlay_Values() []string {
	return []string{
		CmafImageBasedTrickPlayNone,
		CmafImageBasedTrickPlayThumbnail,
		CmafImageBasedTrickPlayThumbnailAndFullframe,
	}
}

// When you use DRM with CMAF outputs, choose whether the service writes the
// 128-bit encryption initialization vector in the HLS and DASH manifests.
const (
	// CmafInitializationVectorInManifestInclude is a CmafInitializationVectorInManifest enum value
	CmafInitializationVectorInManifestInclude = "INCLUDE"
	// CmafInitializationVectorInManifestExclude is a CmafInitializationVectorInManifest enum value
	CmafInitializationVectorInManifestExclude = "EXCLUDE"
)

// CmafInitializationVectorInManifest_Values returns all elements of the CmafInitializationVectorInManifest enum.
func CmafInitializationVectorInManifest_Values() []string {
	return []string{CmafInitializationVectorInManifestInclude, CmafInitializationVectorInManifestExclude}
}

// Specify whether your DRM encryption key is static or from a key provider
// that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.
const (
	// CmafKeyProviderTypeSpeke is a CmafKeyProviderType enum value
	CmafKeyProviderTypeSpeke = "SPEKE"
	// CmafKeyProviderTypeStaticKey is a CmafKeyProviderType enum value
	CmafKeyProviderTypeStaticKey = "STATIC_KEY"
)

// CmafKeyProviderType_Values returns all elements of the CmafKeyProviderType enum.
func CmafKeyProviderType_Values() []string {
	return []string{CmafKeyProviderTypeSpeke, CmafKeyProviderTypeStaticKey}
}

// When set to GZIP, compresses HLS playlist.
const (
	// CmafManifestCompressionGzip is a CmafManifestCompression enum value
	CmafManifestCompressionGzip = "GZIP"
	// CmafManifestCompressionNone is a CmafManifestCompression enum value
	CmafManifestCompressionNone = "NONE"
)

// CmafManifestCompression_Values returns all elements of the CmafManifestCompression enum.
func CmafManifestCompression_Values() []string {
	return []string{CmafManifestCompressionGzip, CmafManifestCompressionNone}
}

// Indicates whether the output manifest should use floating point values for
// segment duration.
const (
	// CmafManifestDurationFormatFloatingPoint is a CmafManifestDurationFormat enum value
	CmafManifestDurationFormatFloatingPoint = "FLOATING_POINT"
	// CmafManifestDurationFormatInteger is a CmafManifestDurationFormat enum value
	CmafManifestDurationFormatInteger = "INTEGER"
)

// CmafManifestDurationFormat_Values returns all elements of the CmafManifestDurationFormat enum.
func CmafManifestDurationFormat_Values() []string {
	return []string{CmafManifestDurationFormatFloatingPoint, CmafManifestDurationFormatInteger}
}

// Specify whether your DASH profile is on-demand or main. When you choose Main
// profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011
// in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE),
// the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd.
// When you choose On-demand, you must also set the output group setting Segment
// control (SegmentControl) to Single file (SINGLE_FILE).
const (
	// CmafMpdProfileMainProfile is a CmafMpdProfile enum value
	CmafMpdProfileMainProfile = "MAIN_PROFILE"
	// CmafMpdProfileOnDemandProfile is a CmafMpdProfile enum value
	CmafMpdProfileOnDemandProfile = "ON_DEMAND_PROFILE"
)

// CmafMpdProfile_Values returns all elements of the CmafMpdProfile enum.
func CmafMpdProfile_Values() []string {
	return []string{CmafMpdProfileMainProfile, CmafMpdProfileOnDemandProfile}
}

// Use this setting only when your output video stream has B-frames, which causes
// the initial presentation time stamp (PTS) to be offset from the initial decode
// time stamp (DTS). Specify how MediaConvert handles PTS when writing time
// stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS)
// when you want MediaConvert to use the initial PTS as the first time stamp
// in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore
// the initial PTS in the video stream and instead write the initial time stamp
// as zero in the manifest. For outputs that don't have B-frames, the time stamps
// in your DASH manifests start at zero regardless of your choice here.
const (
	// CmafPtsOffsetHandlingForBFramesZeroBased is a CmafPtsOffsetHandlingForBFrames enum value
	CmafPtsOffsetHandlingForBFramesZeroBased = "ZERO_BASED"
	// CmafPtsOffsetHandlingForBFramesMatchInitialPts is a CmafPtsOffsetHandlingForBFrames enum value
	CmafPtsOffsetHandlingForBFramesMatchInitialPts = "MATCH_INITIAL_PTS"
)

// CmafPtsOffsetHandlingForBFrames_Values returns all elements of the CmafPtsOffsetHandlingForBFrames enum.
func CmafPtsOffsetHandlingForBFrames_Values() []string {
	return []string{CmafPtsOffsetHandlingForBFramesZeroBased, CmafPtsOffsetHandlingForBFramesMatchInitialPts}
}

// When set to SINGLE_FILE, a single output file is generated, which is internally
// segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES,
// separate segment files will be created.
const (
	// CmafSegmentControlSingleFile is a CmafSegmentControl enum value
	CmafSegmentControlSingleFile = "SINGLE_FILE"
	// CmafSegmentControlSegmentedFiles is a CmafSegmentControl enum value
	CmafSegmentControlSegmentedFiles = "SEGMENTED_FILES"
)

// CmafSegmentControl_Values returns all elements of the CmafSegmentControl enum.
func CmafSegmentControl_Values() []string {
	return []string{CmafSegmentControlSingleFile, CmafSegmentControlSegmentedFiles}
}

// Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag
// of variant manifest.
const (
	// CmafStreamInfResolutionInclude is a CmafStreamInfResolution enum value
	CmafStreamInfResolutionInclude = "INCLUDE"
	// CmafStreamInfResolutionExclude is a CmafStreamInfResolution enum value
	CmafStreamInfResolutionExclude = "EXCLUDE"
)

// CmafStreamInfResolution_Values returns all elements of the CmafStreamInfResolution enum.
func CmafStreamInfResolution_Values() []string {
	return []string{CmafStreamInfResolutionInclude, CmafStreamInfResolutionExclude}
}

// When set to ENABLED, a DASH MPD manifest will be generated for this output.
const (
	// CmafWriteDASHManifestDisabled is a CmafWriteDASHManifest enum value
	CmafWriteDASHManifestDisabled = "DISABLED"
	// CmafWriteDASHManifestEnabled is a CmafWriteDASHManifest enum value
	CmafWriteDASHManifestEnabled = "ENABLED"
)

// CmafWriteDASHManifest_Values returns all elements of the CmafWriteDASHManifest enum.
func CmafWriteDASHManifest_Values() []string {
	return []string{CmafWriteDASHManifestDisabled, CmafWriteDASHManifestEnabled}
}

// When set to ENABLED, an Apple HLS manifest will be generated for this output.
const (
	// CmafWriteHLSManifestDisabled is a CmafWriteHLSManifest enum value
	CmafWriteHLSManifestDisabled = "DISABLED"
	// CmafWriteHLSManifestEnabled is a CmafWriteHLSManifest enum value
	CmafWriteHLSManifestEnabled = "ENABLED"
)

// CmafWriteHLSManifest_Values returns all elements of the CmafWriteHLSManifest enum.
func CmafWriteHLSManifest_Values() []string {
	return []string{CmafWriteHLSManifestDisabled, CmafWriteHLSManifestEnabled}
}

// When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation),
// your DASH manifest shows precise segment durations. The segment duration
// information appears inside the SegmentTimeline element, inside SegmentTemplate
// at the Representation level. When this feature isn't enabled, the segment
// durations in your DASH manifest are approximate. The segment duration information
// appears in the duration attribute of the SegmentTemplate element.
const (
	// CmafWriteSegmentTimelineInRepresentationEnabled is a CmafWriteSegmentTimelineInRepresentation enum value
	CmafWriteSegmentTimelineInRepresentationEnabled = "ENABLED"
	// CmafWriteSegmentTimelineInRepresentationDisabled is a CmafWriteSegmentTimelineInRepresentation enum value
	CmafWriteSegmentTimelineInRepresentationDisabled = "DISABLED"
)

// CmafWriteSegmentTimelineInRepresentation_Values returns all elements of the CmafWriteSegmentTimelineInRepresentation enum.
func CmafWriteSegmentTimelineInRepresentation_Values() []string {
	return []string{CmafWriteSegmentTimelineInRepresentationEnabled, CmafWriteSegmentTimelineInRepresentationDisabled}
}

// Specify this setting only when your output will be consumed by a downstream
// repackaging workflow that is sensitive to very small duration differences
// between video and audio. For this situation, choose Match video duration
// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
// MediaConvert pads the output audio streams with silence or trims them to
// ensure that the total duration of each audio stream is at least as long as
// the total duration of the video stream. After padding or trimming, the audio
// stream duration is no more than one frame longer than the video stream. MediaConvert
// applies audio padding or trimming only to the end of the last segment of
// the output. For unsegmented outputs, MediaConvert adds padding only to the
// end of the file. When you keep the default value, any minor discrepancies
// between audio and video duration will depend on your output audio codec.
const (
	// CmfcAudioDurationDefaultCodecDuration is a CmfcAudioDuration enum value
	CmfcAudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION"
	// CmfcAudioDurationMatchVideoDuration is a CmfcAudioDuration enum value
	CmfcAudioDurationMatchVideoDuration = "MATCH_VIDEO_DURATION"
)

// CmfcAudioDuration_Values returns all elements of the CmfcAudioDuration enum.
func CmfcAudioDuration_Values() []string {
	return []string{CmfcAudioDurationDefaultCodecDuration, CmfcAudioDurationMatchVideoDuration}
}

// Use this setting to control the values that MediaConvert puts in your HLS
// parent playlist to control how the client player selects which audio track
// to play. The other options for this setting determine the values that MediaConvert
// writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry
// for the audio variant. For more information about these attributes, see the
// Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist.
// Choose Alternate audio, auto select, default (ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT)
// to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant
// in your output group. Choose Alternate audio, auto select, not default (ALTERNATE_AUDIO_AUTO_SELECT)
// to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select
// to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this
// setting, MediaConvert defaults to Alternate audio, auto select, default.
// When there is more than one variant in your output group, you must explicitly
// choose a value for this setting.
const (
	// CmfcAudioTrackTypeAlternateAudioAutoSelectDefault is a CmfcAudioTrackType enum value
	CmfcAudioTrackTypeAlternateAudioAutoSelectDefault = "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT"
	// CmfcAudioTrackTypeAlternateAudioAutoSelect is a CmfcAudioTrackType enum value
	CmfcAudioTrackTypeAlternateAudioAutoSelect = "ALTERNATE_AUDIO_AUTO_SELECT"
	// CmfcAudioTrackTypeAlternateAudioNotAutoSelect is a CmfcAudioTrackType enum value
	CmfcAudioTrackTypeAlternateAudioNotAutoSelect = "ALTERNATE_AUDIO_NOT_AUTO_SELECT"
)

// CmfcAudioTrackType_Values returns all elements of the CmfcAudioTrackType enum.
func CmfcAudioTrackType_Values() []string {
	return []string{
		CmfcAudioTrackTypeAlternateAudioAutoSelectDefault,
		CmfcAudioTrackTypeAlternateAudioAutoSelect,
		CmfcAudioTrackTypeAlternateAudioNotAutoSelect,
	}
}

// Specify whether to flag this audio track as descriptive video service (DVS)
// in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes
// the parameter CHARACTERISTICS="public.accessibility.describes-video" in the
// EXT-X-MEDIA entry for this track. When you keep the default choice, Don't
// flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can
// help with accessibility on Apple devices. For more information, see the Apple
// documentation.
const (
	// CmfcDescriptiveVideoServiceFlagDontFlag is a CmfcDescriptiveVideoServiceFlag enum value
	CmfcDescriptiveVideoServiceFlagDontFlag = "DONT_FLAG"
	// CmfcDescriptiveVideoServiceFlagFlag is a CmfcDescriptiveVideoServiceFlag enum value
	CmfcDescriptiveVideoServiceFlagFlag = "FLAG"
)

// CmfcDescriptiveVideoServiceFlag_Values returns all elements of the CmfcDescriptiveVideoServiceFlag enum.
func CmfcDescriptiveVideoServiceFlag_Values() []string {
	return []string{CmfcDescriptiveVideoServiceFlagDontFlag, CmfcDescriptiveVideoServiceFlagFlag}
}

// Choose Include (INCLUDE) to have MediaConvert generate an HLS child manifest
// that lists only the I-frames for this rendition, in addition to your regular
// manifest for this rendition. You might use this manifest as part of a workflow
// that creates preview functions for your video. MediaConvert adds both the
// I-frame only child manifest and the regular child manifest to the parent
// manifest. When you don't need the I-frame only child manifest, keep the default
// value Exclude (EXCLUDE).
const (
	// CmfcIFrameOnlyManifestInclude is a CmfcIFrameOnlyManifest enum value
	CmfcIFrameOnlyManifestInclude = "INCLUDE"
	// CmfcIFrameOnlyManifestExclude is a CmfcIFrameOnlyManifest enum value
	CmfcIFrameOnlyManifestExclude = "EXCLUDE"
)

// CmfcIFrameOnlyManifest_Values returns all elements of the CmfcIFrameOnlyManifest enum.
func CmfcIFrameOnlyManifest_Values() []string {
	return []string{CmfcIFrameOnlyManifestInclude, CmfcIFrameOnlyManifestExclude}
}

// Use this setting only when you specify SCTE-35 markers from ESAM. Choose
// INSERT to put SCTE-35 markers in this output at the insertion points that
// you specify in an ESAM XML document. Provide the document in the setting
// SCC XML (sccXml).
const (
	// CmfcScte35EsamInsert is a CmfcScte35Esam enum value
	CmfcScte35EsamInsert = "INSERT"
	// CmfcScte35EsamNone is a CmfcScte35Esam enum value
	CmfcScte35EsamNone = "NONE"
)

// CmfcScte35Esam_Values returns all elements of the CmfcScte35Esam enum.
func CmfcScte35Esam_Values() []string {
	return []string{CmfcScte35EsamInsert, CmfcScte35EsamNone}
}

// Ignore this setting unless you have SCTE-35 markers in your input video file.
// Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear
// in your input to also appear in this output. Choose None (NONE) if you don't
// want those SCTE-35 markers in this output.
const (
	// CmfcScte35SourcePassthrough is a CmfcScte35Source enum value
	CmfcScte35SourcePassthrough = "PASSTHROUGH"
	// CmfcScte35SourceNone is a CmfcScte35Source enum value
	CmfcScte35SourceNone = "NONE"
)

// CmfcScte35Source_Values returns all elements of the CmfcScte35Source enum.
func CmfcScte35Source_Values() []string {
	return []string{CmfcScte35SourcePassthrough, CmfcScte35SourceNone}
}

// Choose Insert (INSERT) for this setting to include color metadata in this
// output. Choose Ignore (IGNORE) to exclude color metadata from this output.
// If you don't specify a value, the service sets this to Insert by default.
const (
	// ColorMetadataIgnore is a ColorMetadata enum value
	ColorMetadataIgnore = "IGNORE"
	// ColorMetadataInsert is a ColorMetadata enum value
	ColorMetadataInsert = "INSERT"
)

// ColorMetadata_Values returns all elements of the ColorMetadata enum.
func ColorMetadata_Values() []string {
	return []string{ColorMetadataIgnore, ColorMetadataInsert}
}

// If your input video has accurate color space metadata, or if you don't know
// about color space, leave this set to the default value Follow (FOLLOW). The
// service will automatically detect your input color space. If your input video
// has metadata indicating the wrong color space, specify the accurate color
// space here. If your input video is HDR 10 and the SMPTE ST 2086 Mastering
// Display Color Volume static metadata isn't present in your video stream,
// or if that metadata is present but not accurate, choose Force HDR 10 (FORCE_HDR10)
// here and specify correct values in the input HDR 10 metadata (Hdr10Metadata)
// settings. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.
const (
	// ColorSpaceFollow is a ColorSpace enum value
	ColorSpaceFollow = "FOLLOW"
	// ColorSpaceRec601 is a ColorSpace enum value
	ColorSpaceRec601 = "REC_601"
	// ColorSpaceRec709 is a ColorSpace enum value
	ColorSpaceRec709 = "REC_709"
	// ColorSpaceHdr10 is a ColorSpace enum value
	ColorSpaceHdr10 = "HDR10"
	// ColorSpaceHlg2020 is a ColorSpace enum value
	ColorSpaceHlg2020 = "HLG_2020"
)

// ColorSpace_Values returns all elements of the ColorSpace enum.
func ColorSpace_Values() []string {
	return []string{ColorSpaceFollow, ColorSpaceRec601, ColorSpaceRec709, ColorSpaceHdr10, ColorSpaceHlg2020}
}

// Specify the color space you want for this output. The service supports conversion
// between HDR formats, between SDR formats, from SDR to HDR, and from HDR to
// SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted
// video has an HDR format, but visually appears the same as an unconverted
// output. HDR to SDR conversion uses Elemental tone mapping technology to approximate
// the outcome of manually regrading from HDR to SDR.
const (
	// ColorSpaceConversionNone is a ColorSpaceConversion enum value
	ColorSpaceConversionNone = "NONE"
	// ColorSpaceConversionForce601 is a ColorSpaceConversion enum value
	ColorSpaceConversionForce601 = "FORCE_601"
	// ColorSpaceConversionForce709 is a ColorSpaceConversion enum value
	ColorSpaceConversionForce709 = "FORCE_709"
	// ColorSpaceConversionForceHdr10 is a ColorSpaceConversion enum value
	ColorSpaceConversionForceHdr10 = "FORCE_HDR10"
	// ColorSpaceConversionForceHlg2020 is a ColorSpaceConversion enum value
	ColorSpaceConversionForceHlg2020 = "FORCE_HLG_2020"
)

// ColorSpaceConversion_Values returns all elements of the ColorSpaceConversion enum.
func ColorSpaceConversion_Values() []string {
	return []string{
		ColorSpaceConversionNone, ColorSpaceConversionForce601, ColorSpaceConversionForce709,
		ColorSpaceConversionForceHdr10, ColorSpaceConversionForceHlg2020,
	}
}

// There are two sources for color metadata, the input file and the job input
// settings Color space (ColorSpace) and HDR master display information settings(Hdr10Metadata).
// The Color space usage setting determines which takes precedence. Choose Force
// (FORCE) to use color metadata from the input job settings. If you don't specify
// values for those settings, the service defaults to using metadata from your
// input. FALLBACK - Choose Fallback (FALLBACK) to use color metadata from the
// source when it is present. If there's no color metadata in your input file,
// the service defaults to using values you specify in the input settings.
const (
	// ColorSpaceUsageForce is a ColorSpaceUsage enum value
	ColorSpaceUsageForce = "FORCE"
	// ColorSpaceUsageFallback is a ColorSpaceUsage enum value
	ColorSpaceUsageFallback = "FALLBACK"
)

// ColorSpaceUsage_Values returns all elements of the ColorSpaceUsage enum.
func ColorSpaceUsage_Values() []string {
	return []string{ColorSpaceUsageForce, ColorSpaceUsageFallback}
}

// The length of the term of your reserved queue pricing plan commitment.
const (
	// CommitmentOneYear is a Commitment enum value
	CommitmentOneYear = "ONE_YEAR"
)

// Commitment_Values returns all elements of the Commitment enum.
func Commitment_Values() []string {
	return []string{CommitmentOneYear}
}

// Container for this output. Some containers require a container settings object.
// If not specified, the default object will be created.
const (
	// ContainerTypeF4v is a ContainerType enum value
	ContainerTypeF4v = "F4V"
	// ContainerTypeIsmv is a ContainerType enum value
	ContainerTypeIsmv = "ISMV"
	// ContainerTypeM2ts is a ContainerType enum value
	ContainerTypeM2ts = "M2TS"
	// ContainerTypeM3u8 is a ContainerType enum value
	ContainerTypeM3u8 = "M3U8"
	// ContainerTypeCmfc is a ContainerType enum value
	ContainerTypeCmfc = "CMFC"
	// ContainerTypeMov is a ContainerType enum value
	ContainerTypeMov = "MOV"
	// ContainerTypeMp4 is a ContainerType enum value
	ContainerTypeMp4 = "MP4"
	// ContainerTypeMpd is a ContainerType enum value
	ContainerTypeMpd = "MPD"
	// ContainerTypeMxf is a ContainerType enum value
	ContainerTypeMxf = "MXF"
	// ContainerTypeWebm is a ContainerType enum value
	ContainerTypeWebm = "WEBM"
	// ContainerTypeRaw is a ContainerType enum value
	ContainerTypeRaw = "RAW"
)

// ContainerType_Values returns all elements of the ContainerType enum.
func ContainerType_Values() []string {
	return []string{
		ContainerTypeF4v, ContainerTypeIsmv, ContainerTypeM2ts, ContainerTypeM3u8,
		ContainerTypeCmfc, ContainerTypeMov, ContainerTypeMp4, ContainerTypeMpd,
		ContainerTypeMxf, ContainerTypeWebm, ContainerTypeRaw,
	}
}

// Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or
// Atmos) and your downstream workflow requires that your DASH manifest use
// the Dolby channel configuration tag, rather than the MPEG one. For example,
// you might need to use this to make dynamic ad insertion work. Specify which
// audio channel configuration scheme ID URI MediaConvert writes in your DASH
// manifest. Keep the default value, MPEG channel configuration (MPEG_CHANNEL_CONFIGURATION),
// to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration.
// Choose Dolby channel configuration (DOLBY_CHANNEL_CONFIGURATION) to have
// MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011.
const (
	// DashIsoGroupAudioChannelConfigSchemeIdUriMpegChannelConfiguration is a DashIsoGroupAudioChannelConfigSchemeIdUri enum value
	DashIsoGroupAudioChannelConfigSchemeIdUriMpegChannelConfiguration = "MPEG_CHANNEL_CONFIGURATION"
	// DashIsoGroupAudioChannelConfigSchemeIdUriDolbyChannelConfiguration is a DashIsoGroupAudioChannelConfigSchemeIdUri enum value
	DashIsoGroupAudioChannelConfigSchemeIdUriDolbyChannelConfiguration = "DOLBY_CHANNEL_CONFIGURATION"
)

// DashIsoGroupAudioChannelConfigSchemeIdUri_Values returns all elements of the DashIsoGroupAudioChannelConfigSchemeIdUri enum.
func DashIsoGroupAudioChannelConfigSchemeIdUri_Values() []string {
	return []string{
		DashIsoGroupAudioChannelConfigSchemeIdUriMpegChannelConfiguration,
		DashIsoGroupAudioChannelConfigSchemeIdUriDolbyChannelConfiguration,
	}
}

// Supports HbbTV specification as indicated
const (
	// DashIsoHbbtvComplianceHbbtv15 is a DashIsoHbbtvCompliance enum value
	DashIsoHbbtvComplianceHbbtv15 = "HBBTV_1_5"
	// DashIsoHbbtvComplianceNone is a DashIsoHbbtvCompliance enum value
	DashIsoHbbtvComplianceNone = "NONE"
)

// DashIsoHbbtvCompliance_Values returns all elements of the DashIsoHbbtvCompliance enum.
func DashIsoHbbtvCompliance_Values() []string {
	return []string{DashIsoHbbtvComplianceHbbtv15, DashIsoHbbtvComplianceNone}
}

// Specify whether MediaConvert generates images for trick play. Keep the default
// value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL)
// to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME)
// to generate tiled thumbnails and full-resolution images of single frames.
// MediaConvert adds an entry in the .mpd manifest for each set of images that
// you generate. A common application for these images is Roku trick mode. The
// thumbnails and full-frame images that MediaConvert creates with this feature
// are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md
const (
	// DashIsoImageBasedTrickPlayNone is a DashIsoImageBasedTrickPlay enum value
	DashIsoImageBasedTrickPlayNone = "NONE"
	// DashIsoImageBasedTrickPlayThumbnail is a DashIsoImageBasedTrickPlay enum value
	DashIsoImageBasedTrickPlayThumbnail = "THUMBNAIL"
	// DashIsoImageBasedTrickPlayThumbnailAndFullframe is a DashIsoImageBasedTrickPlay enum value
	DashIsoImageBasedTrickPlayThumbnailAndFullframe = "THUMBNAIL_AND_FULLFRAME"
)

// DashIsoImageBasedTrickPlay_Values returns all elements of the DashIsoImageBasedTrickPlay enum.
func DashIsoImageBasedTrickPlay_Values() []string {
	return []string{
		DashIsoImageBasedTrickPlayNone,
		DashIsoImageBasedTrickPlayThumbnail,
		DashIsoImageBasedTrickPlayThumbnailAndFullframe,
	}
}

// Specify whether your DASH profile is on-demand or main. When you choose Main
// profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011
// in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE),
// the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd.
// When you choose On-demand, you must also set the output group setting Segment
// control (SegmentControl) to Single file (SINGLE_FILE).
const ( // DashIsoMpdProfileMainProfile is a DashIsoMpdProfile enum value DashIsoMpdProfileMainProfile = "MAIN_PROFILE" // DashIsoMpdProfileOnDemandProfile is a DashIsoMpdProfile enum value DashIsoMpdProfileOnDemandProfile = "ON_DEMAND_PROFILE" ) // DashIsoMpdProfile_Values returns all elements of the DashIsoMpdProfile enum func DashIsoMpdProfile_Values() []string { return []string{ DashIsoMpdProfileMainProfile, DashIsoMpdProfileOnDemandProfile, } } // This setting can improve the compatibility of your output with video players // on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. // Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback // on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1). // If you choose Unencrypted SEI, for that output, the service will exclude // the access unit delimiter and will leave the SEI NAL units unencrypted. const ( // DashIsoPlaybackDeviceCompatibilityCencV1 is a DashIsoPlaybackDeviceCompatibility enum value DashIsoPlaybackDeviceCompatibilityCencV1 = "CENC_V1" // DashIsoPlaybackDeviceCompatibilityUnencryptedSei is a DashIsoPlaybackDeviceCompatibility enum value DashIsoPlaybackDeviceCompatibilityUnencryptedSei = "UNENCRYPTED_SEI" ) // DashIsoPlaybackDeviceCompatibility_Values returns all elements of the DashIsoPlaybackDeviceCompatibility enum func DashIsoPlaybackDeviceCompatibility_Values() []string { return []string{ DashIsoPlaybackDeviceCompatibilityCencV1, DashIsoPlaybackDeviceCompatibilityUnencryptedSei, } } // Use this setting only when your output video stream has B-frames, which causes // the initial presentation time stamp (PTS) to be offset from the initial decode // time stamp (DTS). Specify how MediaConvert handles PTS when writing time // stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) // when you want MediaConvert to use the initial PTS as the first time stamp // in the manifest. 
Choose Zero-based (ZERO_BASED) to have MediaConvert ignore // the initial PTS in the video stream and instead write the initial time stamp // as zero in the manifest. For outputs that don't have B-frames, the time stamps // in your DASH manifests start at zero regardless of your choice here. const ( // DashIsoPtsOffsetHandlingForBFramesZeroBased is a DashIsoPtsOffsetHandlingForBFrames enum value DashIsoPtsOffsetHandlingForBFramesZeroBased = "ZERO_BASED" // DashIsoPtsOffsetHandlingForBFramesMatchInitialPts is a DashIsoPtsOffsetHandlingForBFrames enum value DashIsoPtsOffsetHandlingForBFramesMatchInitialPts = "MATCH_INITIAL_PTS" ) // DashIsoPtsOffsetHandlingForBFrames_Values returns all elements of the DashIsoPtsOffsetHandlingForBFrames enum func DashIsoPtsOffsetHandlingForBFrames_Values() []string { return []string{ DashIsoPtsOffsetHandlingForBFramesZeroBased, DashIsoPtsOffsetHandlingForBFramesMatchInitialPts, } } // When set to SINGLE_FILE, a single output file is generated, which is internally // segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, // separate segment files will be created. const ( // DashIsoSegmentControlSingleFile is a DashIsoSegmentControl enum value DashIsoSegmentControlSingleFile = "SINGLE_FILE" // DashIsoSegmentControlSegmentedFiles is a DashIsoSegmentControl enum value DashIsoSegmentControlSegmentedFiles = "SEGMENTED_FILES" ) // DashIsoSegmentControl_Values returns all elements of the DashIsoSegmentControl enum func DashIsoSegmentControl_Values() []string { return []string{ DashIsoSegmentControlSingleFile, DashIsoSegmentControlSegmentedFiles, } } // When you enable Precise segment duration in manifests (writeSegmentTimelineInRepresentation), // your DASH manifest shows precise segment durations. The segment duration // information appears inside the SegmentTimeline element, inside SegmentTemplate // at the Representation level. 
When this feature isn't enabled, the segment // durations in your DASH manifest are approximate. The segment duration information // appears in the duration attribute of the SegmentTemplate element. const ( // DashIsoWriteSegmentTimelineInRepresentationEnabled is a DashIsoWriteSegmentTimelineInRepresentation enum value DashIsoWriteSegmentTimelineInRepresentationEnabled = "ENABLED" // DashIsoWriteSegmentTimelineInRepresentationDisabled is a DashIsoWriteSegmentTimelineInRepresentation enum value DashIsoWriteSegmentTimelineInRepresentationDisabled = "DISABLED" ) // DashIsoWriteSegmentTimelineInRepresentation_Values returns all elements of the DashIsoWriteSegmentTimelineInRepresentation enum func DashIsoWriteSegmentTimelineInRepresentation_Values() []string { return []string{ DashIsoWriteSegmentTimelineInRepresentationEnabled, DashIsoWriteSegmentTimelineInRepresentationDisabled, } } // Specify the encryption mode that you used to encrypt your input files. const ( // DecryptionModeAesCtr is a DecryptionMode enum value DecryptionModeAesCtr = "AES_CTR" // DecryptionModeAesCbc is a DecryptionMode enum value DecryptionModeAesCbc = "AES_CBC" // DecryptionModeAesGcm is a DecryptionMode enum value DecryptionModeAesGcm = "AES_GCM" ) // DecryptionMode_Values returns all elements of the DecryptionMode enum func DecryptionMode_Values() []string { return []string{ DecryptionModeAesCtr, DecryptionModeAesCbc, DecryptionModeAesGcm, } } // Only applies when you set Deinterlacer (DeinterlaceMode) to Deinterlace (DEINTERLACE) // or Adaptive (ADAPTIVE). Motion adaptive interpolate (INTERPOLATE) produces // sharper pictures, while blend (BLEND) produces smoother motion. Use (INTERPOLATE_TICKER) // OR (BLEND_TICKER) if your source file includes a ticker, such as a scrolling // headline at the bottom of the frame. 
const (
	// DeinterlaceAlgorithmInterpolate is a DeinterlaceAlgorithm enum value
	DeinterlaceAlgorithmInterpolate = "INTERPOLATE"

	// DeinterlaceAlgorithmInterpolateTicker is a DeinterlaceAlgorithm enum value
	DeinterlaceAlgorithmInterpolateTicker = "INTERPOLATE_TICKER"

	// DeinterlaceAlgorithmBlend is a DeinterlaceAlgorithm enum value
	DeinterlaceAlgorithmBlend = "BLEND"

	// DeinterlaceAlgorithmBlendTicker is a DeinterlaceAlgorithm enum value
	DeinterlaceAlgorithmBlendTicker = "BLEND_TICKER"
)

// DeinterlaceAlgorithm_Values returns all elements of the DeinterlaceAlgorithm enum
func DeinterlaceAlgorithm_Values() []string {
	return []string{
		DeinterlaceAlgorithmInterpolate,
		DeinterlaceAlgorithmInterpolateTicker,
		DeinterlaceAlgorithmBlend,
		DeinterlaceAlgorithmBlendTicker,
	}
}

// - When set to NORMAL (default), the deinterlacer does not convert frames
// that are tagged in metadata as progressive. It will only convert those that
// are tagged as some other type. - When set to FORCE_ALL_FRAMES, the deinterlacer
// converts every frame to progressive - even those that are already tagged
// as progressive. Turn Force mode on only if there is a good chance that the
// metadata has tagged frames as progressive when they are not progressive.
// Do not turn on otherwise; processing frames that are already progressive
// into progressive will probably result in lower quality video.
const (
	// DeinterlacerControlForceAllFrames is a DeinterlacerControl enum value
	DeinterlacerControlForceAllFrames = "FORCE_ALL_FRAMES"

	// DeinterlacerControlNormal is a DeinterlacerControl enum value
	DeinterlacerControlNormal = "NORMAL"
)

// DeinterlacerControl_Values returns all elements of the DeinterlacerControl enum
func DeinterlacerControl_Values() []string {
	return []string{
		DeinterlacerControlForceAllFrames,
		DeinterlacerControlNormal,
	}
}

// Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing.
// Default is Deinterlace. - Deinterlace converts interlaced to progressive.
// - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p.
// - Adaptive auto-detects and converts to progressive.
const (
	// DeinterlacerModeDeinterlace is a DeinterlacerMode enum value
	DeinterlacerModeDeinterlace = "DEINTERLACE"

	// DeinterlacerModeInverseTelecine is a DeinterlacerMode enum value
	DeinterlacerModeInverseTelecine = "INVERSE_TELECINE"

	// DeinterlacerModeAdaptive is a DeinterlacerMode enum value
	DeinterlacerModeAdaptive = "ADAPTIVE"
)

// DeinterlacerMode_Values returns all elements of the DeinterlacerMode enum
func DeinterlacerMode_Values() []string {
	return []string{
		DeinterlacerModeDeinterlace,
		DeinterlacerModeInverseTelecine,
		DeinterlacerModeAdaptive,
	}
}

// Optional field, defaults to DEFAULT. Specify DEFAULT for this operation to
// return your endpoints if any exist, or to create an endpoint for you and
// return it if one doesn't already exist. Specify GET_ONLY to return your endpoints
// if any exist, or an empty list if none exist.
const (
	// DescribeEndpointsModeDefault is a DescribeEndpointsMode enum value
	DescribeEndpointsModeDefault = "DEFAULT"

	// DescribeEndpointsModeGetOnly is a DescribeEndpointsMode enum value
	DescribeEndpointsModeGetOnly = "GET_ONLY"
)

// DescribeEndpointsMode_Values returns all elements of the DescribeEndpointsMode enum
func DescribeEndpointsMode_Values() []string {
	return []string{
		DescribeEndpointsModeDefault,
		DescribeEndpointsModeGetOnly,
	}
}

// Use Dolby Vision Mode to choose how the service will handle Dolby Vision
// MaxCLL and MaxFALL properties.
const (
	// DolbyVisionLevel6ModePassthrough is a DolbyVisionLevel6Mode enum value
	DolbyVisionLevel6ModePassthrough = "PASSTHROUGH"

	// DolbyVisionLevel6ModeRecalculate is a DolbyVisionLevel6Mode enum value
	DolbyVisionLevel6ModeRecalculate = "RECALCULATE"

	// DolbyVisionLevel6ModeSpecify is a DolbyVisionLevel6Mode enum value
	DolbyVisionLevel6ModeSpecify = "SPECIFY"
)

// DolbyVisionLevel6Mode_Values returns all elements of the DolbyVisionLevel6Mode enum
func DolbyVisionLevel6Mode_Values() []string {
	return []string{
		DolbyVisionLevel6ModePassthrough,
		DolbyVisionLevel6ModeRecalculate,
		DolbyVisionLevel6ModeSpecify,
	}
}

// In the current MediaConvert implementation, the Dolby Vision profile is always
// 5 (PROFILE_5). Therefore, all of your inputs must contain Dolby Vision frame
// interleaved data.
const (
	// DolbyVisionProfileProfile5 is a DolbyVisionProfile enum value
	DolbyVisionProfileProfile5 = "PROFILE_5"
)

// DolbyVisionProfile_Values returns all elements of the DolbyVisionProfile enum
func DolbyVisionProfile_Values() []string {
	return []string{
		DolbyVisionProfileProfile5,
	}
}

// Applies only to 29.97 fps outputs. When this feature is enabled, the service
// will use drop-frame timecode on outputs. If it is not possible to use drop-frame
// timecode, the system will fall back to non-drop-frame. This setting is enabled
// by default when Timecode insertion (TimecodeInsertion) is enabled.
const (
	// DropFrameTimecodeDisabled is a DropFrameTimecode enum value
	DropFrameTimecodeDisabled = "DISABLED"

	// DropFrameTimecodeEnabled is a DropFrameTimecode enum value
	DropFrameTimecodeEnabled = "ENABLED"
)

// DropFrameTimecode_Values returns all elements of the DropFrameTimecode enum
func DropFrameTimecode_Values() []string {
	return []string{
		DropFrameTimecodeDisabled,
		DropFrameTimecodeEnabled,
	}
}

// If no explicit x_position or y_position is provided, setting alignment to
// centered will place the captions at the bottom center of the output.
// Similarly,
// setting a left alignment will align captions to the bottom left of the output.
// If x and y positions are given in conjunction with the alignment parameter,
// the font will be justified (either left or centered) relative to those coordinates.
// This option is not valid for source captions that are STL, 608/embedded or
// teletext. These source settings are already pre-defined by the caption stream.
// All burn-in and DVB-Sub font settings must match.
const (
	// DvbSubtitleAlignmentCentered is a DvbSubtitleAlignment enum value
	DvbSubtitleAlignmentCentered = "CENTERED"

	// DvbSubtitleAlignmentLeft is a DvbSubtitleAlignment enum value
	DvbSubtitleAlignmentLeft = "LEFT"
)

// DvbSubtitleAlignment_Values returns all elements of the DvbSubtitleAlignment enum
func DvbSubtitleAlignment_Values() []string {
	return []string{
		DvbSubtitleAlignmentCentered,
		DvbSubtitleAlignmentLeft,
	}
}

// Specifies the color of the rectangle behind the captions. All burn-in and
// DVB-Sub font settings must match.
const (
	// DvbSubtitleBackgroundColorNone is a DvbSubtitleBackgroundColor enum value
	DvbSubtitleBackgroundColorNone = "NONE"

	// DvbSubtitleBackgroundColorBlack is a DvbSubtitleBackgroundColor enum value
	DvbSubtitleBackgroundColorBlack = "BLACK"

	// DvbSubtitleBackgroundColorWhite is a DvbSubtitleBackgroundColor enum value
	DvbSubtitleBackgroundColorWhite = "WHITE"
)

// DvbSubtitleBackgroundColor_Values returns all elements of the DvbSubtitleBackgroundColor enum
func DvbSubtitleBackgroundColor_Values() []string {
	return []string{
		DvbSubtitleBackgroundColorNone,
		DvbSubtitleBackgroundColorBlack,
		DvbSubtitleBackgroundColorWhite,
	}
}

// Specifies the color of the burned-in captions. This option is not valid for
// source captions that are STL, 608/embedded or teletext. These source settings
// are already pre-defined by the caption stream. All burn-in and DVB-Sub font
// settings must match.
const (
	// DvbSubtitleFontColorWhite is a DvbSubtitleFontColor enum value
	DvbSubtitleFontColorWhite = "WHITE"

	// DvbSubtitleFontColorBlack is a DvbSubtitleFontColor enum value
	DvbSubtitleFontColorBlack = "BLACK"

	// DvbSubtitleFontColorYellow is a DvbSubtitleFontColor enum value
	DvbSubtitleFontColorYellow = "YELLOW"

	// DvbSubtitleFontColorRed is a DvbSubtitleFontColor enum value
	DvbSubtitleFontColorRed = "RED"

	// DvbSubtitleFontColorGreen is a DvbSubtitleFontColor enum value
	DvbSubtitleFontColorGreen = "GREEN"

	// DvbSubtitleFontColorBlue is a DvbSubtitleFontColor enum value
	DvbSubtitleFontColorBlue = "BLUE"
)

// DvbSubtitleFontColor_Values returns all elements of the DvbSubtitleFontColor enum
func DvbSubtitleFontColor_Values() []string {
	return []string{
		DvbSubtitleFontColorWhite,
		DvbSubtitleFontColorBlack,
		DvbSubtitleFontColorYellow,
		DvbSubtitleFontColorRed,
		DvbSubtitleFontColorGreen,
		DvbSubtitleFontColorBlue,
	}
}

// Specifies font outline color. This option is not valid for source captions
// that are either 608/embedded or teletext. These source settings are already
// pre-defined by the caption stream. All burn-in and DVB-Sub font settings
// must match.
const (
	// DvbSubtitleOutlineColorBlack is a DvbSubtitleOutlineColor enum value
	DvbSubtitleOutlineColorBlack = "BLACK"

	// DvbSubtitleOutlineColorWhite is a DvbSubtitleOutlineColor enum value
	DvbSubtitleOutlineColorWhite = "WHITE"

	// DvbSubtitleOutlineColorYellow is a DvbSubtitleOutlineColor enum value
	DvbSubtitleOutlineColorYellow = "YELLOW"

	// DvbSubtitleOutlineColorRed is a DvbSubtitleOutlineColor enum value
	DvbSubtitleOutlineColorRed = "RED"

	// DvbSubtitleOutlineColorGreen is a DvbSubtitleOutlineColor enum value
	DvbSubtitleOutlineColorGreen = "GREEN"

	// DvbSubtitleOutlineColorBlue is a DvbSubtitleOutlineColor enum value
	DvbSubtitleOutlineColorBlue = "BLUE"
)

// DvbSubtitleOutlineColor_Values returns all elements of the DvbSubtitleOutlineColor enum
func DvbSubtitleOutlineColor_Values() []string {
	return []string{
		DvbSubtitleOutlineColorBlack,
		DvbSubtitleOutlineColorWhite,
		DvbSubtitleOutlineColorYellow,
		DvbSubtitleOutlineColorRed,
		DvbSubtitleOutlineColorGreen,
		DvbSubtitleOutlineColorBlue,
	}
}

// Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub
// font settings must match.
const (
	// DvbSubtitleShadowColorNone is a DvbSubtitleShadowColor enum value
	DvbSubtitleShadowColorNone = "NONE"

	// DvbSubtitleShadowColorBlack is a DvbSubtitleShadowColor enum value
	DvbSubtitleShadowColorBlack = "BLACK"

	// DvbSubtitleShadowColorWhite is a DvbSubtitleShadowColor enum value
	DvbSubtitleShadowColorWhite = "WHITE"
)

// DvbSubtitleShadowColor_Values returns all elements of the DvbSubtitleShadowColor enum
func DvbSubtitleShadowColor_Values() []string {
	return []string{
		DvbSubtitleShadowColorNone,
		DvbSubtitleShadowColorBlack,
		DvbSubtitleShadowColorWhite,
	}
}

// Only applies to jobs with input captions in Teletext or STL formats. Specify
// whether the spacing between letters in your captions is set by the captions
// grid or varies depending on letter width. Choose fixed grid to conform to
// the spacing specified in the captions file more accurately.
// Choose proportional
// to make the text easier to read if the captions are closed caption.
const (
	// DvbSubtitleTeletextSpacingFixedGrid is a DvbSubtitleTeletextSpacing enum value
	DvbSubtitleTeletextSpacingFixedGrid = "FIXED_GRID"

	// DvbSubtitleTeletextSpacingProportional is a DvbSubtitleTeletextSpacing enum value
	DvbSubtitleTeletextSpacingProportional = "PROPORTIONAL"
)

// DvbSubtitleTeletextSpacing_Values returns all elements of the DvbSubtitleTeletextSpacing enum
func DvbSubtitleTeletextSpacing_Values() []string {
	return []string{
		DvbSubtitleTeletextSpacingFixedGrid,
		DvbSubtitleTeletextSpacingProportional,
	}
}

// Specify whether your DVB subtitles are standard or for hearing impaired.
// Choose hearing impaired if your subtitles include audio descriptions and
// dialogue. Choose standard if your subtitles include only dialogue.
const (
	// DvbSubtitlingTypeHearingImpaired is a DvbSubtitlingType enum value
	DvbSubtitlingTypeHearingImpaired = "HEARING_IMPAIRED"

	// DvbSubtitlingTypeStandard is a DvbSubtitlingType enum value
	DvbSubtitlingTypeStandard = "STANDARD"
)

// DvbSubtitlingType_Values returns all elements of the DvbSubtitlingType enum
func DvbSubtitlingType_Values() []string {
	return []string{
		DvbSubtitlingTypeHearingImpaired,
		DvbSubtitlingTypeStandard,
	}
}

// Specify how MediaConvert handles the display definition segment (DDS). Keep
// the default, None (NONE), to exclude the DDS from this set of captions. Choose
// No display window (NO_DISPLAY_WINDOW) to have MediaConvert include the DDS
// but not include display window data. In this case, MediaConvert writes that
// information to the page composition segment (PCS) instead. Choose Specify
// (SPECIFIED) to have MediaConvert set up the display window based on the values
// that you specify in related job settings. For video resolutions that are
// 576 pixels or smaller in height, MediaConvert doesn't include the DDS, regardless
// of the value you choose for DDS handling (ddsHandling).
// In this case, it
// doesn't write the display window data to the PCS either. Related settings:
// Use the settings DDS x-coordinate (ddsXCoordinate) and DDS y-coordinate (ddsYCoordinate)
// to specify the offset between the top left corner of the display window and
// the top left corner of the video frame. All burn-in and DVB-Sub font settings
// must match.
const (
	// DvbddsHandlingNone is a DvbddsHandling enum value
	DvbddsHandlingNone = "NONE"

	// DvbddsHandlingSpecified is a DvbddsHandling enum value
	DvbddsHandlingSpecified = "SPECIFIED"

	// DvbddsHandlingNoDisplayWindow is a DvbddsHandling enum value
	DvbddsHandlingNoDisplayWindow = "NO_DISPLAY_WINDOW"
)

// DvbddsHandling_Values returns all elements of the DvbddsHandling enum
func DvbddsHandling_Values() []string {
	return []string{
		DvbddsHandlingNone,
		DvbddsHandlingSpecified,
		DvbddsHandlingNoDisplayWindow,
	}
}

// Specify the bitstream mode for the E-AC-3 stream that the encoder emits.
// For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex
// E).
const (
	// Eac3AtmosBitstreamModeCompleteMain is a Eac3AtmosBitstreamMode enum value
	Eac3AtmosBitstreamModeCompleteMain = "COMPLETE_MAIN"
)

// Eac3AtmosBitstreamMode_Values returns all elements of the Eac3AtmosBitstreamMode enum
func Eac3AtmosBitstreamMode_Values() []string {
	return []string{
		Eac3AtmosBitstreamModeCompleteMain,
	}
}

// The coding mode for Dolby Digital Plus JOC (Atmos).
const (
	// Eac3AtmosCodingModeCodingModeAuto is a Eac3AtmosCodingMode enum value
	Eac3AtmosCodingModeCodingModeAuto = "CODING_MODE_AUTO"

	// Eac3AtmosCodingModeCodingMode514 is a Eac3AtmosCodingMode enum value
	Eac3AtmosCodingModeCodingMode514 = "CODING_MODE_5_1_4"

	// Eac3AtmosCodingModeCodingMode714 is a Eac3AtmosCodingMode enum value
	Eac3AtmosCodingModeCodingMode714 = "CODING_MODE_7_1_4"

	// Eac3AtmosCodingModeCodingMode916 is a Eac3AtmosCodingMode enum value
	Eac3AtmosCodingModeCodingMode916 = "CODING_MODE_9_1_6"
)

// Eac3AtmosCodingMode_Values returns all elements of the Eac3AtmosCodingMode enum
func Eac3AtmosCodingMode_Values() []string {
	return []string{
		Eac3AtmosCodingModeCodingModeAuto,
		Eac3AtmosCodingModeCodingMode514,
		Eac3AtmosCodingModeCodingMode714,
		Eac3AtmosCodingModeCodingMode916,
	}
}

// Enable Dolby Dialogue Intelligence to adjust loudness based on dialogue analysis.
const (
	// Eac3AtmosDialogueIntelligenceEnabled is a Eac3AtmosDialogueIntelligence enum value
	Eac3AtmosDialogueIntelligenceEnabled = "ENABLED"

	// Eac3AtmosDialogueIntelligenceDisabled is a Eac3AtmosDialogueIntelligence enum value
	Eac3AtmosDialogueIntelligenceDisabled = "DISABLED"
)

// Eac3AtmosDialogueIntelligence_Values returns all elements of the Eac3AtmosDialogueIntelligence enum
func Eac3AtmosDialogueIntelligence_Values() []string {
	return []string{
		Eac3AtmosDialogueIntelligenceEnabled,
		Eac3AtmosDialogueIntelligenceDisabled,
	}
}

// Specify whether MediaConvert should use any downmix metadata from your input
// file. Keep the default value, Custom (SPECIFIED) to provide downmix values
// in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use
// the metadata from your input.
// Related settings--Use these settings to specify
// your downmix values: Left only/Right only surround (LoRoSurroundMixLevel),
// Left total/Right total surround (LtRtSurroundMixLevel), Left total/Right
// total center (LtRtCenterMixLevel), Left only/Right only center (LoRoCenterMixLevel),
// and Stereo downmix (StereoDownmix). When you keep Custom (SPECIFIED) for
// Downmix control (DownmixControl) and you don't specify values for the related
// settings, MediaConvert uses default values for those settings.
const (
	// Eac3AtmosDownmixControlSpecified is a Eac3AtmosDownmixControl enum value
	Eac3AtmosDownmixControlSpecified = "SPECIFIED"

	// Eac3AtmosDownmixControlInitializeFromSource is a Eac3AtmosDownmixControl enum value
	Eac3AtmosDownmixControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"
)

// Eac3AtmosDownmixControl_Values returns all elements of the Eac3AtmosDownmixControl enum
func Eac3AtmosDownmixControl_Values() []string {
	return []string{
		Eac3AtmosDownmixControlSpecified,
		Eac3AtmosDownmixControlInitializeFromSource,
	}
}

// Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses
// when encoding the metadata in the Dolby stream for the line operating mode.
// Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting:
// To have MediaConvert use the value you specify here, keep the default value,
// Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl).
// Otherwise, MediaConvert ignores Dynamic range compression line (DynamicRangeCompressionLine).
// For information about the Dolby DRC operating modes and profiles, see the
// Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
const (
	// Eac3AtmosDynamicRangeCompressionLineNone is a Eac3AtmosDynamicRangeCompressionLine enum value
	Eac3AtmosDynamicRangeCompressionLineNone = "NONE"

	// Eac3AtmosDynamicRangeCompressionLineFilmStandard is a Eac3AtmosDynamicRangeCompressionLine enum value
	Eac3AtmosDynamicRangeCompressionLineFilmStandard = "FILM_STANDARD"

	// Eac3AtmosDynamicRangeCompressionLineFilmLight is a Eac3AtmosDynamicRangeCompressionLine enum value
	Eac3AtmosDynamicRangeCompressionLineFilmLight = "FILM_LIGHT"

	// Eac3AtmosDynamicRangeCompressionLineMusicStandard is a Eac3AtmosDynamicRangeCompressionLine enum value
	Eac3AtmosDynamicRangeCompressionLineMusicStandard = "MUSIC_STANDARD"

	// Eac3AtmosDynamicRangeCompressionLineMusicLight is a Eac3AtmosDynamicRangeCompressionLine enum value
	Eac3AtmosDynamicRangeCompressionLineMusicLight = "MUSIC_LIGHT"

	// Eac3AtmosDynamicRangeCompressionLineSpeech is a Eac3AtmosDynamicRangeCompressionLine enum value
	Eac3AtmosDynamicRangeCompressionLineSpeech = "SPEECH"
)

// Eac3AtmosDynamicRangeCompressionLine_Values returns all elements of the Eac3AtmosDynamicRangeCompressionLine enum
func Eac3AtmosDynamicRangeCompressionLine_Values() []string {
	return []string{
		Eac3AtmosDynamicRangeCompressionLineNone,
		Eac3AtmosDynamicRangeCompressionLineFilmStandard,
		Eac3AtmosDynamicRangeCompressionLineFilmLight,
		Eac3AtmosDynamicRangeCompressionLineMusicStandard,
		Eac3AtmosDynamicRangeCompressionLineMusicLight,
		Eac3AtmosDynamicRangeCompressionLineSpeech,
	}
}

// Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses
// when encoding the metadata in the Dolby stream for the RF operating mode.
// Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting:
// To have MediaConvert use the value you specify here, keep the default value,
// Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl).
// Otherwise, MediaConvert ignores Dynamic range compression RF (DynamicRangeCompressionRf).
// For information about the Dolby DRC operating modes and profiles, see the
// Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
const (
	// Eac3AtmosDynamicRangeCompressionRfNone is a Eac3AtmosDynamicRangeCompressionRf enum value
	Eac3AtmosDynamicRangeCompressionRfNone = "NONE"

	// Eac3AtmosDynamicRangeCompressionRfFilmStandard is a Eac3AtmosDynamicRangeCompressionRf enum value
	Eac3AtmosDynamicRangeCompressionRfFilmStandard = "FILM_STANDARD"

	// Eac3AtmosDynamicRangeCompressionRfFilmLight is a Eac3AtmosDynamicRangeCompressionRf enum value
	Eac3AtmosDynamicRangeCompressionRfFilmLight = "FILM_LIGHT"

	// Eac3AtmosDynamicRangeCompressionRfMusicStandard is a Eac3AtmosDynamicRangeCompressionRf enum value
	Eac3AtmosDynamicRangeCompressionRfMusicStandard = "MUSIC_STANDARD"

	// Eac3AtmosDynamicRangeCompressionRfMusicLight is a Eac3AtmosDynamicRangeCompressionRf enum value
	Eac3AtmosDynamicRangeCompressionRfMusicLight = "MUSIC_LIGHT"

	// Eac3AtmosDynamicRangeCompressionRfSpeech is a Eac3AtmosDynamicRangeCompressionRf enum value
	Eac3AtmosDynamicRangeCompressionRfSpeech = "SPEECH"
)

// Eac3AtmosDynamicRangeCompressionRf_Values returns all elements of the Eac3AtmosDynamicRangeCompressionRf enum
func Eac3AtmosDynamicRangeCompressionRf_Values() []string {
	return []string{
		Eac3AtmosDynamicRangeCompressionRfNone,
		Eac3AtmosDynamicRangeCompressionRfFilmStandard,
		Eac3AtmosDynamicRangeCompressionRfFilmLight,
		Eac3AtmosDynamicRangeCompressionRfMusicStandard,
		Eac3AtmosDynamicRangeCompressionRfMusicLight,
		Eac3AtmosDynamicRangeCompressionRfSpeech,
	}
}

// Specify whether MediaConvert should use any dynamic range control metadata
// from your input file. Keep the default value, Custom (SPECIFIED), to provide
// dynamic range control values in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE)
// to use the metadata from your input.
// Related settings--Use these settings
// to specify your dynamic range control values: Dynamic range compression line
// (DynamicRangeCompressionLine) and Dynamic range compression RF (DynamicRangeCompressionRf).
// When you keep the value Custom (SPECIFIED) for Dynamic range control (DynamicRangeControl)
// and you don't specify values for the related settings, MediaConvert uses
// default values for those settings.
const (
	// Eac3AtmosDynamicRangeControlSpecified is a Eac3AtmosDynamicRangeControl enum value
	Eac3AtmosDynamicRangeControlSpecified = "SPECIFIED"

	// Eac3AtmosDynamicRangeControlInitializeFromSource is a Eac3AtmosDynamicRangeControl enum value
	Eac3AtmosDynamicRangeControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"
)

// Eac3AtmosDynamicRangeControl_Values returns all elements of the Eac3AtmosDynamicRangeControl enum
func Eac3AtmosDynamicRangeControl_Values() []string {
	return []string{
		Eac3AtmosDynamicRangeControlSpecified,
		Eac3AtmosDynamicRangeControlInitializeFromSource,
	}
}

// Choose how the service meters the loudness of your audio.
const (
	// Eac3AtmosMeteringModeLeqA is a Eac3AtmosMeteringMode enum value
	Eac3AtmosMeteringModeLeqA = "LEQ_A"

	// Eac3AtmosMeteringModeItuBs17701 is a Eac3AtmosMeteringMode enum value
	Eac3AtmosMeteringModeItuBs17701 = "ITU_BS_1770_1"

	// Eac3AtmosMeteringModeItuBs17702 is a Eac3AtmosMeteringMode enum value
	Eac3AtmosMeteringModeItuBs17702 = "ITU_BS_1770_2"

	// Eac3AtmosMeteringModeItuBs17703 is a Eac3AtmosMeteringMode enum value
	Eac3AtmosMeteringModeItuBs17703 = "ITU_BS_1770_3"

	// Eac3AtmosMeteringModeItuBs17704 is a Eac3AtmosMeteringMode enum value
	Eac3AtmosMeteringModeItuBs17704 = "ITU_BS_1770_4"
)

// Eac3AtmosMeteringMode_Values returns all elements of the Eac3AtmosMeteringMode enum
func Eac3AtmosMeteringMode_Values() []string {
	return []string{
		Eac3AtmosMeteringModeLeqA,
		Eac3AtmosMeteringModeItuBs17701,
		Eac3AtmosMeteringModeItuBs17702,
		Eac3AtmosMeteringModeItuBs17703,
		Eac3AtmosMeteringModeItuBs17704,
	}
}

// Choose how the service does stereo downmixing. Default value: Not indicated
// (ATMOS_STORAGE_DDP_DMIXMOD_NOT_INDICATED) Related setting: To have MediaConvert
// use this value, keep the default value, Custom (SPECIFIED) for the setting
// Downmix control (DownmixControl). Otherwise, MediaConvert ignores Stereo
// downmix (StereoDownmix).
const (
	// Eac3AtmosStereoDownmixNotIndicated is a Eac3AtmosStereoDownmix enum value
	Eac3AtmosStereoDownmixNotIndicated = "NOT_INDICATED"

	// Eac3AtmosStereoDownmixStereo is a Eac3AtmosStereoDownmix enum value
	Eac3AtmosStereoDownmixStereo = "STEREO"

	// Eac3AtmosStereoDownmixSurround is a Eac3AtmosStereoDownmix enum value
	Eac3AtmosStereoDownmixSurround = "SURROUND"

	// Eac3AtmosStereoDownmixDpl2 is a Eac3AtmosStereoDownmix enum value
	Eac3AtmosStereoDownmixDpl2 = "DPL2"
)

// Eac3AtmosStereoDownmix_Values returns all elements of the Eac3AtmosStereoDownmix enum
func Eac3AtmosStereoDownmix_Values() []string {
	return []string{
		Eac3AtmosStereoDownmixNotIndicated,
		Eac3AtmosStereoDownmixStereo,
		Eac3AtmosStereoDownmixSurround,
		Eac3AtmosStereoDownmixDpl2,
	}
}

// Specify whether your input audio has an additional center rear surround channel
// matrix encoded into your left and right surround channels.
const (
	// Eac3AtmosSurroundExModeNotIndicated is a Eac3AtmosSurroundExMode enum value
	Eac3AtmosSurroundExModeNotIndicated = "NOT_INDICATED"

	// Eac3AtmosSurroundExModeEnabled is a Eac3AtmosSurroundExMode enum value
	Eac3AtmosSurroundExModeEnabled = "ENABLED"

	// Eac3AtmosSurroundExModeDisabled is a Eac3AtmosSurroundExMode enum value
	Eac3AtmosSurroundExModeDisabled = "DISABLED"
)

// Eac3AtmosSurroundExMode_Values returns all elements of the Eac3AtmosSurroundExMode enum
func Eac3AtmosSurroundExMode_Values() []string {
	return []string{
		Eac3AtmosSurroundExModeNotIndicated,
		Eac3AtmosSurroundExModeEnabled,
		Eac3AtmosSurroundExModeDisabled,
	}
}

// If set to ATTENUATE_3_DB, applies a 3 dB attenuation to the surround channels.
// Only used for 3/2 coding mode.
const (
	// Eac3AttenuationControlAttenuate3Db is a Eac3AttenuationControl enum value
	Eac3AttenuationControlAttenuate3Db = "ATTENUATE_3_DB"

	// Eac3AttenuationControlNone is a Eac3AttenuationControl enum value
	Eac3AttenuationControlNone = "NONE"
)

// Eac3AttenuationControl_Values returns all elements of the Eac3AttenuationControl enum
func Eac3AttenuationControl_Values() []string {
	return []string{
		Eac3AttenuationControlAttenuate3Db,
		Eac3AttenuationControlNone,
	}
}

// Specify the bitstream mode for the E-AC-3 stream that the encoder emits.
// For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex
// E).
const (
	// Eac3BitstreamModeCompleteMain is a Eac3BitstreamMode enum value
	Eac3BitstreamModeCompleteMain = "COMPLETE_MAIN"

	// Eac3BitstreamModeCommentary is a Eac3BitstreamMode enum value
	Eac3BitstreamModeCommentary = "COMMENTARY"

	// Eac3BitstreamModeEmergency is a Eac3BitstreamMode enum value
	Eac3BitstreamModeEmergency = "EMERGENCY"

	// Eac3BitstreamModeHearingImpaired is a Eac3BitstreamMode enum value
	Eac3BitstreamModeHearingImpaired = "HEARING_IMPAIRED"

	// Eac3BitstreamModeVisuallyImpaired is a Eac3BitstreamMode enum value
	Eac3BitstreamModeVisuallyImpaired = "VISUALLY_IMPAIRED"
)

// Eac3BitstreamMode_Values returns all elements of the Eac3BitstreamMode enum
func Eac3BitstreamMode_Values() []string {
	return []string{
		Eac3BitstreamModeCompleteMain,
		Eac3BitstreamModeCommentary,
		Eac3BitstreamModeEmergency,
		Eac3BitstreamModeHearingImpaired,
		Eac3BitstreamModeVisuallyImpaired,
	}
}

// Dolby Digital Plus coding mode. Determines number of channels.
const (
	// Eac3CodingModeCodingMode10 is a Eac3CodingMode enum value
	Eac3CodingModeCodingMode10 = "CODING_MODE_1_0"

	// Eac3CodingModeCodingMode20 is a Eac3CodingMode enum value
	Eac3CodingModeCodingMode20 = "CODING_MODE_2_0"

	// Eac3CodingModeCodingMode32 is a Eac3CodingMode enum value
	Eac3CodingModeCodingMode32 = "CODING_MODE_3_2"
)

// Eac3CodingMode_Values returns all elements of the Eac3CodingMode enum
func Eac3CodingMode_Values() []string {
	return []string{
		Eac3CodingModeCodingMode10,
		Eac3CodingModeCodingMode20,
		Eac3CodingModeCodingMode32,
	}
}

// Activates a DC highpass filter for all input channels.
const (
	// Eac3DcFilterEnabled is a Eac3DcFilter enum value
	Eac3DcFilterEnabled = "ENABLED"

	// Eac3DcFilterDisabled is a Eac3DcFilter enum value
	Eac3DcFilterDisabled = "DISABLED"
)

// Eac3DcFilter_Values returns all elements of the Eac3DcFilter enum
func Eac3DcFilter_Values() []string {
	return []string{
		Eac3DcFilterEnabled,
		Eac3DcFilterDisabled,
	}
}

// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
// uses when encoding the metadata in the Dolby Digital stream for the line
// operating mode. Related setting: When you use this setting, MediaConvert
// ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile).
// For information about the Dolby Digital DRC operating modes and profiles,
// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
const (
	// Eac3DynamicRangeCompressionLineNone is a Eac3DynamicRangeCompressionLine enum value
	Eac3DynamicRangeCompressionLineNone = "NONE"

	// Eac3DynamicRangeCompressionLineFilmStandard is a Eac3DynamicRangeCompressionLine enum value
	Eac3DynamicRangeCompressionLineFilmStandard = "FILM_STANDARD"

	// Eac3DynamicRangeCompressionLineFilmLight is a Eac3DynamicRangeCompressionLine enum value
	Eac3DynamicRangeCompressionLineFilmLight = "FILM_LIGHT"

	// Eac3DynamicRangeCompressionLineMusicStandard is a Eac3DynamicRangeCompressionLine enum value
	Eac3DynamicRangeCompressionLineMusicStandard = "MUSIC_STANDARD"

	// Eac3DynamicRangeCompressionLineMusicLight is a Eac3DynamicRangeCompressionLine enum value
	Eac3DynamicRangeCompressionLineMusicLight = "MUSIC_LIGHT"

	// Eac3DynamicRangeCompressionLineSpeech is a Eac3DynamicRangeCompressionLine enum value
	Eac3DynamicRangeCompressionLineSpeech = "SPEECH"
)

// Eac3DynamicRangeCompressionLine_Values returns all elements of the Eac3DynamicRangeCompressionLine enum
func Eac3DynamicRangeCompressionLine_Values() []string {
	return []string{
		Eac3DynamicRangeCompressionLineNone,
		Eac3DynamicRangeCompressionLineFilmStandard,
		Eac3DynamicRangeCompressionLineFilmLight,
		Eac3DynamicRangeCompressionLineMusicStandard,
		Eac3DynamicRangeCompressionLineMusicLight,
		Eac3DynamicRangeCompressionLineSpeech,
	}
}

// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
// uses when encoding the metadata in the Dolby Digital stream for the RF operating
// mode. Related setting: When you use this setting, MediaConvert ignores any
// value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile).
// For information about the Dolby Digital DRC operating modes and profiles,
// see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
const (
	// Eac3DynamicRangeCompressionRfNone is a Eac3DynamicRangeCompressionRf enum value
	Eac3DynamicRangeCompressionRfNone = "NONE"

	// Eac3DynamicRangeCompressionRfFilmStandard is a Eac3DynamicRangeCompressionRf enum value
	Eac3DynamicRangeCompressionRfFilmStandard = "FILM_STANDARD"

	// Eac3DynamicRangeCompressionRfFilmLight is a Eac3DynamicRangeCompressionRf enum value
	Eac3DynamicRangeCompressionRfFilmLight = "FILM_LIGHT"

	// Eac3DynamicRangeCompressionRfMusicStandard is a Eac3DynamicRangeCompressionRf enum value
	Eac3DynamicRangeCompressionRfMusicStandard = "MUSIC_STANDARD"

	// Eac3DynamicRangeCompressionRfMusicLight is a Eac3DynamicRangeCompressionRf enum value
	Eac3DynamicRangeCompressionRfMusicLight = "MUSIC_LIGHT"

	// Eac3DynamicRangeCompressionRfSpeech is a Eac3DynamicRangeCompressionRf enum value
	Eac3DynamicRangeCompressionRfSpeech = "SPEECH"
)

// Eac3DynamicRangeCompressionRf_Values returns all elements of the Eac3DynamicRangeCompressionRf enum
func Eac3DynamicRangeCompressionRf_Values() []string {
	return []string{
		Eac3DynamicRangeCompressionRfNone,
		Eac3DynamicRangeCompressionRfFilmStandard,
		Eac3DynamicRangeCompressionRfFilmLight,
		Eac3DynamicRangeCompressionRfMusicStandard,
		Eac3DynamicRangeCompressionRfMusicLight,
		Eac3DynamicRangeCompressionRfSpeech,
	}
}

// When encoding 3/2 audio, controls whether the LFE channel is enabled
const (
	// Eac3LfeControlLfe is a Eac3LfeControl enum value
	Eac3LfeControlLfe = "LFE"

	// Eac3LfeControlNoLfe is a Eac3LfeControl enum value
	Eac3LfeControlNoLfe = "NO_LFE"
)

// Eac3LfeControl_Values returns all elements of the Eac3LfeControl enum
func Eac3LfeControl_Values() []string {
	return []string{
		Eac3LfeControlLfe,
		Eac3LfeControlNoLfe,
	}
}

// Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only
// valid with 3_2_LFE coding mode.
const (
	// Eac3LfeFilterEnabled is a Eac3LfeFilter enum value
	Eac3LfeFilterEnabled = "ENABLED"

	// Eac3LfeFilterDisabled is a Eac3LfeFilter enum value
	Eac3LfeFilterDisabled = "DISABLED"
)

// Eac3LfeFilter_Values returns all elements of the Eac3LfeFilter enum
func Eac3LfeFilter_Values() []string {
	values := []string{
		Eac3LfeFilterEnabled,
		Eac3LfeFilterDisabled,
	}
	return values
}

// When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+,
// or DolbyE decoder that supplied this audio data. If audio was not supplied
// from one of these streams, then the static metadata settings will be used.
const (
	// Eac3MetadataControlFollowInput is a Eac3MetadataControl enum value
	Eac3MetadataControlFollowInput = "FOLLOW_INPUT"

	// Eac3MetadataControlUseConfigured is a Eac3MetadataControl enum value
	Eac3MetadataControlUseConfigured = "USE_CONFIGURED"
)

// Eac3MetadataControl_Values returns all elements of the Eac3MetadataControl enum
func Eac3MetadataControl_Values() []string {
	values := []string{
		Eac3MetadataControlFollowInput,
		Eac3MetadataControlUseConfigured,
	}
	return values
}

// When set to WHEN_POSSIBLE, input DD+ audio will be passed through if it is
// present on the input. this detection is dynamic over the life of the transcode.
// Inputs that alternate between DD+ and non-DD+ content will have a consistent
// DD+ output as the system alternates between passthrough and encoding.
const (
	// Eac3PassthroughControlWhenPossible is a Eac3PassthroughControl enum value
	Eac3PassthroughControlWhenPossible = "WHEN_POSSIBLE"

	// Eac3PassthroughControlNoPassthrough is a Eac3PassthroughControl enum value
	Eac3PassthroughControlNoPassthrough = "NO_PASSTHROUGH"
)

// Eac3PassthroughControl_Values returns all elements of the Eac3PassthroughControl enum
func Eac3PassthroughControl_Values() []string {
	values := []string{
		Eac3PassthroughControlWhenPossible,
		Eac3PassthroughControlNoPassthrough,
	}
	return values
}

// Controls the amount of phase-shift applied to the surround channels. Only
// used for 3/2 coding mode.
const (
	// Eac3PhaseControlShift90Degrees is a Eac3PhaseControl enum value
	Eac3PhaseControlShift90Degrees = "SHIFT_90_DEGREES"

	// Eac3PhaseControlNoShift is a Eac3PhaseControl enum value
	Eac3PhaseControlNoShift = "NO_SHIFT"
)

// Eac3PhaseControl_Values returns all elements of the Eac3PhaseControl enum
func Eac3PhaseControl_Values() []string {
	values := []string{
		Eac3PhaseControlShift90Degrees,
		Eac3PhaseControlNoShift,
	}
	return values
}

// Choose how the service does stereo downmixing. This setting only applies
// if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2)
// for the setting Coding mode (Eac3CodingMode). If you choose a different value
// for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix).
const (
	// Eac3StereoDownmixNotIndicated is a Eac3StereoDownmix enum value
	Eac3StereoDownmixNotIndicated = "NOT_INDICATED"

	// Eac3StereoDownmixLoRo is a Eac3StereoDownmix enum value
	Eac3StereoDownmixLoRo = "LO_RO"

	// Eac3StereoDownmixLtRt is a Eac3StereoDownmix enum value
	Eac3StereoDownmixLtRt = "LT_RT"

	// Eac3StereoDownmixDpl2 is a Eac3StereoDownmix enum value
	Eac3StereoDownmixDpl2 = "DPL2"
)

// Eac3StereoDownmix_Values returns all elements of the Eac3StereoDownmix enum
func Eac3StereoDownmix_Values() []string {
	values := []string{
		Eac3StereoDownmixNotIndicated,
		Eac3StereoDownmixLoRo,
		Eac3StereoDownmixLtRt,
		Eac3StereoDownmixDpl2,
	}
	return values
}

// When encoding 3/2 audio, sets whether an extra center back surround channel
// is matrix encoded into the left and right surround channels.
const (
	// Eac3SurroundExModeNotIndicated is a Eac3SurroundExMode enum value
	Eac3SurroundExModeNotIndicated = "NOT_INDICATED"

	// Eac3SurroundExModeEnabled is a Eac3SurroundExMode enum value
	Eac3SurroundExModeEnabled = "ENABLED"

	// Eac3SurroundExModeDisabled is a Eac3SurroundExMode enum value
	Eac3SurroundExModeDisabled = "DISABLED"
)

// Eac3SurroundExMode_Values returns all elements of the Eac3SurroundExMode enum
func Eac3SurroundExMode_Values() []string {
	values := []string{
		Eac3SurroundExModeNotIndicated,
		Eac3SurroundExModeEnabled,
		Eac3SurroundExModeDisabled,
	}
	return values
}

// When encoding 2/0 audio, sets whether Dolby Surround is matrix encoded into
// the two channels.
const (
	// Eac3SurroundModeNotIndicated is a Eac3SurroundMode enum value
	Eac3SurroundModeNotIndicated = "NOT_INDICATED"

	// Eac3SurroundModeEnabled is a Eac3SurroundMode enum value
	Eac3SurroundModeEnabled = "ENABLED"

	// Eac3SurroundModeDisabled is a Eac3SurroundMode enum value
	Eac3SurroundModeDisabled = "DISABLED"
)

// Eac3SurroundMode_Values returns all elements of the Eac3SurroundMode enum
func Eac3SurroundMode_Values() []string {
	values := []string{
		Eac3SurroundModeNotIndicated,
		Eac3SurroundModeEnabled,
		Eac3SurroundModeDisabled,
	}
	return values
}

// Specify whether this set of input captions appears in your outputs in both
// 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes
// the captions data in two ways: it passes the 608 data through using the 608
// compatibility bytes fields of the 708 wrapper, and it also translates the
// 608 data into 708.
const (
	// EmbeddedConvert608To708Upconvert is a EmbeddedConvert608To708 enum value
	EmbeddedConvert608To708Upconvert = "UPCONVERT"

	// EmbeddedConvert608To708Disabled is a EmbeddedConvert608To708 enum value
	EmbeddedConvert608To708Disabled = "DISABLED"
)

// EmbeddedConvert608To708_Values returns all elements of the EmbeddedConvert608To708 enum
func EmbeddedConvert608To708_Values() []string {
	values := []string{
		EmbeddedConvert608To708Upconvert,
		EmbeddedConvert608To708Disabled,
	}
	return values
}

// By default, the service terminates any unterminated captions at the end of
// each input. If you want the caption to continue onto your next input, disable
// this setting.
const (
	// EmbeddedTerminateCaptionsEndOfInput is a EmbeddedTerminateCaptions enum value
	EmbeddedTerminateCaptionsEndOfInput = "END_OF_INPUT"

	// EmbeddedTerminateCaptionsDisabled is a EmbeddedTerminateCaptions enum value
	EmbeddedTerminateCaptionsDisabled = "DISABLED"
)

// EmbeddedTerminateCaptions_Values returns all elements of the EmbeddedTerminateCaptions enum
func EmbeddedTerminateCaptions_Values() []string {
	values := []string{
		EmbeddedTerminateCaptionsEndOfInput,
		EmbeddedTerminateCaptionsDisabled,
	}
	return values
}

// If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning
// of the archive as required for progressive downloading. Otherwise it is placed
// normally at the end.
const (
	// F4vMoovPlacementProgressiveDownload is a F4vMoovPlacement enum value
	F4vMoovPlacementProgressiveDownload = "PROGRESSIVE_DOWNLOAD"

	// F4vMoovPlacementNormal is a F4vMoovPlacement enum value
	F4vMoovPlacementNormal = "NORMAL"
)

// F4vMoovPlacement_Values returns all elements of the F4vMoovPlacement enum
func F4vMoovPlacement_Values() []string {
	values := []string{
		F4vMoovPlacementProgressiveDownload,
		F4vMoovPlacementNormal,
	}
	return values
}

// Specify whether this set of input captions appears in your outputs in both
// 608 and 708 format.
// If you choose Upconvert (UPCONVERT), MediaConvert includes
// the captions data in two ways: it passes the 608 data through using the 608
// compatibility bytes fields of the 708 wrapper, and it also translates the
// 608 data into 708.
const (
	// FileSourceConvert608To708Upconvert is a FileSourceConvert608To708 enum value
	FileSourceConvert608To708Upconvert = "UPCONVERT"

	// FileSourceConvert608To708Disabled is a FileSourceConvert608To708 enum value
	FileSourceConvert608To708Disabled = "DISABLED"
)

// FileSourceConvert608To708_Values returns all elements of the FileSourceConvert608To708 enum
func FileSourceConvert608To708_Values() []string {
	values := []string{
		FileSourceConvert608To708Upconvert,
		FileSourceConvert608To708Disabled,
	}
	return values
}

// Provide the font script, using an ISO 15924 script code, if the LanguageCode
// is not sufficient for determining the script type. Where LanguageCode or
// CustomLanguageCode is sufficient, use "AUTOMATIC" or leave unset.
const (
	// FontScriptAutomatic is a FontScript enum value
	FontScriptAutomatic = "AUTOMATIC"

	// FontScriptHans is a FontScript enum value
	FontScriptHans = "HANS"

	// FontScriptHant is a FontScript enum value
	FontScriptHant = "HANT"
)

// FontScript_Values returns all elements of the FontScript enum
func FontScript_Values() []string {
	values := []string{
		FontScriptAutomatic,
		FontScriptHans,
		FontScriptHant,
	}
	return values
}

// Keep the default value, Auto (AUTO), for this setting to have MediaConvert
// automatically apply the best types of quantization for your video content.
// When you want to apply your quantization settings manually, you must set
// H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting
// to specify the strength of any adaptive quantization filters that you enable.
// If you don't want MediaConvert to do any adaptive quantization in this transcode,
// set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF).
// Related
// settings: The value that you choose here applies to the following settings:
// H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization.
const (
	// H264AdaptiveQuantizationOff is a H264AdaptiveQuantization enum value
	H264AdaptiveQuantizationOff = "OFF"

	// H264AdaptiveQuantizationAuto is a H264AdaptiveQuantization enum value
	H264AdaptiveQuantizationAuto = "AUTO"

	// H264AdaptiveQuantizationLow is a H264AdaptiveQuantization enum value
	H264AdaptiveQuantizationLow = "LOW"

	// H264AdaptiveQuantizationMedium is a H264AdaptiveQuantization enum value
	H264AdaptiveQuantizationMedium = "MEDIUM"

	// H264AdaptiveQuantizationHigh is a H264AdaptiveQuantization enum value
	H264AdaptiveQuantizationHigh = "HIGH"

	// H264AdaptiveQuantizationHigher is a H264AdaptiveQuantization enum value
	H264AdaptiveQuantizationHigher = "HIGHER"

	// H264AdaptiveQuantizationMax is a H264AdaptiveQuantization enum value
	H264AdaptiveQuantizationMax = "MAX"
)

// H264AdaptiveQuantization_Values returns all elements of the H264AdaptiveQuantization enum
func H264AdaptiveQuantization_Values() []string {
	values := []string{
		H264AdaptiveQuantizationOff,
		H264AdaptiveQuantizationAuto,
		H264AdaptiveQuantizationLow,
		H264AdaptiveQuantizationMedium,
		H264AdaptiveQuantizationHigh,
		H264AdaptiveQuantizationHigher,
		H264AdaptiveQuantizationMax,
	}
	return values
}

// Specify an H.264 level that is consistent with your output video settings.
// If you aren't sure what level to specify, choose Auto (AUTO).
const (
	// H264CodecLevelAuto is a H264CodecLevel enum value
	H264CodecLevelAuto = "AUTO"

	// H264CodecLevelLevel1 is a H264CodecLevel enum value
	H264CodecLevelLevel1 = "LEVEL_1"

	// H264CodecLevelLevel11 is a H264CodecLevel enum value
	H264CodecLevelLevel11 = "LEVEL_1_1"

	// H264CodecLevelLevel12 is a H264CodecLevel enum value
	H264CodecLevelLevel12 = "LEVEL_1_2"

	// H264CodecLevelLevel13 is a H264CodecLevel enum value
	H264CodecLevelLevel13 = "LEVEL_1_3"

	// H264CodecLevelLevel2 is a H264CodecLevel enum value
	H264CodecLevelLevel2 = "LEVEL_2"

	// H264CodecLevelLevel21 is a H264CodecLevel enum value
	H264CodecLevelLevel21 = "LEVEL_2_1"

	// H264CodecLevelLevel22 is a H264CodecLevel enum value
	H264CodecLevelLevel22 = "LEVEL_2_2"

	// H264CodecLevelLevel3 is a H264CodecLevel enum value
	H264CodecLevelLevel3 = "LEVEL_3"

	// H264CodecLevelLevel31 is a H264CodecLevel enum value
	H264CodecLevelLevel31 = "LEVEL_3_1"

	// H264CodecLevelLevel32 is a H264CodecLevel enum value
	H264CodecLevelLevel32 = "LEVEL_3_2"

	// H264CodecLevelLevel4 is a H264CodecLevel enum value
	H264CodecLevelLevel4 = "LEVEL_4"

	// H264CodecLevelLevel41 is a H264CodecLevel enum value
	H264CodecLevelLevel41 = "LEVEL_4_1"

	// H264CodecLevelLevel42 is a H264CodecLevel enum value
	H264CodecLevelLevel42 = "LEVEL_4_2"

	// H264CodecLevelLevel5 is a H264CodecLevel enum value
	H264CodecLevelLevel5 = "LEVEL_5"

	// H264CodecLevelLevel51 is a H264CodecLevel enum value
	H264CodecLevelLevel51 = "LEVEL_5_1"

	// H264CodecLevelLevel52 is a H264CodecLevel enum value
	H264CodecLevelLevel52 = "LEVEL_5_2"
)

// H264CodecLevel_Values returns all elements of the H264CodecLevel enum
func H264CodecLevel_Values() []string {
	values := []string{
		H264CodecLevelAuto,
		H264CodecLevelLevel1,
		H264CodecLevelLevel11,
		H264CodecLevelLevel12,
		H264CodecLevelLevel13,
		H264CodecLevelLevel2,
		H264CodecLevelLevel21,
		H264CodecLevelLevel22,
		H264CodecLevelLevel3,
		H264CodecLevelLevel31,
		H264CodecLevelLevel32,
		H264CodecLevelLevel4,
		H264CodecLevelLevel41,
		H264CodecLevelLevel42,
		H264CodecLevelLevel5,
		H264CodecLevelLevel51,
		H264CodecLevelLevel52,
	}
	return values
}

// H.264 Profile. High 4:2:2 and 10-bit profiles are only available with the
// AVC-I License.
const (
	// H264CodecProfileBaseline is a H264CodecProfile enum value
	H264CodecProfileBaseline = "BASELINE"

	// H264CodecProfileHigh is a H264CodecProfile enum value
	H264CodecProfileHigh = "HIGH"

	// H264CodecProfileHigh10bit is a H264CodecProfile enum value
	H264CodecProfileHigh10bit = "HIGH_10BIT"

	// H264CodecProfileHigh422 is a H264CodecProfile enum value
	H264CodecProfileHigh422 = "HIGH_422"

	// H264CodecProfileHigh42210bit is a H264CodecProfile enum value
	H264CodecProfileHigh42210bit = "HIGH_422_10BIT"

	// H264CodecProfileMain is a H264CodecProfile enum value
	H264CodecProfileMain = "MAIN"
)

// H264CodecProfile_Values returns all elements of the H264CodecProfile enum
func H264CodecProfile_Values() []string {
	values := []string{
		H264CodecProfileBaseline,
		H264CodecProfileHigh,
		H264CodecProfileHigh10bit,
		H264CodecProfileHigh422,
		H264CodecProfileHigh42210bit,
		H264CodecProfileMain,
	}
	return values
}

// Choose Adaptive to improve subjective video quality for high-motion content.
// This will cause the service to use fewer B-frames (which infer information
// based on other frames) for high-motion portions of the video and more B-frames
// for low-motion portions. The maximum number of B-frames is limited by the
// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).
const (
	// H264DynamicSubGopAdaptive is a H264DynamicSubGop enum value
	H264DynamicSubGopAdaptive = "ADAPTIVE"

	// H264DynamicSubGopStatic is a H264DynamicSubGop enum value
	H264DynamicSubGopStatic = "STATIC"
)

// H264DynamicSubGop_Values returns all elements of the H264DynamicSubGop enum
func H264DynamicSubGop_Values() []string {
	values := []string{
		H264DynamicSubGopAdaptive,
		H264DynamicSubGopStatic,
	}
	return values
}

// Entropy encoding mode. Use CABAC (must be in Main or High profile) or CAVLC.
const (
	// H264EntropyEncodingCabac is a H264EntropyEncoding enum value
	H264EntropyEncodingCabac = "CABAC"

	// H264EntropyEncodingCavlc is a H264EntropyEncoding enum value
	H264EntropyEncodingCavlc = "CAVLC"
)

// H264EntropyEncoding_Values returns all elements of the H264EntropyEncoding enum
func H264EntropyEncoding_Values() []string {
	values := []string{
		H264EntropyEncodingCabac,
		H264EntropyEncodingCavlc,
	}
	return values
}

// Keep the default value, PAFF, to have MediaConvert use PAFF encoding for
// interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding
// and create separate interlaced fields.
const (
	// H264FieldEncodingPaff is a H264FieldEncoding enum value
	H264FieldEncodingPaff = "PAFF"

	// H264FieldEncodingForceField is a H264FieldEncoding enum value
	H264FieldEncodingForceField = "FORCE_FIELD"
)

// H264FieldEncoding_Values returns all elements of the H264FieldEncoding enum
func H264FieldEncoding_Values() []string {
	values := []string{
		H264FieldEncodingPaff,
		H264FieldEncodingForceField,
	}
	return values
}

// Only use this setting when you change the default value, AUTO, for the setting
// H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization
// and all other adaptive quantization from your JSON job specification, MediaConvert
// automatically applies the best types of quantization for your video content.
// When you set H264AdaptiveQuantization to a value other than AUTO, the default
// value for H264FlickerAdaptiveQuantization is Disabled (DISABLED). Change
// this value to Enabled (ENABLED) to reduce I-frame pop. I-frame pop appears
// as a visual flicker that can arise when the encoder saves bits by copying
// some macroblocks many times from frame to frame, and then refreshes them
// at the I-frame. When you enable this setting, the encoder updates these macroblocks
// slightly more often to smooth out the flicker.
// To manually enable or disable
// H264FlickerAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization)
// to a value other than AUTO.
const (
	// H264FlickerAdaptiveQuantizationDisabled is a H264FlickerAdaptiveQuantization enum value
	H264FlickerAdaptiveQuantizationDisabled = "DISABLED"

	// H264FlickerAdaptiveQuantizationEnabled is a H264FlickerAdaptiveQuantization enum value
	H264FlickerAdaptiveQuantizationEnabled = "ENABLED"
)

// H264FlickerAdaptiveQuantization_Values returns all elements of the H264FlickerAdaptiveQuantization enum
func H264FlickerAdaptiveQuantization_Values() []string {
	values := []string{
		H264FlickerAdaptiveQuantizationDisabled,
		H264FlickerAdaptiveQuantizationEnabled,
	}
	return values
}

// If you are using the console, use the Framerate setting to specify the frame
// rate for this output. If you want to keep the same frame rate as the input
// video, choose Follow source. If you want to do frame rate conversion, choose
// a frame rate from the dropdown list or choose Custom. The framerates shown
// in the dropdown list are decimal approximations of fractions. If you choose
// Custom, specify your frame rate as a fraction. If you are creating your transcoding
// job specification as a JSON file without the console, use FramerateControl
// to specify which value the service uses for the frame rate for this output.
// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
// from the input. Choose SPECIFIED if you want the service to use the frame
// rate you specify in the settings FramerateNumerator and FramerateDenominator.
const (
	// H264FramerateControlInitializeFromSource is a H264FramerateControl enum value
	H264FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// H264FramerateControlSpecified is a H264FramerateControl enum value
	H264FramerateControlSpecified = "SPECIFIED"
)

// H264FramerateControl_Values returns all elements of the H264FramerateControl enum
func H264FramerateControl_Values() []string {
	values := []string{
		H264FramerateControlInitializeFromSource,
		H264FramerateControlSpecified,
	}
	return values
}

// Choose the method that you want MediaConvert to use when increasing or decreasing
// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
// smooth picture, but might introduce undesirable video artifacts. For complex
// frame rate conversions, especially if your source video has already been
// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
// motion-compensated interpolation. FrameFormer chooses the best conversion
// method frame by frame. Note that using FrameFormer increases the transcoding
// time and incurs a significant add-on cost.
const (
	// H264FramerateConversionAlgorithmDuplicateDrop is a H264FramerateConversionAlgorithm enum value
	H264FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// H264FramerateConversionAlgorithmInterpolate is a H264FramerateConversionAlgorithm enum value
	H264FramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// H264FramerateConversionAlgorithmFrameformer is a H264FramerateConversionAlgorithm enum value
	H264FramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// H264FramerateConversionAlgorithm_Values returns all elements of the H264FramerateConversionAlgorithm enum
func H264FramerateConversionAlgorithm_Values() []string {
	values := []string{
		H264FramerateConversionAlgorithmDuplicateDrop,
		H264FramerateConversionAlgorithmInterpolate,
		H264FramerateConversionAlgorithmFrameformer,
	}
	return values
}

// If enable, use reference B frames for GOP structures that have B frames >
// 1.
const (
	// H264GopBReferenceDisabled is a H264GopBReference enum value
	H264GopBReferenceDisabled = "DISABLED"

	// H264GopBReferenceEnabled is a H264GopBReference enum value
	H264GopBReferenceEnabled = "ENABLED"
)

// H264GopBReference_Values returns all elements of the H264GopBReference enum
func H264GopBReference_Values() []string {
	values := []string{
		H264GopBReferenceDisabled,
		H264GopBReferenceEnabled,
	}
	return values
}

// Indicates if the GOP Size in H264 is specified in frames or seconds. If seconds
// the system will convert the GOP Size into a frame count at run time.
const (
	// H264GopSizeUnitsFrames is a H264GopSizeUnits enum value
	H264GopSizeUnitsFrames = "FRAMES"

	// H264GopSizeUnitsSeconds is a H264GopSizeUnits enum value
	H264GopSizeUnitsSeconds = "SECONDS"
)

// H264GopSizeUnits_Values returns all elements of the H264GopSizeUnits enum
func H264GopSizeUnits_Values() []string {
	values := []string{
		H264GopSizeUnitsFrames,
		H264GopSizeUnitsSeconds,
	}
	return values
}

// Choose the scan line type for the output.
// Keep the default value, Progressive
// (PROGRESSIVE) to create a progressive output, regardless of the scan type
// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
// to create an output that's interlaced with the same field polarity throughout.
// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
// to produce outputs with the same field polarity as the source. For jobs that
// have multiple inputs, the output field polarity might change over the course
// of the output. Follow behavior depends on the input scan type. If the source
// is interlaced, the output will be interlaced with the same polarity as the
// source. If the source is progressive, the output will be interlaced with
// top field bottom field first, depending on which of the Follow options you
// choose.
const (
	// H264InterlaceModeProgressive is a H264InterlaceMode enum value
	H264InterlaceModeProgressive = "PROGRESSIVE"

	// H264InterlaceModeTopField is a H264InterlaceMode enum value
	H264InterlaceModeTopField = "TOP_FIELD"

	// H264InterlaceModeBottomField is a H264InterlaceMode enum value
	H264InterlaceModeBottomField = "BOTTOM_FIELD"

	// H264InterlaceModeFollowTopField is a H264InterlaceMode enum value
	H264InterlaceModeFollowTopField = "FOLLOW_TOP_FIELD"

	// H264InterlaceModeFollowBottomField is a H264InterlaceMode enum value
	H264InterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD"
)

// H264InterlaceMode_Values returns all elements of the H264InterlaceMode enum
func H264InterlaceMode_Values() []string {
	values := []string{
		H264InterlaceModeProgressive,
		H264InterlaceModeTopField,
		H264InterlaceModeBottomField,
		H264InterlaceModeFollowTopField,
		H264InterlaceModeFollowBottomField,
	}
	return values
}

// Optional. Specify how the service determines the pixel aspect ratio (PAR)
// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
// uses the PAR from your input video for your output.
// To specify a different
// PAR in the console, choose any value other than Follow source. To specify
// a different PAR by editing the JSON job specification, choose SPECIFIED.
// When you choose SPECIFIED for this setting, you must also specify values
// for the parNumerator and parDenominator settings.
const (
	// H264ParControlInitializeFromSource is a H264ParControl enum value
	H264ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// H264ParControlSpecified is a H264ParControl enum value
	H264ParControlSpecified = "SPECIFIED"
)

// H264ParControl_Values returns all elements of the H264ParControl enum
func H264ParControl_Values() []string {
	values := []string{
		H264ParControlInitializeFromSource,
		H264ParControlSpecified,
	}
	return values
}

// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
// want to trade off encoding speed for output video quality. The default behavior
// is faster, lower quality, single-pass encoding.
const (
	// H264QualityTuningLevelSinglePass is a H264QualityTuningLevel enum value
	H264QualityTuningLevelSinglePass = "SINGLE_PASS"

	// H264QualityTuningLevelSinglePassHq is a H264QualityTuningLevel enum value
	H264QualityTuningLevelSinglePassHq = "SINGLE_PASS_HQ"

	// H264QualityTuningLevelMultiPassHq is a H264QualityTuningLevel enum value
	H264QualityTuningLevelMultiPassHq = "MULTI_PASS_HQ"
)

// H264QualityTuningLevel_Values returns all elements of the H264QualityTuningLevel enum
func H264QualityTuningLevel_Values() []string {
	values := []string{
		H264QualityTuningLevelSinglePass,
		H264QualityTuningLevelSinglePassHq,
		H264QualityTuningLevelMultiPassHq,
	}
	return values
}

// Use this setting to specify whether this output has a variable bitrate (VBR),
// constant bitrate (CBR) or quality-defined variable bitrate (QVBR).
const (
	// H264RateControlModeVbr is a H264RateControlMode enum value
	H264RateControlModeVbr = "VBR"

	// H264RateControlModeCbr is a H264RateControlMode enum value
	H264RateControlModeCbr = "CBR"

	// H264RateControlModeQvbr is a H264RateControlMode enum value
	H264RateControlModeQvbr = "QVBR"
)

// H264RateControlMode_Values returns all elements of the H264RateControlMode enum
func H264RateControlMode_Values() []string {
	values := []string{
		H264RateControlModeVbr,
		H264RateControlModeCbr,
		H264RateControlModeQvbr,
	}
	return values
}

// Places a PPS header on each encoded picture, even if repeated.
const (
	// H264RepeatPpsDisabled is a H264RepeatPps enum value
	H264RepeatPpsDisabled = "DISABLED"

	// H264RepeatPpsEnabled is a H264RepeatPps enum value
	H264RepeatPpsEnabled = "ENABLED"
)

// H264RepeatPps_Values returns all elements of the H264RepeatPps enum
func H264RepeatPps_Values() []string {
	values := []string{
		H264RepeatPpsDisabled,
		H264RepeatPpsEnabled,
	}
	return values
}

// Use this setting for interlaced outputs, when your output frame rate is half
// of your input frame rate. In this situation, choose Optimized interlacing
// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
// case, each progressive frame from the input corresponds to an interlaced
// field in the output. Keep the default value, Basic interlacing (INTERLACED),
// for all other output frame rates. With basic interlacing, MediaConvert performs
// any frame rate conversion first and then interlaces the frames. When you
// choose Optimized interlacing and you set your output frame rate to a value
// that isn't suitable for optimized interlacing, MediaConvert automatically
// falls back to basic interlacing. Required settings: To use optimized interlacing,
// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
// use optimized interlacing for hard telecine outputs. You must also set Interlace
// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
const (
	// H264ScanTypeConversionModeInterlaced is a H264ScanTypeConversionMode enum value
	H264ScanTypeConversionModeInterlaced = "INTERLACED"

	// H264ScanTypeConversionModeInterlacedOptimize is a H264ScanTypeConversionMode enum value
	H264ScanTypeConversionModeInterlacedOptimize = "INTERLACED_OPTIMIZE"
)

// H264ScanTypeConversionMode_Values returns all elements of the H264ScanTypeConversionMode enum
func H264ScanTypeConversionMode_Values() []string {
	values := []string{
		H264ScanTypeConversionModeInterlaced,
		H264ScanTypeConversionModeInterlacedOptimize,
	}
	return values
}

// Enable this setting to insert I-frames at scene changes that the service
// automatically detects. This improves video quality and is enabled by default.
// If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION)
// for further video quality improvement. For more information about QVBR, see
// https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.
const (
	// H264SceneChangeDetectDisabled is a H264SceneChangeDetect enum value
	H264SceneChangeDetectDisabled = "DISABLED"

	// H264SceneChangeDetectEnabled is a H264SceneChangeDetect enum value
	H264SceneChangeDetectEnabled = "ENABLED"

	// H264SceneChangeDetectTransitionDetection is a H264SceneChangeDetect enum value
	H264SceneChangeDetectTransitionDetection = "TRANSITION_DETECTION"
)

// H264SceneChangeDetect_Values returns all elements of the H264SceneChangeDetect enum
func H264SceneChangeDetect_Values() []string {
	values := []string{
		H264SceneChangeDetectDisabled,
		H264SceneChangeDetectEnabled,
		H264SceneChangeDetectTransitionDetection,
	}
	return values
}

// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
// second (fps). Enable slow PAL to create a 25 fps output. When you enable
// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
// your audio to keep it synchronized with the video. Note that enabling this
// setting will slightly reduce the duration of your video.
// Required settings:
// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
// 1.
const (
	// H264SlowPalDisabled is a H264SlowPal enum value
	H264SlowPalDisabled = "DISABLED"

	// H264SlowPalEnabled is a H264SlowPal enum value
	H264SlowPalEnabled = "ENABLED"
)

// H264SlowPal_Values returns all elements of the H264SlowPal enum
func H264SlowPal_Values() []string {
	values := []string{
		H264SlowPalDisabled,
		H264SlowPalEnabled,
	}
	return values
}

// Only use this setting when you change the default value, Auto (AUTO), for
// the setting H264AdaptiveQuantization. When you keep all defaults, excluding
// H264AdaptiveQuantization and all other adaptive quantization from your JSON
// job specification, MediaConvert automatically applies the best types of quantization
// for your video content. When you set H264AdaptiveQuantization to a value
// other than AUTO, the default value for H264SpatialAdaptiveQuantization is
// Enabled (ENABLED). Keep this default value to adjust quantization within
// each frame based on spatial variation of content complexity. When you enable
// this feature, the encoder uses fewer bits on areas that can sustain more
// distortion with no noticeable visual degradation and uses more bits on areas
// where any small distortion will be noticeable. For example, complex textured
// blocks are encoded with fewer bits and smooth textured blocks are encoded
// with more bits. Enabling this feature will almost always improve your video
// quality. Note, though, that this feature doesn't take into account where
// the viewer's attention is likely to be. If viewers are likely to be focusing
// their attention on a part of the screen with a lot of complex texture, you
// might choose to set H264SpatialAdaptiveQuantization to Disabled (DISABLED).
// Related setting: When you enable spatial adaptive quantization, set the value // for Adaptive quantization (H264AdaptiveQuantization) depending on your content. // For homogeneous content, such as cartoons and video games, set it to Low. // For content with a wider variety of textures, set it to High or Higher. To // manually enable or disable H264SpatialAdaptiveQuantization, you must set // Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO. const ( // H264SpatialAdaptiveQuantizationDisabled is a H264SpatialAdaptiveQuantization enum value H264SpatialAdaptiveQuantizationDisabled = "DISABLED" // H264SpatialAdaptiveQuantizationEnabled is a H264SpatialAdaptiveQuantization enum value H264SpatialAdaptiveQuantizationEnabled = "ENABLED" ) // H264SpatialAdaptiveQuantization_Values returns all elements of the H264SpatialAdaptiveQuantization enum func H264SpatialAdaptiveQuantization_Values() []string { return []string{ H264SpatialAdaptiveQuantizationDisabled, H264SpatialAdaptiveQuantizationEnabled, } } // Produces a bitstream compliant with SMPTE RP-2027. const ( // H264SyntaxDefault is a H264Syntax enum value H264SyntaxDefault = "DEFAULT" // H264SyntaxRp2027 is a H264Syntax enum value H264SyntaxRp2027 = "RP2027" ) // H264Syntax_Values returns all elements of the H264Syntax enum func H264Syntax_Values() []string { return []string{ H264SyntaxDefault, H264SyntaxRp2027, } } // When you do frame rate conversion from 23.976 frames per second (fps) to // 29.97 fps, and your output scan type is interlaced, you can optionally enable // hard or soft telecine to create a smoother picture. Hard telecine (HARD) // produces a 29.97i output. Soft telecine (SOFT) produces an output with a // 23.976 output that signals to the video player device to do the conversion // during play back. 
When you keep the default value, None (NONE), MediaConvert // does a standard frame rate conversion to 29.97 without doing anything with // the field polarity to create a smoother picture. const ( // H264TelecineNone is a H264Telecine enum value H264TelecineNone = "NONE" // H264TelecineSoft is a H264Telecine enum value H264TelecineSoft = "SOFT" // H264TelecineHard is a H264Telecine enum value H264TelecineHard = "HARD" ) // H264Telecine_Values returns all elements of the H264Telecine enum func H264Telecine_Values() []string { return []string{ H264TelecineNone, H264TelecineSoft, H264TelecineHard, } } // Only use this setting when you change the default value, AUTO, for the setting // H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization // and all other adaptive quantization from your JSON job specification, MediaConvert // automatically applies the best types of quantization for your video content. // When you set H264AdaptiveQuantization to a value other than AUTO, the default // value for H264TemporalAdaptiveQuantization is Enabled (ENABLED). Keep this // default value to adjust quantization within each frame based on temporal // variation of content complexity. When you enable this feature, the encoder // uses fewer bits on areas of the frame that aren't moving and uses more bits // on complex objects with sharp edges that move a lot. For example, this feature // improves the readability of text tickers on newscasts and scoreboards on // sports matches. Enabling this feature will almost always improve your video // quality. Note, though, that this feature doesn't take into account where // the viewer's attention is likely to be. If viewers are likely to be focusing // their attention on a part of the screen that doesn't have moving objects // with sharp edges, such as sports athletes' faces, you might choose to set // H264TemporalAdaptiveQuantization to Disabled (DISABLED). 
Related setting: // When you enable temporal quantization, adjust the strength of the filter // with the setting Adaptive quantization (adaptiveQuantization). To manually // enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive // quantization (H264AdaptiveQuantization) to a value other than AUTO. const ( // H264TemporalAdaptiveQuantizationDisabled is a H264TemporalAdaptiveQuantization enum value H264TemporalAdaptiveQuantizationDisabled = "DISABLED" // H264TemporalAdaptiveQuantizationEnabled is a H264TemporalAdaptiveQuantization enum value H264TemporalAdaptiveQuantizationEnabled = "ENABLED" ) // H264TemporalAdaptiveQuantization_Values returns all elements of the H264TemporalAdaptiveQuantization enum func H264TemporalAdaptiveQuantization_Values() []string { return []string{ H264TemporalAdaptiveQuantizationDisabled, H264TemporalAdaptiveQuantizationEnabled, } } // Inserts timecode for each frame as 4 bytes of an unregistered SEI message. const ( // H264UnregisteredSeiTimecodeDisabled is a H264UnregisteredSeiTimecode enum value H264UnregisteredSeiTimecodeDisabled = "DISABLED" // H264UnregisteredSeiTimecodeEnabled is a H264UnregisteredSeiTimecode enum value H264UnregisteredSeiTimecodeEnabled = "ENABLED" ) // H264UnregisteredSeiTimecode_Values returns all elements of the H264UnregisteredSeiTimecode enum func H264UnregisteredSeiTimecode_Values() []string { return []string{ H264UnregisteredSeiTimecodeDisabled, H264UnregisteredSeiTimecodeEnabled, } } // Specify the strength of any adaptive quantization filters that you enable. // The value that you choose here applies to the following settings: Flicker // adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization // (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization). 
const (
	// H265AdaptiveQuantizationOff is a H265AdaptiveQuantization enum value
	H265AdaptiveQuantizationOff = "OFF"

	// H265AdaptiveQuantizationLow is a H265AdaptiveQuantization enum value
	H265AdaptiveQuantizationLow = "LOW"

	// H265AdaptiveQuantizationMedium is a H265AdaptiveQuantization enum value
	H265AdaptiveQuantizationMedium = "MEDIUM"

	// H265AdaptiveQuantizationHigh is a H265AdaptiveQuantization enum value
	H265AdaptiveQuantizationHigh = "HIGH"

	// H265AdaptiveQuantizationHigher is a H265AdaptiveQuantization enum value
	H265AdaptiveQuantizationHigher = "HIGHER"

	// H265AdaptiveQuantizationMax is a H265AdaptiveQuantization enum value
	H265AdaptiveQuantizationMax = "MAX"
)

// H265AdaptiveQuantization_Values returns all elements of the H265AdaptiveQuantization enum
func H265AdaptiveQuantization_Values() []string {
	return []string{
		H265AdaptiveQuantizationOff,
		H265AdaptiveQuantizationLow,
		H265AdaptiveQuantizationMedium,
		H265AdaptiveQuantizationHigh,
		H265AdaptiveQuantizationHigher,
		H265AdaptiveQuantizationMax,
	}
}

// Enables Alternate Transfer Function SEI message for outputs using Hybrid
// Log Gamma (HLG) Electro-Optical Transfer Function (EOTF).
const (
	// H265AlternateTransferFunctionSeiDisabled is a H265AlternateTransferFunctionSei enum value
	H265AlternateTransferFunctionSeiDisabled = "DISABLED"

	// H265AlternateTransferFunctionSeiEnabled is a H265AlternateTransferFunctionSei enum value
	H265AlternateTransferFunctionSeiEnabled = "ENABLED"
)

// H265AlternateTransferFunctionSei_Values returns all elements of the H265AlternateTransferFunctionSei enum
func H265AlternateTransferFunctionSei_Values() []string {
	return []string{
		H265AlternateTransferFunctionSeiDisabled,
		H265AlternateTransferFunctionSeiEnabled,
	}
}

// H.265 Level.
const (
	// H265CodecLevelAuto is a H265CodecLevel enum value
	H265CodecLevelAuto = "AUTO"

	// H265CodecLevelLevel1 is a H265CodecLevel enum value
	H265CodecLevelLevel1 = "LEVEL_1"

	// H265CodecLevelLevel2 is a H265CodecLevel enum value
	H265CodecLevelLevel2 = "LEVEL_2"

	// H265CodecLevelLevel21 is a H265CodecLevel enum value
	H265CodecLevelLevel21 = "LEVEL_2_1"

	// H265CodecLevelLevel3 is a H265CodecLevel enum value
	H265CodecLevelLevel3 = "LEVEL_3"

	// H265CodecLevelLevel31 is a H265CodecLevel enum value
	H265CodecLevelLevel31 = "LEVEL_3_1"

	// H265CodecLevelLevel4 is a H265CodecLevel enum value
	H265CodecLevelLevel4 = "LEVEL_4"

	// H265CodecLevelLevel41 is a H265CodecLevel enum value
	H265CodecLevelLevel41 = "LEVEL_4_1"

	// H265CodecLevelLevel5 is a H265CodecLevel enum value
	H265CodecLevelLevel5 = "LEVEL_5"

	// H265CodecLevelLevel51 is a H265CodecLevel enum value
	H265CodecLevelLevel51 = "LEVEL_5_1"

	// H265CodecLevelLevel52 is a H265CodecLevel enum value
	H265CodecLevelLevel52 = "LEVEL_5_2"

	// H265CodecLevelLevel6 is a H265CodecLevel enum value
	H265CodecLevelLevel6 = "LEVEL_6"

	// H265CodecLevelLevel61 is a H265CodecLevel enum value
	H265CodecLevelLevel61 = "LEVEL_6_1"

	// H265CodecLevelLevel62 is a H265CodecLevel enum value
	H265CodecLevelLevel62 = "LEVEL_6_2"
)

// H265CodecLevel_Values returns all elements of the H265CodecLevel enum
func H265CodecLevel_Values() []string {
	return []string{
		H265CodecLevelAuto,
		H265CodecLevelLevel1,
		H265CodecLevelLevel2,
		H265CodecLevelLevel21,
		H265CodecLevelLevel3,
		H265CodecLevelLevel31,
		H265CodecLevelLevel4,
		H265CodecLevelLevel41,
		H265CodecLevelLevel5,
		H265CodecLevelLevel51,
		H265CodecLevelLevel52,
		H265CodecLevelLevel6,
		H265CodecLevelLevel61,
		H265CodecLevelLevel62,
	}
}

// Represents the Profile and Tier, per the HEVC (H.265) specification. Selections
// are grouped as [Profile] / [Tier], so "Main/High" represents Main Profile
// with High Tier. 4:2:2 profiles are only available with the HEVC 4:2:2 License.
const (
	// H265CodecProfileMainMain is a H265CodecProfile enum value
	H265CodecProfileMainMain = "MAIN_MAIN"

	// H265CodecProfileMainHigh is a H265CodecProfile enum value
	H265CodecProfileMainHigh = "MAIN_HIGH"

	// H265CodecProfileMain10Main is a H265CodecProfile enum value
	H265CodecProfileMain10Main = "MAIN10_MAIN"

	// H265CodecProfileMain10High is a H265CodecProfile enum value
	H265CodecProfileMain10High = "MAIN10_HIGH"

	// H265CodecProfileMain4228bitMain is a H265CodecProfile enum value
	H265CodecProfileMain4228bitMain = "MAIN_422_8BIT_MAIN"

	// H265CodecProfileMain4228bitHigh is a H265CodecProfile enum value
	H265CodecProfileMain4228bitHigh = "MAIN_422_8BIT_HIGH"

	// H265CodecProfileMain42210bitMain is a H265CodecProfile enum value
	H265CodecProfileMain42210bitMain = "MAIN_422_10BIT_MAIN"

	// H265CodecProfileMain42210bitHigh is a H265CodecProfile enum value
	H265CodecProfileMain42210bitHigh = "MAIN_422_10BIT_HIGH"
)

// H265CodecProfile_Values returns all elements of the H265CodecProfile enum
func H265CodecProfile_Values() []string {
	return []string{
		H265CodecProfileMainMain,
		H265CodecProfileMainHigh,
		H265CodecProfileMain10Main,
		H265CodecProfileMain10High,
		H265CodecProfileMain4228bitMain,
		H265CodecProfileMain4228bitHigh,
		H265CodecProfileMain42210bitMain,
		H265CodecProfileMain42210bitHigh,
	}
}

// Choose Adaptive to improve subjective video quality for high-motion content.
// This will cause the service to use fewer B-frames (which infer information
// based on other frames) for high-motion portions of the video and more B-frames
// for low-motion portions. The maximum number of B-frames is limited by the
// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).
const (
	// H265DynamicSubGopAdaptive is a H265DynamicSubGop enum value
	H265DynamicSubGopAdaptive = "ADAPTIVE"

	// H265DynamicSubGopStatic is a H265DynamicSubGop enum value
	H265DynamicSubGopStatic = "STATIC"
)

// H265DynamicSubGop_Values returns all elements of the H265DynamicSubGop enum
func H265DynamicSubGop_Values() []string {
	return []string{
		H265DynamicSubGopAdaptive,
		H265DynamicSubGopStatic,
	}
}

// Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears
// as a visual flicker that can arise when the encoder saves bits by copying
// some macroblocks many times from frame to frame, and then refreshes them
// at the I-frame. When you enable this setting, the encoder updates these macroblocks
// slightly more often to smooth out the flicker. This setting is disabled by
// default. Related setting: In addition to enabling this setting, you must
// also set adaptiveQuantization to a value other than Off (OFF).
const (
	// H265FlickerAdaptiveQuantizationDisabled is a H265FlickerAdaptiveQuantization enum value
	H265FlickerAdaptiveQuantizationDisabled = "DISABLED"

	// H265FlickerAdaptiveQuantizationEnabled is a H265FlickerAdaptiveQuantization enum value
	H265FlickerAdaptiveQuantizationEnabled = "ENABLED"
)

// H265FlickerAdaptiveQuantization_Values returns all elements of the H265FlickerAdaptiveQuantization enum
func H265FlickerAdaptiveQuantization_Values() []string {
	return []string{
		H265FlickerAdaptiveQuantizationDisabled,
		H265FlickerAdaptiveQuantizationEnabled,
	}
}

// If you are using the console, use the Framerate setting to specify the frame
// rate for this output. If you want to keep the same frame rate as the input
// video, choose Follow source. If you want to do frame rate conversion, choose
// a frame rate from the dropdown list or choose Custom. The framerates shown
// in the dropdown list are decimal approximations of fractions. If you choose
// Custom, specify your frame rate as a fraction. If you are creating your transcoding
// job specification as a JSON file without the console, use FramerateControl
// to specify which value the service uses for the frame rate for this output.
// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
// from the input. Choose SPECIFIED if you want the service to use the frame
// rate you specify in the settings FramerateNumerator and FramerateDenominator.
const (
	// H265FramerateControlInitializeFromSource is a H265FramerateControl enum value
	H265FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// H265FramerateControlSpecified is a H265FramerateControl enum value
	H265FramerateControlSpecified = "SPECIFIED"
)

// H265FramerateControl_Values returns all elements of the H265FramerateControl enum
func H265FramerateControl_Values() []string {
	return []string{
		H265FramerateControlInitializeFromSource,
		H265FramerateControlSpecified,
	}
}

// Choose the method that you want MediaConvert to use when increasing or decreasing
// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
// smooth picture, but might introduce undesirable video artifacts. For complex
// frame rate conversions, especially if your source video has already been
// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
// motion-compensated interpolation. FrameFormer chooses the best conversion
// method frame by frame. Note that using FrameFormer increases the transcoding
// time and incurs a significant add-on cost.
const (
	// H265FramerateConversionAlgorithmDuplicateDrop is a H265FramerateConversionAlgorithm enum value
	H265FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// H265FramerateConversionAlgorithmInterpolate is a H265FramerateConversionAlgorithm enum value
	H265FramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// H265FramerateConversionAlgorithmFrameformer is a H265FramerateConversionAlgorithm enum value
	H265FramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// H265FramerateConversionAlgorithm_Values returns all elements of the H265FramerateConversionAlgorithm enum
func H265FramerateConversionAlgorithm_Values() []string {
	return []string{
		H265FramerateConversionAlgorithmDuplicateDrop,
		H265FramerateConversionAlgorithmInterpolate,
		H265FramerateConversionAlgorithmFrameformer,
	}
}

// If enable, use reference B frames for GOP structures that have B frames >
// 1.
const (
	// H265GopBReferenceDisabled is a H265GopBReference enum value
	H265GopBReferenceDisabled = "DISABLED"

	// H265GopBReferenceEnabled is a H265GopBReference enum value
	H265GopBReferenceEnabled = "ENABLED"
)

// H265GopBReference_Values returns all elements of the H265GopBReference enum
func H265GopBReference_Values() []string {
	return []string{
		H265GopBReferenceDisabled,
		H265GopBReferenceEnabled,
	}
}

// Indicates if the GOP Size in H265 is specified in frames or seconds. If seconds
// the system will convert the GOP Size into a frame count at run time.
const (
	// H265GopSizeUnitsFrames is a H265GopSizeUnits enum value
	H265GopSizeUnitsFrames = "FRAMES"

	// H265GopSizeUnitsSeconds is a H265GopSizeUnits enum value
	H265GopSizeUnitsSeconds = "SECONDS"
)

// H265GopSizeUnits_Values returns all elements of the H265GopSizeUnits enum
func H265GopSizeUnits_Values() []string {
	return []string{
		H265GopSizeUnitsFrames,
		H265GopSizeUnitsSeconds,
	}
}

// Choose the scan line type for the output. Keep the default value, Progressive
// (PROGRESSIVE) to create a progressive output, regardless of the scan type
// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
// to create an output that's interlaced with the same field polarity throughout.
// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
// to produce outputs with the same field polarity as the source. For jobs that
// have multiple inputs, the output field polarity might change over the course
// of the output. Follow behavior depends on the input scan type. If the source
// is interlaced, the output will be interlaced with the same polarity as the
// source. If the source is progressive, the output will be interlaced with
// top field bottom field first, depending on which of the Follow options you
// choose.
const (
	// H265InterlaceModeProgressive is a H265InterlaceMode enum value
	H265InterlaceModeProgressive = "PROGRESSIVE"

	// H265InterlaceModeTopField is a H265InterlaceMode enum value
	H265InterlaceModeTopField = "TOP_FIELD"

	// H265InterlaceModeBottomField is a H265InterlaceMode enum value
	H265InterlaceModeBottomField = "BOTTOM_FIELD"

	// H265InterlaceModeFollowTopField is a H265InterlaceMode enum value
	H265InterlaceModeFollowTopField = "FOLLOW_TOP_FIELD"

	// H265InterlaceModeFollowBottomField is a H265InterlaceMode enum value
	H265InterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD"
)

// H265InterlaceMode_Values returns all elements of the H265InterlaceMode enum
func H265InterlaceMode_Values() []string {
	return []string{
		H265InterlaceModeProgressive,
		H265InterlaceModeTopField,
		H265InterlaceModeBottomField,
		H265InterlaceModeFollowTopField,
		H265InterlaceModeFollowBottomField,
	}
}

// Optional. Specify how the service determines the pixel aspect ratio (PAR)
// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
// uses the PAR from your input video for your output. To specify a different
// PAR in the console, choose any value other than Follow source. To specify
// a different PAR by editing the JSON job specification, choose SPECIFIED.
// When you choose SPECIFIED for this setting, you must also specify values
// for the parNumerator and parDenominator settings.
const (
	// H265ParControlInitializeFromSource is a H265ParControl enum value
	H265ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// H265ParControlSpecified is a H265ParControl enum value
	H265ParControlSpecified = "SPECIFIED"
)

// H265ParControl_Values returns all elements of the H265ParControl enum
func H265ParControl_Values() []string {
	return []string{
		H265ParControlInitializeFromSource,
		H265ParControlSpecified,
	}
}

// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
// want to trade off encoding speed for output video quality. The default behavior
// is faster, lower quality, single-pass encoding.
const (
	// H265QualityTuningLevelSinglePass is a H265QualityTuningLevel enum value
	H265QualityTuningLevelSinglePass = "SINGLE_PASS"

	// H265QualityTuningLevelSinglePassHq is a H265QualityTuningLevel enum value
	H265QualityTuningLevelSinglePassHq = "SINGLE_PASS_HQ"

	// H265QualityTuningLevelMultiPassHq is a H265QualityTuningLevel enum value
	H265QualityTuningLevelMultiPassHq = "MULTI_PASS_HQ"
)

// H265QualityTuningLevel_Values returns all elements of the H265QualityTuningLevel enum
func H265QualityTuningLevel_Values() []string {
	return []string{
		H265QualityTuningLevelSinglePass,
		H265QualityTuningLevelSinglePassHq,
		H265QualityTuningLevelMultiPassHq,
	}
}

// Use this setting to specify whether this output has a variable bitrate (VBR),
// constant bitrate (CBR) or quality-defined variable bitrate (QVBR).
const (
	// H265RateControlModeVbr is a H265RateControlMode enum value
	H265RateControlModeVbr = "VBR"

	// H265RateControlModeCbr is a H265RateControlMode enum value
	H265RateControlModeCbr = "CBR"

	// H265RateControlModeQvbr is a H265RateControlMode enum value
	H265RateControlModeQvbr = "QVBR"
)

// H265RateControlMode_Values returns all elements of the H265RateControlMode enum
func H265RateControlMode_Values() []string {
	return []string{
		H265RateControlModeVbr,
		H265RateControlModeCbr,
		H265RateControlModeQvbr,
	}
}

// Specify Sample Adaptive Offset (SAO) filter strength. Adaptive mode dynamically
// selects best strength based on content
const (
	// H265SampleAdaptiveOffsetFilterModeDefault is a H265SampleAdaptiveOffsetFilterMode enum value
	H265SampleAdaptiveOffsetFilterModeDefault = "DEFAULT"

	// H265SampleAdaptiveOffsetFilterModeAdaptive is a H265SampleAdaptiveOffsetFilterMode enum value
	H265SampleAdaptiveOffsetFilterModeAdaptive = "ADAPTIVE"

	// H265SampleAdaptiveOffsetFilterModeOff is a H265SampleAdaptiveOffsetFilterMode enum value
	H265SampleAdaptiveOffsetFilterModeOff = "OFF"
)

// H265SampleAdaptiveOffsetFilterMode_Values returns all elements of the H265SampleAdaptiveOffsetFilterMode enum
func H265SampleAdaptiveOffsetFilterMode_Values() []string {
	return []string{
		H265SampleAdaptiveOffsetFilterModeDefault,
		H265SampleAdaptiveOffsetFilterModeAdaptive,
		H265SampleAdaptiveOffsetFilterModeOff,
	}
}

// Use this setting for interlaced outputs, when your output frame rate is half
// of your input frame rate. In this situation, choose Optimized interlacing
// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
// case, each progressive frame from the input corresponds to an interlaced
// field in the output. Keep the default value, Basic interlacing (INTERLACED),
// for all other output frame rates. With basic interlacing, MediaConvert performs
// any frame rate conversion first and then interlaces the frames. When you
// choose Optimized interlacing and you set your output frame rate to a value
// that isn't suitable for optimized interlacing, MediaConvert automatically
// falls back to basic interlacing. Required settings: To use optimized interlacing,
// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
// use optimized interlacing for hard telecine outputs. You must also set Interlace
// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
const (
	// H265ScanTypeConversionModeInterlaced is a H265ScanTypeConversionMode enum value
	H265ScanTypeConversionModeInterlaced = "INTERLACED"

	// H265ScanTypeConversionModeInterlacedOptimize is a H265ScanTypeConversionMode enum value
	H265ScanTypeConversionModeInterlacedOptimize = "INTERLACED_OPTIMIZE"
)

// H265ScanTypeConversionMode_Values returns all elements of the H265ScanTypeConversionMode enum
func H265ScanTypeConversionMode_Values() []string {
	return []string{
		H265ScanTypeConversionModeInterlaced,
		H265ScanTypeConversionModeInterlacedOptimize,
	}
}

// Enable this setting to insert I-frames at scene changes that the service
// automatically detects. This improves video quality and is enabled by default.
// If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION)
// for further video quality improvement. For more information about QVBR, see
// https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.
const (
	// H265SceneChangeDetectDisabled is a H265SceneChangeDetect enum value
	H265SceneChangeDetectDisabled = "DISABLED"

	// H265SceneChangeDetectEnabled is a H265SceneChangeDetect enum value
	H265SceneChangeDetectEnabled = "ENABLED"

	// H265SceneChangeDetectTransitionDetection is a H265SceneChangeDetect enum value
	H265SceneChangeDetectTransitionDetection = "TRANSITION_DETECTION"
)

// H265SceneChangeDetect_Values returns all elements of the H265SceneChangeDetect enum
func H265SceneChangeDetect_Values() []string {
	return []string{
		H265SceneChangeDetectDisabled,
		H265SceneChangeDetectEnabled,
		H265SceneChangeDetectTransitionDetection,
	}
}

// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
// second (fps). Enable slow PAL to create a 25 fps output. When you enable
// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
// your audio to keep it synchronized with the video. Note that enabling this
// setting will slightly reduce the duration of your video. Required settings:
// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
// 1.
const (
	// H265SlowPalDisabled is a H265SlowPal enum value
	H265SlowPalDisabled = "DISABLED"

	// H265SlowPalEnabled is a H265SlowPal enum value
	H265SlowPalEnabled = "ENABLED"
)

// H265SlowPal_Values returns all elements of the H265SlowPal enum
func H265SlowPal_Values() []string {
	return []string{
		H265SlowPalDisabled,
		H265SlowPalEnabled,
	}
}

// Keep the default value, Enabled (ENABLED), to adjust quantization within
// each frame based on spatial variation of content complexity. When you enable
// this feature, the encoder uses fewer bits on areas that can sustain more
// distortion with no noticeable visual degradation and uses more bits on areas
// where any small distortion will be noticeable. For example, complex textured
// blocks are encoded with fewer bits and smooth textured blocks are encoded
// with more bits. Enabling this feature will almost always improve your video
// quality. Note, though, that this feature doesn't take into account where
// the viewer's attention is likely to be. If viewers are likely to be focusing
// their attention on a part of the screen with a lot of complex texture, you
// might choose to disable this feature. Related setting: When you enable spatial
// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization)
// depending on your content. For homogeneous content, such as cartoons and
// video games, set it to Low. For content with a wider variety of textures,
// set it to High or Higher.
const (
	// H265SpatialAdaptiveQuantizationDisabled is a H265SpatialAdaptiveQuantization enum value
	H265SpatialAdaptiveQuantizationDisabled = "DISABLED"

	// H265SpatialAdaptiveQuantizationEnabled is a H265SpatialAdaptiveQuantization enum value
	H265SpatialAdaptiveQuantizationEnabled = "ENABLED"
)

// H265SpatialAdaptiveQuantization_Values returns all elements of the H265SpatialAdaptiveQuantization enum
func H265SpatialAdaptiveQuantization_Values() []string {
	return []string{
		H265SpatialAdaptiveQuantizationDisabled,
		H265SpatialAdaptiveQuantizationEnabled,
	}
}

// This field applies only if the Streams > Advanced > Framerate (framerate)
// field is set to 29.970. This field works with the Streams > Advanced > Preprocessors
// > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced
// Mode field (interlace_mode) to identify the scan type for the output: Progressive,
// Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output
// from 23.976 input. - Soft: produces 23.976; the player converts this output
// to 29.97i.
const (
	// H265TelecineNone is a H265Telecine enum value
	H265TelecineNone = "NONE"

	// H265TelecineSoft is a H265Telecine enum value
	H265TelecineSoft = "SOFT"

	// H265TelecineHard is a H265Telecine enum value
	H265TelecineHard = "HARD"
)

// H265Telecine_Values returns all elements of the H265Telecine enum
func H265Telecine_Values() []string {
	return []string{
		H265TelecineNone,
		H265TelecineSoft,
		H265TelecineHard,
	}
}

// Keep the default value, Enabled (ENABLED), to adjust quantization within
// each frame based on temporal variation of content complexity. When you enable
// this feature, the encoder uses fewer bits on areas of the frame that aren't
// moving and uses more bits on complex objects with sharp edges that move a
// lot. For example, this feature improves the readability of text tickers on
// newscasts and scoreboards on sports matches. Enabling this feature will almost
// always improve your video quality. Note, though, that this feature doesn't
// take into account where the viewer's attention is likely to be. If viewers
// are likely to be focusing their attention on a part of the screen that doesn't
// have moving objects with sharp edges, such as sports athletes' faces, you
// might choose to disable this feature. Related setting: When you enable temporal
// quantization, adjust the strength of the filter with the setting Adaptive
// quantization (adaptiveQuantization).
const (
	// H265TemporalAdaptiveQuantizationDisabled is a H265TemporalAdaptiveQuantization enum value
	H265TemporalAdaptiveQuantizationDisabled = "DISABLED"

	// H265TemporalAdaptiveQuantizationEnabled is a H265TemporalAdaptiveQuantization enum value
	H265TemporalAdaptiveQuantizationEnabled = "ENABLED"
)

// H265TemporalAdaptiveQuantization_Values returns all elements of the H265TemporalAdaptiveQuantization enum
func H265TemporalAdaptiveQuantization_Values() []string {
	return []string{
		H265TemporalAdaptiveQuantizationDisabled,
		H265TemporalAdaptiveQuantizationEnabled,
	}
}

// Enables temporal layer identifiers in the encoded bitstream. Up to 3 layers
// are supported depending on GOP structure: I- and P-frames form one layer,
// reference B-frames can form a second layer and non-reference b-frames can
// form a third layer. Decoders can optionally decode only the lower temporal
// layers to generate a lower frame rate output. For example, given a bitstream
// with temporal IDs and with b-frames = 1 (i.e. IbPbPb display order), a decoder
// could decode all the frames for full frame rate output or only the I and
// P frames (lowest temporal layer) for a half frame rate output.
const (
	// H265TemporalIdsDisabled is a H265TemporalIds enum value
	H265TemporalIdsDisabled = "DISABLED"

	// H265TemporalIdsEnabled is a H265TemporalIds enum value
	H265TemporalIdsEnabled = "ENABLED"
)

// H265TemporalIds_Values returns all elements of the H265TemporalIds enum
func H265TemporalIds_Values() []string {
	return []string{
		H265TemporalIdsDisabled,
		H265TemporalIdsEnabled,
	}
}

// Enable use of tiles, allowing horizontal as well as vertical subdivision
// of the encoded pictures.
const (
	// H265TilesDisabled is a H265Tiles enum value
	H265TilesDisabled = "DISABLED"

	// H265TilesEnabled is a H265Tiles enum value
	H265TilesEnabled = "ENABLED"
)

// H265Tiles_Values returns all elements of the H265Tiles enum
func H265Tiles_Values() []string {
	return []string{
		H265TilesDisabled,
		H265TilesEnabled,
	}
}

// Inserts timecode for each frame as 4 bytes of an unregistered SEI message.
const (
	// H265UnregisteredSeiTimecodeDisabled is a H265UnregisteredSeiTimecode enum value
	H265UnregisteredSeiTimecodeDisabled = "DISABLED"

	// H265UnregisteredSeiTimecodeEnabled is a H265UnregisteredSeiTimecode enum value
	H265UnregisteredSeiTimecodeEnabled = "ENABLED"
)

// H265UnregisteredSeiTimecode_Values returns all elements of the H265UnregisteredSeiTimecode enum
func H265UnregisteredSeiTimecode_Values() []string {
	return []string{
		H265UnregisteredSeiTimecodeDisabled,
		H265UnregisteredSeiTimecodeEnabled,
	}
}

// If the location of parameter set NAL units doesn't matter in your workflow,
// ignore this setting. Use this setting only with CMAF or DASH outputs, or
// with standalone file outputs in an MPEG-4 container (MP4 outputs). Choose
// HVC1 to mark your output as HVC1. This makes your output compliant with the
// following specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15
// 3rd Edition. For these outputs, the service stores parameter set NAL units
// in the sample headers but not in the samples directly. For MP4 outputs, when
// you choose HVC1, your output video might not work properly with some downstream
// systems and video players. The service defaults to marking your output as
// HEV1. For these outputs, the service writes parameter set NAL units directly
// into the samples.
const (
	// H265WriteMp4PackagingTypeHvc1 is a H265WriteMp4PackagingType enum value
	H265WriteMp4PackagingTypeHvc1 = "HVC1"

	// H265WriteMp4PackagingTypeHev1 is a H265WriteMp4PackagingType enum value
	H265WriteMp4PackagingTypeHev1 = "HEV1"
)

// H265WriteMp4PackagingType_Values returns all elements of the H265WriteMp4PackagingType enum
func H265WriteMp4PackagingType_Values() []string {
	values := []string{
		H265WriteMp4PackagingTypeHvc1,
		H265WriteMp4PackagingTypeHev1,
	}
	return values
}

const (
	// HlsAdMarkersElemental is a HlsAdMarkers enum value
	HlsAdMarkersElemental = "ELEMENTAL"

	// HlsAdMarkersElementalScte35 is a HlsAdMarkers enum value
	HlsAdMarkersElementalScte35 = "ELEMENTAL_SCTE35"
)

// HlsAdMarkers_Values returns all elements of the HlsAdMarkers enum
func HlsAdMarkers_Values() []string {
	values := []string{
		HlsAdMarkersElemental,
		HlsAdMarkersElementalScte35,
	}
	return values
}

// Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream
// (M2TS) to create a file in an MPEG2-TS container. Keep the default value
// Automatic (AUTOMATIC) to create a raw audio-only file with no container.
// Regardless of the value that you specify here, if this output has video,
// the service will place outputs into an MPEG2-TS container.
const (
	// HlsAudioOnlyContainerAutomatic is a HlsAudioOnlyContainer enum value
	HlsAudioOnlyContainerAutomatic = "AUTOMATIC"

	// HlsAudioOnlyContainerM2ts is a HlsAudioOnlyContainer enum value
	HlsAudioOnlyContainerM2ts = "M2TS"
)

// HlsAudioOnlyContainer_Values returns all elements of the HlsAudioOnlyContainer enum
func HlsAudioOnlyContainer_Values() []string {
	values := []string{
		HlsAudioOnlyContainerAutomatic,
		HlsAudioOnlyContainerM2ts,
	}
	return values
}

// Ignore this setting unless you are using FairPlay DRM with Verimatrix and
// you encounter playback issues. Keep the default value, Include (INCLUDE),
// to output audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only
// headers from your audio segments.
const (
	// HlsAudioOnlyHeaderInclude is a HlsAudioOnlyHeader enum value
	HlsAudioOnlyHeaderInclude = "INCLUDE"

	// HlsAudioOnlyHeaderExclude is a HlsAudioOnlyHeader enum value
	HlsAudioOnlyHeaderExclude = "EXCLUDE"
)

// HlsAudioOnlyHeader_Values returns all elements of the HlsAudioOnlyHeader enum
func HlsAudioOnlyHeader_Values() []string {
	values := []string{
		HlsAudioOnlyHeaderInclude,
		HlsAudioOnlyHeaderExclude,
	}
	return values
}

// Four types of audio-only tracks are supported: Audio-Only Variant Stream
// The client can play back this audio-only stream instead of video in low-bandwidth
// scenarios. Represented as an EXT-X-STREAM-INF in the HLS manifest. Alternate
// Audio, Auto Select, Default Alternate rendition that the client should try
// to play back by default. Represented as an EXT-X-MEDIA in the HLS manifest
// with DEFAULT=YES, AUTOSELECT=YES Alternate Audio, Auto Select, Not Default
// Alternate rendition that the client may try to play back by default. Represented
// as an EXT-X-MEDIA in the HLS manifest with DEFAULT=NO, AUTOSELECT=YES Alternate
// Audio, not Auto Select Alternate rendition that the client will not try to
// play back by default. Represented as an EXT-X-MEDIA in the HLS manifest with
// DEFAULT=NO, AUTOSELECT=NO
const (
	// HlsAudioTrackTypeAlternateAudioAutoSelectDefault is a HlsAudioTrackType enum value
	HlsAudioTrackTypeAlternateAudioAutoSelectDefault = "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT"

	// HlsAudioTrackTypeAlternateAudioAutoSelect is a HlsAudioTrackType enum value
	HlsAudioTrackTypeAlternateAudioAutoSelect = "ALTERNATE_AUDIO_AUTO_SELECT"

	// HlsAudioTrackTypeAlternateAudioNotAutoSelect is a HlsAudioTrackType enum value
	HlsAudioTrackTypeAlternateAudioNotAutoSelect = "ALTERNATE_AUDIO_NOT_AUTO_SELECT"

	// HlsAudioTrackTypeAudioOnlyVariantStream is a HlsAudioTrackType enum value
	HlsAudioTrackTypeAudioOnlyVariantStream = "AUDIO_ONLY_VARIANT_STREAM"
)

// HlsAudioTrackType_Values returns all elements of the HlsAudioTrackType enum
func HlsAudioTrackType_Values() []string {
	values := []string{
		HlsAudioTrackTypeAlternateAudioAutoSelectDefault,
		HlsAudioTrackTypeAlternateAudioAutoSelect,
		HlsAudioTrackTypeAlternateAudioNotAutoSelect,
		HlsAudioTrackTypeAudioOnlyVariantStream,
	}
	return values
}

// Applies only to 608 Embedded output captions. Insert: Include CLOSED-CAPTIONS
// lines in the manifest. Specify at least one language in the CC1 Language
// Code field. One CLOSED-CAPTION line is added for each Language Code you specify.
// Make sure to specify the languages in the order in which they appear in the
// original source (if the source is embedded format) or the order of the caption
// selectors (if the source is other than embedded). Otherwise, languages in
// the manifest will not match up properly with the output captions. None: Include
// CLOSED-CAPTIONS=NONE line in the manifest. Omit: Omit any CLOSED-CAPTIONS
// line from the manifest.
const (
	// HlsCaptionLanguageSettingInsert is a HlsCaptionLanguageSetting enum value
	HlsCaptionLanguageSettingInsert = "INSERT"

	// HlsCaptionLanguageSettingOmit is a HlsCaptionLanguageSetting enum value
	HlsCaptionLanguageSettingOmit = "OMIT"

	// HlsCaptionLanguageSettingNone is a HlsCaptionLanguageSetting enum value
	HlsCaptionLanguageSettingNone = "NONE"
)

// HlsCaptionLanguageSetting_Values returns all elements of the HlsCaptionLanguageSetting enum
func HlsCaptionLanguageSetting_Values() []string {
	values := []string{
		HlsCaptionLanguageSettingInsert,
		HlsCaptionLanguageSettingOmit,
		HlsCaptionLanguageSettingNone,
	}
	return values
}

// Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no
// tag. Otherwise, keep the default value Enabled (ENABLED) and control caching
// in your video distribution set up. For example, use the Cache-Control http
// header.
const (
	// HlsClientCacheDisabled is a HlsClientCache enum value
	HlsClientCacheDisabled = "DISABLED"

	// HlsClientCacheEnabled is a HlsClientCache enum value
	HlsClientCacheEnabled = "ENABLED"
)

// HlsClientCache_Values returns all elements of the HlsClientCache enum
func HlsClientCache_Values() []string {
	values := []string{
		HlsClientCacheDisabled,
		HlsClientCacheEnabled,
	}
	return values
}

// Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist
// generation.
const (
	// HlsCodecSpecificationRfc6381 is a HlsCodecSpecification enum value
	HlsCodecSpecificationRfc6381 = "RFC_6381"

	// HlsCodecSpecificationRfc4281 is a HlsCodecSpecification enum value
	HlsCodecSpecificationRfc4281 = "RFC_4281"
)

// HlsCodecSpecification_Values returns all elements of the HlsCodecSpecification enum
func HlsCodecSpecification_Values() []string {
	values := []string{
		HlsCodecSpecificationRfc6381,
		HlsCodecSpecificationRfc4281,
	}
	return values
}

// Specify whether to flag this audio track as descriptive video service (DVS)
// in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes
// the parameter CHARACTERISTICS="public.accessibility.describes-video" in the
// EXT-X-MEDIA entry for this track. When you keep the default choice, Don't
// flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can
// help with accessibility on Apple devices. For more information, see the Apple
// documentation.
const (
	// HlsDescriptiveVideoServiceFlagDontFlag is a HlsDescriptiveVideoServiceFlag enum value
	HlsDescriptiveVideoServiceFlagDontFlag = "DONT_FLAG"

	// HlsDescriptiveVideoServiceFlagFlag is a HlsDescriptiveVideoServiceFlag enum value
	HlsDescriptiveVideoServiceFlagFlag = "FLAG"
)

// HlsDescriptiveVideoServiceFlag_Values returns all elements of the HlsDescriptiveVideoServiceFlag enum
func HlsDescriptiveVideoServiceFlag_Values() []string {
	values := []string{
		HlsDescriptiveVideoServiceFlagDontFlag,
		HlsDescriptiveVideoServiceFlagFlag,
	}
	return values
}

// Indicates whether segments should be placed in subdirectories.
const (
	// HlsDirectoryStructureSingleDirectory is a HlsDirectoryStructure enum value
	HlsDirectoryStructureSingleDirectory = "SINGLE_DIRECTORY"

	// HlsDirectoryStructureSubdirectoryPerStream is a HlsDirectoryStructure enum value
	HlsDirectoryStructureSubdirectoryPerStream = "SUBDIRECTORY_PER_STREAM"
)

// HlsDirectoryStructure_Values returns all elements of the HlsDirectoryStructure enum
func HlsDirectoryStructure_Values() []string {
	values := []string{
		HlsDirectoryStructureSingleDirectory,
		HlsDirectoryStructureSubdirectoryPerStream,
	}
	return values
}

// Encrypts the segments with the given encryption scheme. Leave blank to disable.
// Selecting 'Disabled' in the web interface also disables encryption.
const (
	// HlsEncryptionTypeAes128 is a HlsEncryptionType enum value
	HlsEncryptionTypeAes128 = "AES128"

	// HlsEncryptionTypeSampleAes is a HlsEncryptionType enum value
	HlsEncryptionTypeSampleAes = "SAMPLE_AES"
)

// HlsEncryptionType_Values returns all elements of the HlsEncryptionType enum
func HlsEncryptionType_Values() []string {
	values := []string{
		HlsEncryptionTypeAes128,
		HlsEncryptionTypeSampleAes,
	}
	return values
}

// Choose Include (INCLUDE) to have MediaConvert generate a child manifest that
// lists only the I-frames for this rendition, in addition to your regular manifest
// for this rendition. You might use this manifest as part of a workflow that
// creates preview functions for your video. MediaConvert adds both the I-frame
// only child manifest and the regular child manifest to the parent manifest.
// When you don't need the I-frame only child manifest, keep the default value
// Exclude (EXCLUDE).
const (
	// HlsIFrameOnlyManifestInclude is a HlsIFrameOnlyManifest enum value
	HlsIFrameOnlyManifestInclude = "INCLUDE"

	// HlsIFrameOnlyManifestExclude is a HlsIFrameOnlyManifest enum value
	HlsIFrameOnlyManifestExclude = "EXCLUDE"
)

// HlsIFrameOnlyManifest_Values returns all elements of the HlsIFrameOnlyManifest enum
func HlsIFrameOnlyManifest_Values() []string {
	values := []string{
		HlsIFrameOnlyManifestInclude,
		HlsIFrameOnlyManifestExclude,
	}
	return values
}

// Specify whether MediaConvert generates images for trick play. Keep the default
// value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL)
// to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME)
// to generate tiled thumbnails and full-resolution images of single frames.
// MediaConvert creates a child manifest for each set of images that you generate
// and adds corresponding entries to the parent manifest. A common application
// for these images is Roku trick mode. The thumbnails and full-frame images
// that MediaConvert creates with this feature are compatible with this Roku
// specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md
const (
	// HlsImageBasedTrickPlayNone is a HlsImageBasedTrickPlay enum value
	HlsImageBasedTrickPlayNone = "NONE"

	// HlsImageBasedTrickPlayThumbnail is a HlsImageBasedTrickPlay enum value
	HlsImageBasedTrickPlayThumbnail = "THUMBNAIL"

	// HlsImageBasedTrickPlayThumbnailAndFullframe is a HlsImageBasedTrickPlay enum value
	HlsImageBasedTrickPlayThumbnailAndFullframe = "THUMBNAIL_AND_FULLFRAME"
)

// HlsImageBasedTrickPlay_Values returns all elements of the HlsImageBasedTrickPlay enum
func HlsImageBasedTrickPlay_Values() []string {
	values := []string{
		HlsImageBasedTrickPlayNone,
		HlsImageBasedTrickPlayThumbnail,
		HlsImageBasedTrickPlayThumbnailAndFullframe,
	}
	return values
}

// The Initialization Vector is a 128-bit number used in conjunction with the
// key for encrypting blocks. If set to INCLUDE, Initialization Vector is listed
// in the manifest. Otherwise Initialization Vector is not in the manifest.
const (
	// HlsInitializationVectorInManifestInclude is a HlsInitializationVectorInManifest enum value
	HlsInitializationVectorInManifestInclude = "INCLUDE"

	// HlsInitializationVectorInManifestExclude is a HlsInitializationVectorInManifest enum value
	HlsInitializationVectorInManifestExclude = "EXCLUDE"
)

// HlsInitializationVectorInManifest_Values returns all elements of the HlsInitializationVectorInManifest enum
func HlsInitializationVectorInManifest_Values() []string {
	values := []string{
		HlsInitializationVectorInManifestInclude,
		HlsInitializationVectorInManifestExclude,
	}
	return values
}

// Specify whether your DRM encryption key is static or from a key provider
// that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.
const (
	// HlsKeyProviderTypeSpeke is a HlsKeyProviderType enum value
	HlsKeyProviderTypeSpeke = "SPEKE"

	// HlsKeyProviderTypeStaticKey is a HlsKeyProviderType enum value
	HlsKeyProviderTypeStaticKey = "STATIC_KEY"
)

// HlsKeyProviderType_Values returns all elements of the HlsKeyProviderType enum
func HlsKeyProviderType_Values() []string {
	values := []string{
		HlsKeyProviderTypeSpeke,
		HlsKeyProviderTypeStaticKey,
	}
	return values
}

// When set to GZIP, compresses HLS playlist.
const (
	// HlsManifestCompressionGzip is a HlsManifestCompression enum value
	HlsManifestCompressionGzip = "GZIP"

	// HlsManifestCompressionNone is a HlsManifestCompression enum value
	HlsManifestCompressionNone = "NONE"
)

// HlsManifestCompression_Values returns all elements of the HlsManifestCompression enum
func HlsManifestCompression_Values() []string {
	values := []string{
		HlsManifestCompressionGzip,
		HlsManifestCompressionNone,
	}
	return values
}

// Indicates whether the output manifest should use floating point values for
// segment duration.
const (
	// HlsManifestDurationFormatFloatingPoint is a HlsManifestDurationFormat enum value
	HlsManifestDurationFormatFloatingPoint = "FLOATING_POINT"

	// HlsManifestDurationFormatInteger is a HlsManifestDurationFormat enum value
	HlsManifestDurationFormatInteger = "INTEGER"
)

// HlsManifestDurationFormat_Values returns all elements of the HlsManifestDurationFormat enum
func HlsManifestDurationFormat_Values() []string {
	values := []string{
		HlsManifestDurationFormatFloatingPoint,
		HlsManifestDurationFormatInteger,
	}
	return values
}

// Enable this setting to insert the EXT-X-SESSION-KEY element into the master
// playlist. This allows for offline Apple HLS FairPlay content protection.
const (
	// HlsOfflineEncryptedEnabled is a HlsOfflineEncrypted enum value
	HlsOfflineEncryptedEnabled = "ENABLED"

	// HlsOfflineEncryptedDisabled is a HlsOfflineEncrypted enum value
	HlsOfflineEncryptedDisabled = "DISABLED"
)

// HlsOfflineEncrypted_Values returns all elements of the HlsOfflineEncrypted enum
func HlsOfflineEncrypted_Values() []string {
	values := []string{
		HlsOfflineEncryptedEnabled,
		HlsOfflineEncryptedDisabled,
	}
	return values
}

// Indicates whether the .m3u8 manifest file should be generated for this HLS
// output group.
const (
	// HlsOutputSelectionManifestsAndSegments is a HlsOutputSelection enum value
	HlsOutputSelectionManifestsAndSegments = "MANIFESTS_AND_SEGMENTS"

	// HlsOutputSelectionSegmentsOnly is a HlsOutputSelection enum value
	HlsOutputSelectionSegmentsOnly = "SEGMENTS_ONLY"
)

// HlsOutputSelection_Values returns all elements of the HlsOutputSelection enum
func HlsOutputSelection_Values() []string {
	values := []string{
		HlsOutputSelectionManifestsAndSegments,
		HlsOutputSelectionSegmentsOnly,
	}
	return values
}

// Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files.
// The value is calculated as follows: either the program date and time are
// initialized using the input timecode source, or the time is initialized using
// the input timecode source and the date is initialized using the timestamp_offset.
const (
	// HlsProgramDateTimeInclude is a HlsProgramDateTime enum value
	HlsProgramDateTimeInclude = "INCLUDE"

	// HlsProgramDateTimeExclude is a HlsProgramDateTime enum value
	HlsProgramDateTimeExclude = "EXCLUDE"
)

// HlsProgramDateTime_Values returns all elements of the HlsProgramDateTime enum
func HlsProgramDateTime_Values() []string {
	values := []string{
		HlsProgramDateTimeInclude,
		HlsProgramDateTimeExclude,
	}
	return values
}

// When set to SINGLE_FILE, emits program as a single media resource (.ts) file,
// uses #EXT-X-BYTERANGE tags to index segment for playback.
const (
	// HlsSegmentControlSingleFile is a HlsSegmentControl enum value
	HlsSegmentControlSingleFile = "SINGLE_FILE"

	// HlsSegmentControlSegmentedFiles is a HlsSegmentControl enum value
	HlsSegmentControlSegmentedFiles = "SEGMENTED_FILES"
)

// HlsSegmentControl_Values returns all elements of the HlsSegmentControl enum
func HlsSegmentControl_Values() []string {
	values := []string{
		HlsSegmentControlSingleFile,
		HlsSegmentControlSegmentedFiles,
	}
	return values
}

// Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag
// of variant manifest.
const (
	// HlsStreamInfResolutionInclude is a HlsStreamInfResolution enum value
	HlsStreamInfResolutionInclude = "INCLUDE"

	// HlsStreamInfResolutionExclude is a HlsStreamInfResolution enum value
	HlsStreamInfResolutionExclude = "EXCLUDE"
)

// HlsStreamInfResolution_Values returns all elements of the HlsStreamInfResolution enum
func HlsStreamInfResolution_Values() []string {
	values := []string{
		HlsStreamInfResolutionInclude,
		HlsStreamInfResolutionExclude,
	}
	return values
}

// Indicates ID3 frame that has the timecode.
const (
	// HlsTimedMetadataId3FrameNone is a HlsTimedMetadataId3Frame enum value
	HlsTimedMetadataId3FrameNone = "NONE"

	// HlsTimedMetadataId3FramePriv is a HlsTimedMetadataId3Frame enum value
	HlsTimedMetadataId3FramePriv = "PRIV"

	// HlsTimedMetadataId3FrameTdrl is a HlsTimedMetadataId3Frame enum value
	HlsTimedMetadataId3FrameTdrl = "TDRL"
)

// HlsTimedMetadataId3Frame_Values returns all elements of the HlsTimedMetadataId3Frame enum
func HlsTimedMetadataId3Frame_Values() []string {
	values := []string{
		HlsTimedMetadataId3FrameNone,
		HlsTimedMetadataId3FramePriv,
		HlsTimedMetadataId3FrameTdrl,
	}
	return values
}

// Keep this setting enabled to have MediaConvert use the font style and position
// information from the captions source in the output. This option is available
// only when your input captions are IMSC, SMPTE-TT, or TTML. Disable this setting
// for simplified output captions.
const (
	// ImscStylePassthroughEnabled is a ImscStylePassthrough enum value
	ImscStylePassthroughEnabled = "ENABLED"

	// ImscStylePassthroughDisabled is a ImscStylePassthrough enum value
	ImscStylePassthroughDisabled = "DISABLED"
)

// ImscStylePassthrough_Values returns all elements of the ImscStylePassthrough enum
func ImscStylePassthrough_Values() []string {
	values := []string{
		ImscStylePassthroughEnabled,
		ImscStylePassthroughDisabled,
	}
	return values
}

// Enable Deblock (InputDeblockFilter) to produce smoother motion in the output.
// Default is disabled. Only manually controllable for MPEG2 and uncompressed
// video inputs.
const (
	// InputDeblockFilterEnabled is a InputDeblockFilter enum value
	InputDeblockFilterEnabled = "ENABLED"

	// InputDeblockFilterDisabled is a InputDeblockFilter enum value
	InputDeblockFilterDisabled = "DISABLED"
)

// InputDeblockFilter_Values returns all elements of the InputDeblockFilter enum
func InputDeblockFilter_Values() []string {
	values := []string{
		InputDeblockFilterEnabled,
		InputDeblockFilterDisabled,
	}
	return values
}

// Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default
// is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video
// inputs.
const (
	// InputDenoiseFilterEnabled is a InputDenoiseFilter enum value
	InputDenoiseFilterEnabled = "ENABLED"

	// InputDenoiseFilterDisabled is a InputDenoiseFilter enum value
	InputDenoiseFilterDisabled = "DISABLED"
)

// InputDenoiseFilter_Values returns all elements of the InputDenoiseFilter enum
func InputDenoiseFilter_Values() []string {
	values := []string{
		InputDenoiseFilterEnabled,
		InputDenoiseFilterDisabled,
	}
	return values
}

// Specify how the transcoding service applies the denoise and deblock filters.
// You must also enable the filters separately, with Denoise (InputDenoiseFilter)
// and Deblock (InputDeblockFilter). * Auto - The transcoding service determines
// whether to apply filtering, depending on input type and quality. * Disable
// - The input is not filtered. This is true even if you use the API to enable
// them in (InputDeblockFilter) and (InputDeblockFilter). * Force - The input
// is filtered regardless of input type.
const (
	// InputFilterEnableAuto is a InputFilterEnable enum value
	InputFilterEnableAuto = "AUTO"

	// InputFilterEnableDisable is a InputFilterEnable enum value
	InputFilterEnableDisable = "DISABLE"

	// InputFilterEnableForce is a InputFilterEnable enum value
	InputFilterEnableForce = "FORCE"
)

// InputFilterEnable_Values returns all elements of the InputFilterEnable enum
func InputFilterEnable_Values() []string {
	values := []string{
		InputFilterEnableAuto,
		InputFilterEnableDisable,
		InputFilterEnableForce,
	}
	return values
}

// Set PSI control (InputPsiControl) for transport stream inputs to specify
// which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio
// and video. * Use PSI - Scan only PSI data.
const (
	// InputPsiControlIgnorePsi is a InputPsiControl enum value
	InputPsiControlIgnorePsi = "IGNORE_PSI"

	// InputPsiControlUsePsi is a InputPsiControl enum value
	InputPsiControlUsePsi = "USE_PSI"
)

// InputPsiControl_Values returns all elements of the InputPsiControl enum
func InputPsiControl_Values() []string {
	values := []string{
		InputPsiControlIgnorePsi,
		InputPsiControlUsePsi,
	}
	return values
}

// Use Rotate (InputRotate) to specify how the service rotates your video. You
// can choose automatic rotation or specify a rotation. You can specify a clockwise
// rotation of 0, 90, 180, or 270 degrees. If your input video container is
// .mov or .mp4 and your input has rotation metadata, you can choose Automatic
// to have the service rotate your video according to the rotation specified
// in the metadata. The rotation must be within one degree of 90, 180, or 270
// degrees. If the rotation metadata specifies any other rotation, the service
// will default to no rotation. By default, the service does no rotation, even
// if your input video has rotation metadata. The service doesn't pass through
// rotation metadata.
const (
	// InputRotateDegree0 is a InputRotate enum value
	InputRotateDegree0 = "DEGREE_0"

	// InputRotateDegrees90 is a InputRotate enum value
	InputRotateDegrees90 = "DEGREES_90"

	// InputRotateDegrees180 is a InputRotate enum value
	InputRotateDegrees180 = "DEGREES_180"

	// InputRotateDegrees270 is a InputRotate enum value
	InputRotateDegrees270 = "DEGREES_270"

	// InputRotateAuto is a InputRotate enum value
	InputRotateAuto = "AUTO"
)

// InputRotate_Values returns all elements of the InputRotate enum
func InputRotate_Values() []string {
	values := []string{
		InputRotateDegree0,
		InputRotateDegrees90,
		InputRotateDegrees180,
		InputRotateDegrees270,
		InputRotateAuto,
	}
	return values
}

// Use this setting when your input video codec is AVC-Intra. Ignore this setting
// for all other inputs. If the sample range metadata in your input video is
// accurate, or if you don't know about sample range, keep the default value,
// Follow (FOLLOW), for this setting. When you do, the service automatically
// detects your input sample range. If your input video has metadata indicating
// the wrong sample range, specify the accurate sample range here. When you
// do, MediaConvert ignores any sample range information in the input metadata.
// Regardless of whether MediaConvert uses the input sample range or the sample
// range that you specify, MediaConvert uses the sample range for transcoding
// and also writes it to the output metadata.
const (
	// InputSampleRangeFollow is a InputSampleRange enum value
	InputSampleRangeFollow = "FOLLOW"

	// InputSampleRangeFullRange is a InputSampleRange enum value
	InputSampleRangeFullRange = "FULL_RANGE"

	// InputSampleRangeLimitedRange is a InputSampleRange enum value
	InputSampleRangeLimitedRange = "LIMITED_RANGE"
)

// InputSampleRange_Values returns all elements of the InputSampleRange enum
func InputSampleRange_Values() []string {
	values := []string{
		InputSampleRangeFollow,
		InputSampleRangeFullRange,
		InputSampleRangeLimitedRange,
	}
	return values
}

// When you have a progressive segmented frame (PsF) input, use this setting
// to flag the input as PsF. MediaConvert doesn't automatically detect PsF.
// Therefore, flagging your input as PsF results in better preservation of video
// quality when you do deinterlacing and frame rate conversion. If you don't
// specify, the default value is Auto (AUTO). Auto is the correct setting for
// all inputs that are not PsF. Don't set this value to PsF when your input
// is interlaced. Doing so creates horizontal interlacing artifacts.
const (
	// InputScanTypeAuto is a InputScanType enum value
	InputScanTypeAuto = "AUTO"

	// InputScanTypePsf is a InputScanType enum value
	InputScanTypePsf = "PSF"
)

// InputScanType_Values returns all elements of the InputScanType enum
func InputScanType_Values() []string {
	values := []string{
		InputScanTypeAuto,
		InputScanTypePsf,
	}
	return values
}

// Use this Timecode source setting, located under the input settings (InputTimecodeSource),
// to specify how the service counts input video frames. This input frame count
// affects only the behavior of features that apply to a single input at a time,
// such as input clipping and synchronizing some captions formats. Choose Embedded
// (EMBEDDED) to use the timecodes in your input video. Choose Start at zero
// (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART)
// to start the first frame at the timecode that you specify in the setting
// Start timecode (timecodeStart). If you don't specify a value for Timecode
// source, the service will use Embedded by default. For more information about
// timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.
const (
	// InputTimecodeSourceEmbedded is a InputTimecodeSource enum value
	InputTimecodeSourceEmbedded = "EMBEDDED"

	// InputTimecodeSourceZerobased is a InputTimecodeSource enum value
	InputTimecodeSourceZerobased = "ZEROBASED"

	// InputTimecodeSourceSpecifiedstart is a InputTimecodeSource enum value
	InputTimecodeSourceSpecifiedstart = "SPECIFIEDSTART"
)

// InputTimecodeSource_Values returns all elements of the InputTimecodeSource enum
func InputTimecodeSource_Values() []string {
	values := []string{
		InputTimecodeSourceEmbedded,
		InputTimecodeSourceZerobased,
		InputTimecodeSourceSpecifiedstart,
	}
	return values
}

// A job's phase can be PROBING, TRANSCODING OR UPLOADING
const (
	// JobPhaseProbing is a JobPhase enum value
	JobPhaseProbing = "PROBING"

	// JobPhaseTranscoding is a JobPhase enum value
	JobPhaseTranscoding = "TRANSCODING"

	// JobPhaseUploading is a JobPhase enum value
	JobPhaseUploading = "UPLOADING"
)

// JobPhase_Values returns all elements of the JobPhase enum
func JobPhase_Values() []string {
	values := []string{
		JobPhaseProbing,
		JobPhaseTranscoding,
		JobPhaseUploading,
	}
	return values
}

// A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR.
const (
	// JobStatusSubmitted is a JobStatus enum value
	JobStatusSubmitted = "SUBMITTED"

	// JobStatusProgressing is a JobStatus enum value
	JobStatusProgressing = "PROGRESSING"

	// JobStatusComplete is a JobStatus enum value
	JobStatusComplete = "COMPLETE"

	// JobStatusCanceled is a JobStatus enum value
	JobStatusCanceled = "CANCELED"

	// JobStatusError is a JobStatus enum value
	JobStatusError = "ERROR"
)

// JobStatus_Values returns all elements of the JobStatus enum
func JobStatus_Values() []string {
	values := []string{
		JobStatusSubmitted,
		JobStatusProgressing,
		JobStatusComplete,
		JobStatusCanceled,
		JobStatusError,
	}
	return values
}

// Optional. When you request a list of job templates, you can choose to list
// them alphabetically by NAME or chronologically by CREATION_DATE. If you don't
// specify, the service will list them by name.
const (
	// JobTemplateListByName is a JobTemplateListBy enum value
	JobTemplateListByName = "NAME"

	// JobTemplateListByCreationDate is a JobTemplateListBy enum value
	JobTemplateListByCreationDate = "CREATION_DATE"

	// JobTemplateListBySystem is a JobTemplateListBy enum value
	JobTemplateListBySystem = "SYSTEM"
)

// JobTemplateListBy_Values returns all elements of the JobTemplateListBy enum
func JobTemplateListBy_Values() []string {
	values := []string{
		JobTemplateListByName,
		JobTemplateListByCreationDate,
		JobTemplateListBySystem,
	}
	return values
}

// Specify the language, using the ISO 639-2 three-letter code listed at https://www.loc.gov/standards/iso639-2/php/code_list.php.
const (
	// LanguageCodeEng is a LanguageCode enum value
	LanguageCodeEng = "ENG"
	// LanguageCodeSpa is a LanguageCode enum value
	LanguageCodeSpa = "SPA"
	// LanguageCodeFra is a LanguageCode enum value
	LanguageCodeFra = "FRA"
	// LanguageCodeDeu is a LanguageCode enum value
	LanguageCodeDeu = "DEU"
	// LanguageCodeGer is a LanguageCode enum value
	LanguageCodeGer = "GER"
	// LanguageCodeZho is a LanguageCode enum value
	LanguageCodeZho = "ZHO"
	// LanguageCodeAra is a LanguageCode enum value
	LanguageCodeAra = "ARA"
	// LanguageCodeHin is a LanguageCode enum value
	LanguageCodeHin = "HIN"
	// LanguageCodeJpn is a LanguageCode enum value
	LanguageCodeJpn = "JPN"
	// LanguageCodeRus is a LanguageCode enum value
	LanguageCodeRus = "RUS"
	// LanguageCodePor is a LanguageCode enum value
	LanguageCodePor = "POR"
	// LanguageCodeIta is a LanguageCode enum value
	LanguageCodeIta = "ITA"
	// LanguageCodeUrd is a LanguageCode enum value
	LanguageCodeUrd = "URD"
	// LanguageCodeVie is a LanguageCode enum value
	LanguageCodeVie = "VIE"
	// LanguageCodeKor is a LanguageCode enum value
	LanguageCodeKor = "KOR"
	// LanguageCodePan is a LanguageCode enum value
	LanguageCodePan = "PAN"
	// LanguageCodeAbk is a LanguageCode enum value
	LanguageCodeAbk = "ABK"
	// LanguageCodeAar is a LanguageCode enum value
	LanguageCodeAar = "AAR"
	// LanguageCodeAfr is a LanguageCode enum value
	LanguageCodeAfr = "AFR"
	// LanguageCodeAka is a LanguageCode enum value
	LanguageCodeAka = "AKA"
	// LanguageCodeSqi is a LanguageCode enum value
	LanguageCodeSqi = "SQI"
	// LanguageCodeAmh is a LanguageCode enum value
	LanguageCodeAmh = "AMH"
	// LanguageCodeArg is a LanguageCode enum value
	LanguageCodeArg = "ARG"
	// LanguageCodeHye is a LanguageCode enum value
	LanguageCodeHye = "HYE"
	// LanguageCodeAsm is a LanguageCode enum value
	LanguageCodeAsm = "ASM"
	// LanguageCodeAva is a LanguageCode enum value
	LanguageCodeAva = "AVA"
	// LanguageCodeAve is a LanguageCode enum value
	LanguageCodeAve = "AVE"
	// LanguageCodeAym is a LanguageCode enum value
	LanguageCodeAym = "AYM"
	// LanguageCodeAze is a LanguageCode enum value
	LanguageCodeAze = "AZE"
	// LanguageCodeBam is a LanguageCode enum value
	LanguageCodeBam = "BAM"
	// LanguageCodeBak is a LanguageCode enum value
	LanguageCodeBak = "BAK"
	// LanguageCodeEus is a LanguageCode enum value
	LanguageCodeEus = "EUS"
	// LanguageCodeBel is a LanguageCode enum value
	LanguageCodeBel = "BEL"
	// LanguageCodeBen is a LanguageCode enum value
	LanguageCodeBen = "BEN"
	// LanguageCodeBih is a LanguageCode enum value
	LanguageCodeBih = "BIH"
	// LanguageCodeBis is a LanguageCode enum value
	LanguageCodeBis = "BIS"
	// LanguageCodeBos is a LanguageCode enum value
	LanguageCodeBos = "BOS"
	// LanguageCodeBre is a LanguageCode enum value
	LanguageCodeBre = "BRE"
	// LanguageCodeBul is a LanguageCode enum value
	LanguageCodeBul = "BUL"
	// LanguageCodeMya is a LanguageCode enum value
	LanguageCodeMya = "MYA"
	// LanguageCodeCat is a LanguageCode enum value
	LanguageCodeCat = "CAT"
	// LanguageCodeKhm is a LanguageCode enum value
	LanguageCodeKhm = "KHM"
	// LanguageCodeCha is a LanguageCode enum value
	LanguageCodeCha = "CHA"
	// LanguageCodeChe is a LanguageCode enum value
	LanguageCodeChe = "CHE"
	// LanguageCodeNya is a LanguageCode enum value
	LanguageCodeNya = "NYA"
	// LanguageCodeChu is a LanguageCode enum value
	LanguageCodeChu = "CHU"
	// LanguageCodeChv is a LanguageCode enum value
	LanguageCodeChv = "CHV"
	// LanguageCodeCor is a LanguageCode enum value
	LanguageCodeCor = "COR"
	// LanguageCodeCos is a LanguageCode enum value
	LanguageCodeCos = "COS"
	// LanguageCodeCre is a LanguageCode enum value
	LanguageCodeCre = "CRE"
	// LanguageCodeHrv is a LanguageCode enum value
	LanguageCodeHrv = "HRV"
	// LanguageCodeCes is a LanguageCode enum value
	LanguageCodeCes = "CES"
	// LanguageCodeDan is a LanguageCode enum value
	LanguageCodeDan = "DAN"
	// LanguageCodeDiv is a LanguageCode enum value
	LanguageCodeDiv = "DIV"
	// LanguageCodeNld is a LanguageCode enum value
	LanguageCodeNld = "NLD"
	// LanguageCodeDzo is a LanguageCode enum value
	LanguageCodeDzo = "DZO"
	// LanguageCodeEnm is a LanguageCode enum value
	LanguageCodeEnm = "ENM"
	// LanguageCodeEpo is a LanguageCode enum value
	LanguageCodeEpo = "EPO"
	// LanguageCodeEst is a LanguageCode enum value
	LanguageCodeEst = "EST"
	// LanguageCodeEwe is a LanguageCode enum value
	LanguageCodeEwe = "EWE"
	// LanguageCodeFao is a LanguageCode enum value
	LanguageCodeFao = "FAO"
	// LanguageCodeFij is a LanguageCode enum value
	LanguageCodeFij = "FIJ"
	// LanguageCodeFin is a LanguageCode enum value
	LanguageCodeFin = "FIN"
	// LanguageCodeFrm is a LanguageCode enum value
	LanguageCodeFrm = "FRM"
	// LanguageCodeFul is a LanguageCode enum value
	LanguageCodeFul = "FUL"
	// LanguageCodeGla is a LanguageCode enum value
	LanguageCodeGla = "GLA"
	// LanguageCodeGlg is a LanguageCode enum value
	LanguageCodeGlg = "GLG"
	// LanguageCodeLug is a LanguageCode enum value
	LanguageCodeLug = "LUG"
	// LanguageCodeKat is a LanguageCode enum value
	LanguageCodeKat = "KAT"
	// LanguageCodeEll is a LanguageCode enum value
	LanguageCodeEll = "ELL"
	// LanguageCodeGrn is a LanguageCode enum value
	LanguageCodeGrn = "GRN"
	// LanguageCodeGuj is a LanguageCode enum value
	LanguageCodeGuj = "GUJ"
	// LanguageCodeHat is a LanguageCode enum value
	LanguageCodeHat = "HAT"
	// LanguageCodeHau is a LanguageCode enum value
	LanguageCodeHau = "HAU"
	// LanguageCodeHeb is a LanguageCode enum value
	LanguageCodeHeb = "HEB"
	// LanguageCodeHer is a LanguageCode enum value
	LanguageCodeHer = "HER"
	// LanguageCodeHmo is a LanguageCode enum value
	LanguageCodeHmo = "HMO"
	// LanguageCodeHun is a LanguageCode enum value
	LanguageCodeHun = "HUN"
	// LanguageCodeIsl is a LanguageCode enum value
	LanguageCodeIsl = "ISL"
	// LanguageCodeIdo is a LanguageCode enum value
	LanguageCodeIdo = "IDO"
	// LanguageCodeIbo is a LanguageCode enum value
	LanguageCodeIbo = "IBO"
	// LanguageCodeInd is a LanguageCode enum value
	LanguageCodeInd = "IND"
	// LanguageCodeIna is a LanguageCode enum value
	LanguageCodeIna = "INA"
	// LanguageCodeIle is a LanguageCode enum value
	LanguageCodeIle = "ILE"
	// LanguageCodeIku is a LanguageCode enum value
	LanguageCodeIku = "IKU"
	// LanguageCodeIpk is a LanguageCode enum value
	LanguageCodeIpk = "IPK"
	// LanguageCodeGle is a LanguageCode enum value
	LanguageCodeGle = "GLE"
	// LanguageCodeJav is a LanguageCode enum value
	LanguageCodeJav = "JAV"
	// LanguageCodeKal is a LanguageCode enum value
	LanguageCodeKal = "KAL"
	// LanguageCodeKan is a LanguageCode enum value
	LanguageCodeKan = "KAN"
	// LanguageCodeKau is a LanguageCode enum value
	LanguageCodeKau = "KAU"
	// LanguageCodeKas is a LanguageCode enum value
	LanguageCodeKas = "KAS"
	// LanguageCodeKaz is a LanguageCode enum value
	LanguageCodeKaz = "KAZ"
	// LanguageCodeKik is a LanguageCode enum value
	LanguageCodeKik = "KIK"
	// LanguageCodeKin is a LanguageCode enum value
	LanguageCodeKin = "KIN"
	// LanguageCodeKir is a LanguageCode enum value
	LanguageCodeKir = "KIR"
	// LanguageCodeKom is a LanguageCode enum value
	LanguageCodeKom = "KOM"
	// LanguageCodeKon is a LanguageCode enum value
	LanguageCodeKon = "KON"
	// LanguageCodeKua is a LanguageCode enum value
	LanguageCodeKua = "KUA"
	// LanguageCodeKur is a LanguageCode enum value
	LanguageCodeKur = "KUR"
	// LanguageCodeLao is a LanguageCode enum value
	LanguageCodeLao = "LAO"
	// LanguageCodeLat is a LanguageCode enum value
	LanguageCodeLat = "LAT"
	// LanguageCodeLav is a LanguageCode enum value
	LanguageCodeLav = "LAV"
	// LanguageCodeLim is a LanguageCode enum value
	LanguageCodeLim = "LIM"
	// LanguageCodeLin is a LanguageCode enum value
	LanguageCodeLin = "LIN"
	// LanguageCodeLit is a LanguageCode enum value
	LanguageCodeLit = "LIT"
	// LanguageCodeLub is a LanguageCode enum value
	LanguageCodeLub = "LUB"
	// LanguageCodeLtz is a LanguageCode enum value
	LanguageCodeLtz = "LTZ"
	// LanguageCodeMkd is a LanguageCode enum value
	LanguageCodeMkd = "MKD"
	// LanguageCodeMlg is a LanguageCode enum value
	LanguageCodeMlg = "MLG"
	// LanguageCodeMsa is a LanguageCode enum value
	LanguageCodeMsa = "MSA"
	// LanguageCodeMal is a LanguageCode enum value
	LanguageCodeMal = "MAL"
	// LanguageCodeMlt is a LanguageCode enum value
	LanguageCodeMlt = "MLT"
	// LanguageCodeGlv is a LanguageCode enum value
	LanguageCodeGlv = "GLV"
	// LanguageCodeMri is a LanguageCode enum value
	LanguageCodeMri = "MRI"
	// LanguageCodeMar is a LanguageCode enum value
	LanguageCodeMar = "MAR"
	// LanguageCodeMah is a LanguageCode enum value
	LanguageCodeMah = "MAH"
	// LanguageCodeMon is a LanguageCode enum value
	LanguageCodeMon = "MON"
	// LanguageCodeNau is a LanguageCode enum value
	LanguageCodeNau = "NAU"
	// LanguageCodeNav is a LanguageCode enum value
	LanguageCodeNav = "NAV"
	// LanguageCodeNde is a LanguageCode enum value
	LanguageCodeNde = "NDE"
	// LanguageCodeNbl is a LanguageCode enum value
	LanguageCodeNbl = "NBL"
	// LanguageCodeNdo is a LanguageCode enum value
	LanguageCodeNdo = "NDO"
	// LanguageCodeNep is a LanguageCode enum value
	LanguageCodeNep = "NEP"
	// LanguageCodeSme is a LanguageCode enum value
	LanguageCodeSme = "SME"
	// LanguageCodeNor is a LanguageCode enum value
	LanguageCodeNor = "NOR"
	// LanguageCodeNob is a LanguageCode enum value
	LanguageCodeNob = "NOB"
	// LanguageCodeNno is a LanguageCode enum value
	LanguageCodeNno = "NNO"
	// LanguageCodeOci is a LanguageCode enum value
	LanguageCodeOci = "OCI"
	// LanguageCodeOji is a LanguageCode enum value
	LanguageCodeOji = "OJI"
	// LanguageCodeOri is a LanguageCode enum value
	LanguageCodeOri = "ORI"
	// LanguageCodeOrm is a LanguageCode enum value
	LanguageCodeOrm = "ORM"
	// LanguageCodeOss is a LanguageCode enum value
	LanguageCodeOss = "OSS"
	// LanguageCodePli is a LanguageCode enum value
	LanguageCodePli = "PLI"
	// LanguageCodeFas is a LanguageCode enum value
	LanguageCodeFas = "FAS"
	// LanguageCodePol is a LanguageCode enum value
	LanguageCodePol = "POL"
	// LanguageCodePus is a LanguageCode enum value
	LanguageCodePus = "PUS"
	// LanguageCodeQue is a LanguageCode enum value
	LanguageCodeQue = "QUE"
	// LanguageCodeQaa is a LanguageCode enum value
	LanguageCodeQaa = "QAA"
	// LanguageCodeRon is a LanguageCode enum value
	LanguageCodeRon = "RON"
	// LanguageCodeRoh is a LanguageCode enum value
	LanguageCodeRoh = "ROH"
	// LanguageCodeRun is a LanguageCode enum value
	LanguageCodeRun = "RUN"
	// LanguageCodeSmo is a LanguageCode enum value
	LanguageCodeSmo = "SMO"
	// LanguageCodeSag is a LanguageCode enum value
	LanguageCodeSag = "SAG"
	// LanguageCodeSan is a LanguageCode enum value
	LanguageCodeSan = "SAN"
	// LanguageCodeSrd is a LanguageCode enum value
	LanguageCodeSrd = "SRD"
	// LanguageCodeSrb is a LanguageCode enum value
	LanguageCodeSrb = "SRB"
	// LanguageCodeSna is a LanguageCode enum value
	LanguageCodeSna = "SNA"
	// LanguageCodeIii is a LanguageCode enum value
	LanguageCodeIii = "III"
	// LanguageCodeSnd is a LanguageCode enum value
	LanguageCodeSnd = "SND"
	// LanguageCodeSin is a LanguageCode enum value
	LanguageCodeSin = "SIN"
	// LanguageCodeSlk is a LanguageCode enum value
	LanguageCodeSlk = "SLK"
	// LanguageCodeSlv is a LanguageCode enum value
	LanguageCodeSlv = "SLV"
	// LanguageCodeSom is a LanguageCode enum value
	LanguageCodeSom = "SOM"
	// LanguageCodeSot is a LanguageCode enum value
	LanguageCodeSot = "SOT"
	// LanguageCodeSun is a LanguageCode enum value
	LanguageCodeSun = "SUN"
	// LanguageCodeSwa is a LanguageCode enum value
	LanguageCodeSwa = "SWA"
	// LanguageCodeSsw is a LanguageCode enum value
	LanguageCodeSsw = "SSW"
	// LanguageCodeSwe is a LanguageCode enum value
	LanguageCodeSwe = "SWE"
	// LanguageCodeTgl is a LanguageCode enum value
	LanguageCodeTgl = "TGL"
	// LanguageCodeTah is a LanguageCode enum value
	LanguageCodeTah = "TAH"
	// LanguageCodeTgk is a LanguageCode enum value
	LanguageCodeTgk = "TGK"
	// LanguageCodeTam is a LanguageCode enum value
	LanguageCodeTam = "TAM"
	// LanguageCodeTat is a LanguageCode enum value
	LanguageCodeTat = "TAT"
	// LanguageCodeTel is a LanguageCode enum value
	LanguageCodeTel = "TEL"
	// LanguageCodeTha is a LanguageCode enum value
	LanguageCodeTha = "THA"
	// LanguageCodeBod is a LanguageCode enum value
	LanguageCodeBod = "BOD"
	// LanguageCodeTir is a LanguageCode enum value
	LanguageCodeTir = "TIR"
	// LanguageCodeTon is a LanguageCode enum value
	LanguageCodeTon = "TON"
	// LanguageCodeTso is a LanguageCode enum value
	LanguageCodeTso = "TSO"
	// LanguageCodeTsn is a LanguageCode enum value
	LanguageCodeTsn = "TSN"
	// LanguageCodeTur is a LanguageCode enum value
	LanguageCodeTur = "TUR"
	// LanguageCodeTuk is a LanguageCode enum value
	LanguageCodeTuk = "TUK"
	// LanguageCodeTwi is a LanguageCode enum value
	LanguageCodeTwi = "TWI"
	// LanguageCodeUig is a LanguageCode enum value
	LanguageCodeUig = "UIG"
	// LanguageCodeUkr is a LanguageCode enum value
	LanguageCodeUkr = "UKR"
	// LanguageCodeUzb is a LanguageCode enum value
	LanguageCodeUzb = "UZB"
	// LanguageCodeVen is a LanguageCode enum value
	LanguageCodeVen = "VEN"
	// LanguageCodeVol is a LanguageCode enum value
	LanguageCodeVol = "VOL"
	// LanguageCodeWln is a LanguageCode enum value
	LanguageCodeWln = "WLN"
	// LanguageCodeCym is a LanguageCode enum value
	LanguageCodeCym = "CYM"
	// LanguageCodeFry is a LanguageCode enum value
	LanguageCodeFry = "FRY"
	// LanguageCodeWol is a LanguageCode enum value
	LanguageCodeWol = "WOL"
	// LanguageCodeXho is a LanguageCode enum value
	LanguageCodeXho = "XHO"
	// LanguageCodeYid is a LanguageCode enum value
	LanguageCodeYid = "YID"
	// LanguageCodeYor is a LanguageCode enum value
	LanguageCodeYor = "YOR"
	// LanguageCodeZha is a LanguageCode enum value
	LanguageCodeZha = "ZHA"
	// LanguageCodeZul is a LanguageCode enum value
	LanguageCodeZul = "ZUL"
	// LanguageCodeOrj is a LanguageCode enum value
	LanguageCodeOrj = "ORJ"
	// LanguageCodeQpc is a LanguageCode enum value
	LanguageCodeQpc = "QPC"
	// LanguageCodeTng is a LanguageCode enum value
	LanguageCodeTng = "TNG"
)

// LanguageCode_Values returns all elements of the LanguageCode enum
func LanguageCode_Values() []string {
	return []string{
		LanguageCodeEng,
		LanguageCodeSpa,
		LanguageCodeFra,
		LanguageCodeDeu,
		LanguageCodeGer,
		LanguageCodeZho,
		LanguageCodeAra,
		LanguageCodeHin,
		LanguageCodeJpn,
		LanguageCodeRus,
		LanguageCodePor,
		LanguageCodeIta,
		LanguageCodeUrd,
		LanguageCodeVie,
		LanguageCodeKor,
		LanguageCodePan,
		LanguageCodeAbk,
		LanguageCodeAar,
		LanguageCodeAfr,
		LanguageCodeAka,
		LanguageCodeSqi,
		LanguageCodeAmh,
		LanguageCodeArg,
		LanguageCodeHye,
		LanguageCodeAsm,
		LanguageCodeAva,
		LanguageCodeAve,
		LanguageCodeAym,
		LanguageCodeAze,
		LanguageCodeBam,
		LanguageCodeBak,
		LanguageCodeEus,
		LanguageCodeBel,
		LanguageCodeBen,
		LanguageCodeBih,
		LanguageCodeBis,
		LanguageCodeBos,
		LanguageCodeBre,
		LanguageCodeBul,
		LanguageCodeMya,
		LanguageCodeCat,
		LanguageCodeKhm,
		LanguageCodeCha,
		LanguageCodeChe,
		LanguageCodeNya,
		LanguageCodeChu,
		LanguageCodeChv,
		LanguageCodeCor,
		LanguageCodeCos,
		LanguageCodeCre,
		LanguageCodeHrv,
		LanguageCodeCes,
		LanguageCodeDan,
		LanguageCodeDiv,
		LanguageCodeNld,
		LanguageCodeDzo,
		LanguageCodeEnm,
		LanguageCodeEpo,
		LanguageCodeEst,
		LanguageCodeEwe,
		LanguageCodeFao,
		LanguageCodeFij,
		LanguageCodeFin,
		LanguageCodeFrm,
		LanguageCodeFul,
		LanguageCodeGla,
		LanguageCodeGlg,
		LanguageCodeLug,
		LanguageCodeKat,
		LanguageCodeEll,
		LanguageCodeGrn,
		LanguageCodeGuj,
		LanguageCodeHat,
		LanguageCodeHau,
		LanguageCodeHeb,
		LanguageCodeHer,
		LanguageCodeHmo,
		LanguageCodeHun,
		LanguageCodeIsl,
		LanguageCodeIdo,
		LanguageCodeIbo,
		LanguageCodeInd,
		LanguageCodeIna,
		LanguageCodeIle,
		LanguageCodeIku,
		LanguageCodeIpk,
		LanguageCodeGle,
		LanguageCodeJav,
		LanguageCodeKal,
		LanguageCodeKan,
		LanguageCodeKau,
		LanguageCodeKas,
		LanguageCodeKaz,
		LanguageCodeKik,
		LanguageCodeKin,
		LanguageCodeKir,
		LanguageCodeKom,
		LanguageCodeKon,
		LanguageCodeKua,
		LanguageCodeKur,
		LanguageCodeLao,
		LanguageCodeLat,
		LanguageCodeLav,
		LanguageCodeLim,
		LanguageCodeLin,
		LanguageCodeLit,
		LanguageCodeLub,
		LanguageCodeLtz,
		LanguageCodeMkd,
		LanguageCodeMlg,
		LanguageCodeMsa,
		LanguageCodeMal,
		LanguageCodeMlt,
		LanguageCodeGlv,
		LanguageCodeMri,
		LanguageCodeMar,
		LanguageCodeMah,
		LanguageCodeMon,
		LanguageCodeNau,
		LanguageCodeNav,
		LanguageCodeNde,
		LanguageCodeNbl,
		LanguageCodeNdo,
		LanguageCodeNep,
		LanguageCodeSme,
		LanguageCodeNor,
		LanguageCodeNob,
		LanguageCodeNno,
		LanguageCodeOci,
		LanguageCodeOji,
		LanguageCodeOri,
		LanguageCodeOrm,
		LanguageCodeOss,
		LanguageCodePli,
		LanguageCodeFas,
		LanguageCodePol,
		LanguageCodePus,
		LanguageCodeQue,
		LanguageCodeQaa,
		LanguageCodeRon,
		LanguageCodeRoh,
		LanguageCodeRun,
		LanguageCodeSmo,
		LanguageCodeSag,
		LanguageCodeSan,
		LanguageCodeSrd,
		LanguageCodeSrb,
		LanguageCodeSna,
		LanguageCodeIii,
		LanguageCodeSnd,
		LanguageCodeSin,
		LanguageCodeSlk,
		LanguageCodeSlv,
		LanguageCodeSom,
		LanguageCodeSot,
		LanguageCodeSun,
		LanguageCodeSwa,
		LanguageCodeSsw,
		LanguageCodeSwe,
		LanguageCodeTgl,
		LanguageCodeTah,
		LanguageCodeTgk,
		LanguageCodeTam,
		LanguageCodeTat,
		LanguageCodeTel,
		LanguageCodeTha,
		LanguageCodeBod,
		LanguageCodeTir,
		LanguageCodeTon,
		LanguageCodeTso,
		LanguageCodeTsn,
		LanguageCodeTur,
		LanguageCodeTuk,
		LanguageCodeTwi,
		LanguageCodeUig,
		LanguageCodeUkr,
		LanguageCodeUzb,
		LanguageCodeVen,
		LanguageCodeVol,
		LanguageCodeWln,
		LanguageCodeCym,
		LanguageCodeFry,
		LanguageCodeWol,
		LanguageCodeXho,
		LanguageCodeYid,
		LanguageCodeYor,
		LanguageCodeZha,
		LanguageCodeZul,
		LanguageCodeOrj,
		LanguageCodeQpc,
		LanguageCodeTng,
	}
}

// Selects between the DVB and ATSC buffer models for Dolby Digital audio.
const (
	// M2tsAudioBufferModelDvb is a M2tsAudioBufferModel enum value
	M2tsAudioBufferModelDvb = "DVB"

	// M2tsAudioBufferModelAtsc is a M2tsAudioBufferModel enum value
	M2tsAudioBufferModelAtsc = "ATSC"
)

// M2tsAudioBufferModel_Values returns all elements of the M2tsAudioBufferModel enum
func M2tsAudioBufferModel_Values() []string {
	return []string{
		M2tsAudioBufferModelDvb,
		M2tsAudioBufferModelAtsc,
	}
}

// Specify this setting only when your output will be consumed by a downstream
// repackaging workflow that is sensitive to very small duration differences
// between video and audio. For this situation, choose Match video duration
// (MATCH_VIDEO_DURATION).
In all other cases, keep the default value, Default
// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
// MediaConvert pads the output audio streams with silence or trims them to
// ensure that the total duration of each audio stream is at least as long as
// the total duration of the video stream. After padding or trimming, the audio
// stream duration is no more than one frame longer than the video stream. MediaConvert
// applies audio padding or trimming only to the end of the last segment of
// the output. For unsegmented outputs, MediaConvert adds padding only to the
// end of the file. When you keep the default value, any minor discrepancies
// between audio and video duration will depend on your output audio codec.
const (
	// M2tsAudioDurationDefaultCodecDuration is a M2tsAudioDuration enum value
	M2tsAudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION"

	// M2tsAudioDurationMatchVideoDuration is a M2tsAudioDuration enum value
	M2tsAudioDurationMatchVideoDuration = "MATCH_VIDEO_DURATION"
)

// M2tsAudioDuration_Values returns all elements of the M2tsAudioDuration enum
func M2tsAudioDuration_Values() []string {
	return []string{
		M2tsAudioDurationDefaultCodecDuration,
		M2tsAudioDurationMatchVideoDuration,
	}
}

// Controls what buffer model to use for accurate interleaving. If set to MULTIPLEX,
// use multiplex buffer model. If set to NONE, this can lead to lower latency,
// but low-memory devices may not be able to play back the stream without interruptions.
const (
	// M2tsBufferModelMultiplex is a M2tsBufferModel enum value
	M2tsBufferModelMultiplex = "MULTIPLEX"

	// M2tsBufferModelNone is a M2tsBufferModel enum value
	M2tsBufferModelNone = "NONE"
)

// M2tsBufferModel_Values returns all elements of the M2tsBufferModel enum
func M2tsBufferModel_Values() []string {
	return []string{
		M2tsBufferModelMultiplex,
		M2tsBufferModelNone,
	}
}

// When set to VIDEO_AND_FIXED_INTERVALS, audio EBP markers will be added to
// partitions 3 and 4.
The interval between these additional markers will be
// fixed, and will be slightly shorter than the video EBP marker interval. When
// set to VIDEO_INTERVAL, these additional markers will not be inserted. Only
// applicable when EBP segmentation markers are selected (segmentationMarkers
// is EBP or EBP_LEGACY).
const (
	// M2tsEbpAudioIntervalVideoAndFixedIntervals is a M2tsEbpAudioInterval enum value
	M2tsEbpAudioIntervalVideoAndFixedIntervals = "VIDEO_AND_FIXED_INTERVALS"

	// M2tsEbpAudioIntervalVideoInterval is a M2tsEbpAudioInterval enum value
	M2tsEbpAudioIntervalVideoInterval = "VIDEO_INTERVAL"
)

// M2tsEbpAudioInterval_Values returns all elements of the M2tsEbpAudioInterval enum
func M2tsEbpAudioInterval_Values() []string {
	return []string{
		M2tsEbpAudioIntervalVideoAndFixedIntervals,
		M2tsEbpAudioIntervalVideoInterval,
	}
}

// Selects which PIDs to place EBP markers on. They can either be placed only
// on the video PID, or on both the video PID and all audio PIDs. Only applicable
// when EBP segmentation markers are selected (segmentationMarkers is EBP
// or EBP_LEGACY).
const (
	// M2tsEbpPlacementVideoAndAudioPids is a M2tsEbpPlacement enum value
	M2tsEbpPlacementVideoAndAudioPids = "VIDEO_AND_AUDIO_PIDS"

	// M2tsEbpPlacementVideoPid is a M2tsEbpPlacement enum value
	M2tsEbpPlacementVideoPid = "VIDEO_PID"
)

// M2tsEbpPlacement_Values returns all elements of the M2tsEbpPlacement enum
func M2tsEbpPlacement_Values() []string {
	return []string{
		M2tsEbpPlacementVideoAndAudioPids,
		M2tsEbpPlacementVideoPid,
	}
}

// Controls whether to include the ES Rate field in the PES header.
const (
	// M2tsEsRateInPesInclude is a M2tsEsRateInPes enum value
	M2tsEsRateInPesInclude = "INCLUDE"

	// M2tsEsRateInPesExclude is a M2tsEsRateInPes enum value
	M2tsEsRateInPesExclude = "EXCLUDE"
)

// M2tsEsRateInPes_Values returns all elements of the M2tsEsRateInPes enum
func M2tsEsRateInPes_Values() []string {
	return []string{
		M2tsEsRateInPesInclude,
		M2tsEsRateInPesExclude,
	}
}

// Keep the default value (DEFAULT) unless you know that your audio EBP markers
// are incorrectly appearing before your video EBP markers. To correct this
// problem, set this value to Force (FORCE).
const (
	// M2tsForceTsVideoEbpOrderForce is a M2tsForceTsVideoEbpOrder enum value
	M2tsForceTsVideoEbpOrderForce = "FORCE"

	// M2tsForceTsVideoEbpOrderDefault is a M2tsForceTsVideoEbpOrder enum value
	M2tsForceTsVideoEbpOrderDefault = "DEFAULT"
)

// M2tsForceTsVideoEbpOrder_Values returns all elements of the M2tsForceTsVideoEbpOrder enum
func M2tsForceTsVideoEbpOrder_Values() []string {
	return []string{
		M2tsForceTsVideoEbpOrderForce,
		M2tsForceTsVideoEbpOrderDefault,
	}
}

// If INSERT, Nielsen inaudible tones for media tracking will be detected in
// the input audio and an equivalent ID3 tag will be inserted in the output.
const (
	// M2tsNielsenId3Insert is a M2tsNielsenId3 enum value
	M2tsNielsenId3Insert = "INSERT"

	// M2tsNielsenId3None is a M2tsNielsenId3 enum value
	M2tsNielsenId3None = "NONE"
)

// M2tsNielsenId3_Values returns all elements of the M2tsNielsenId3 enum
func M2tsNielsenId3_Values() []string {
	return []string{
		M2tsNielsenId3Insert,
		M2tsNielsenId3None,
	}
}

// When set to PCR_EVERY_PES_PACKET, a Program Clock Reference value is inserted
// for every Packetized Elementary Stream (PES) header. This is effective only
// when the PCR PID is the same as the video or audio elementary stream.
const (
	// M2tsPcrControlPcrEveryPesPacket is a M2tsPcrControl enum value
	M2tsPcrControlPcrEveryPesPacket = "PCR_EVERY_PES_PACKET"

	// M2tsPcrControlConfiguredPcrPeriod is a M2tsPcrControl enum value
	M2tsPcrControlConfiguredPcrPeriod = "CONFIGURED_PCR_PERIOD"
)

// M2tsPcrControl_Values returns all elements of the M2tsPcrControl enum
func M2tsPcrControl_Values() []string {
	return []string{
		M2tsPcrControlPcrEveryPesPacket,
		M2tsPcrControlConfiguredPcrPeriod,
	}
}

// When set to CBR, inserts null packets into transport stream to fill specified
// bitrate. When set to VBR, the bitrate setting acts as the maximum bitrate,
// but the output will not be padded up to that bitrate.
const (
	// M2tsRateModeVbr is a M2tsRateMode enum value
	M2tsRateModeVbr = "VBR"

	// M2tsRateModeCbr is a M2tsRateMode enum value
	M2tsRateModeCbr = "CBR"
)

// M2tsRateMode_Values returns all elements of the M2tsRateMode enum
func M2tsRateMode_Values() []string {
	return []string{
		M2tsRateModeVbr,
		M2tsRateModeCbr,
	}
}

// For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if
// you want SCTE-35 markers that appear in your input to also appear in this
// output. Choose None (NONE) if you don't want SCTE-35 markers in this output.
// For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). Also
// provide the ESAM XML as a string in the setting Signal processing notification
// XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam).
const (
	// M2tsScte35SourcePassthrough is a M2tsScte35Source enum value
	M2tsScte35SourcePassthrough = "PASSTHROUGH"

	// M2tsScte35SourceNone is a M2tsScte35Source enum value
	M2tsScte35SourceNone = "NONE"
)

// M2tsScte35Source_Values returns all elements of the M2tsScte35Source enum
func M2tsScte35Source_Values() []string {
	return []string{
		M2tsScte35SourcePassthrough,
		M2tsScte35SourceNone,
	}
}

// Inserts segmentation markers at each segmentation_time period.
rai_segstart
// sets the Random Access Indicator bit in the adaptation field. rai_adapt sets
// the RAI bit and adds the current timecode in the private data bytes. psi_segstart
// inserts PAT and PMT tables at the start of segments. ebp adds Encoder Boundary
// Point information to the adaptation field as per OpenCable specification
// OC-SP-EBP-I01-130118. ebp_legacy adds Encoder Boundary Point information
// to the adaptation field using a legacy proprietary format.
const (
	// M2tsSegmentationMarkersNone is a M2tsSegmentationMarkers enum value
	M2tsSegmentationMarkersNone = "NONE"

	// M2tsSegmentationMarkersRaiSegstart is a M2tsSegmentationMarkers enum value
	M2tsSegmentationMarkersRaiSegstart = "RAI_SEGSTART"

	// M2tsSegmentationMarkersRaiAdapt is a M2tsSegmentationMarkers enum value
	M2tsSegmentationMarkersRaiAdapt = "RAI_ADAPT"

	// M2tsSegmentationMarkersPsiSegstart is a M2tsSegmentationMarkers enum value
	M2tsSegmentationMarkersPsiSegstart = "PSI_SEGSTART"

	// M2tsSegmentationMarkersEbp is a M2tsSegmentationMarkers enum value
	M2tsSegmentationMarkersEbp = "EBP"

	// M2tsSegmentationMarkersEbpLegacy is a M2tsSegmentationMarkers enum value
	M2tsSegmentationMarkersEbpLegacy = "EBP_LEGACY"
)

// M2tsSegmentationMarkers_Values returns all elements of the M2tsSegmentationMarkers enum
func M2tsSegmentationMarkers_Values() []string {
	return []string{
		M2tsSegmentationMarkersNone,
		M2tsSegmentationMarkersRaiSegstart,
		M2tsSegmentationMarkersRaiAdapt,
		M2tsSegmentationMarkersPsiSegstart,
		M2tsSegmentationMarkersEbp,
		M2tsSegmentationMarkersEbpLegacy,
	}
}

// The segmentation style parameter controls how segmentation markers are inserted
// into the transport stream. With avails, it is possible that segments may
// be truncated, which can influence where future segmentation markers are inserted.
// When a segmentation style of "reset_cadence" is selected and a segment is
// truncated due to an avail, we will reset the segmentation cadence.
This means
// the subsequent segment will have a duration of $segmentation_time seconds.
// When a segmentation style of "maintain_cadence" is selected and a segment
// is truncated due to an avail, we will not reset the segmentation cadence.
// This means the subsequent segment will likely be truncated as well. However,
// all segments after that will have a duration of $segmentation_time seconds.
// Note that EBP lookahead is a slight exception to this rule.
const (
	// M2tsSegmentationStyleMaintainCadence is a M2tsSegmentationStyle enum value
	M2tsSegmentationStyleMaintainCadence = "MAINTAIN_CADENCE"

	// M2tsSegmentationStyleResetCadence is a M2tsSegmentationStyle enum value
	M2tsSegmentationStyleResetCadence = "RESET_CADENCE"
)

// M2tsSegmentationStyle_Values returns all elements of the M2tsSegmentationStyle enum
func M2tsSegmentationStyle_Values() []string {
	return []string{
		M2tsSegmentationStyleMaintainCadence,
		M2tsSegmentationStyleResetCadence,
	}
}

// Specify this setting only when your output will be consumed by a downstream
// repackaging workflow that is sensitive to very small duration differences
// between video and audio. For this situation, choose Match video duration
// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
// MediaConvert pads the output audio streams with silence or trims them to
// ensure that the total duration of each audio stream is at least as long as
// the total duration of the video stream. After padding or trimming, the audio
// stream duration is no more than one frame longer than the video stream. MediaConvert
// applies audio padding or trimming only to the end of the last segment of
// the output. For unsegmented outputs, MediaConvert adds padding only to the
// end of the file. When you keep the default value, any minor discrepancies
// between audio and video duration will depend on your output audio codec.
const (
	// M3u8AudioDurationDefaultCodecDuration is a M3u8AudioDuration enum value
	M3u8AudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION"

	// M3u8AudioDurationMatchVideoDuration is a M3u8AudioDuration enum value
	M3u8AudioDurationMatchVideoDuration = "MATCH_VIDEO_DURATION"
)

// M3u8AudioDuration_Values returns all elements of the M3u8AudioDuration enum
func M3u8AudioDuration_Values() []string {
	return []string{
		M3u8AudioDurationDefaultCodecDuration,
		M3u8AudioDurationMatchVideoDuration,
	}
}

// If INSERT, Nielsen inaudible tones for media tracking will be detected in
// the input audio and an equivalent ID3 tag will be inserted in the output.
const (
	// M3u8NielsenId3Insert is a M3u8NielsenId3 enum value
	M3u8NielsenId3Insert = "INSERT"

	// M3u8NielsenId3None is a M3u8NielsenId3 enum value
	M3u8NielsenId3None = "NONE"
)

// M3u8NielsenId3_Values returns all elements of the M3u8NielsenId3 enum
func M3u8NielsenId3_Values() []string {
	return []string{
		M3u8NielsenId3Insert,
		M3u8NielsenId3None,
	}
}

// When set to PCR_EVERY_PES_PACKET a Program Clock Reference value is inserted
// for every Packetized Elementary Stream (PES) header. This parameter is effective
// only when the PCR PID is the same as the video or audio elementary stream.
const (
	// M3u8PcrControlPcrEveryPesPacket is a M3u8PcrControl enum value
	M3u8PcrControlPcrEveryPesPacket = "PCR_EVERY_PES_PACKET"

	// M3u8PcrControlConfiguredPcrPeriod is a M3u8PcrControl enum value
	M3u8PcrControlConfiguredPcrPeriod = "CONFIGURED_PCR_PERIOD"
)

// M3u8PcrControl_Values returns all elements of the M3u8PcrControl enum
func M3u8PcrControl_Values() []string {
	return []string{
		M3u8PcrControlPcrEveryPesPacket,
		M3u8PcrControlConfiguredPcrPeriod,
	}
}

// For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if
// you want SCTE-35 markers that appear in your input to also appear in this
// output. Choose None (NONE) if you don't want SCTE-35 markers in this output.
// For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you
// don't want manifest conditioning. Choose Passthrough (PASSTHROUGH) and choose
// Ad markers (adMarkers) if you do want manifest conditioning. In both cases,
// also provide the ESAM XML as a string in the setting Signal processing notification
// XML (sccXml).
const (
	// M3u8Scte35SourcePassthrough is a M3u8Scte35Source enum value
	M3u8Scte35SourcePassthrough = "PASSTHROUGH"

	// M3u8Scte35SourceNone is a M3u8Scte35Source enum value
	M3u8Scte35SourceNone = "NONE"
)

// M3u8Scte35Source_Values returns all elements of the M3u8Scte35Source enum
func M3u8Scte35Source_Values() []string {
	return []string{
		M3u8Scte35SourcePassthrough,
		M3u8Scte35SourceNone,
	}
}

// Choose the type of motion graphic asset that you are providing for your overlay.
// You can choose either a .mov file or a series of .png files.
const (
	// MotionImageInsertionModeMov is a MotionImageInsertionMode enum value
	MotionImageInsertionModeMov = "MOV"

	// MotionImageInsertionModePng is a MotionImageInsertionMode enum value
	MotionImageInsertionModePng = "PNG"
)

// MotionImageInsertionMode_Values returns all elements of the MotionImageInsertionMode enum
func MotionImageInsertionMode_Values() []string {
	return []string{
		MotionImageInsertionModeMov,
		MotionImageInsertionModePng,
	}
}

// Specify whether your motion graphic overlay repeats on a loop or plays only
// once.
const (
	// MotionImagePlaybackOnce is a MotionImagePlayback enum value
	MotionImagePlaybackOnce = "ONCE"

	// MotionImagePlaybackRepeat is a MotionImagePlayback enum value
	MotionImagePlaybackRepeat = "REPEAT"
)

// MotionImagePlayback_Values returns all elements of the MotionImagePlayback enum
func MotionImagePlayback_Values() []string {
	return []string{
		MotionImagePlaybackOnce,
		MotionImagePlaybackRepeat,
	}
}

// When enabled, include 'clap' atom if appropriate for the video output settings.
const (
	// MovClapAtomInclude is a MovClapAtom enum value
	MovClapAtomInclude = "INCLUDE"

	// MovClapAtomExclude is a MovClapAtom enum value
	MovClapAtomExclude = "EXCLUDE"
)

// MovClapAtom_Values returns all elements of the MovClapAtom enum
func MovClapAtom_Values() []string {
	return []string{
		MovClapAtomInclude,
		MovClapAtomExclude,
	}
}

// When enabled, file composition times will start at zero, composition times
// in the 'ctts' (composition time to sample) box for B-frames will be negative,
// and a 'cslg' (composition shift least greatest) box will be included per
// 14496-1 amendment 1. This improves compatibility with Apple players and tools.
const (
	// MovCslgAtomInclude is a MovCslgAtom enum value
	MovCslgAtomInclude = "INCLUDE"

	// MovCslgAtomExclude is a MovCslgAtom enum value
	MovCslgAtomExclude = "EXCLUDE"
)

// MovCslgAtom_Values returns all elements of the MovCslgAtom enum
func MovCslgAtom_Values() []string {
	return []string{
		MovCslgAtomInclude,
		MovCslgAtomExclude,
	}
}

// When set to XDCAM, writes MPEG2 video streams into the QuickTime file using
// XDCAM fourcc codes. This increases compatibility with Apple editors and players,
// but may decrease compatibility with other players. Only applicable when the
// video codec is MPEG2.
const (
	// MovMpeg2FourCCControlXdcam is a MovMpeg2FourCCControl enum value
	MovMpeg2FourCCControlXdcam = "XDCAM"

	// MovMpeg2FourCCControlMpeg is a MovMpeg2FourCCControl enum value
	MovMpeg2FourCCControlMpeg = "MPEG"
)

// MovMpeg2FourCCControl_Values returns all elements of the MovMpeg2FourCCControl enum
func MovMpeg2FourCCControl_Values() []string {
	return []string{
		MovMpeg2FourCCControlXdcam,
		MovMpeg2FourCCControlMpeg,
	}
}

// To make this output compatible with Omneon, keep the default value, OMNEON.
// Unless you need Omneon compatibility, set this value to NONE. When you keep
// the default value, OMNEON, MediaConvert increases the length of the edit
// list atom.
// This might cause file rejections when a recipient of the output
// file doesn't expect this extra padding.
const (
	// MovPaddingControlOmneon is a MovPaddingControl enum value
	MovPaddingControlOmneon = "OMNEON"

	// MovPaddingControlNone is a MovPaddingControl enum value
	MovPaddingControlNone = "NONE"
)

// MovPaddingControl_Values returns all elements of the MovPaddingControl enum
func MovPaddingControl_Values() []string {
	return []string{
		MovPaddingControlOmneon,
		MovPaddingControlNone,
	}
}

// Always keep the default value (SELF_CONTAINED) for this setting.
const (
	// MovReferenceSelfContained is a MovReference enum value
	MovReferenceSelfContained = "SELF_CONTAINED"

	// MovReferenceExternal is a MovReference enum value
	MovReferenceExternal = "EXTERNAL"
)

// MovReference_Values returns all elements of the MovReference enum
func MovReference_Values() []string {
	return []string{
		MovReferenceSelfContained,
		MovReferenceExternal,
	}
}

// Specify whether the service encodes this MP3 audio output with a constant
// bitrate (CBR) or a variable bitrate (VBR).
const (
	// Mp3RateControlModeCbr is a Mp3RateControlMode enum value
	Mp3RateControlModeCbr = "CBR"

	// Mp3RateControlModeVbr is a Mp3RateControlMode enum value
	Mp3RateControlModeVbr = "VBR"
)

// Mp3RateControlMode_Values returns all elements of the Mp3RateControlMode enum
func Mp3RateControlMode_Values() []string {
	return []string{
		Mp3RateControlModeCbr,
		Mp3RateControlModeVbr,
	}
}

// When enabled, file composition times will start at zero, composition times
// in the 'ctts' (composition time to sample) box for B-frames will be negative,
// and a 'cslg' (composition shift least greatest) box will be included per
// 14496-1 amendment 1. This improves compatibility with Apple players and tools.
const (
	// Mp4CslgAtomInclude is a Mp4CslgAtom enum value
	Mp4CslgAtomInclude = "INCLUDE"

	// Mp4CslgAtomExclude is a Mp4CslgAtom enum value
	Mp4CslgAtomExclude = "EXCLUDE"
)

// Mp4CslgAtom_Values returns all elements of the Mp4CslgAtom enum
func Mp4CslgAtom_Values() []string {
	return []string{
		Mp4CslgAtomInclude,
		Mp4CslgAtomExclude,
	}
}

// Inserts a free-space box immediately after the moov box.
const (
	// Mp4FreeSpaceBoxInclude is a Mp4FreeSpaceBox enum value
	Mp4FreeSpaceBoxInclude = "INCLUDE"

	// Mp4FreeSpaceBoxExclude is a Mp4FreeSpaceBox enum value
	Mp4FreeSpaceBoxExclude = "EXCLUDE"
)

// Mp4FreeSpaceBox_Values returns all elements of the Mp4FreeSpaceBox enum
func Mp4FreeSpaceBox_Values() []string {
	return []string{
		Mp4FreeSpaceBoxInclude,
		Mp4FreeSpaceBoxExclude,
	}
}

// If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning
// of the archive as required for progressive downloading. Otherwise it is placed
// normally at the end.
const (
	// Mp4MoovPlacementProgressiveDownload is a Mp4MoovPlacement enum value
	Mp4MoovPlacementProgressiveDownload = "PROGRESSIVE_DOWNLOAD"

	// Mp4MoovPlacementNormal is a Mp4MoovPlacement enum value
	Mp4MoovPlacementNormal = "NORMAL"
)

// Mp4MoovPlacement_Values returns all elements of the Mp4MoovPlacement enum
func Mp4MoovPlacement_Values() []string {
	return []string{
		Mp4MoovPlacementProgressiveDownload,
		Mp4MoovPlacementNormal,
	}
}

// Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH
// manifest with elements for embedded 608 captions. This markup isn't generally
// required, but some video players require it to discover and play embedded
// 608 captions. Keep the default value, Exclude (EXCLUDE), to leave these elements
// out.
// When you enable this setting, this is the markup that MediaConvert includes
// in your manifest:
const (
	// MpdAccessibilityCaptionHintsInclude is a MpdAccessibilityCaptionHints enum value
	MpdAccessibilityCaptionHintsInclude = "INCLUDE"

	// MpdAccessibilityCaptionHintsExclude is a MpdAccessibilityCaptionHints enum value
	MpdAccessibilityCaptionHintsExclude = "EXCLUDE"
)

// MpdAccessibilityCaptionHints_Values returns all elements of the MpdAccessibilityCaptionHints enum
func MpdAccessibilityCaptionHints_Values() []string {
	return []string{
		MpdAccessibilityCaptionHintsInclude,
		MpdAccessibilityCaptionHintsExclude,
	}
}

// Specify this setting only when your output will be consumed by a downstream
// repackaging workflow that is sensitive to very small duration differences
// between video and audio. For this situation, choose Match video duration
// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
// MediaConvert pads the output audio streams with silence or trims them to
// ensure that the total duration of each audio stream is at least as long as
// the total duration of the video stream. After padding or trimming, the audio
// stream duration is no more than one frame longer than the video stream. MediaConvert
// applies audio padding or trimming only to the end of the last segment of
// the output. For unsegmented outputs, MediaConvert adds padding only to the
// end of the file. When you keep the default value, any minor discrepancies
// between audio and video duration will depend on your output audio codec.
const (
	// MpdAudioDurationDefaultCodecDuration is a MpdAudioDuration enum value
	MpdAudioDurationDefaultCodecDuration = "DEFAULT_CODEC_DURATION"

	// MpdAudioDurationMatchVideoDuration is a MpdAudioDuration enum value
	MpdAudioDurationMatchVideoDuration = "MATCH_VIDEO_DURATION"
)

// MpdAudioDuration_Values returns all elements of the MpdAudioDuration enum
func MpdAudioDuration_Values() []string {
	return []string{
		MpdAudioDurationDefaultCodecDuration,
		MpdAudioDurationMatchVideoDuration,
	}
}

// Use this setting only in DASH output groups that include sidecar TTML or
// IMSC captions. You specify sidecar captions in a separate output from your
// audio and video. Choose Raw (RAW) for captions in a single XML file in a
// raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in
// XML format contained within fragmented MP4 files. This set of fragmented
// MP4 files is separate from your video and audio fragmented MP4 files.
const (
	// MpdCaptionContainerTypeRaw is a MpdCaptionContainerType enum value
	MpdCaptionContainerTypeRaw = "RAW"

	// MpdCaptionContainerTypeFragmentedMp4 is a MpdCaptionContainerType enum value
	MpdCaptionContainerTypeFragmentedMp4 = "FRAGMENTED_MP4"
)

// MpdCaptionContainerType_Values returns all elements of the MpdCaptionContainerType enum
func MpdCaptionContainerType_Values() []string {
	return []string{
		MpdCaptionContainerTypeRaw,
		MpdCaptionContainerTypeFragmentedMp4,
	}
}

// Use this setting only when you specify SCTE-35 markers from ESAM. Choose
// INSERT to put SCTE-35 markers in this output at the insertion points that
// you specify in an ESAM XML document. Provide the document in the setting
// SCC XML (sccXml).
const (
	// MpdScte35EsamInsert is a MpdScte35Esam enum value
	MpdScte35EsamInsert = "INSERT"

	// MpdScte35EsamNone is a MpdScte35Esam enum value
	MpdScte35EsamNone = "NONE"
)

// MpdScte35Esam_Values returns all elements of the MpdScte35Esam enum
func MpdScte35Esam_Values() []string {
	return []string{
		MpdScte35EsamInsert,
		MpdScte35EsamNone,
	}
}

// Ignore this setting unless you have SCTE-35 markers in your input video file.
// Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear
// in your input to also appear in this output. Choose None (NONE) if you don't
// want those SCTE-35 markers in this output.
const (
	// MpdScte35SourcePassthrough is a MpdScte35Source enum value
	MpdScte35SourcePassthrough = "PASSTHROUGH"

	// MpdScte35SourceNone is a MpdScte35Source enum value
	MpdScte35SourceNone = "NONE"
)

// MpdScte35Source_Values returns all elements of the MpdScte35Source enum
func MpdScte35Source_Values() []string {
	return []string{
		MpdScte35SourcePassthrough,
		MpdScte35SourceNone,
	}
}

// Specify the strength of any adaptive quantization filters that you enable.
// The value that you choose here applies to the following settings: Spatial
// adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive
// quantization (temporalAdaptiveQuantization).
const (
	// Mpeg2AdaptiveQuantizationOff is a Mpeg2AdaptiveQuantization enum value
	Mpeg2AdaptiveQuantizationOff = "OFF"

	// Mpeg2AdaptiveQuantizationLow is a Mpeg2AdaptiveQuantization enum value
	Mpeg2AdaptiveQuantizationLow = "LOW"

	// Mpeg2AdaptiveQuantizationMedium is a Mpeg2AdaptiveQuantization enum value
	Mpeg2AdaptiveQuantizationMedium = "MEDIUM"

	// Mpeg2AdaptiveQuantizationHigh is a Mpeg2AdaptiveQuantization enum value
	Mpeg2AdaptiveQuantizationHigh = "HIGH"
)

// Mpeg2AdaptiveQuantization_Values returns all elements of the Mpeg2AdaptiveQuantization enum
func Mpeg2AdaptiveQuantization_Values() []string {
	return []string{
		Mpeg2AdaptiveQuantizationOff,
		Mpeg2AdaptiveQuantizationLow,
		Mpeg2AdaptiveQuantizationMedium,
		Mpeg2AdaptiveQuantizationHigh,
	}
}

// Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output.
const (
	// Mpeg2CodecLevelAuto is a Mpeg2CodecLevel enum value
	Mpeg2CodecLevelAuto = "AUTO"

	// Mpeg2CodecLevelLow is a Mpeg2CodecLevel enum value
	Mpeg2CodecLevelLow = "LOW"

	// Mpeg2CodecLevelMain is a Mpeg2CodecLevel enum value
	Mpeg2CodecLevelMain = "MAIN"

	// Mpeg2CodecLevelHigh1440 is a Mpeg2CodecLevel enum value
	Mpeg2CodecLevelHigh1440 = "HIGH1440"

	// Mpeg2CodecLevelHigh is a Mpeg2CodecLevel enum value
	Mpeg2CodecLevelHigh = "HIGH"
)

// Mpeg2CodecLevel_Values returns all elements of the Mpeg2CodecLevel enum
func Mpeg2CodecLevel_Values() []string {
	return []string{
		Mpeg2CodecLevelAuto,
		Mpeg2CodecLevelLow,
		Mpeg2CodecLevelMain,
		Mpeg2CodecLevelHigh1440,
		Mpeg2CodecLevelHigh,
	}
}

// Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output.
const (
	// Mpeg2CodecProfileMain is a Mpeg2CodecProfile enum value
	Mpeg2CodecProfileMain = "MAIN"

	// Mpeg2CodecProfileProfile422 is a Mpeg2CodecProfile enum value
	Mpeg2CodecProfileProfile422 = "PROFILE_422"
)

// Mpeg2CodecProfile_Values returns all elements of the Mpeg2CodecProfile enum
func Mpeg2CodecProfile_Values() []string {
	return []string{
		Mpeg2CodecProfileMain,
		Mpeg2CodecProfileProfile422,
	}
}

// Choose Adaptive to improve subjective video quality for high-motion content.
// This will cause the service to use fewer B-frames (which infer information
// based on other frames) for high-motion portions of the video and more B-frames
// for low-motion portions. The maximum number of B-frames is limited by the
// value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).
const (
	// Mpeg2DynamicSubGopAdaptive is a Mpeg2DynamicSubGop enum value
	Mpeg2DynamicSubGopAdaptive = "ADAPTIVE"

	// Mpeg2DynamicSubGopStatic is a Mpeg2DynamicSubGop enum value
	Mpeg2DynamicSubGopStatic = "STATIC"
)

// Mpeg2DynamicSubGop_Values returns all elements of the Mpeg2DynamicSubGop enum
func Mpeg2DynamicSubGop_Values() []string {
	return []string{
		Mpeg2DynamicSubGopAdaptive,
		Mpeg2DynamicSubGopStatic,
	}
}

// If you are using the console, use the Framerate setting to specify the frame
// rate for this output. If you want to keep the same frame rate as the input
// video, choose Follow source. If you want to do frame rate conversion, choose
// a frame rate from the dropdown list or choose Custom. The framerates shown
// in the dropdown list are decimal approximations of fractions. If you choose
// Custom, specify your frame rate as a fraction. If you are creating your transcoding
// job specification as a JSON file without the console, use FramerateControl
// to specify which value the service uses for the frame rate for this output.
// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
// from the input.
// Choose SPECIFIED if you want the service to use the frame
// rate you specify in the settings FramerateNumerator and FramerateDenominator.
const (
	// Mpeg2FramerateControlInitializeFromSource is a Mpeg2FramerateControl enum value
	Mpeg2FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Mpeg2FramerateControlSpecified is a Mpeg2FramerateControl enum value
	Mpeg2FramerateControlSpecified = "SPECIFIED"
)

// Mpeg2FramerateControl_Values returns all elements of the Mpeg2FramerateControl enum
func Mpeg2FramerateControl_Values() []string {
	return []string{
		Mpeg2FramerateControlInitializeFromSource,
		Mpeg2FramerateControlSpecified,
	}
}

// Choose the method that you want MediaConvert to use when increasing or decreasing
// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
// smooth picture, but might introduce undesirable video artifacts. For complex
// frame rate conversions, especially if your source video has already been
// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
// motion-compensated interpolation. FrameFormer chooses the best conversion
// method frame by frame. Note that using FrameFormer increases the transcoding
// time and incurs a significant add-on cost.
const (
	// Mpeg2FramerateConversionAlgorithmDuplicateDrop is a Mpeg2FramerateConversionAlgorithm enum value
	Mpeg2FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// Mpeg2FramerateConversionAlgorithmInterpolate is a Mpeg2FramerateConversionAlgorithm enum value
	Mpeg2FramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// Mpeg2FramerateConversionAlgorithmFrameformer is a Mpeg2FramerateConversionAlgorithm enum value
	Mpeg2FramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// Mpeg2FramerateConversionAlgorithm_Values returns all elements of the Mpeg2FramerateConversionAlgorithm enum
func Mpeg2FramerateConversionAlgorithm_Values() []string {
	return []string{
		Mpeg2FramerateConversionAlgorithmDuplicateDrop,
		Mpeg2FramerateConversionAlgorithmInterpolate,
		Mpeg2FramerateConversionAlgorithmFrameformer,
	}
}

// Indicates if the GOP Size in MPEG2 is specified in frames or seconds. If
// seconds the system will convert the GOP Size into a frame count at run time.
const (
	// Mpeg2GopSizeUnitsFrames is a Mpeg2GopSizeUnits enum value
	Mpeg2GopSizeUnitsFrames = "FRAMES"

	// Mpeg2GopSizeUnitsSeconds is a Mpeg2GopSizeUnits enum value
	Mpeg2GopSizeUnitsSeconds = "SECONDS"
)

// Mpeg2GopSizeUnits_Values returns all elements of the Mpeg2GopSizeUnits enum
func Mpeg2GopSizeUnits_Values() []string {
	return []string{
		Mpeg2GopSizeUnitsFrames,
		Mpeg2GopSizeUnitsSeconds,
	}
}

// Choose the scan line type for the output. Keep the default value, Progressive
// (PROGRESSIVE) to create a progressive output, regardless of the scan type
// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
// to create an output that's interlaced with the same field polarity throughout.
// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
// to produce outputs with the same field polarity as the source. For jobs that
// have multiple inputs, the output field polarity might change over the course
// of the output.
// Follow behavior depends on the input scan type. If the source
// is interlaced, the output will be interlaced with the same polarity as the
// source. If the source is progressive, the output will be interlaced with
// top field or bottom field first, depending on which of the Follow options you
// choose.
const (
	// Mpeg2InterlaceModeProgressive is a Mpeg2InterlaceMode enum value
	Mpeg2InterlaceModeProgressive = "PROGRESSIVE"

	// Mpeg2InterlaceModeTopField is a Mpeg2InterlaceMode enum value
	Mpeg2InterlaceModeTopField = "TOP_FIELD"

	// Mpeg2InterlaceModeBottomField is a Mpeg2InterlaceMode enum value
	Mpeg2InterlaceModeBottomField = "BOTTOM_FIELD"

	// Mpeg2InterlaceModeFollowTopField is a Mpeg2InterlaceMode enum value
	Mpeg2InterlaceModeFollowTopField = "FOLLOW_TOP_FIELD"

	// Mpeg2InterlaceModeFollowBottomField is a Mpeg2InterlaceMode enum value
	Mpeg2InterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD"
)

// Mpeg2InterlaceMode_Values returns all elements of the Mpeg2InterlaceMode enum
func Mpeg2InterlaceMode_Values() []string {
	return []string{
		Mpeg2InterlaceModeProgressive,
		Mpeg2InterlaceModeTopField,
		Mpeg2InterlaceModeBottomField,
		Mpeg2InterlaceModeFollowTopField,
		Mpeg2InterlaceModeFollowBottomField,
	}
}

// Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision
// for intra-block DC coefficients. If you choose the value auto, the service
// will automatically select the precision based on the per-frame compression
// ratio.
const (
	// Mpeg2IntraDcPrecisionAuto is a Mpeg2IntraDcPrecision enum value
	Mpeg2IntraDcPrecisionAuto = "AUTO"

	// Mpeg2IntraDcPrecisionIntraDcPrecision8 is a Mpeg2IntraDcPrecision enum value
	Mpeg2IntraDcPrecisionIntraDcPrecision8 = "INTRA_DC_PRECISION_8"

	// Mpeg2IntraDcPrecisionIntraDcPrecision9 is a Mpeg2IntraDcPrecision enum value
	Mpeg2IntraDcPrecisionIntraDcPrecision9 = "INTRA_DC_PRECISION_9"

	// Mpeg2IntraDcPrecisionIntraDcPrecision10 is a Mpeg2IntraDcPrecision enum value
	Mpeg2IntraDcPrecisionIntraDcPrecision10 = "INTRA_DC_PRECISION_10"

	// Mpeg2IntraDcPrecisionIntraDcPrecision11 is a Mpeg2IntraDcPrecision enum value
	Mpeg2IntraDcPrecisionIntraDcPrecision11 = "INTRA_DC_PRECISION_11"
)

// Mpeg2IntraDcPrecision_Values returns all elements of the Mpeg2IntraDcPrecision enum
func Mpeg2IntraDcPrecision_Values() []string {
	return []string{
		Mpeg2IntraDcPrecisionAuto,
		Mpeg2IntraDcPrecisionIntraDcPrecision8,
		Mpeg2IntraDcPrecisionIntraDcPrecision9,
		Mpeg2IntraDcPrecisionIntraDcPrecision10,
		Mpeg2IntraDcPrecisionIntraDcPrecision11,
	}
}

// Optional. Specify how the service determines the pixel aspect ratio (PAR)
// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE),
// uses the PAR from your input video for your output. To specify a different
// PAR in the console, choose any value other than Follow source. To specify
// a different PAR by editing the JSON job specification, choose SPECIFIED.
// When you choose SPECIFIED for this setting, you must also specify values
// for the parNumerator and parDenominator settings.
const (
	// Mpeg2ParControlInitializeFromSource is a Mpeg2ParControl enum value
	Mpeg2ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Mpeg2ParControlSpecified is a Mpeg2ParControl enum value
	Mpeg2ParControlSpecified = "SPECIFIED"
)

// Mpeg2ParControl_Values returns all elements of the Mpeg2ParControl enum
func Mpeg2ParControl_Values() []string {
	return []string{
		Mpeg2ParControlInitializeFromSource,
		Mpeg2ParControlSpecified,
	}
}

// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
// want to trade off encoding speed for output video quality. The default behavior
// is faster, lower quality, single-pass encoding.
const (
	// Mpeg2QualityTuningLevelSinglePass is a Mpeg2QualityTuningLevel enum value
	Mpeg2QualityTuningLevelSinglePass = "SINGLE_PASS"

	// Mpeg2QualityTuningLevelMultiPass is a Mpeg2QualityTuningLevel enum value
	Mpeg2QualityTuningLevelMultiPass = "MULTI_PASS"
)

// Mpeg2QualityTuningLevel_Values returns all elements of the Mpeg2QualityTuningLevel enum
func Mpeg2QualityTuningLevel_Values() []string {
	return []string{
		Mpeg2QualityTuningLevelSinglePass,
		Mpeg2QualityTuningLevelMultiPass,
	}
}

// Use Rate control mode (Mpeg2RateControlMode) to specify whether the bitrate
// is variable (vbr) or constant (cbr).
const (
	// Mpeg2RateControlModeVbr is a Mpeg2RateControlMode enum value
	Mpeg2RateControlModeVbr = "VBR"

	// Mpeg2RateControlModeCbr is a Mpeg2RateControlMode enum value
	Mpeg2RateControlModeCbr = "CBR"
)

// Mpeg2RateControlMode_Values returns all elements of the Mpeg2RateControlMode enum
func Mpeg2RateControlMode_Values() []string {
	return []string{
		Mpeg2RateControlModeVbr,
		Mpeg2RateControlModeCbr,
	}
}

// Use this setting for interlaced outputs, when your output frame rate is half
// of your input frame rate. In this situation, choose Optimized interlacing
// (INTERLACED_OPTIMIZE) to create a better quality interlaced output.
// In this
// case, each progressive frame from the input corresponds to an interlaced
// field in the output. Keep the default value, Basic interlacing (INTERLACED),
// for all other output frame rates. With basic interlacing, MediaConvert performs
// any frame rate conversion first and then interlaces the frames. When you
// choose Optimized interlacing and you set your output frame rate to a value
// that isn't suitable for optimized interlacing, MediaConvert automatically
// falls back to basic interlacing. Required settings: To use optimized interlacing,
// you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't
// use optimized interlacing for hard telecine outputs. You must also set Interlace
// mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).
const (
	// Mpeg2ScanTypeConversionModeInterlaced is a Mpeg2ScanTypeConversionMode enum value
	Mpeg2ScanTypeConversionModeInterlaced = "INTERLACED"

	// Mpeg2ScanTypeConversionModeInterlacedOptimize is a Mpeg2ScanTypeConversionMode enum value
	Mpeg2ScanTypeConversionModeInterlacedOptimize = "INTERLACED_OPTIMIZE"
)

// Mpeg2ScanTypeConversionMode_Values returns all elements of the Mpeg2ScanTypeConversionMode enum
func Mpeg2ScanTypeConversionMode_Values() []string {
	return []string{
		Mpeg2ScanTypeConversionModeInterlaced,
		Mpeg2ScanTypeConversionModeInterlacedOptimize,
	}
}

// Enable this setting to insert I-frames at scene changes that the service
// automatically detects. This improves video quality and is enabled by default.
const (
	// Mpeg2SceneChangeDetectDisabled is a Mpeg2SceneChangeDetect enum value
	Mpeg2SceneChangeDetectDisabled = "DISABLED"

	// Mpeg2SceneChangeDetectEnabled is a Mpeg2SceneChangeDetect enum value
	Mpeg2SceneChangeDetectEnabled = "ENABLED"
)

// Mpeg2SceneChangeDetect_Values returns all elements of the Mpeg2SceneChangeDetect enum
func Mpeg2SceneChangeDetect_Values() []string {
	return []string{
		Mpeg2SceneChangeDetectDisabled,
		Mpeg2SceneChangeDetectEnabled,
	}
}

// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
// second (fps). Enable slow PAL to create a 25 fps output. When you enable
// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples
// your audio to keep it synchronized with the video. Note that enabling this
// setting will slightly reduce the duration of your video. Required settings:
// You must also set Framerate to 25. In your JSON job specification, set (framerateControl)
// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
// 1.
const (
	// Mpeg2SlowPalDisabled is a Mpeg2SlowPal enum value
	Mpeg2SlowPalDisabled = "DISABLED"

	// Mpeg2SlowPalEnabled is a Mpeg2SlowPal enum value
	Mpeg2SlowPalEnabled = "ENABLED"
)

// Mpeg2SlowPal_Values returns all elements of the Mpeg2SlowPal enum
func Mpeg2SlowPal_Values() []string {
	return []string{
		Mpeg2SlowPalDisabled,
		Mpeg2SlowPalEnabled,
	}
}

// Keep the default value, Enabled (ENABLED), to adjust quantization within
// each frame based on spatial variation of content complexity. When you enable
// this feature, the encoder uses fewer bits on areas that can sustain more
// distortion with no noticeable visual degradation and uses more bits on areas
// where any small distortion will be noticeable. For example, complex textured
// blocks are encoded with fewer bits and smooth textured blocks are encoded
// with more bits. Enabling this feature will almost always improve your video
// quality.
// Note, though, that this feature doesn't take into account where
// the viewer's attention is likely to be. If viewers are likely to be focusing
// their attention on a part of the screen with a lot of complex texture, you
// might choose to disable this feature. Related setting: When you enable spatial
// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization)
// depending on your content. For homogeneous content, such as cartoons and
// video games, set it to Low. For content with a wider variety of textures,
// set it to High or Higher.
const (
	// Mpeg2SpatialAdaptiveQuantizationDisabled is a Mpeg2SpatialAdaptiveQuantization enum value
	Mpeg2SpatialAdaptiveQuantizationDisabled = "DISABLED"

	// Mpeg2SpatialAdaptiveQuantizationEnabled is a Mpeg2SpatialAdaptiveQuantization enum value
	Mpeg2SpatialAdaptiveQuantizationEnabled = "ENABLED"
)

// Mpeg2SpatialAdaptiveQuantization_Values returns all elements of the Mpeg2SpatialAdaptiveQuantization enum
func Mpeg2SpatialAdaptiveQuantization_Values() []string {
	return []string{
		Mpeg2SpatialAdaptiveQuantizationDisabled,
		Mpeg2SpatialAdaptiveQuantizationEnabled,
	}
}

// Specify whether this output's video uses the D10 syntax. Keep the default
// value to not use the syntax. Related settings: When you choose D10 (D_10)
// for your MXF profile (profile), you must also set this value to D10 (D_10).
const (
	// Mpeg2SyntaxDefault is a Mpeg2Syntax enum value
	Mpeg2SyntaxDefault = "DEFAULT"

	// Mpeg2SyntaxD10 is a Mpeg2Syntax enum value
	Mpeg2SyntaxD10 = "D_10"
)

// Mpeg2Syntax_Values returns all elements of the Mpeg2Syntax enum
func Mpeg2Syntax_Values() []string {
	return []string{
		Mpeg2SyntaxDefault,
		Mpeg2SyntaxD10,
	}
}

// When you do frame rate conversion from 23.976 frames per second (fps) to
// 29.97 fps, and your output scan type is interlaced, you can optionally enable
// hard or soft telecine to create a smoother picture. Hard telecine (HARD)
// produces a 29.97i output.
// Soft telecine (SOFT) produces an output with a
// 23.976 output that signals to the video player device to do the conversion
// during play back. When you keep the default value, None (NONE), MediaConvert
// does a standard frame rate conversion to 29.97 without doing anything with
// the field polarity to create a smoother picture.
const (
	// Mpeg2TelecineNone is a Mpeg2Telecine enum value
	Mpeg2TelecineNone = "NONE"

	// Mpeg2TelecineSoft is a Mpeg2Telecine enum value
	Mpeg2TelecineSoft = "SOFT"

	// Mpeg2TelecineHard is a Mpeg2Telecine enum value
	Mpeg2TelecineHard = "HARD"
)

// Mpeg2Telecine_Values returns all elements of the Mpeg2Telecine enum
func Mpeg2Telecine_Values() []string {
	return []string{
		Mpeg2TelecineNone,
		Mpeg2TelecineSoft,
		Mpeg2TelecineHard,
	}
}

// Keep the default value, Enabled (ENABLED), to adjust quantization within
// each frame based on temporal variation of content complexity. When you enable
// this feature, the encoder uses fewer bits on areas of the frame that aren't
// moving and uses more bits on complex objects with sharp edges that move a
// lot. For example, this feature improves the readability of text tickers on
// newscasts and scoreboards on sports matches. Enabling this feature will almost
// always improve your video quality. Note, though, that this feature doesn't
// take into account where the viewer's attention is likely to be. If viewers
// are likely to be focusing their attention on a part of the screen that doesn't
// have moving objects with sharp edges, such as sports athletes' faces, you
// might choose to disable this feature. Related setting: When you enable temporal
// quantization, adjust the strength of the filter with the setting Adaptive
// quantization (adaptiveQuantization).
const (
	// Mpeg2TemporalAdaptiveQuantizationDisabled is a Mpeg2TemporalAdaptiveQuantization enum value
	Mpeg2TemporalAdaptiveQuantizationDisabled = "DISABLED"

	// Mpeg2TemporalAdaptiveQuantizationEnabled is a Mpeg2TemporalAdaptiveQuantization enum value
	Mpeg2TemporalAdaptiveQuantizationEnabled = "ENABLED"
)

// Mpeg2TemporalAdaptiveQuantization_Values returns all elements of the Mpeg2TemporalAdaptiveQuantization enum
func Mpeg2TemporalAdaptiveQuantization_Values() []string {
	return []string{
		Mpeg2TemporalAdaptiveQuantizationDisabled,
		Mpeg2TemporalAdaptiveQuantizationEnabled,
	}
}

// COMBINE_DUPLICATE_STREAMS combines identical audio encoding settings across
// a Microsoft Smooth output group into a single audio stream.
const (
	// MsSmoothAudioDeduplicationCombineDuplicateStreams is a MsSmoothAudioDeduplication enum value
	MsSmoothAudioDeduplicationCombineDuplicateStreams = "COMBINE_DUPLICATE_STREAMS"

	// MsSmoothAudioDeduplicationNone is a MsSmoothAudioDeduplication enum value
	MsSmoothAudioDeduplicationNone = "NONE"
)

// MsSmoothAudioDeduplication_Values returns all elements of the MsSmoothAudioDeduplication enum
func MsSmoothAudioDeduplication_Values() []string {
	return []string{
		MsSmoothAudioDeduplicationCombineDuplicateStreams,
		MsSmoothAudioDeduplicationNone,
	}
}

// Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding
// format for the server and client manifest. Valid options are utf8 and utf16.
const (
	// MsSmoothManifestEncodingUtf8 is a MsSmoothManifestEncoding enum value
	MsSmoothManifestEncodingUtf8 = "UTF8"

	// MsSmoothManifestEncodingUtf16 is a MsSmoothManifestEncoding enum value
	MsSmoothManifestEncodingUtf16 = "UTF16"
)

// MsSmoothManifestEncoding_Values returns all elements of the MsSmoothManifestEncoding enum
func MsSmoothManifestEncoding_Values() []string {
	return []string{
		MsSmoothManifestEncodingUtf8,
		MsSmoothManifestEncodingUtf16,
	}
}

// Optional.
// When you have AFD signaling set up in your output video stream,
// use this setting to choose whether to also include it in the MXF wrapper.
// Choose Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper.
// Choose Copy from video stream (COPY_FROM_VIDEO) to copy the AFD values from
// the video stream for this output to the MXF wrapper. Regardless of which
// option you choose, the AFD values remain in the video stream. Related settings:
// To set up your output to include or exclude AFD values, see AfdSignaling,
// under VideoDescription. On the console, find AFD signaling under the output's
// video encoding settings.
const (
	// MxfAfdSignalingNoCopy is a MxfAfdSignaling enum value
	MxfAfdSignalingNoCopy = "NO_COPY"

	// MxfAfdSignalingCopyFromVideo is a MxfAfdSignaling enum value
	MxfAfdSignalingCopyFromVideo = "COPY_FROM_VIDEO"
)

// MxfAfdSignaling_Values returns all elements of the MxfAfdSignaling enum
func MxfAfdSignaling_Values() []string {
	return []string{
		MxfAfdSignalingNoCopy,
		MxfAfdSignalingCopyFromVideo,
	}
}

// Specify the MXF profile, also called shim, for this output. When you choose
// Auto, MediaConvert chooses a profile based on the video codec and resolution.
// For a list of codecs supported with each MXF profile, see https://docs.aws.amazon.com/mediaconvert/latest/ug/codecs-supported-with-each-mxf-profile.html.
// For more information about the automatic selection behavior, see https://docs.aws.amazon.com/mediaconvert/latest/ug/default-automatic-selection-of-mxf-profiles.html.
const ( // MxfProfileD10 is a MxfProfile enum value MxfProfileD10 = "D_10" // MxfProfileXdcam is a MxfProfile enum value MxfProfileXdcam = "XDCAM" // MxfProfileOp1a is a MxfProfile enum value MxfProfileOp1a = "OP1A" // MxfProfileXavc is a MxfProfile enum value MxfProfileXavc = "XAVC" ) // MxfProfile_Values returns all elements of the MxfProfile enum func MxfProfile_Values() []string { return []string{ MxfProfileD10, MxfProfileXdcam, MxfProfileOp1a, MxfProfileXavc, } } // To create an output that complies with the XAVC file format guidelines for // interoperability, keep the default value, Drop frames for compliance (DROP_FRAMES_FOR_COMPLIANCE). // To include all frames from your input in this output, keep the default setting, // Allow any duration (ALLOW_ANY_DURATION). The number of frames that MediaConvert // excludes when you set this to Drop frames for compliance depends on the output // frame rate and duration. const ( // MxfXavcDurationModeAllowAnyDuration is a MxfXavcDurationMode enum value MxfXavcDurationModeAllowAnyDuration = "ALLOW_ANY_DURATION" // MxfXavcDurationModeDropFramesForCompliance is a MxfXavcDurationMode enum value MxfXavcDurationModeDropFramesForCompliance = "DROP_FRAMES_FOR_COMPLIANCE" ) // MxfXavcDurationMode_Values returns all elements of the MxfXavcDurationMode enum func MxfXavcDurationMode_Values() []string { return []string{ MxfXavcDurationModeAllowAnyDuration, MxfXavcDurationModeDropFramesForCompliance, } } // Choose the type of Nielsen watermarks that you want in your outputs. When // you choose NAES 2 and NW (NAES2_AND_NW), you must provide a value for the // setting SID (sourceId). When you choose CBET (CBET), you must provide a value // for the setting CSID (cbetSourceId). When you choose NAES 2, NW, and CBET // (NAES2_AND_NW_AND_CBET), you must provide values for both of these settings. 
const ( // NielsenActiveWatermarkProcessTypeNaes2AndNw is a NielsenActiveWatermarkProcessType enum value NielsenActiveWatermarkProcessTypeNaes2AndNw = "NAES2_AND_NW" // NielsenActiveWatermarkProcessTypeCbet is a NielsenActiveWatermarkProcessType enum value NielsenActiveWatermarkProcessTypeCbet = "CBET" // NielsenActiveWatermarkProcessTypeNaes2AndNwAndCbet is a NielsenActiveWatermarkProcessType enum value NielsenActiveWatermarkProcessTypeNaes2AndNwAndCbet = "NAES2_AND_NW_AND_CBET" ) // NielsenActiveWatermarkProcessType_Values returns all elements of the NielsenActiveWatermarkProcessType enum func NielsenActiveWatermarkProcessType_Values() []string { return []string{ NielsenActiveWatermarkProcessTypeNaes2AndNw, NielsenActiveWatermarkProcessTypeCbet, NielsenActiveWatermarkProcessTypeNaes2AndNwAndCbet, } } // Required. Specify whether your source content already contains Nielsen non-linear // watermarks. When you set this value to Watermarked (WATERMARKED), the service // fails the job. Nielsen requires that you add non-linear watermarking to only // clean content that doesn't already have non-linear Nielsen watermarks. const ( // NielsenSourceWatermarkStatusTypeClean is a NielsenSourceWatermarkStatusType enum value NielsenSourceWatermarkStatusTypeClean = "CLEAN" // NielsenSourceWatermarkStatusTypeWatermarked is a NielsenSourceWatermarkStatusType enum value NielsenSourceWatermarkStatusTypeWatermarked = "WATERMARKED" ) // NielsenSourceWatermarkStatusType_Values returns all elements of the NielsenSourceWatermarkStatusType enum func NielsenSourceWatermarkStatusType_Values() []string { return []string{ NielsenSourceWatermarkStatusTypeClean, NielsenSourceWatermarkStatusTypeWatermarked, } } // To create assets that have the same TIC values in each audio track, keep // the default value Share TICs (SAME_TICS_PER_TRACK). To create assets that // have unique TIC values for each audio track, choose Use unique TICs (RESERVE_UNIQUE_TICS_PER_TRACK). 
const ( // NielsenUniqueTicPerAudioTrackTypeReserveUniqueTicsPerTrack is a NielsenUniqueTicPerAudioTrackType enum value NielsenUniqueTicPerAudioTrackTypeReserveUniqueTicsPerTrack = "RESERVE_UNIQUE_TICS_PER_TRACK" // NielsenUniqueTicPerAudioTrackTypeSameTicsPerTrack is a NielsenUniqueTicPerAudioTrackType enum value NielsenUniqueTicPerAudioTrackTypeSameTicsPerTrack = "SAME_TICS_PER_TRACK" ) // NielsenUniqueTicPerAudioTrackType_Values returns all elements of the NielsenUniqueTicPerAudioTrackType enum func NielsenUniqueTicPerAudioTrackType_Values() []string { return []string{ NielsenUniqueTicPerAudioTrackTypeReserveUniqueTicsPerTrack, NielsenUniqueTicPerAudioTrackTypeSameTicsPerTrack, } } // Optional. When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), // you can use this setting to apply sharpening. The default behavior, Auto // (AUTO), allows the transcoder to determine whether to apply filtering, depending // on input type and quality. When you set Noise reducer to Temporal, your output // bandwidth is reduced. When Post temporal sharpening is also enabled, that // bandwidth reduction is smaller. 
const ( // NoiseFilterPostTemporalSharpeningDisabled is a NoiseFilterPostTemporalSharpening enum value NoiseFilterPostTemporalSharpeningDisabled = "DISABLED" // NoiseFilterPostTemporalSharpeningEnabled is a NoiseFilterPostTemporalSharpening enum value NoiseFilterPostTemporalSharpeningEnabled = "ENABLED" // NoiseFilterPostTemporalSharpeningAuto is a NoiseFilterPostTemporalSharpening enum value NoiseFilterPostTemporalSharpeningAuto = "AUTO" ) // NoiseFilterPostTemporalSharpening_Values returns all elements of the NoiseFilterPostTemporalSharpening enum func NoiseFilterPostTemporalSharpening_Values() []string { return []string{ NoiseFilterPostTemporalSharpeningDisabled, NoiseFilterPostTemporalSharpeningEnabled, NoiseFilterPostTemporalSharpeningAuto, } } // Use Noise reducer filter (NoiseReducerFilter) to select one of the following // spatial image filtering functions. To use this setting, you must also enable // Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing // noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution // filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain // filtering based on JND principles. * Temporal optimizes video quality for // complex motion. 
const ( // NoiseReducerFilterBilateral is a NoiseReducerFilter enum value NoiseReducerFilterBilateral = "BILATERAL" // NoiseReducerFilterMean is a NoiseReducerFilter enum value NoiseReducerFilterMean = "MEAN" // NoiseReducerFilterGaussian is a NoiseReducerFilter enum value NoiseReducerFilterGaussian = "GAUSSIAN" // NoiseReducerFilterLanczos is a NoiseReducerFilter enum value NoiseReducerFilterLanczos = "LANCZOS" // NoiseReducerFilterSharpen is a NoiseReducerFilter enum value NoiseReducerFilterSharpen = "SHARPEN" // NoiseReducerFilterConserve is a NoiseReducerFilter enum value NoiseReducerFilterConserve = "CONSERVE" // NoiseReducerFilterSpatial is a NoiseReducerFilter enum value NoiseReducerFilterSpatial = "SPATIAL" // NoiseReducerFilterTemporal is a NoiseReducerFilter enum value NoiseReducerFilterTemporal = "TEMPORAL" ) // NoiseReducerFilter_Values returns all elements of the NoiseReducerFilter enum func NoiseReducerFilter_Values() []string { return []string{ NoiseReducerFilterBilateral, NoiseReducerFilterMean, NoiseReducerFilterGaussian, NoiseReducerFilterLanczos, NoiseReducerFilterSharpen, NoiseReducerFilterConserve, NoiseReducerFilterSpatial, NoiseReducerFilterTemporal, } } // Optional. When you request lists of resources, you can specify whether they // are sorted in ASCENDING or DESCENDING order. Default varies by resource. 
const ( // OrderAscending is a Order enum value OrderAscending = "ASCENDING" // OrderDescending is a Order enum value OrderDescending = "DESCENDING" ) // Order_Values returns all elements of the Order enum func Order_Values() []string { return []string{ OrderAscending, OrderDescending, } } // Type of output group (File group, Apple HLS, DASH ISO, Microsoft Smooth Streaming, // CMAF) const ( // OutputGroupTypeHlsGroupSettings is a OutputGroupType enum value OutputGroupTypeHlsGroupSettings = "HLS_GROUP_SETTINGS" // OutputGroupTypeDashIsoGroupSettings is a OutputGroupType enum value OutputGroupTypeDashIsoGroupSettings = "DASH_ISO_GROUP_SETTINGS" // OutputGroupTypeFileGroupSettings is a OutputGroupType enum value OutputGroupTypeFileGroupSettings = "FILE_GROUP_SETTINGS" // OutputGroupTypeMsSmoothGroupSettings is a OutputGroupType enum value OutputGroupTypeMsSmoothGroupSettings = "MS_SMOOTH_GROUP_SETTINGS" // OutputGroupTypeCmafGroupSettings is a OutputGroupType enum value OutputGroupTypeCmafGroupSettings = "CMAF_GROUP_SETTINGS" ) // OutputGroupType_Values returns all elements of the OutputGroupType enum func OutputGroupType_Values() []string { return []string{ OutputGroupTypeHlsGroupSettings, OutputGroupTypeDashIsoGroupSettings, OutputGroupTypeFileGroupSettings, OutputGroupTypeMsSmoothGroupSettings, OutputGroupTypeCmafGroupSettings, } } // Selects method of inserting SDT information into output stream. "Follow input // SDT" copies SDT information from input stream to output stream. "Follow input // SDT if present" copies SDT information from input stream to output stream // if SDT information is present in the input, otherwise it will fall back on // the user-defined values. Enter "SDT Manually" means user will enter the SDT // information. "No SDT" means output stream will not contain SDT information. 
const ( // OutputSdtSdtFollow is a OutputSdt enum value OutputSdtSdtFollow = "SDT_FOLLOW" // OutputSdtSdtFollowIfPresent is a OutputSdt enum value OutputSdtSdtFollowIfPresent = "SDT_FOLLOW_IF_PRESENT" // OutputSdtSdtManual is a OutputSdt enum value OutputSdtSdtManual = "SDT_MANUAL" // OutputSdtSdtNone is a OutputSdt enum value OutputSdtSdtNone = "SDT_NONE" ) // OutputSdt_Values returns all elements of the OutputSdt enum func OutputSdt_Values() []string { return []string{ OutputSdtSdtFollow, OutputSdtSdtFollowIfPresent, OutputSdtSdtManual, OutputSdtSdtNone, } } // Optional. When you request a list of presets, you can choose to list them // alphabetically by NAME or chronologically by CREATION_DATE. If you don't // specify, the service will list them by name. const ( // PresetListByName is a PresetListBy enum value PresetListByName = "NAME" // PresetListByCreationDate is a PresetListBy enum value PresetListByCreationDate = "CREATION_DATE" // PresetListBySystem is a PresetListBy enum value PresetListBySystem = "SYSTEM" ) // PresetListBy_Values returns all elements of the PresetListBy enum func PresetListBy_Values() []string { return []string{ PresetListByName, PresetListByCreationDate, PresetListBySystem, } } // Specifies whether the pricing plan for the queue is on-demand or reserved. // For on-demand, you pay per minute, billed in increments of .01 minute. For // reserved, you pay for the transcoding capacity of the entire queue, regardless // of how much or how little you use it. Reserved pricing requires a 12-month // commitment. 
const ( // PricingPlanOnDemand is a PricingPlan enum value PricingPlanOnDemand = "ON_DEMAND" // PricingPlanReserved is a PricingPlan enum value PricingPlanReserved = "RESERVED" ) // PricingPlan_Values returns all elements of the PricingPlan enum func PricingPlan_Values() []string { return []string{ PricingPlanOnDemand, PricingPlanReserved, } } // This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that // you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 // sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma // sampling. You must specify a value for this setting when your output codec // profile supports 4:4:4 chroma sampling. Related Settings: When you set Chroma // sampling to Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING), you must choose // an output codec profile that supports 4:4:4 chroma sampling. These values // for Profile (CodecProfile) support 4:4:4 chroma sampling: Apple ProRes 4444 // (APPLE_PRORES_4444) or Apple ProRes 4444 XQ (APPLE_PRORES_4444_XQ). When // you set Chroma sampling to Preserve 4:4:4 sampling, you must disable all // video preprocessors except for Nexguard file marker (PartnerWatermarking). // When you set Chroma sampling to Preserve 4:4:4 sampling and use framerate // conversion, you must set Frame rate conversion algorithm (FramerateConversionAlgorithm) // to Drop duplicate (DUPLICATE_DROP). 
const ( // ProresChromaSamplingPreserve444Sampling is a ProresChromaSampling enum value ProresChromaSamplingPreserve444Sampling = "PRESERVE_444_SAMPLING" // ProresChromaSamplingSubsampleTo422 is a ProresChromaSampling enum value ProresChromaSamplingSubsampleTo422 = "SUBSAMPLE_TO_422" ) // ProresChromaSampling_Values returns all elements of the ProresChromaSampling enum func ProresChromaSampling_Values() []string { return []string{ ProresChromaSamplingPreserve444Sampling, ProresChromaSamplingSubsampleTo422, } } // Use Profile (ProResCodecProfile) to specify the type of Apple ProRes codec // to use for this output. const ( // ProresCodecProfileAppleProres422 is a ProresCodecProfile enum value ProresCodecProfileAppleProres422 = "APPLE_PRORES_422" // ProresCodecProfileAppleProres422Hq is a ProresCodecProfile enum value ProresCodecProfileAppleProres422Hq = "APPLE_PRORES_422_HQ" // ProresCodecProfileAppleProres422Lt is a ProresCodecProfile enum value ProresCodecProfileAppleProres422Lt = "APPLE_PRORES_422_LT" // ProresCodecProfileAppleProres422Proxy is a ProresCodecProfile enum value ProresCodecProfileAppleProres422Proxy = "APPLE_PRORES_422_PROXY" // ProresCodecProfileAppleProres4444 is a ProresCodecProfile enum value ProresCodecProfileAppleProres4444 = "APPLE_PRORES_4444" // ProresCodecProfileAppleProres4444Xq is a ProresCodecProfile enum value ProresCodecProfileAppleProres4444Xq = "APPLE_PRORES_4444_XQ" ) // ProresCodecProfile_Values returns all elements of the ProresCodecProfile enum func ProresCodecProfile_Values() []string { return []string{ ProresCodecProfileAppleProres422, ProresCodecProfileAppleProres422Hq, ProresCodecProfileAppleProres422Lt, ProresCodecProfileAppleProres422Proxy, ProresCodecProfileAppleProres4444, ProresCodecProfileAppleProres4444Xq, } } // If you are using the console, use the Framerate setting to specify the frame // rate for this output. If you want to keep the same frame rate as the input // video, choose Follow source. 
If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose // Custom, specify your frame rate as a fraction. If you are creating your transcoding // job specification as a JSON file without the console, use FramerateControl // to specify which value the service uses for the frame rate for this output. // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate // from the input. Choose SPECIFIED if you want the service to use the frame // rate you specify in the settings FramerateNumerator and FramerateDenominator. const ( // ProresFramerateControlInitializeFromSource is a ProresFramerateControl enum value ProresFramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" // ProresFramerateControlSpecified is a ProresFramerateControl enum value ProresFramerateControlSpecified = "SPECIFIED" ) // ProresFramerateControl_Values returns all elements of the ProresFramerateControl enum func ProresFramerateControl_Values() []string { return []string{ ProresFramerateControlInitializeFromSource, ProresFramerateControlSpecified, } } // Choose the method that you want MediaConvert to use when increasing or decreasing // the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically // simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, // you can use interpolate (INTERPOLATE) to avoid stutter. This results in a // smooth picture, but might introduce undesirable video artifacts. For complex // frame rate conversions, especially if your source video has already been // converted from its original cadence, use FrameFormer (FRAMEFORMER) to do // motion-compensated interpolation. FrameFormer chooses the best conversion // method frame by frame. Note that using FrameFormer increases the transcoding // time and incurs a significant add-on cost. 
const ( // ProresFramerateConversionAlgorithmDuplicateDrop is a ProresFramerateConversionAlgorithm enum value ProresFramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP" // ProresFramerateConversionAlgorithmInterpolate is a ProresFramerateConversionAlgorithm enum value ProresFramerateConversionAlgorithmInterpolate = "INTERPOLATE" // ProresFramerateConversionAlgorithmFrameformer is a ProresFramerateConversionAlgorithm enum value ProresFramerateConversionAlgorithmFrameformer = "FRAMEFORMER" ) // ProresFramerateConversionAlgorithm_Values returns all elements of the ProresFramerateConversionAlgorithm enum func ProresFramerateConversionAlgorithm_Values() []string { return []string{ ProresFramerateConversionAlgorithmDuplicateDrop, ProresFramerateConversionAlgorithmInterpolate, ProresFramerateConversionAlgorithmFrameformer, } } // Choose the scan line type for the output. Keep the default value, Progressive // (PROGRESSIVE) to create a progressive output, regardless of the scan type // of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) // to create an output that's interlaced with the same field polarity throughout. // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) // to produce outputs with the same field polarity as the source. For jobs that // have multiple inputs, the output field polarity might change over the course // of the output. Follow behavior depends on the input scan type. If the source // is interlaced, the output will be interlaced with the same polarity as the // source. If the source is progressive, the output will be interlaced with // top field bottom field first, depending on which of the Follow options you // choose. 
const ( // ProresInterlaceModeProgressive is a ProresInterlaceMode enum value ProresInterlaceModeProgressive = "PROGRESSIVE" // ProresInterlaceModeTopField is a ProresInterlaceMode enum value ProresInterlaceModeTopField = "TOP_FIELD" // ProresInterlaceModeBottomField is a ProresInterlaceMode enum value ProresInterlaceModeBottomField = "BOTTOM_FIELD" // ProresInterlaceModeFollowTopField is a ProresInterlaceMode enum value ProresInterlaceModeFollowTopField = "FOLLOW_TOP_FIELD" // ProresInterlaceModeFollowBottomField is a ProresInterlaceMode enum value ProresInterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD" ) // ProresInterlaceMode_Values returns all elements of the ProresInterlaceMode enum func ProresInterlaceMode_Values() []string { return []string{ ProresInterlaceModeProgressive, ProresInterlaceModeTopField, ProresInterlaceModeBottomField, ProresInterlaceModeFollowTopField, ProresInterlaceModeFollowBottomField, } } // Optional. Specify how the service determines the pixel aspect ratio (PAR) // for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), // uses the PAR from your input video for your output. To specify a different // PAR in the console, choose any value other than Follow source. To specify // a different PAR by editing the JSON job specification, choose SPECIFIED. // When you choose SPECIFIED for this setting, you must also specify values // for the parNumerator and parDenominator settings. 
const ( // ProresParControlInitializeFromSource is a ProresParControl enum value ProresParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" // ProresParControlSpecified is a ProresParControl enum value ProresParControlSpecified = "SPECIFIED" ) // ProresParControl_Values returns all elements of the ProresParControl enum func ProresParControl_Values() []string { return []string{ ProresParControlInitializeFromSource, ProresParControlSpecified, } } // Use this setting for interlaced outputs, when your output frame rate is half // of your input frame rate. In this situation, choose Optimized interlacing // (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this // case, each progressive frame from the input corresponds to an interlaced // field in the output. Keep the default value, Basic interlacing (INTERLACED), // for all other output frame rates. With basic interlacing, MediaConvert performs // any frame rate conversion first and then interlaces the frames. When you // choose Optimized interlacing and you set your output frame rate to a value // that isn't suitable for optimized interlacing, MediaConvert automatically // falls back to basic interlacing. Required settings: To use optimized interlacing, // you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't // use optimized interlacing for hard telecine outputs. You must also set Interlace // mode (interlaceMode) to a value other than Progressive (PROGRESSIVE). 
const ( // ProresScanTypeConversionModeInterlaced is a ProresScanTypeConversionMode enum value ProresScanTypeConversionModeInterlaced = "INTERLACED" // ProresScanTypeConversionModeInterlacedOptimize is a ProresScanTypeConversionMode enum value ProresScanTypeConversionModeInterlacedOptimize = "INTERLACED_OPTIMIZE" ) // ProresScanTypeConversionMode_Values returns all elements of the ProresScanTypeConversionMode enum func ProresScanTypeConversionMode_Values() []string { return []string{ ProresScanTypeConversionModeInterlaced, ProresScanTypeConversionModeInterlacedOptimize, } } // Ignore this setting unless your input frame rate is 23.976 or 24 frames per // second (fps). Enable slow PAL to create a 25 fps output. When you enable // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples // your audio to keep it synchronized with the video. Note that enabling this // setting will slightly reduce the duration of your video. Required settings: // You must also set Framerate to 25. In your JSON job specification, set (framerateControl) // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to // 1. const ( // ProresSlowPalDisabled is a ProresSlowPal enum value ProresSlowPalDisabled = "DISABLED" // ProresSlowPalEnabled is a ProresSlowPal enum value ProresSlowPalEnabled = "ENABLED" ) // ProresSlowPal_Values returns all elements of the ProresSlowPal enum func ProresSlowPal_Values() []string { return []string{ ProresSlowPalDisabled, ProresSlowPalEnabled, } } // When you do frame rate conversion from 23.976 frames per second (fps) to // 29.97 fps, and your output scan type is interlaced, you can optionally enable // hard telecine (HARD) to create a smoother picture. When you keep the default // value, None (NONE), MediaConvert does a standard frame rate conversion to // 29.97 without doing anything with the field polarity to create a smoother // picture. 
const ( // ProresTelecineNone is a ProresTelecine enum value ProresTelecineNone = "NONE" // ProresTelecineHard is a ProresTelecine enum value ProresTelecineHard = "HARD" ) // ProresTelecine_Values returns all elements of the ProresTelecine enum func ProresTelecine_Values() []string { return []string{ ProresTelecineNone, ProresTelecineHard, } } // Optional. When you request a list of queues, you can choose to list them // alphabetically by NAME or chronologically by CREATION_DATE. If you don't // specify, the service will list them by creation date. const ( // QueueListByName is a QueueListBy enum value QueueListByName = "NAME" // QueueListByCreationDate is a QueueListBy enum value QueueListByCreationDate = "CREATION_DATE" ) // QueueListBy_Values returns all elements of the QueueListBy enum func QueueListBy_Values() []string { return []string{ QueueListByName, QueueListByCreationDate, } } // Queues can be ACTIVE or PAUSED. If you pause a queue, jobs in that queue // won't begin. Jobs that are running when you pause a queue continue to run // until they finish or result in an error. const ( // QueueStatusActive is a QueueStatus enum value QueueStatusActive = "ACTIVE" // QueueStatusPaused is a QueueStatus enum value QueueStatusPaused = "PAUSED" ) // QueueStatus_Values returns all elements of the QueueStatus enum func QueueStatus_Values() []string { return []string{ QueueStatusActive, QueueStatusPaused, } } // Specifies whether the term of your reserved queue pricing plan is automatically // extended (AUTO_RENEW) or expires (EXPIRE) at the end of the term. 
const ( // RenewalTypeAutoRenew is a RenewalType enum value RenewalTypeAutoRenew = "AUTO_RENEW" // RenewalTypeExpire is a RenewalType enum value RenewalTypeExpire = "EXPIRE" ) // RenewalType_Values returns all elements of the RenewalType enum func RenewalType_Values() []string { return []string{ RenewalTypeAutoRenew, RenewalTypeExpire, } } // Specifies whether the pricing plan for your reserved queue is ACTIVE or EXPIRED. const ( // ReservationPlanStatusActive is a ReservationPlanStatus enum value ReservationPlanStatusActive = "ACTIVE" // ReservationPlanStatusExpired is a ReservationPlanStatus enum value ReservationPlanStatusExpired = "EXPIRED" ) // ReservationPlanStatus_Values returns all elements of the ReservationPlanStatus enum func ReservationPlanStatus_Values() []string { return []string{ ReservationPlanStatusActive, ReservationPlanStatusExpired, } } // Use Respond to AFD (RespondToAfd) to specify how the service changes the // video itself in response to AFD values in the input. * Choose Respond to // clip the input video frame according to the AFD value, input display aspect // ratio, and output display aspect ratio. * Choose Passthrough to include the // input AFD values. Do not choose this when AfdSignaling is set to (NONE). // A preferred implementation of this workflow is to set RespondToAfd to (NONE) // and set AfdSignaling to (AUTO). * Choose None to remove all input AFD values // from this output. const ( // RespondToAfdNone is a RespondToAfd enum value RespondToAfdNone = "NONE" // RespondToAfdRespond is a RespondToAfd enum value RespondToAfdRespond = "RESPOND" // RespondToAfdPassthrough is a RespondToAfd enum value RespondToAfdPassthrough = "PASSTHROUGH" ) // RespondToAfd_Values returns all elements of the RespondToAfd enum func RespondToAfd_Values() []string { return []string{ RespondToAfdNone, RespondToAfdRespond, RespondToAfdPassthrough, } } // Choose an Amazon S3 canned ACL for MediaConvert to apply to this output. 
const ( // S3ObjectCannedAclPublicRead is a S3ObjectCannedAcl enum value S3ObjectCannedAclPublicRead = "PUBLIC_READ" // S3ObjectCannedAclAuthenticatedRead is a S3ObjectCannedAcl enum value S3ObjectCannedAclAuthenticatedRead = "AUTHENTICATED_READ" // S3ObjectCannedAclBucketOwnerRead is a S3ObjectCannedAcl enum value S3ObjectCannedAclBucketOwnerRead = "BUCKET_OWNER_READ" // S3ObjectCannedAclBucketOwnerFullControl is a S3ObjectCannedAcl enum value S3ObjectCannedAclBucketOwnerFullControl = "BUCKET_OWNER_FULL_CONTROL" ) // S3ObjectCannedAcl_Values returns all elements of the S3ObjectCannedAcl enum func S3ObjectCannedAcl_Values() []string { return []string{ S3ObjectCannedAclPublicRead, S3ObjectCannedAclAuthenticatedRead, S3ObjectCannedAclBucketOwnerRead, S3ObjectCannedAclBucketOwnerFullControl, } } // Specify how you want your data keys managed. AWS uses data keys to encrypt // your content. AWS also encrypts the data keys themselves, using a customer // master key (CMK), and then stores the encrypted data keys alongside your // encrypted content. Use this setting to specify which AWS service manages // the CMK. For simplest set up, choose Amazon S3 (SERVER_SIDE_ENCRYPTION_S3). // If you want your master key to be managed by AWS Key Management Service (KMS), // choose AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). By default, when you choose // AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with // Amazon S3 to encrypt your data keys. You can optionally choose to specify // a different, customer managed CMK. Do so by specifying the Amazon Resource // Name (ARN) of the key for the setting KMS ARN (kmsKeyArn). 
const ( // S3ServerSideEncryptionTypeServerSideEncryptionS3 is a S3ServerSideEncryptionType enum value S3ServerSideEncryptionTypeServerSideEncryptionS3 = "SERVER_SIDE_ENCRYPTION_S3" // S3ServerSideEncryptionTypeServerSideEncryptionKms is a S3ServerSideEncryptionType enum value S3ServerSideEncryptionTypeServerSideEncryptionKms = "SERVER_SIDE_ENCRYPTION_KMS" ) // S3ServerSideEncryptionType_Values returns all elements of the S3ServerSideEncryptionType enum func S3ServerSideEncryptionType_Values() []string { return []string{ S3ServerSideEncryptionTypeServerSideEncryptionS3, S3ServerSideEncryptionTypeServerSideEncryptionKms, } } // Specify the video color sample range for this output. To create a full range // output, you must start with a full range YUV input and keep the default value, // None (NONE). To create a limited range output from a full range input, choose // Limited range (LIMITED_RANGE_SQUEEZE). With RGB inputs, your output is always // limited range, regardless of your choice here. When you create a limited // range output from a full range input, MediaConvert limits the active pixel // values in a way that depends on the output's bit depth: 8-bit outputs contain // only values from 16 through 235 and 10-bit outputs contain only values from // 64 through 940. With this conversion, MediaConvert also changes the output // metadata to note the limited range. const ( // SampleRangeConversionLimitedRangeSqueeze is a SampleRangeConversion enum value SampleRangeConversionLimitedRangeSqueeze = "LIMITED_RANGE_SQUEEZE" // SampleRangeConversionNone is a SampleRangeConversion enum value SampleRangeConversionNone = "NONE" ) // SampleRangeConversion_Values returns all elements of the SampleRangeConversion enum func SampleRangeConversion_Values() []string { return []string{ SampleRangeConversionLimitedRangeSqueeze, SampleRangeConversionNone, } } // Specify how the service handles outputs that have a different aspect ratio // from the input aspect ratio. 
Choose Stretch to output (STRETCH_TO_OUTPUT) // to have the service stretch your video image to fit. Keep the setting Default // (DEFAULT) to have the service letterbox your video instead. This setting // overrides any value that you specify for the setting Selection placement // (position) in this output. const ( // ScalingBehaviorDefault is a ScalingBehavior enum value ScalingBehaviorDefault = "DEFAULT" // ScalingBehaviorStretchToOutput is a ScalingBehavior enum value ScalingBehaviorStretchToOutput = "STRETCH_TO_OUTPUT" ) // ScalingBehavior_Values returns all elements of the ScalingBehavior enum func ScalingBehavior_Values() []string { return []string{ ScalingBehaviorDefault, ScalingBehaviorStretchToOutput, } } // Set Framerate (SccDestinationFramerate) to make sure that the captions and // the video are synchronized in the output. Specify a frame rate that matches // the frame rate of the associated video. If the video frame rate is 29.97, // choose 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has // video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97 // non-dropframe (FRAMERATE_29_97_NON_DROPFRAME). 
const (
	// SccDestinationFramerateFramerate2397 is a SccDestinationFramerate enum value
	SccDestinationFramerateFramerate2397 = "FRAMERATE_23_97"

	// SccDestinationFramerateFramerate24 is a SccDestinationFramerate enum value
	SccDestinationFramerateFramerate24 = "FRAMERATE_24"

	// SccDestinationFramerateFramerate25 is a SccDestinationFramerate enum value
	SccDestinationFramerateFramerate25 = "FRAMERATE_25"

	// SccDestinationFramerateFramerate2997Dropframe is a SccDestinationFramerate enum value
	SccDestinationFramerateFramerate2997Dropframe = "FRAMERATE_29_97_DROPFRAME"

	// SccDestinationFramerateFramerate2997NonDropframe is a SccDestinationFramerate enum value
	SccDestinationFramerateFramerate2997NonDropframe = "FRAMERATE_29_97_NON_DROPFRAME"
)

// SccDestinationFramerate_Values returns all elements of the SccDestinationFramerate enum
func SccDestinationFramerate_Values() []string {
	return []string{
		SccDestinationFramerateFramerate2397,
		SccDestinationFramerateFramerate24,
		SccDestinationFramerateFramerate25,
		SccDestinationFramerateFramerate2997Dropframe,
		SccDestinationFramerateFramerate2997NonDropframe,
	}
}

// Enable this setting when you run a test job to estimate how many reserved
// transcoding slots (RTS) you need. When enabled, MediaConvert runs your job
// from an on-demand queue with performance similar to what you will see with
// one RTS in a reserved queue. This setting is disabled by default.
const (
	// SimulateReservedQueueDisabled is a SimulateReservedQueue enum value
	SimulateReservedQueueDisabled = "DISABLED"

	// SimulateReservedQueueEnabled is a SimulateReservedQueue enum value
	SimulateReservedQueueEnabled = "ENABLED"
)

// SimulateReservedQueue_Values returns all elements of the SimulateReservedQueue enum
func SimulateReservedQueue_Values() []string {
	return []string{
		SimulateReservedQueueDisabled,
		SimulateReservedQueueEnabled,
	}
}

// Specify how often MediaConvert sends STATUS_UPDATE events to Amazon
// CloudWatch Events. Set the interval, in seconds, between status updates.
// MediaConvert sends an update at this interval from the time the service
// begins processing your job to the time it completes the transcode or
// encounters an error.
const (
	// StatusUpdateIntervalSeconds10 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds10 = "SECONDS_10"

	// StatusUpdateIntervalSeconds12 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds12 = "SECONDS_12"

	// StatusUpdateIntervalSeconds15 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds15 = "SECONDS_15"

	// StatusUpdateIntervalSeconds20 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds20 = "SECONDS_20"

	// StatusUpdateIntervalSeconds30 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds30 = "SECONDS_30"

	// StatusUpdateIntervalSeconds60 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds60 = "SECONDS_60"

	// StatusUpdateIntervalSeconds120 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds120 = "SECONDS_120"

	// StatusUpdateIntervalSeconds180 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds180 = "SECONDS_180"

	// StatusUpdateIntervalSeconds240 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds240 = "SECONDS_240"

	// StatusUpdateIntervalSeconds300 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds300 = "SECONDS_300"

	// StatusUpdateIntervalSeconds360 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds360 = "SECONDS_360"

	// StatusUpdateIntervalSeconds420 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds420 = "SECONDS_420"

	// StatusUpdateIntervalSeconds480 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds480 = "SECONDS_480"

	// StatusUpdateIntervalSeconds540 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds540 = "SECONDS_540"

	// StatusUpdateIntervalSeconds600 is a StatusUpdateInterval enum value
	StatusUpdateIntervalSeconds600 = "SECONDS_600"
)

// StatusUpdateInterval_Values returns all elements of the StatusUpdateInterval enum
func StatusUpdateInterval_Values() []string {
	return []string{
		StatusUpdateIntervalSeconds10,
		StatusUpdateIntervalSeconds12,
		StatusUpdateIntervalSeconds15,
		StatusUpdateIntervalSeconds20,
		StatusUpdateIntervalSeconds30,
		StatusUpdateIntervalSeconds60,
		StatusUpdateIntervalSeconds120,
		StatusUpdateIntervalSeconds180,
		StatusUpdateIntervalSeconds240,
		StatusUpdateIntervalSeconds300,
		StatusUpdateIntervalSeconds360,
		StatusUpdateIntervalSeconds420,
		StatusUpdateIntervalSeconds480,
		StatusUpdateIntervalSeconds540,
		StatusUpdateIntervalSeconds600,
	}
}

// A page type as defined in the standard ETSI EN 300 468, Table 94.
const (
	// TeletextPageTypePageTypeInitial is a TeletextPageType enum value
	TeletextPageTypePageTypeInitial = "PAGE_TYPE_INITIAL"

	// TeletextPageTypePageTypeSubtitle is a TeletextPageType enum value
	TeletextPageTypePageTypeSubtitle = "PAGE_TYPE_SUBTITLE"

	// TeletextPageTypePageTypeAddlInfo is a TeletextPageType enum value
	TeletextPageTypePageTypeAddlInfo = "PAGE_TYPE_ADDL_INFO"

	// TeletextPageTypePageTypeProgramSchedule is a TeletextPageType enum value
	TeletextPageTypePageTypeProgramSchedule = "PAGE_TYPE_PROGRAM_SCHEDULE"

	// TeletextPageTypePageTypeHearingImpairedSubtitle is a TeletextPageType enum value
	TeletextPageTypePageTypeHearingImpairedSubtitle = "PAGE_TYPE_HEARING_IMPAIRED_SUBTITLE"
)

// TeletextPageType_Values returns all elements of the TeletextPageType enum
func TeletextPageType_Values() []string {
	return []string{
		TeletextPageTypePageTypeInitial,
		TeletextPageTypePageTypeSubtitle,
		TeletextPageTypePageTypeAddlInfo,
		TeletextPageTypePageTypeProgramSchedule,
		TeletextPageTypePageTypeHearingImpairedSubtitle,
	}
}

// Use Position (Position) under Timecode burn-in (TimecodeBurnIn) to specify
// the location of the burned-in timecode on output video.
const (
	// TimecodeBurninPositionTopCenter is a TimecodeBurninPosition enum value
	TimecodeBurninPositionTopCenter = "TOP_CENTER"

	// TimecodeBurninPositionTopLeft is a TimecodeBurninPosition enum value
	TimecodeBurninPositionTopLeft = "TOP_LEFT"

	// TimecodeBurninPositionTopRight is a TimecodeBurninPosition enum value
	TimecodeBurninPositionTopRight = "TOP_RIGHT"

	// TimecodeBurninPositionMiddleLeft is a TimecodeBurninPosition enum value
	TimecodeBurninPositionMiddleLeft = "MIDDLE_LEFT"

	// TimecodeBurninPositionMiddleCenter is a TimecodeBurninPosition enum value
	TimecodeBurninPositionMiddleCenter = "MIDDLE_CENTER"

	// TimecodeBurninPositionMiddleRight is a TimecodeBurninPosition enum value
	TimecodeBurninPositionMiddleRight = "MIDDLE_RIGHT"

	// TimecodeBurninPositionBottomLeft is a TimecodeBurninPosition enum value
	TimecodeBurninPositionBottomLeft = "BOTTOM_LEFT"

	// TimecodeBurninPositionBottomCenter is a TimecodeBurninPosition enum value
	TimecodeBurninPositionBottomCenter = "BOTTOM_CENTER"

	// TimecodeBurninPositionBottomRight is a TimecodeBurninPosition enum value
	TimecodeBurninPositionBottomRight = "BOTTOM_RIGHT"
)

// TimecodeBurninPosition_Values returns all elements of the TimecodeBurninPosition enum
func TimecodeBurninPosition_Values() []string {
	return []string{
		TimecodeBurninPositionTopCenter,
		TimecodeBurninPositionTopLeft,
		TimecodeBurninPositionTopRight,
		TimecodeBurninPositionMiddleLeft,
		TimecodeBurninPositionMiddleCenter,
		TimecodeBurninPositionMiddleRight,
		TimecodeBurninPositionBottomLeft,
		TimecodeBurninPositionBottomCenter,
		TimecodeBurninPositionBottomRight,
	}
}

// Use Source (TimecodeSource) to set how timecodes are handled within this
// job. To make sure that your video, audio, captions, and markers are
// synchronized and that time-based features, such as image inserter, work
// correctly, choose the Timecode source option that matches your assets. All
// timecodes are in a 24-hour format with frame number (HH:MM:SS:FF).
// * Embedded (EMBEDDED) - Use the timecode that is in the input video. If no
// embedded timecode is in the source, the service will use Start at 0
// (ZEROBASED) instead.
// * Start at 0 (ZEROBASED) - Set the timecode of the initial frame to
// 00:00:00:00.
// * Specified Start (SPECIFIEDSTART) - Set the timecode of the initial frame
// to a value other than zero. You use Start timecode (Start) to provide this
// value.
const (
	// TimecodeSourceEmbedded is a TimecodeSource enum value
	TimecodeSourceEmbedded = "EMBEDDED"

	// TimecodeSourceZerobased is a TimecodeSource enum value
	TimecodeSourceZerobased = "ZEROBASED"

	// TimecodeSourceSpecifiedstart is a TimecodeSource enum value
	TimecodeSourceSpecifiedstart = "SPECIFIEDSTART"
)

// TimecodeSource_Values returns all elements of the TimecodeSource enum
func TimecodeSource_Values() []string {
	return []string{
		TimecodeSourceEmbedded,
		TimecodeSourceZerobased,
		TimecodeSourceSpecifiedstart,
	}
}

// Applies only to HLS outputs. Use this setting to specify whether the
// service inserts the ID3 timed metadata from the input in this output.
const (
	// TimedMetadataPassthrough is a TimedMetadata enum value
	TimedMetadataPassthrough = "PASSTHROUGH"

	// TimedMetadataNone is a TimedMetadata enum value
	TimedMetadataNone = "NONE"
)

// TimedMetadata_Values returns all elements of the TimedMetadata enum
func TimedMetadata_Values() []string {
	return []string{
		TimedMetadataPassthrough,
		TimedMetadataNone,
	}
}

// Pass through style and position information from a TTML-like input source
// (TTML, IMSC, SMPTE-TT) to the TTML output.
const (
	// TtmlStylePassthroughEnabled is a TtmlStylePassthrough enum value
	TtmlStylePassthroughEnabled = "ENABLED"

	// TtmlStylePassthroughDisabled is a TtmlStylePassthrough enum value
	TtmlStylePassthroughDisabled = "DISABLED"
)

// TtmlStylePassthrough_Values returns all elements of the TtmlStylePassthrough enum
func TtmlStylePassthrough_Values() []string {
	return []string{
		TtmlStylePassthroughEnabled,
		TtmlStylePassthroughDisabled,
	}
}

const (
	// TypeSystem is a Type enum value
	TypeSystem = "SYSTEM"

	// TypeCustom is a Type enum value
	TypeCustom = "CUSTOM"
)

// Type_Values returns all elements of the Type enum
func Type_Values() []string {
	return []string{
		TypeSystem,
		TypeCustom,
	}
}

// Specify the VC3 class to choose the quality characteristics for this
// output. VC3 class, together with the settings Framerate
// (framerateNumerator and framerateDenominator) and Resolution (height and
// width), determine your output bitrate. For example, with a 1920x1080
// resolution at 29.97 fps, Class 145 (CLASS_145) produces a bitrate of
// approximately 145 Mbps and Class 220 (CLASS_220) produces a bitrate of
// approximately 220 Mbps. VC3 class also specifies the color bit depth of
// your output.
const (
	// Vc3ClassClass1458bit is a Vc3Class enum value
	Vc3ClassClass1458bit = "CLASS_145_8BIT"

	// Vc3ClassClass2208bit is a Vc3Class enum value
	Vc3ClassClass2208bit = "CLASS_220_8BIT"

	// Vc3ClassClass22010bit is a Vc3Class enum value
	Vc3ClassClass22010bit = "CLASS_220_10BIT"
)

// Vc3Class_Values returns all elements of the Vc3Class enum
func Vc3Class_Values() []string {
	return []string{
		Vc3ClassClass1458bit,
		Vc3ClassClass2208bit,
		Vc3ClassClass22010bit,
	}
}

// If you are using the console, use the Framerate setting to specify the
// frame rate for this output. To keep the same frame rate as the input video,
// choose Follow source. To do frame rate conversion, choose a frame rate from
// the dropdown list (decimal approximations of fractions) or choose Custom
// and specify your frame rate as a fraction. If you are creating your
// transcoding job specification as a JSON file without the console, use
// FramerateControl instead: choose INITIALIZE_FROM_SOURCE to use the frame
// rate from the input, or SPECIFIED to use the frame rate you specify in
// FramerateNumerator and FramerateDenominator.
const (
	// Vc3FramerateControlInitializeFromSource is a Vc3FramerateControl enum value
	Vc3FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Vc3FramerateControlSpecified is a Vc3FramerateControl enum value
	Vc3FramerateControlSpecified = "SPECIFIED"
)

// Vc3FramerateControl_Values returns all elements of the Vc3FramerateControl enum
func Vc3FramerateControl_Values() []string {
	return []string{
		Vc3FramerateControlInitializeFromSource,
		Vc3FramerateControlSpecified,
	}
}

// Choose the method that you want MediaConvert to use when increasing or
// decreasing the frame rate. We recommend drop duplicate (DUPLICATE_DROP)
// for numerically simple conversions, such as 60 fps to 30 fps. For
// numerically complex conversions, you can use interpolate (INTERPOLATE) to
// avoid stutter; this gives a smooth picture but might introduce undesirable
// video artifacts. For complex frame rate conversions, especially if your
// source video has already been converted from its original cadence, use
// FrameFormer (FRAMEFORMER) to do motion-compensated interpolation.
// FrameFormer chooses the best conversion method frame by frame, but
// increases the transcoding time and incurs a significant add-on cost.
const (
	// Vc3FramerateConversionAlgorithmDuplicateDrop is a Vc3FramerateConversionAlgorithm enum value
	Vc3FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// Vc3FramerateConversionAlgorithmInterpolate is a Vc3FramerateConversionAlgorithm enum value
	Vc3FramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// Vc3FramerateConversionAlgorithmFrameformer is a Vc3FramerateConversionAlgorithm enum value
	Vc3FramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// Vc3FramerateConversionAlgorithm_Values returns all elements of the Vc3FramerateConversionAlgorithm enum
func Vc3FramerateConversionAlgorithm_Values() []string {
	return []string{
		Vc3FramerateConversionAlgorithmDuplicateDrop,
		Vc3FramerateConversionAlgorithmInterpolate,
		Vc3FramerateConversionAlgorithmFrameformer,
	}
}

// Optional. Choose the scan line type for this output. If you don't specify
// a value, MediaConvert will create a progressive output.
const (
	// Vc3InterlaceModeInterlaced is a Vc3InterlaceMode enum value
	Vc3InterlaceModeInterlaced = "INTERLACED"

	// Vc3InterlaceModeProgressive is a Vc3InterlaceMode enum value
	Vc3InterlaceModeProgressive = "PROGRESSIVE"
)

// Vc3InterlaceMode_Values returns all elements of the Vc3InterlaceMode enum
func Vc3InterlaceMode_Values() []string {
	return []string{
		Vc3InterlaceModeInterlaced,
		Vc3InterlaceModeProgressive,
	}
}

// Use this setting for interlaced outputs, when your output frame rate is
// half of your input frame rate. In this situation, choose Optimized
// interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced
// output; each progressive frame from the input then corresponds to an
// interlaced field in the output. Keep the default value, Basic interlacing
// (INTERLACED), for all other output frame rates; with basic interlacing,
// MediaConvert performs any frame rate conversion first and then interlaces
// the frames. When you choose Optimized interlacing and you set your output
// frame rate to a value that isn't suitable for optimized interlacing,
// MediaConvert automatically falls back to basic interlacing. Required
// settings: To use optimized interlacing, you must set Telecine (telecine)
// to None (NONE) or Soft (SOFT); you can't use optimized interlacing for
// hard telecine outputs. You must also set Interlace mode (interlaceMode)
// to a value other than Progressive (PROGRESSIVE).
const (
	// Vc3ScanTypeConversionModeInterlaced is a Vc3ScanTypeConversionMode enum value
	Vc3ScanTypeConversionModeInterlaced = "INTERLACED"

	// Vc3ScanTypeConversionModeInterlacedOptimize is a Vc3ScanTypeConversionMode enum value
	Vc3ScanTypeConversionModeInterlacedOptimize = "INTERLACED_OPTIMIZE"
)

// Vc3ScanTypeConversionMode_Values returns all elements of the Vc3ScanTypeConversionMode enum
func Vc3ScanTypeConversionMode_Values() []string {
	return []string{
		Vc3ScanTypeConversionModeInterlaced,
		Vc3ScanTypeConversionModeInterlacedOptimize,
	}
}

// Ignore this setting unless your input frame rate is 23.976 or 24 frames
// per second (fps). Enable slow PAL to create a 25 fps output by relabeling
// the video frames and resampling your audio. Note that enabling this
// setting will slightly reduce the duration of your video. Related settings:
// You must also set Framerate to 25. In your JSON job specification, set
// (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and
// (framerateDenominator) to 1.
const (
	// Vc3SlowPalDisabled is a Vc3SlowPal enum value
	Vc3SlowPalDisabled = "DISABLED"

	// Vc3SlowPalEnabled is a Vc3SlowPal enum value
	Vc3SlowPalEnabled = "ENABLED"
)

// Vc3SlowPal_Values returns all elements of the Vc3SlowPal enum
func Vc3SlowPal_Values() []string {
	return []string{
		Vc3SlowPalDisabled,
		Vc3SlowPalEnabled,
	}
}

// When you do frame rate conversion from 23.976 frames per second (fps) to
// 29.97 fps, and your output scan type is interlaced, you can optionally
// enable hard telecine (HARD) to create a smoother picture. When you keep
// the default value, None (NONE), MediaConvert does a standard frame rate
// conversion to 29.97 without doing anything with the field polarity to
// create a smoother picture.
const (
	// Vc3TelecineNone is a Vc3Telecine enum value
	Vc3TelecineNone = "NONE"

	// Vc3TelecineHard is a Vc3Telecine enum value
	Vc3TelecineHard = "HARD"
)

// Vc3Telecine_Values returns all elements of the Vc3Telecine enum
func Vc3Telecine_Values() []string {
	return []string{
		Vc3TelecineNone,
		Vc3TelecineHard,
	}
}

// Type of video codec
const (
	// VideoCodecAv1 is a VideoCodec enum value
	VideoCodecAv1 = "AV1"

	// VideoCodecAvcIntra is a VideoCodec enum value
	VideoCodecAvcIntra = "AVC_INTRA"

	// VideoCodecFrameCapture is a VideoCodec enum value
	VideoCodecFrameCapture = "FRAME_CAPTURE"

	// VideoCodecH264 is a VideoCodec enum value
	VideoCodecH264 = "H_264"

	// VideoCodecH265 is a VideoCodec enum value
	VideoCodecH265 = "H_265"

	// VideoCodecMpeg2 is a VideoCodec enum value
	VideoCodecMpeg2 = "MPEG2"

	// VideoCodecProres is a VideoCodec enum value
	VideoCodecProres = "PRORES"

	// VideoCodecVc3 is a VideoCodec enum value
	VideoCodecVc3 = "VC3"

	// VideoCodecVp8 is a VideoCodec enum value
	VideoCodecVp8 = "VP8"

	// VideoCodecVp9 is a VideoCodec enum value
	VideoCodecVp9 = "VP9"

	// VideoCodecXavc is a VideoCodec enum value
	VideoCodecXavc = "XAVC"
)

// VideoCodec_Values returns all elements of the VideoCodec enum
func VideoCodec_Values() []string {
	return []string{
		VideoCodecAv1,
		VideoCodecAvcIntra,
		VideoCodecFrameCapture,
		VideoCodecH264,
		VideoCodecH265,
		VideoCodecMpeg2,
		VideoCodecProres,
		VideoCodecVc3,
		VideoCodecVp8,
		VideoCodecVp9,
		VideoCodecXavc,
	}
}

// Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable
// Timecode insertion when the input frame rate is identical to the output
// frame rate. To include timecodes in this output, set Timecode insertion
// (VideoTimecodeInsertion) to PIC_TIMING_SEI; to leave them out, set it to
// DISABLED. Default is DISABLED. When the service inserts timecodes in an
// output, by default, it uses any embedded timecodes from the input. If none
// are present, the service will set the timecode for the first output frame
// to zero. To change this default behavior, adjust the settings under
// Timecode configuration (TimecodeConfig). In the console, these settings
// are located under Job > Job settings > Timecode configuration. Note -
// Timecode source under input settings (InputTimecodeSource) does not affect
// the timecodes that are inserted in the output; Source under Job settings >
// Timecode configuration (TimecodeSource) does.
const (
	// VideoTimecodeInsertionDisabled is a VideoTimecodeInsertion enum value
	VideoTimecodeInsertionDisabled = "DISABLED"

	// VideoTimecodeInsertionPicTimingSei is a VideoTimecodeInsertion enum value
	VideoTimecodeInsertionPicTimingSei = "PIC_TIMING_SEI"
)

// VideoTimecodeInsertion_Values returns all elements of the VideoTimecodeInsertion enum
func VideoTimecodeInsertion_Values() []string {
	return []string{
		VideoTimecodeInsertionDisabled,
		VideoTimecodeInsertionPicTimingSei,
	}
}

// If you are using the console, use the Framerate setting to specify the
// frame rate for this output. To keep the same frame rate as the input video,
// choose Follow source. To do frame rate conversion, choose a frame rate from
// the dropdown list (decimal approximations of fractions) or choose Custom
// and specify your frame rate as a fraction. If you are creating your
// transcoding job specification as a JSON file without the console, use
// FramerateControl instead: choose INITIALIZE_FROM_SOURCE to use the frame
// rate from the input, or SPECIFIED to use the frame rate you specify in
// FramerateNumerator and FramerateDenominator.
const (
	// Vp8FramerateControlInitializeFromSource is a Vp8FramerateControl enum value
	Vp8FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Vp8FramerateControlSpecified is a Vp8FramerateControl enum value
	Vp8FramerateControlSpecified = "SPECIFIED"
)

// Vp8FramerateControl_Values returns all elements of the Vp8FramerateControl enum
func Vp8FramerateControl_Values() []string {
	return []string{
		Vp8FramerateControlInitializeFromSource,
		Vp8FramerateControlSpecified,
	}
}

// Choose the method that you want MediaConvert to use when increasing or
// decreasing the frame rate. We recommend drop duplicate (DUPLICATE_DROP)
// for numerically simple conversions, such as 60 fps to 30 fps. For
// numerically complex conversions, you can use interpolate (INTERPOLATE) to
// avoid stutter; this gives a smooth picture but might introduce undesirable
// video artifacts. For complex frame rate conversions, especially if your
// source video has already been converted from its original cadence, use
// FrameFormer (FRAMEFORMER) to do motion-compensated interpolation.
// FrameFormer chooses the best conversion method frame by frame, but
// increases the transcoding time and incurs a significant add-on cost.
const (
	// Vp8FramerateConversionAlgorithmDuplicateDrop is a Vp8FramerateConversionAlgorithm enum value
	Vp8FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// Vp8FramerateConversionAlgorithmInterpolate is a Vp8FramerateConversionAlgorithm enum value
	Vp8FramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// Vp8FramerateConversionAlgorithmFrameformer is a Vp8FramerateConversionAlgorithm enum value
	Vp8FramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// Vp8FramerateConversionAlgorithm_Values returns all elements of the Vp8FramerateConversionAlgorithm enum
func Vp8FramerateConversionAlgorithm_Values() []string {
	return []string{
		Vp8FramerateConversionAlgorithmDuplicateDrop,
		Vp8FramerateConversionAlgorithmInterpolate,
		Vp8FramerateConversionAlgorithmFrameformer,
	}
}

// Optional. Specify how the service determines the pixel aspect ratio (PAR)
// for this output. The default behavior, Follow source
// (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your
// output. To specify a different PAR in the console, choose any value other
// than Follow source. To specify a different PAR by editing the JSON job
// specification, choose SPECIFIED; you must then also specify values for the
// parNumerator and parDenominator settings.
const (
	// Vp8ParControlInitializeFromSource is a Vp8ParControl enum value
	Vp8ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Vp8ParControlSpecified is a Vp8ParControl enum value
	Vp8ParControlSpecified = "SPECIFIED"
)

// Vp8ParControl_Values returns all elements of the Vp8ParControl enum
func Vp8ParControl_Values() []string {
	return []string{
		Vp8ParControlInitializeFromSource,
		Vp8ParControlSpecified,
	}
}

// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
// want to trade off encoding speed for output video quality. The default
// behavior is faster, lower quality, multi-pass encoding.
const (
	// Vp8QualityTuningLevelMultiPass is a Vp8QualityTuningLevel enum value
	Vp8QualityTuningLevelMultiPass = "MULTI_PASS"

	// Vp8QualityTuningLevelMultiPassHq is a Vp8QualityTuningLevel enum value
	Vp8QualityTuningLevelMultiPassHq = "MULTI_PASS_HQ"
)

// Vp8QualityTuningLevel_Values returns all elements of the Vp8QualityTuningLevel enum
func Vp8QualityTuningLevel_Values() []string {
	return []string{
		Vp8QualityTuningLevelMultiPass,
		Vp8QualityTuningLevelMultiPassHq,
	}
}

// With the VP8 codec, you can use only the variable bitrate (VBR) rate
// control mode.
const (
	// Vp8RateControlModeVbr is a Vp8RateControlMode enum value
	Vp8RateControlModeVbr = "VBR"
)

// Vp8RateControlMode_Values returns all elements of the Vp8RateControlMode enum
func Vp8RateControlMode_Values() []string {
	return []string{
		Vp8RateControlModeVbr,
	}
}

// If you are using the console, use the Framerate setting to specify the
// frame rate for this output. To keep the same frame rate as the input video,
// choose Follow source. To do frame rate conversion, choose a frame rate from
// the dropdown list (decimal approximations of fractions) or choose Custom
// and specify your frame rate as a fraction. If you are creating your
// transcoding job specification as a JSON file without the console, use
// FramerateControl instead: choose INITIALIZE_FROM_SOURCE to use the frame
// rate from the input, or SPECIFIED to use the frame rate you specify in
// FramerateNumerator and FramerateDenominator.
const (
	// Vp9FramerateControlInitializeFromSource is a Vp9FramerateControl enum value
	Vp9FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Vp9FramerateControlSpecified is a Vp9FramerateControl enum value
	Vp9FramerateControlSpecified = "SPECIFIED"
)

// Vp9FramerateControl_Values returns all elements of the Vp9FramerateControl enum
func Vp9FramerateControl_Values() []string {
	return []string{
		Vp9FramerateControlInitializeFromSource,
		Vp9FramerateControlSpecified,
	}
}

// Choose the method that you want MediaConvert to use when increasing or
// decreasing the frame rate. We recommend drop duplicate (DUPLICATE_DROP)
// for numerically simple conversions, such as 60 fps to 30 fps. For
// numerically complex conversions, you can use interpolate (INTERPOLATE) to
// avoid stutter; this gives a smooth picture but might introduce undesirable
// video artifacts. For complex frame rate conversions, especially if your
// source video has already been converted from its original cadence, use
// FrameFormer (FRAMEFORMER) to do motion-compensated interpolation.
// FrameFormer chooses the best conversion method frame by frame, but
// increases the transcoding time and incurs a significant add-on cost.
const (
	// Vp9FramerateConversionAlgorithmDuplicateDrop is a Vp9FramerateConversionAlgorithm enum value
	Vp9FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// Vp9FramerateConversionAlgorithmInterpolate is a Vp9FramerateConversionAlgorithm enum value
	Vp9FramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// Vp9FramerateConversionAlgorithmFrameformer is a Vp9FramerateConversionAlgorithm enum value
	Vp9FramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// Vp9FramerateConversionAlgorithm_Values returns all elements of the Vp9FramerateConversionAlgorithm enum
func Vp9FramerateConversionAlgorithm_Values() []string {
	return []string{
		Vp9FramerateConversionAlgorithmDuplicateDrop,
		Vp9FramerateConversionAlgorithmInterpolate,
		Vp9FramerateConversionAlgorithmFrameformer,
	}
}

// Optional. Specify how the service determines the pixel aspect ratio (PAR)
// for this output. The default behavior, Follow source
// (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your
// output. To specify a different PAR in the console, choose any value other
// than Follow source. To specify a different PAR by editing the JSON job
// specification, choose SPECIFIED; you must then also specify values for the
// parNumerator and parDenominator settings.
const (
	// Vp9ParControlInitializeFromSource is a Vp9ParControl enum value
	Vp9ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// Vp9ParControlSpecified is a Vp9ParControl enum value
	Vp9ParControlSpecified = "SPECIFIED"
)

// Vp9ParControl_Values returns all elements of the Vp9ParControl enum
func Vp9ParControl_Values() []string {
	return []string{
		Vp9ParControlInitializeFromSource,
		Vp9ParControlSpecified,
	}
}

// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
// want to trade off encoding speed for output video quality. The default
// behavior is faster, lower quality, multi-pass encoding.
const (
	// Vp9QualityTuningLevelMultiPass is a Vp9QualityTuningLevel enum value
	Vp9QualityTuningLevelMultiPass = "MULTI_PASS"

	// Vp9QualityTuningLevelMultiPassHq is a Vp9QualityTuningLevel enum value
	Vp9QualityTuningLevelMultiPassHq = "MULTI_PASS_HQ"
)

// Vp9QualityTuningLevel_Values returns all elements of the Vp9QualityTuningLevel enum
func Vp9QualityTuningLevel_Values() []string {
	return []string{
		Vp9QualityTuningLevelMultiPass,
		Vp9QualityTuningLevelMultiPassHq,
	}
}

// With the VP9 codec, you can use only the variable bitrate (VBR) rate
// control mode.
const (
	// Vp9RateControlModeVbr is a Vp9RateControlMode enum value
	Vp9RateControlModeVbr = "VBR"
)

// Vp9RateControlMode_Values returns all elements of the Vp9RateControlMode enum
func Vp9RateControlMode_Values() []string {
	return []string{
		Vp9RateControlModeVbr,
	}
}

// Optional. Ignore this setting unless Nagra support directs you to specify
// a value. When you don't specify a value here, the Nagra NexGuard library
// uses its default value.
const (
	// WatermarkingStrengthLightest is a WatermarkingStrength enum value
	WatermarkingStrengthLightest = "LIGHTEST"

	// WatermarkingStrengthLighter is a WatermarkingStrength enum value
	WatermarkingStrengthLighter = "LIGHTER"

	// WatermarkingStrengthDefault is a WatermarkingStrength enum value
	WatermarkingStrengthDefault = "DEFAULT"

	// WatermarkingStrengthStronger is a WatermarkingStrength enum value
	WatermarkingStrengthStronger = "STRONGER"

	// WatermarkingStrengthStrongest is a WatermarkingStrength enum value
	WatermarkingStrengthStrongest = "STRONGEST"
)

// WatermarkingStrength_Values returns all elements of the WatermarkingStrength enum
func WatermarkingStrength_Values() []string {
	return []string{
		WatermarkingStrengthLightest,
		WatermarkingStrengthLighter,
		WatermarkingStrengthDefault,
		WatermarkingStrengthStronger,
		WatermarkingStrengthStrongest,
	}
}

// The service defaults to using RIFF for WAV outputs. If your output audio
// is likely to exceed 4 GB in file size, or if you otherwise need the
// extended support of the RF64 format, set your output WAV file format to
// RF64.
const (
	// WavFormatRiff is a WavFormat enum value
	WavFormatRiff = "RIFF"

	// WavFormatRf64 is a WavFormat enum value
	WavFormatRf64 = "RF64"
)

// WavFormat_Values returns all elements of the WavFormat enum
func WavFormat_Values() []string {
	return []string{
		WavFormatRiff,
		WavFormatRf64,
	}
}

// Choose Enabled (ENABLED) to have MediaConvert use the font style, color,
// and position information from the captions source in the input. Keep the
// default value, Disabled (DISABLED), for simplified output captions.
const (
	// WebvttStylePassthroughEnabled is a WebvttStylePassthrough enum value
	WebvttStylePassthroughEnabled = "ENABLED"

	// WebvttStylePassthroughDisabled is a WebvttStylePassthrough enum value
	WebvttStylePassthroughDisabled = "DISABLED"
)

// WebvttStylePassthrough_Values returns all elements of the WebvttStylePassthrough enum
func WebvttStylePassthrough_Values() []string {
	return []string{
		WebvttStylePassthroughEnabled,
		WebvttStylePassthroughDisabled,
	}
}

// Specify the XAVC Intra 4k (CBG) Class to set the bitrate of your output.
// Outputs of the same class have similar image quality over the operating
// points that are valid for that class.
const (
	// Xavc4kIntraCbgProfileClassClass100 is a Xavc4kIntraCbgProfileClass enum value
	Xavc4kIntraCbgProfileClassClass100 = "CLASS_100"

	// Xavc4kIntraCbgProfileClassClass300 is a Xavc4kIntraCbgProfileClass enum value
	Xavc4kIntraCbgProfileClassClass300 = "CLASS_300"

	// Xavc4kIntraCbgProfileClassClass480 is a Xavc4kIntraCbgProfileClass enum value
	Xavc4kIntraCbgProfileClassClass480 = "CLASS_480"
)

// Xavc4kIntraCbgProfileClass_Values returns all elements of the Xavc4kIntraCbgProfileClass enum
func Xavc4kIntraCbgProfileClass_Values() []string {
	return []string{
		Xavc4kIntraCbgProfileClassClass100,
		Xavc4kIntraCbgProfileClassClass300,
		Xavc4kIntraCbgProfileClassClass480,
	}
}

// Specify the XAVC Intra 4k (VBR) Class to set the bitrate of your output.
// Outputs of the same class have similar image quality over the operating points
// that are valid for that class.
const (
	// Xavc4kIntraVbrProfileClassClass100 is a Xavc4kIntraVbrProfileClass enum value
	Xavc4kIntraVbrProfileClassClass100 = "CLASS_100"

	// Xavc4kIntraVbrProfileClassClass300 is a Xavc4kIntraVbrProfileClass enum value
	Xavc4kIntraVbrProfileClassClass300 = "CLASS_300"

	// Xavc4kIntraVbrProfileClassClass480 is a Xavc4kIntraVbrProfileClass enum value
	Xavc4kIntraVbrProfileClassClass480 = "CLASS_480"
)

// Xavc4kIntraVbrProfileClass_Values returns all elements of the Xavc4kIntraVbrProfileClass enum
func Xavc4kIntraVbrProfileClass_Values() []string {
	return []string{
		Xavc4kIntraVbrProfileClassClass100,
		Xavc4kIntraVbrProfileClassClass300,
		Xavc4kIntraVbrProfileClassClass480,
	}
}

// Specify the XAVC 4k (Long GOP) Bitrate Class to set the bitrate of your output.
// Outputs of the same class have similar image quality over the operating points
// that are valid for that class.
const (
	// Xavc4kProfileBitrateClassBitrateClass100 is a Xavc4kProfileBitrateClass enum value
	Xavc4kProfileBitrateClassBitrateClass100 = "BITRATE_CLASS_100"

	// Xavc4kProfileBitrateClassBitrateClass140 is a Xavc4kProfileBitrateClass enum value
	Xavc4kProfileBitrateClassBitrateClass140 = "BITRATE_CLASS_140"

	// Xavc4kProfileBitrateClassBitrateClass200 is a Xavc4kProfileBitrateClass enum value
	Xavc4kProfileBitrateClassBitrateClass200 = "BITRATE_CLASS_200"
)

// Xavc4kProfileBitrateClass_Values returns all elements of the Xavc4kProfileBitrateClass enum
func Xavc4kProfileBitrateClass_Values() []string {
	return []string{
		Xavc4kProfileBitrateClassBitrateClass100,
		Xavc4kProfileBitrateClassBitrateClass140,
		Xavc4kProfileBitrateClassBitrateClass200,
	}
}

// Specify the codec profile for this output. Choose High, 8-bit, 4:2:0 (HIGH)
// or High, 10-bit, 4:2:2 (HIGH_422). These profiles are specified in ITU-T
// H.264.
const (
	// Xavc4kProfileCodecProfileHigh is a Xavc4kProfileCodecProfile enum value
	Xavc4kProfileCodecProfileHigh = "HIGH"

	// Xavc4kProfileCodecProfileHigh422 is a Xavc4kProfileCodecProfile enum value
	Xavc4kProfileCodecProfileHigh422 = "HIGH_422"
)

// Xavc4kProfileCodecProfile_Values returns all elements of the Xavc4kProfileCodecProfile enum
func Xavc4kProfileCodecProfile_Values() []string {
	return []string{Xavc4kProfileCodecProfileHigh, Xavc4kProfileCodecProfileHigh422}
}

// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
// want to trade off encoding speed for output video quality. The default behavior
// is faster, lower quality, single-pass encoding.
const (
	// Xavc4kProfileQualityTuningLevelSinglePass is a Xavc4kProfileQualityTuningLevel enum value
	Xavc4kProfileQualityTuningLevelSinglePass = "SINGLE_PASS"

	// Xavc4kProfileQualityTuningLevelSinglePassHq is a Xavc4kProfileQualityTuningLevel enum value
	Xavc4kProfileQualityTuningLevelSinglePassHq = "SINGLE_PASS_HQ"

	// Xavc4kProfileQualityTuningLevelMultiPassHq is a Xavc4kProfileQualityTuningLevel enum value
	Xavc4kProfileQualityTuningLevelMultiPassHq = "MULTI_PASS_HQ"
)

// Xavc4kProfileQualityTuningLevel_Values returns all elements of the Xavc4kProfileQualityTuningLevel enum
func Xavc4kProfileQualityTuningLevel_Values() []string {
	return []string{
		Xavc4kProfileQualityTuningLevelSinglePass,
		Xavc4kProfileQualityTuningLevelSinglePassHq,
		Xavc4kProfileQualityTuningLevelMultiPassHq,
	}
}

// Keep the default value, Auto (AUTO), for this setting to have MediaConvert
// automatically apply the best types of quantization for your video content.
// When you want to apply your quantization settings manually, you must set
// Adaptive quantization (adaptiveQuantization) to a value other than Auto (AUTO).
// Use this setting to specify the strength of any adaptive quantization filters
// that you enable. If you don't want MediaConvert to do any adaptive quantization
// in this transcode, set Adaptive quantization to Off (OFF). Related settings:
// The value that you choose here applies to the following settings: Flicker
// adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization
// (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization).
const (
	// XavcAdaptiveQuantizationOff is a XavcAdaptiveQuantization enum value
	XavcAdaptiveQuantizationOff = "OFF"

	// XavcAdaptiveQuantizationAuto is a XavcAdaptiveQuantization enum value
	XavcAdaptiveQuantizationAuto = "AUTO"

	// XavcAdaptiveQuantizationLow is a XavcAdaptiveQuantization enum value
	XavcAdaptiveQuantizationLow = "LOW"

	// XavcAdaptiveQuantizationMedium is a XavcAdaptiveQuantization enum value
	XavcAdaptiveQuantizationMedium = "MEDIUM"

	// XavcAdaptiveQuantizationHigh is a XavcAdaptiveQuantization enum value
	XavcAdaptiveQuantizationHigh = "HIGH"

	// XavcAdaptiveQuantizationHigher is a XavcAdaptiveQuantization enum value
	XavcAdaptiveQuantizationHigher = "HIGHER"

	// XavcAdaptiveQuantizationMax is a XavcAdaptiveQuantization enum value
	XavcAdaptiveQuantizationMax = "MAX"
)

// XavcAdaptiveQuantization_Values returns all elements of the XavcAdaptiveQuantization enum
func XavcAdaptiveQuantization_Values() []string {
	return []string{
		XavcAdaptiveQuantizationOff,
		XavcAdaptiveQuantizationAuto,
		XavcAdaptiveQuantizationLow,
		XavcAdaptiveQuantizationMedium,
		XavcAdaptiveQuantizationHigh,
		XavcAdaptiveQuantizationHigher,
		XavcAdaptiveQuantizationMax,
	}
}

// Optional. Choose a specific entropy encoding mode only when you want to override
// XAVC recommendations. If you choose the value auto, MediaConvert uses the
// mode that the XAVC file format specifies given this output's operating point.
const (
	// XavcEntropyEncodingAuto is a XavcEntropyEncoding enum value
	XavcEntropyEncodingAuto = "AUTO"

	// XavcEntropyEncodingCabac is a XavcEntropyEncoding enum value
	XavcEntropyEncodingCabac = "CABAC"

	// XavcEntropyEncodingCavlc is a XavcEntropyEncoding enum value
	XavcEntropyEncodingCavlc = "CAVLC"
)

// XavcEntropyEncoding_Values returns all elements of the XavcEntropyEncoding enum
func XavcEntropyEncoding_Values() []string {
	return []string{XavcEntropyEncodingAuto, XavcEntropyEncodingCabac, XavcEntropyEncodingCavlc}
}

// The best way to set up adaptive quantization is to keep the default value,
// Auto (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization).
// When you do so, MediaConvert automatically applies the best types of quantization
// for your video content. Include this setting in your JSON job specification
// only when you choose to change the default value for Adaptive quantization.
// Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears
// as a visual flicker that can arise when the encoder saves bits by copying
// some macroblocks many times from frame to frame, and then refreshes them
// at the I-frame. When you enable this setting, the encoder updates these macroblocks
// slightly more often to smooth out the flicker. This setting is disabled by
// default. Related setting: In addition to enabling this setting, you must
// also set Adaptive quantization (adaptiveQuantization) to a value other than
// Off (OFF) or Auto (AUTO). Use Adaptive quantization to adjust the degree
// of smoothing that Flicker adaptive quantization provides.
const (
	// XavcFlickerAdaptiveQuantizationDisabled is a XavcFlickerAdaptiveQuantization enum value
	XavcFlickerAdaptiveQuantizationDisabled = "DISABLED"

	// XavcFlickerAdaptiveQuantizationEnabled is a XavcFlickerAdaptiveQuantization enum value
	XavcFlickerAdaptiveQuantizationEnabled = "ENABLED"
)

// XavcFlickerAdaptiveQuantization_Values returns all elements of the XavcFlickerAdaptiveQuantization enum
func XavcFlickerAdaptiveQuantization_Values() []string {
	return []string{XavcFlickerAdaptiveQuantizationDisabled, XavcFlickerAdaptiveQuantizationEnabled}
}

// If you are using the console, use the Frame rate setting to specify the frame
// rate for this output. If you want to keep the same frame rate as the input
// video, choose Follow source. If you want to do frame rate conversion, choose
// a frame rate from the dropdown list. The framerates shown in the dropdown
// list are decimal approximations of fractions. If you are creating your transcoding
// job specification as a JSON file without the console, use FramerateControl
// to specify which value the service uses for the frame rate for this output.
// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate
// from the input. Choose SPECIFIED if you want the service to use the frame
// rate that you specify in the settings FramerateNumerator and FramerateDenominator.
const (
	// XavcFramerateControlInitializeFromSource is a XavcFramerateControl enum value
	XavcFramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE"

	// XavcFramerateControlSpecified is a XavcFramerateControl enum value
	XavcFramerateControlSpecified = "SPECIFIED"
)

// XavcFramerateControl_Values returns all elements of the XavcFramerateControl enum
func XavcFramerateControl_Values() []string {
	return []string{XavcFramerateControlInitializeFromSource, XavcFramerateControlSpecified}
}

// Choose the method that you want MediaConvert to use when increasing or decreasing
// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically
// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions,
// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a
// smooth picture, but might introduce undesirable video artifacts. For complex
// frame rate conversions, especially if your source video has already been
// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do
// motion-compensated interpolation. FrameFormer chooses the best conversion
// method frame by frame. Note that using FrameFormer increases the transcoding
// time and incurs a significant add-on cost.
const (
	// XavcFramerateConversionAlgorithmDuplicateDrop is a XavcFramerateConversionAlgorithm enum value
	XavcFramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP"

	// XavcFramerateConversionAlgorithmInterpolate is a XavcFramerateConversionAlgorithm enum value
	XavcFramerateConversionAlgorithmInterpolate = "INTERPOLATE"

	// XavcFramerateConversionAlgorithmFrameformer is a XavcFramerateConversionAlgorithm enum value
	XavcFramerateConversionAlgorithmFrameformer = "FRAMEFORMER"
)

// XavcFramerateConversionAlgorithm_Values returns all elements of the XavcFramerateConversionAlgorithm enum
func XavcFramerateConversionAlgorithm_Values() []string {
	return []string{
		XavcFramerateConversionAlgorithmDuplicateDrop,
		XavcFramerateConversionAlgorithmInterpolate,
		XavcFramerateConversionAlgorithmFrameformer,
	}
}

// Specify whether the encoder uses B-frames as reference frames for other pictures
// in the same GOP. Choose Allow (ENABLED) to allow the encoder to use B-frames
// as reference frames. Choose Don't allow (DISABLED) to prevent the encoder
// from using B-frames as reference frames.
const (
	// XavcGopBReferenceDisabled is a XavcGopBReference enum value
	XavcGopBReferenceDisabled = "DISABLED"

	// XavcGopBReferenceEnabled is a XavcGopBReference enum value
	XavcGopBReferenceEnabled = "ENABLED"
)

// XavcGopBReference_Values returns all elements of the XavcGopBReference enum
func XavcGopBReference_Values() []string {
	return []string{XavcGopBReferenceDisabled, XavcGopBReferenceEnabled}
}

// Specify the XAVC Intra HD (CBG) Class to set the bitrate of your output.
// Outputs of the same class have similar image quality over the operating points
// that are valid for that class.
const (
	// XavcHdIntraCbgProfileClassClass50 is a XavcHdIntraCbgProfileClass enum value
	XavcHdIntraCbgProfileClassClass50 = "CLASS_50"

	// XavcHdIntraCbgProfileClassClass100 is a XavcHdIntraCbgProfileClass enum value
	XavcHdIntraCbgProfileClassClass100 = "CLASS_100"

	// XavcHdIntraCbgProfileClassClass200 is a XavcHdIntraCbgProfileClass enum value
	XavcHdIntraCbgProfileClassClass200 = "CLASS_200"
)

// XavcHdIntraCbgProfileClass_Values returns all elements of the XavcHdIntraCbgProfileClass enum
func XavcHdIntraCbgProfileClass_Values() []string {
	return []string{
		XavcHdIntraCbgProfileClassClass50,
		XavcHdIntraCbgProfileClassClass100,
		XavcHdIntraCbgProfileClassClass200,
	}
}

// Specify the XAVC HD (Long GOP) Bitrate Class to set the bitrate of your output.
// Outputs of the same class have similar image quality over the operating points
// that are valid for that class.
const (
	// XavcHdProfileBitrateClassBitrateClass25 is a XavcHdProfileBitrateClass enum value
	XavcHdProfileBitrateClassBitrateClass25 = "BITRATE_CLASS_25"

	// XavcHdProfileBitrateClassBitrateClass35 is a XavcHdProfileBitrateClass enum value
	XavcHdProfileBitrateClassBitrateClass35 = "BITRATE_CLASS_35"

	// XavcHdProfileBitrateClassBitrateClass50 is a XavcHdProfileBitrateClass enum value
	XavcHdProfileBitrateClassBitrateClass50 = "BITRATE_CLASS_50"
)

// XavcHdProfileBitrateClass_Values returns all elements of the XavcHdProfileBitrateClass enum
func XavcHdProfileBitrateClass_Values() []string {
	return []string{
		XavcHdProfileBitrateClassBitrateClass25,
		XavcHdProfileBitrateClassBitrateClass35,
		XavcHdProfileBitrateClassBitrateClass50,
	}
}

// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you
// want to trade off encoding speed for output video quality. The default behavior
// is faster, lower quality, single-pass encoding.
const (
	// XavcHdProfileQualityTuningLevelSinglePass is a XavcHdProfileQualityTuningLevel enum value
	XavcHdProfileQualityTuningLevelSinglePass = "SINGLE_PASS"

	// XavcHdProfileQualityTuningLevelSinglePassHq is a XavcHdProfileQualityTuningLevel enum value
	XavcHdProfileQualityTuningLevelSinglePassHq = "SINGLE_PASS_HQ"

	// XavcHdProfileQualityTuningLevelMultiPassHq is a XavcHdProfileQualityTuningLevel enum value
	XavcHdProfileQualityTuningLevelMultiPassHq = "MULTI_PASS_HQ"
)

// XavcHdProfileQualityTuningLevel_Values returns all elements of the XavcHdProfileQualityTuningLevel enum
func XavcHdProfileQualityTuningLevel_Values() []string {
	return []string{
		XavcHdProfileQualityTuningLevelSinglePass,
		XavcHdProfileQualityTuningLevelSinglePassHq,
		XavcHdProfileQualityTuningLevelMultiPassHq,
	}
}

// Ignore this setting unless you set Frame rate (framerateNumerator divided
// by framerateDenominator) to 29.970. If your input framerate is 23.976, choose
// Hard (HARD). Otherwise, keep the default value None (NONE). For more information,
// see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html.
const (
	// XavcHdProfileTelecineNone is a XavcHdProfileTelecine enum value
	XavcHdProfileTelecineNone = "NONE"

	// XavcHdProfileTelecineHard is a XavcHdProfileTelecine enum value
	XavcHdProfileTelecineHard = "HARD"
)

// XavcHdProfileTelecine_Values returns all elements of the XavcHdProfileTelecine enum
func XavcHdProfileTelecine_Values() []string {
	return []string{XavcHdProfileTelecineNone, XavcHdProfileTelecineHard}
}

// Choose the scan line type for the output. Keep the default value, Progressive
// (PROGRESSIVE) to create a progressive output, regardless of the scan type
// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
// to create an output that's interlaced with the same field polarity throughout.
// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD)
// to produce outputs with the same field polarity as the source. For jobs that
// have multiple inputs, the output field polarity might change over the course
// of the output. Follow behavior depends on the input scan type. If the source
// is interlaced, the output will be interlaced with the same polarity as the
// source. If the source is progressive, the output will be interlaced with
// top field bottom field first, depending on which of the Follow options you
// choose.
const (
	// XavcInterlaceModeProgressive is a XavcInterlaceMode enum value
	XavcInterlaceModeProgressive = "PROGRESSIVE"

	// XavcInterlaceModeTopField is a XavcInterlaceMode enum value
	XavcInterlaceModeTopField = "TOP_FIELD"

	// XavcInterlaceModeBottomField is a XavcInterlaceMode enum value
	XavcInterlaceModeBottomField = "BOTTOM_FIELD"

	// XavcInterlaceModeFollowTopField is a XavcInterlaceMode enum value
	XavcInterlaceModeFollowTopField = "FOLLOW_TOP_FIELD"

	// XavcInterlaceModeFollowBottomField is a XavcInterlaceMode enum value
	XavcInterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD"
)

// XavcInterlaceMode_Values returns all elements of the XavcInterlaceMode enum
func XavcInterlaceMode_Values() []string {
	return []string{
		XavcInterlaceModeProgressive,
		XavcInterlaceModeTopField,
		XavcInterlaceModeBottomField,
		XavcInterlaceModeFollowTopField,
		XavcInterlaceModeFollowBottomField,
	}
}

// Specify the XAVC profile for this output. For more information, see the Sony
// documentation at https://www.xavc-info.org/. Note that MediaConvert doesn't
// support the interlaced video XAVC operating points for XAVC_HD_INTRA_CBG.
// To create an interlaced XAVC output, choose the profile XAVC_HD.
const (
	// XavcProfileXavcHdIntraCbg is a XavcProfile enum value
	XavcProfileXavcHdIntraCbg = "XAVC_HD_INTRA_CBG"

	// XavcProfileXavc4kIntraCbg is a XavcProfile enum value
	XavcProfileXavc4kIntraCbg = "XAVC_4K_INTRA_CBG"

	// XavcProfileXavc4kIntraVbr is a XavcProfile enum value
	XavcProfileXavc4kIntraVbr = "XAVC_4K_INTRA_VBR"

	// XavcProfileXavcHd is a XavcProfile enum value
	XavcProfileXavcHd = "XAVC_HD"

	// XavcProfileXavc4k is a XavcProfile enum value
	XavcProfileXavc4k = "XAVC_4K"
)

// XavcProfile_Values returns all elements of the XavcProfile enum
func XavcProfile_Values() []string {
	return []string{
		XavcProfileXavcHdIntraCbg,
		XavcProfileXavc4kIntraCbg,
		XavcProfileXavc4kIntraVbr,
		XavcProfileXavcHd,
		XavcProfileXavc4k,
	}
}

// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
// second (fps). Enable slow PAL to create a 25 fps output by relabeling the
// video frames and resampling your audio. Note that enabling this setting will
// slightly reduce the duration of your video. Related settings: You must also
// set Frame rate to 25. In your JSON job specification, set (framerateControl)
// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to
// 1.
const (
	// XavcSlowPalDisabled is a XavcSlowPal enum value
	XavcSlowPalDisabled = "DISABLED"

	// XavcSlowPalEnabled is a XavcSlowPal enum value
	XavcSlowPalEnabled = "ENABLED"
)

// XavcSlowPal_Values returns all elements of the XavcSlowPal enum
func XavcSlowPal_Values() []string {
	return []string{XavcSlowPalDisabled, XavcSlowPalEnabled}
}

// The best way to set up adaptive quantization is to keep the default value,
// Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization).
// When you do so, MediaConvert automatically applies the best types of quantization
// for your video content. Include this setting in your JSON job specification
// only when you choose to change the default value for Adaptive quantization.
// For this setting, keep the default value, Enabled (ENABLED), to adjust quantization
// within each frame based on spatial variation of content complexity. When
// you enable this feature, the encoder uses fewer bits on areas that can sustain
// more distortion with no noticeable visual degradation and uses more bits
// on areas where any small distortion will be noticeable. For example, complex
// textured blocks are encoded with fewer bits and smooth textured blocks are
// encoded with more bits. Enabling this feature will almost always improve
// your video quality. Note, though, that this feature doesn't take into account
// where the viewer's attention is likely to be. If viewers are likely to be
// focusing their attention on a part of the screen with a lot of complex texture,
// you might choose to disable this feature. Related setting: When you enable
// spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization)
// depending on your content. For homogeneous content, such as cartoons and
// video games, set it to Low. For content with a wider variety of textures,
// set it to High or Higher.
const (
	// XavcSpatialAdaptiveQuantizationDisabled is a XavcSpatialAdaptiveQuantization enum value
	XavcSpatialAdaptiveQuantizationDisabled = "DISABLED"

	// XavcSpatialAdaptiveQuantizationEnabled is a XavcSpatialAdaptiveQuantization enum value
	XavcSpatialAdaptiveQuantizationEnabled = "ENABLED"
)

// XavcSpatialAdaptiveQuantization_Values returns all elements of the XavcSpatialAdaptiveQuantization enum
func XavcSpatialAdaptiveQuantization_Values() []string {
	return []string{XavcSpatialAdaptiveQuantizationDisabled, XavcSpatialAdaptiveQuantizationEnabled}
}

// The best way to set up adaptive quantization is to keep the default value,
// Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization).
// When you do so, MediaConvert automatically applies the best types of quantization
// for your video content. Include this setting in your JSON job specification
// only when you choose to change the default value for Adaptive quantization.
// For this setting, keep the default value, Enabled (ENABLED), to adjust quantization
// within each frame based on temporal variation of content complexity. When
// you enable this feature, the encoder uses fewer bits on areas of the frame
// that aren't moving and uses more bits on complex objects with sharp edges
// that move a lot. For example, this feature improves the readability of text
// tickers on newscasts and scoreboards on sports matches. Enabling this feature
// will almost always improve your video quality. Note, though, that this feature
// doesn't take into account where the viewer's attention is likely to be. If
// viewers are likely to be focusing their attention on a part of the screen
// that doesn't have moving objects with sharp edges, such as sports athletes'
// faces, you might choose to disable this feature. Related setting: When you
// enable temporal adaptive quantization, adjust the strength of the filter
// with the setting Adaptive quantization (adaptiveQuantization).
const (
	// XavcTemporalAdaptiveQuantizationDisabled is a XavcTemporalAdaptiveQuantization enum value
	XavcTemporalAdaptiveQuantizationDisabled = "DISABLED"

	// XavcTemporalAdaptiveQuantizationEnabled is a XavcTemporalAdaptiveQuantization enum value
	XavcTemporalAdaptiveQuantizationEnabled = "ENABLED"
)

// XavcTemporalAdaptiveQuantization_Values returns all elements of the XavcTemporalAdaptiveQuantization enum
func XavcTemporalAdaptiveQuantization_Values() []string {
	return []string{XavcTemporalAdaptiveQuantizationDisabled, XavcTemporalAdaptiveQuantizationEnabled}
}