1// Code generated by smithy-go-codegen DO NOT EDIT.
2
3package types
4
5import (
6	"time"
7)
8
9// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the
10// value AAC. The service accepts one of two mutually exclusive groups of AAC
11// settings--VBR and CBR. To select one of these modes, set the value of Bitrate
12// control mode (rateControlMode) to "VBR" or "CBR". In VBR mode, you control the
13// audio quality with the setting VBR quality (vbrQuality). In CBR mode, you use
14// the setting Bitrate (bitrate). Defaults and valid values depend on the rate
15// control mode.
16type AacSettings struct {
17
18	// Choose BROADCASTER_MIXED_AD when the input contains pre-mixed main audio + audio
19	// description (AD) as a stereo pair. The value for AudioType will be set to 3,
20	// which signals to downstream systems that this stream contains "broadcaster mixed
21	// AD". Note that the input received by the encoder must contain pre-mixed audio;
22	// the encoder does not perform the mixing. When you choose BROADCASTER_MIXED_AD,
23	// the encoder ignores any values you provide in AudioType and
24	// FollowInputAudioType. Choose NORMAL when the input does not contain pre-mixed
25	// audio + audio description (AD). In this case, the encoder will use any values
26	// you provide for AudioType and FollowInputAudioType.
27	AudioDescriptionBroadcasterMix AacAudioDescriptionBroadcasterMix
28
29	// Specify the average bitrate in bits per second. The set of valid values for this
30	// setting is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000, 32000,
31	// 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000,
32	// 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000, 768000,
33	// 896000, 1024000. The value you set is also constrained by the values that you
34	// choose for Profile (codecProfile), Bitrate control mode (codingMode), and Sample
35	// rate (sampleRate). Default values depend on Bitrate control mode and Profile.
36	Bitrate int32
37
38	// AAC Profile.
39	CodecProfile AacCodecProfile
40
41	// Mono (Audio Description), Mono, Stereo, or 5.1 channel layout. Valid values
42	// depend on rate control mode and profile. "1.0 - Audio Description (Receiver
43	// Mix)" setting receives a stereo description plus control track and emits a mono
44	// AAC encode of the description track, with control data emitted in the PES header
45	// as per ETSI TS 101 154 Annex E.
46	CodingMode AacCodingMode
47
48	// Rate Control Mode.
49	RateControlMode AacRateControlMode
50
51	// Enables LATM/LOAS AAC output. Note that if you use LATM/LOAS AAC in an output,
52	// you must choose "No container" for the output container.
53	RawFormat AacRawFormat
54
55	// Sample rate in Hz. Valid values depend on rate control mode and profile.
56	SampleRate int32
57
58	// Use MPEG-2 AAC instead of MPEG-4 AAC audio for raw or MPEG-2 Transport Stream
59	// containers.
60	Specification AacSpecification
61
62	// VBR Quality Level - Only used if rate_control_mode is VBR.
63	VbrQuality AacVbrQuality
64}
65
66// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the
67// value AC3.
68type Ac3Settings struct {
69
70	// Specify the average bitrate in bits per second. Valid bitrates depend on the
71	// coding mode.
72	Bitrate int32
73
74	// Specify the bitstream mode for the AC-3 stream that the encoder emits. For more
75	// information about the AC3 bitstream mode, see ATSC A/52-2012 (Annex E).
76	BitstreamMode Ac3BitstreamMode
77
78	// Dolby Digital coding mode. Determines number of channels.
79	CodingMode Ac3CodingMode
80
81	// Sets the dialnorm for the output. If blank and input audio is Dolby Digital,
82	// dialnorm will be passed through.
83	Dialnorm int32
84
85	// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
86	// uses when encoding the metadata in the Dolby Digital stream for the line
87	// operating mode. Related setting: When you use this setting, MediaConvert ignores
88	// any value you provide for Dynamic range compression profile
89	// (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC
90	// operating modes and profiles, see the Dynamic Range Control chapter of the Dolby
91	// Metadata Guide at
92	// https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
93	DynamicRangeCompressionLine Ac3DynamicRangeCompressionLine
94
95	// When you want to add Dolby dynamic range compression (DRC) signaling to your
96	// output stream, we recommend that you use the mode-specific settings instead of
97	// Dynamic range compression profile (DynamicRangeCompressionProfile). The
98	// mode-specific settings are Dynamic range compression profile, line mode
99	// (dynamicRangeCompressionLine) and Dynamic range compression profile, RF mode
100	// (dynamicRangeCompressionRf). Note that when you specify values for all three
101	// settings, MediaConvert ignores the value of this setting in favor of the
102	// mode-specific settings. If you do use this setting instead of the mode-specific
103	// settings, choose None (NONE) to leave out DRC signaling. Keep the default Film
104	// standard (FILM_STANDARD) to set the profile to Dolby's film standard profile for
105	// all operating modes.
106	DynamicRangeCompressionProfile Ac3DynamicRangeCompressionProfile
107
108	// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
109	// uses when encoding the metadata in the Dolby Digital stream for the RF operating
110	// mode. Related setting: When you use this setting, MediaConvert ignores any value
111	// you provide for Dynamic range compression profile
112	// (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC
113	// operating modes and profiles, see the Dynamic Range Control chapter of the Dolby
114	// Metadata Guide at
115	// https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
116	DynamicRangeCompressionRf Ac3DynamicRangeCompressionRf
117
118	// Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only valid
119	// with 3_2_LFE coding mode.
120	LfeFilter Ac3LfeFilter
121
122	// When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+, or
123	// DolbyE decoder that supplied this audio data. If audio was not supplied from one
124	// of these streams, then the static metadata settings will be used.
125	MetadataControl Ac3MetadataControl
126
127	// This value is always 48000. It represents the sample rate in Hz.
128	SampleRate int32
129}
130
131// Accelerated transcoding can significantly speed up jobs with long, visually
132// complex content.
133type AccelerationSettings struct {
134
135	// Specify the conditions when the service will run your job with accelerated
136	// transcoding.
137	//
138	// This member is required.
139	Mode AccelerationMode
140}
141
142// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the
143// value AIFF.
144type AiffSettings struct {
145
146	// Specify Bit depth (BitDepth), in bits per sample, to choose the encoding quality
147	// for this audio track.
148	BitDepth int32
149
150	// Specify the number of channels in this output audio track. Valid values are 1
151	// and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64.
152	Channels int32
153
154	// Sample rate in hz.
155	SampleRate int32
156}
157
158// Settings for ancillary captions source.
159type AncillarySourceSettings struct {
160
161	// Specify whether this set of input captions appears in your outputs in both 608
162	// and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the
163	// captions data in two ways: it passes the 608 data through using the 608
164	// compatibility bytes fields of the 708 wrapper, and it also translates the 608
165	// data into 708.
166	Convert608To708 AncillaryConvert608To708
167
168	// Specifies the 608 channel number in the ancillary data track from which to
169	// extract captions. Unused for passthrough.
170	SourceAncillaryChannelNumber int32
171
172	// By default, the service terminates any unterminated captions at the end of each
173	// input. If you want the caption to continue onto your next input, disable this
174	// setting.
175	TerminateCaptions AncillaryTerminateCaptions
176}
177
178// When you mimic a multi-channel audio layout with multiple mono-channel tracks,
179// you can tag each channel layout manually. For example, you would tag the tracks
180// that contain your left, right, and center audio with Left (L), Right (R), and
181// Center (C), respectively. When you don't specify a value, MediaConvert labels
182// your track as Center (C) by default. To use audio layout tagging, your output
183// must be in a QuickTime (.mov) container; your audio codec must be AAC, WAV, or
184// AIFF; and you must set up your audio track to have only one channel.
185type AudioChannelTaggingSettings struct {
186
187	// You can add a tag for this mono-channel audio track to mimic its placement in a
188	// multi-channel layout. For example, if this track is the left surround channel,
189	// choose Left surround (LS).
190	ChannelTag AudioChannelTag
191}
192
193// Settings related to audio encoding. The settings in this group vary depending on
194// the value that you choose for your audio codec.
195type AudioCodecSettings struct {
196
197	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the
198	// value AAC. The service accepts one of two mutually exclusive groups of AAC
199	// settings--VBR and CBR. To select one of these modes, set the value of Bitrate
200	// control mode (rateControlMode) to "VBR" or "CBR". In VBR mode, you control the
201	// audio quality with the setting VBR quality (vbrQuality). In CBR mode, you use
202	// the setting Bitrate (bitrate). Defaults and valid values depend on the rate
203	// control mode.
204	AacSettings *AacSettings
205
206	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the
207	// value AC3.
208	Ac3Settings *Ac3Settings
209
210	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the
211	// value AIFF.
212	AiffSettings *AiffSettings
213
214	// Choose the audio codec for this output. Note that the option Dolby Digital
215	// passthrough (PASSTHROUGH) applies only to Dolby Digital and Dolby Digital Plus
216	// audio inputs. Make sure that you choose a codec that's supported with your
217	// output container:
218	// https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio
219	// For audio-only outputs, make sure that both your input audio codec and your
220	// output audio codec are supported for audio-only workflows. For more information,
221	// see:
222	// https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only
223	// and
224	// https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output
225	Codec AudioCodec
226
227	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the
228	// value EAC3_ATMOS.
229	Eac3AtmosSettings *Eac3AtmosSettings
230
231	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the
232	// value EAC3.
233	Eac3Settings *Eac3Settings
234
235	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the
236	// value MP2.
237	Mp2Settings *Mp2Settings
238
239	// Required when you set Codec, under AudioDescriptions>CodecSettings, to the value
240	// MP3.
241	Mp3Settings *Mp3Settings
242
243	// Required when you set Codec, under AudioDescriptions>CodecSettings, to the value
244	// OPUS.
245	OpusSettings *OpusSettings
246
247	// Required when you set Codec, under AudioDescriptions>CodecSettings, to the value
248	// Vorbis.
249	VorbisSettings *VorbisSettings
250
251	// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the
252	// value WAV.
253	WavSettings *WavSettings
254}
255
256// Settings related to one audio tab on the MediaConvert console. In your job JSON,
257// an instance of AudioDescription is equivalent to one audio tab in the console.
258// Usually, one audio tab corresponds to one output audio track. Depending on how
259// you set up your input audio selectors and whether you use audio selector groups,
260// one audio tab can correspond to a group of output audio tracks.
261type AudioDescription struct {
262
263	// When you mimic a multi-channel audio layout with multiple mono-channel tracks,
264	// you can tag each channel layout manually. For example, you would tag the tracks
265	// that contain your left, right, and center audio with Left (L), Right (R), and
266	// Center (C), respectively. When you don't specify a value, MediaConvert labels
267	// your track as Center (C) by default. To use audio layout tagging, your output
268	// must be in a QuickTime (.mov) container; your audio codec must be AAC, WAV, or
269	// AIFF; and you must set up your audio track to have only one channel.
270	AudioChannelTaggingSettings *AudioChannelTaggingSettings
271
272	// Advanced audio normalization settings. Ignore these settings unless you need to
273	// comply with a loudness standard.
274	AudioNormalizationSettings *AudioNormalizationSettings
275
276	// Specifies which audio data to use from each input. In the simplest case, specify
277	// an "Audio Selector":#inputs-audio_selector by name based on its order within
278	// each input. For example if you specify "Audio Selector 3", then the third audio
279	// selector will be used from each input. If an input does not have an "Audio
280	// Selector 3", then the audio selector marked as "default" in that input will be
281	// used. If there is no audio selector marked as "default", silence will be
282	// inserted for the duration of that input. Alternatively, an "Audio Selector
283	// Group":#inputs-audio_selector_group name may be specified, with similar
284	// default/silence behavior. If no audio_source_name is specified, then "Audio
285	// Selector 1" will be chosen automatically.
286	AudioSourceName *string
287
288	// Applies only if Follow Input Audio Type is unchecked (false). A number between 0
289	// and 255. The following are defined in ISO-IEC 13818-1: 0 = Undefined, 1 = Clean
290	// Effects, 2 = Hearing Impaired, 3 = Visually Impaired Commentary, 4-255 =
291	// Reserved.
292	AudioType int32
293
294	// When set to FOLLOW_INPUT, if the input contains an ISO 639 audio_type, then that
295	// value is passed through to the output. If the input contains no ISO 639
296	// audio_type, the value in Audio Type is included in the output. Otherwise the
297	// value in Audio Type is included in the output. Note that this field and
298	// audioType are both ignored if audioDescriptionBroadcasterMix is set to
299	// BROADCASTER_MIXED_AD.
300	AudioTypeControl AudioTypeControl
301
302	// Settings related to audio encoding. The settings in this group vary depending on
303	// the value that you choose for your audio codec.
304	CodecSettings *AudioCodecSettings
305
306	// Specify the language for this audio output track. The service puts this language
307	// code into your output audio track when you set Language code control
308	// (AudioLanguageCodeControl) to Use configured (USE_CONFIGURED). The service also
309	// uses your specified custom language code when you set Language code control
310	// (AudioLanguageCodeControl) to Follow input (FOLLOW_INPUT), but your input file
311	// doesn't specify a language code. For all outputs, you can use an ISO 639-2 or
312	// ISO 639-3 code. For streaming outputs, you can also use any other code in the
313	// full RFC-5646 specification. Streaming outputs are those that are in one of the
314	// following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth
315	// Streaming.
316	CustomLanguageCode *string
317
318	// Indicates the language of the audio output track. The ISO 639 language specified
319	// in the 'Language Code' drop down will be used when 'Follow Input Language Code'
320	// is not selected or when 'Follow Input Language Code' is selected but there is no
321	// ISO 639 language code specified by the input.
322	LanguageCode LanguageCode
323
324	// Specify which source for language code takes precedence for this audio track.
325	// When you choose Follow input (FOLLOW_INPUT), the service uses the language code
326	// from the input track if it's present. If there's no languge code on the input
327	// track, the service uses the code that you specify in the setting Language code
328	// (languageCode or customLanguageCode). When you choose Use configured
329	// (USE_CONFIGURED), the service uses the language code that you specify.
330	LanguageCodeControl AudioLanguageCodeControl
331
332	// Advanced audio remixing settings.
333	RemixSettings *RemixSettings
334
335	// Specify a label for this output audio stream. For example, "English", "Director
336	// commentary", or "track_2". For streaming outputs, MediaConvert passes this
337	// information into destination manifests for display on the end-viewer's player
338	// device. For outputs in other output groups, the service ignores this setting.
339	StreamName *string
340}
341
342// Advanced audio normalization settings. Ignore these settings unless you need to
343// comply with a loudness standard.
344type AudioNormalizationSettings struct {
345
346	// Choose one of the following audio normalization algorithms: ITU-R BS.1770-1:
347	// Ungated loudness. A measurement of ungated average loudness for an entire piece
348	// of content, suitable for measurement of short-form content under ATSC
349	// recommendation A/85. Supports up to 5.1 audio channels. ITU-R BS.1770-2: Gated
350	// loudness. A measurement of gated average loudness compliant with the
351	// requirements of EBU-R128. Supports up to 5.1 audio channels. ITU-R BS.1770-3:
352	// Modified peak. The same loudness measurement algorithm as 1770-2, with an
353	// updated true peak measurement. ITU-R BS.1770-4: Higher channel count. Allows for
354	// more audio channels than the other algorithms, including configurations such as
355	// 7.1.
356	Algorithm AudioNormalizationAlgorithm
357
358	// When enabled the output audio is corrected using the chosen algorithm. If
359	// disabled, the audio will be measured but not adjusted.
360	AlgorithmControl AudioNormalizationAlgorithmControl
361
362	// Content measuring above this level will be corrected to the target level.
363	// Content measuring below this level will not be corrected.
364	CorrectionGateLevel int32
365
366	// If set to LOG, log each output's audio track loudness to a CSV file.
367	LoudnessLogging AudioNormalizationLoudnessLogging
368
369	// If set to TRUE_PEAK, calculate and log the TruePeak for each output's audio
370	// track loudness.
371	PeakCalculation AudioNormalizationPeakCalculation
372
373	// When you use Audio normalization (AudioNormalizationSettings), optionally use
374	// this setting to specify a target loudness. If you don't specify a value here,
375	// the encoder chooses a value for you, based on the algorithm that you choose for
376	// Algorithm (algorithm). If you choose algorithm 1770-1, the encoder will choose
377	// -24 LKFS; otherwise, the encoder will choose -23 LKFS.
378	TargetLkfs float64
379}
380
381// Use Audio selectors (AudioSelectors) to specify a track or set of tracks from
382// the input that you will use in your outputs. You can use multiple Audio
383// selectors per input.
384type AudioSelector struct {
385
386	// Selects a specific language code from within an audio source, using the ISO
387	// 639-2 or ISO 639-3 three-letter language code
388	CustomLanguageCode *string
389
390	// Enable this setting on one audio selector to set it as the default for the job.
391	// The service uses this default for outputs where it can't find the specified
392	// input audio. If you don't set a default, those outputs have no audio.
393	DefaultSelection AudioDefaultSelection
394
395	// Specifies audio data from an external file source.
396	ExternalAudioFileInput *string
397
398	// Selects a specific language code from within an audio source.
399	LanguageCode LanguageCode
400
401	// Specifies a time delta in milliseconds to offset the audio from the input video.
402	Offset int32
403
404	// Selects a specific PID from within an audio source (e.g. 257 selects PID 0x101).
405	Pids []int32
406
407	// Use this setting for input streams that contain Dolby E, to have the service
408	// extract specific program data from the track. To select multiple programs,
409	// create multiple selectors with the same Track and different Program numbers. In
410	// the console, this setting is visible when you set Selector type to Track. Choose
411	// the program number from the dropdown list. If you are sending a JSON file,
412	// provide the program ID, which is part of the audio metadata. If your input file
413	// has incorrect metadata, you can choose All channels instead of a program number
414	// to have the service ignore the program IDs and include all the programs in the
415	// track.
416	ProgramSelection int32
417
418	// Use these settings to reorder the audio channels of one input to match those of
419	// another input. This allows you to combine the two files into a single output,
420	// one after the other.
421	RemixSettings *RemixSettings
422
423	// Specifies the type of the audio selector.
424	SelectorType AudioSelectorType
425
426	// Identify a track from the input audio to include in this selector by entering
427	// the track index number. To include several tracks in a single audio selector,
428	// specify multiple tracks as follows. Using the console, enter a comma-separated
429	// list. For examle, type "1,2,3" to include tracks 1 through 3. Specifying
430	// directly in your JSON job file, provide the track numbers in an array. For
431	// example, "tracks": [1,2,3].
432	Tracks []int32
433}
434
435// Use audio selector groups to combine multiple sidecar audio inputs so that you
436// can assign them to a single output audio tab (AudioDescription). Note that, if
437// you're working with embedded audio, it's simpler to assign multiple input tracks
438// into a single audio selector rather than use an audio selector group.
439type AudioSelectorGroup struct {
440
441	// Name of an Audio Selector within the same input to include in the group. Audio
442	// selector names are standardized, based on their order within the input (e.g.,
443	// "Audio Selector 1"). The audio selector name parameter can be repeated to add
444	// any number of audio selectors to the group.
445	AudioSelectorNames []string
446}
447
448// Use automated ABR to have MediaConvert set up the renditions in your ABR package
449// for you automatically, based on characteristics of your input video. This
450// feature optimizes video quality while minimizing the overall size of your ABR
451// package.
452type AutomatedAbrSettings struct {
453
454	// Optional. The maximum target bit rate used in your automated ABR stack. Use this
455	// value to set an upper limit on the bandwidth consumed by the highest-quality
456	// rendition. This is the rendition that is delivered to viewers with the fastest
457	// internet connections. If you don't specify a value, MediaConvert uses 8,000,000
458	// (8 mb/s) by default.
459	MaxAbrBitrate int32
460
461	// Optional. The maximum number of renditions that MediaConvert will create in your
462	// automated ABR stack. The number of renditions is determined automatically, based
463	// on analysis of each job, but will never exceed this limit. When you set this to
464	// Auto in the console, which is equivalent to excluding it from your JSON job
465	// specification, MediaConvert defaults to a limit of 15.
466	MaxRenditions int32
467
468	// Optional. The minimum target bitrate used in your automated ABR stack. Use this
469	// value to set a lower limit on the bitrate of video delivered to viewers with
470	// slow internet connections. If you don't specify a value, MediaConvert uses
471	// 600,000 (600 kb/s) by default.
472	MinAbrBitrate int32
473}
474
475// Use automated encoding to have MediaConvert choose your encoding settings for
476// you, based on characteristics of your input video.
477type AutomatedEncodingSettings struct {
478
479	// Use automated ABR to have MediaConvert set up the renditions in your ABR package
480	// for you automatically, based on characteristics of your input video. This
481	// feature optimizes video quality while minimizing the overall size of your ABR
482	// package.
483	AbrSettings *AutomatedAbrSettings
484}
485
486// Settings for quality-defined variable bitrate encoding with the AV1 codec.
487// Required when you set Rate control mode to QVBR. Not valid when you set Rate
488// control mode to a value other than QVBR, or when you don't define Rate control
489// mode.
490type Av1QvbrSettings struct {
491
492	// Required when you use QVBR rate control mode. That is, when you specify
493	// qvbrSettings within av1Settings. Specify the general target quality level for
494	// this output, from 1 to 10. Use higher numbers for greater quality. Level 10
495	// results in nearly lossless compression. The quality level for most
496	// broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value
497	// between whole numbers, also provide a value for the setting
498	// qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be
499	// 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.
500	QvbrQualityLevel int32
501
502	// Optional. Specify a value here to set the QVBR quality to a level that is
503	// between whole numbers. For example, if you want your QVBR quality level to be
504	// 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.
505	// MediaConvert rounds your QVBR quality level to the nearest third of a whole
506	// number. For example, if you set qvbrQualityLevel to 7 and you set
507	// qvbrQualityLevelFineTune to .25, your actual QVBR quality level is 7.33.
508	QvbrQualityLevelFineTune float64
509}
510
511// Required when you set Codec, under VideoDescription>CodecSettings to the value
512// AV1.
513type Av1Settings struct {
514
515	// Specify the strength of any adaptive quantization filters that you enable. The
516	// value that you choose here applies to Spatial adaptive quantization
517	// (spatialAdaptiveQuantization).
518	AdaptiveQuantization Av1AdaptiveQuantization
519
520	// If you are using the console, use the Framerate setting to specify the frame
521	// rate for this output. If you want to keep the same frame rate as the input
522	// video, choose Follow source. If you want to do frame rate conversion, choose a
523	// frame rate from the dropdown list or choose Custom. The framerates shown in the
524	// dropdown list are decimal approximations of fractions. If you choose Custom,
525	// specify your frame rate as a fraction. If you are creating your transcoding job
526	// specification as a JSON file without the console, use FramerateControl to
527	// specify which value the service uses for the frame rate for this output. Choose
528	// INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the
529	// input. Choose SPECIFIED if you want the service to use the frame rate you
530	// specify in the settings FramerateNumerator and FramerateDenominator.
531	FramerateControl Av1FramerateControl
532
533	// Choose the method that you want MediaConvert to use when increasing or
534	// decreasing the frame rate. We recommend using drop duplicate (DUPLICATE_DROP)
535	// for numerically simple conversions, such as 60 fps to 30 fps. For numerically
536	// complex conversions, you can use interpolate (INTERPOLATE) to avoid stutter.
537	// This results in a smooth picture, but might introduce undesirable video
538	// artifacts. For complex frame rate conversions, especially if your source video
539	// has already been converted from its original cadence, use FrameFormer
540	// (FRAMEFORMER) to do motion-compensated interpolation. FrameFormer chooses the
541	// best conversion method frame by frame. Note that using FrameFormer increases the
542	// transcoding time and incurs a significant add-on cost.
543	FramerateConversionAlgorithm Av1FramerateConversionAlgorithm
544
545	// When you use the API for transcode jobs that use frame rate conversion, specify
546	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
547	// FramerateDenominator to specify the denominator of this fraction. In this
548	// example, use 1001 for the value of FramerateDenominator. When you use the
549	// console for transcode jobs that use frame rate conversion, provide the value as
550	// a decimal number for Framerate. In this example, specify 23.976.
551	FramerateDenominator int32
552
553	// When you use the API for transcode jobs that use frame rate conversion, specify
554	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
555	// FramerateNumerator to specify the numerator of this fraction. In this example,
556	// use 24000 for the value of FramerateNumerator. When you use the console for
557	// transcode jobs that use frame rate conversion, provide the value as a decimal
558	// number for Framerate. In this example, specify 23.976.
559	FramerateNumerator int32
560
561	// Specify the GOP length (keyframe interval) in frames. With AV1, MediaConvert
562	// doesn't support GOP length in seconds. This value must be greater than zero and
563	// preferably equal to 1 + ((numberBFrames + 1) * x), where x is an integer value.
564	GopSize float64
565
566	// Maximum bitrate in bits/second. For example, enter five megabits per second as
567	// 5000000. Required when Rate control mode is QVBR.
568	MaxBitrate int32
569
570	// Specify from the number of B-frames, in the range of 0-15. For AV1 encoding, we
571	// recommend using 7 or 15. Choose a larger number for a lower bitrate and smaller
572	// file size; choose a smaller number for better video quality.
573	NumberBFramesBetweenReferenceFrames int32
574
575	// Settings for quality-defined variable bitrate encoding with the AV1 codec.
576	// Required when you set Rate control mode to QVBR. Not valid when you set Rate
577	// control mode to a value other than QVBR, or when you don't define Rate control
578	// mode.
579	QvbrSettings *Av1QvbrSettings
580
581	// 'With AV1 outputs, for rate control mode, MediaConvert supports only
582	// quality-defined variable bitrate (QVBR). You can''t use CBR or VBR.'
583	RateControlMode Av1RateControlMode
584
585	// Specify the number of slices per picture. This value must be 1, 2, 4, 8, 16, or
586	// 32. For progressive pictures, this value must be less than or equal to the
587	// number of macroblock rows. For interlaced pictures, this value must be less than
588	// or equal to half the number of macroblock rows.
589	Slices int32
590
591	// Keep the default value, Enabled (ENABLED), to adjust quantization within each
592	// frame based on spatial variation of content complexity. When you enable this
593	// feature, the encoder uses fewer bits on areas that can sustain more distortion
594	// with no noticeable visual degradation and uses more bits on areas where any
595	// small distortion will be noticeable. For example, complex textured blocks are
596	// encoded with fewer bits and smooth textured blocks are encoded with more bits.
597	// Enabling this feature will almost always improve your video quality. Note,
598	// though, that this feature doesn't take into account where the viewer's attention
599	// is likely to be. If viewers are likely to be focusing their attention on a part
600	// of the screen with a lot of complex texture, you might choose to disable this
601	// feature. Related setting: When you enable spatial adaptive quantization, set the
602	// value for Adaptive quantization (adaptiveQuantization) depending on your
603	// content. For homogeneous content, such as cartoons and video games, set it to
604	// Low. For content with a wider variety of textures, set it to High or Higher.
605	SpatialAdaptiveQuantization Av1SpatialAdaptiveQuantization
606}
607
608// Use ad avail blanking settings to specify your output content during SCTE-35
609// triggered ad avails. You can blank your video or overlay it with an image.
610// MediaConvert also removes any audio and embedded captions during the ad avail.
611// For more information, see
612// https://docs.aws.amazon.com/mediaconvert/latest/ug/ad-avail-blanking.html.
613type AvailBlanking struct {
614
615	// Blanking image to be used. Leave empty for solid black. Only bmp and png images
616	// are supported.
617	AvailBlankingImage *string
618}
619
620// Required when you choose AVC-Intra for your output video codec. For more
621// information about the AVC-Intra settings, see the relevant specification. For
622// detailed information about SD and HD in AVC-Intra, see
623// https://ieeexplore.ieee.org/document/7290936. For information about 4K/2K in
624// AVC-Intra, see https://pro-av.panasonic.net/en/avc-ultra/AVC-ULTRAoverview.pdf.
625type AvcIntraSettings struct {
626
627	// Specify the AVC-Intra class of your output. The AVC-Intra class selection
628	// determines the output video bit rate depending on the frame rate of the output.
629	// Outputs with higher class values have higher bitrates and improved image
630	// quality. Note that for Class 4K/2K, MediaConvert supports only 4:2:2 chroma
631	// subsampling.
632	AvcIntraClass AvcIntraClass
633
634	// Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K
635	// (CLASS_4K_2K). When you set AVC-Intra class to a different value, this object
636	// isn't allowed.
637	AvcIntraUhdSettings *AvcIntraUhdSettings
638
639	// If you are using the console, use the Framerate setting to specify the frame
640	// rate for this output. If you want to keep the same frame rate as the input
641	// video, choose Follow source. If you want to do frame rate conversion, choose a
642	// frame rate from the dropdown list or choose Custom. The framerates shown in the
643	// dropdown list are decimal approximations of fractions. If you choose Custom,
644	// specify your frame rate as a fraction. If you are creating your transcoding job
645	// specification as a JSON file without the console, use FramerateControl to
646	// specify which value the service uses for the frame rate for this output. Choose
647	// INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the
648	// input. Choose SPECIFIED if you want the service to use the frame rate you
649	// specify in the settings FramerateNumerator and FramerateDenominator.
650	FramerateControl AvcIntraFramerateControl
651
652	// Choose the method that you want MediaConvert to use when increasing or
653	// decreasing the frame rate. We recommend using drop duplicate (DUPLICATE_DROP)
654	// for numerically simple conversions, such as 60 fps to 30 fps. For numerically
655	// complex conversions, you can use interpolate (INTERPOLATE) to avoid stutter.
656	// This results in a smooth picture, but might introduce undesirable video
657	// artifacts. For complex frame rate conversions, especially if your source video
658	// has already been converted from its original cadence, use FrameFormer
659	// (FRAMEFORMER) to do motion-compensated interpolation. FrameFormer chooses the
660	// best conversion method frame by frame. Note that using FrameFormer increases the
661	// transcoding time and incurs a significant add-on cost.
662	FramerateConversionAlgorithm AvcIntraFramerateConversionAlgorithm
663
664	// When you use the API for transcode jobs that use frame rate conversion, specify
665	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
666	// FramerateDenominator to specify the denominator of this fraction. In this
667	// example, use 1001 for the value of FramerateDenominator. When you use the
668	// console for transcode jobs that use frame rate conversion, provide the value as
669	// a decimal number for Framerate. In this example, specify 23.976.
670	FramerateDenominator int32
671
672	// When you use the API for transcode jobs that use frame rate conversion, specify
673	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
674	// FramerateNumerator to specify the numerator of this fraction. In this example,
675	// use 24000 for the value of FramerateNumerator. When you use the console for
676	// transcode jobs that use frame rate conversion, provide the value as a decimal
677	// number for Framerate. In this example, specify 23.976.
678	FramerateNumerator int32
679
680	// Choose the scan line type for the output. Keep the default value, Progressive
681	// (PROGRESSIVE) to create a progressive output, regardless of the scan type of
682	// your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
683	// to create an output that's interlaced with the same field polarity throughout.
684	// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom
685	// (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the
686	// source. For jobs that have multiple inputs, the output field polarity might
687	// change over the course of the output. Follow behavior depends on the input scan
688	// type. If the source is interlaced, the output will be interlaced with the same
689	// polarity as the source. If the source is progressive, the output will be
690	// interlaced with top field bottom field first, depending on which of the Follow
691	// options you choose.
692	InterlaceMode AvcIntraInterlaceMode
693
694	// Use this setting for interlaced outputs, when your output frame rate is half of
695	// your input frame rate. In this situation, choose Optimized interlacing
696	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
697	// case, each progressive frame from the input corresponds to an interlaced field
698	// in the output. Keep the default value, Basic interlacing (INTERLACED), for all
699	// other output frame rates. With basic interlacing, MediaConvert performs any
700	// frame rate conversion first and then interlaces the frames. When you choose
701	// Optimized interlacing and you set your output frame rate to a value that isn't
702	// suitable for optimized interlacing, MediaConvert automatically falls back to
703	// basic interlacing. Required settings: To use optimized interlacing, you must set
704	// Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized
705	// interlacing for hard telecine outputs. You must also set Interlace mode
706	// (interlaceMode) to a value other than Progressive (PROGRESSIVE).
707	ScanTypeConversionMode AvcIntraScanTypeConversionMode
708
709	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
710	// second (fps). Enable slow PAL to create a 25 fps output. When you enable slow
711	// PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio
712	// to keep it synchronized with the video. Note that enabling this setting will
713	// slightly reduce the duration of your video. Required settings: You must also set
714	// Framerate to 25. In your JSON job specification, set (framerateControl) to
715	// (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.
716	SlowPal AvcIntraSlowPal
717
718	// When you do frame rate conversion from 23.976 frames per second (fps) to 29.97
719	// fps, and your output scan type is interlaced, you can optionally enable hard
720	// telecine (HARD) to create a smoother picture. When you keep the default value,
721	// None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without
722	// doing anything with the field polarity to create a smoother picture.
723	Telecine AvcIntraTelecine
724}
725
726// Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K
727// (CLASS_4K_2K). When you set AVC-Intra class to a different value, this object
728// isn't allowed.
729type AvcIntraUhdSettings struct {
730
731	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how many
732	// transcoding passes MediaConvert does with your video. When you choose Multi-pass
733	// (MULTI_PASS), your video quality is better and your output bitrate is more
734	// accurate. That is, the actual bitrate of your output is closer to the target
735	// bitrate defined in the specification. When you choose Single-pass (SINGLE_PASS),
736	// your encoding time is faster. The default behavior is Single-pass (SINGLE_PASS).
737	QualityTuningLevel AvcIntraUhdQualityTuningLevel
738}
739
740// Settings related to burn-in captions. Set up burn-in captions in the same output
741// as your video. For more information, see
742// https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html.
743// When you work directly in your JSON job specification, include this object and
744// any required children when you set destinationType to BURN_IN.
745type BurninDestinationSettings struct {
746
747	// If no explicit x_position or y_position is provided, setting alignment to
748	// centered will place the captions at the bottom center of the output. Similarly,
749	// setting a left alignment will align captions to the bottom left of the output.
750	// If x and y positions are given in conjunction with the alignment parameter, the
751	// font will be justified (either left or centered) relative to those coordinates.
752	// This option is not valid for source captions that are STL, 608/embedded or
753	// teletext. These source settings are already pre-defined by the caption stream.
754	// All burn-in and DVB-Sub font settings must match.
755	Alignment BurninSubtitleAlignment
756
757	// Specifies the color of the rectangle behind the captions. All burn-in and
758	// DVB-Sub font settings must match.
759	BackgroundColor BurninSubtitleBackgroundColor
760
761	// Specifies the opacity of the background rectangle. 255 is opaque; 0 is
762	// transparent. Leaving this parameter blank is equivalent to setting it to 0
763	// (transparent). All burn-in and DVB-Sub font settings must match.
764	BackgroundOpacity int32
765
766	// Specifies the color of the burned-in captions. This option is not valid for
767	// source captions that are STL, 608/embedded or teletext. These source settings
768	// are already pre-defined by the caption stream. All burn-in and DVB-Sub font
769	// settings must match.
770	FontColor BurninSubtitleFontColor
771
772	// Specifies the opacity of the burned-in captions. 255 is opaque; 0 is
773	// transparent. All burn-in and DVB-Sub font settings must match.
774	FontOpacity int32
775
776	// Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and
777	// DVB-Sub font settings must match.
778	FontResolution int32
779
780	// Provide the font script, using an ISO 15924 script code, if the LanguageCode is
781	// not sufficient for determining the script type. Where LanguageCode or
782	// CustomLanguageCode is sufficient, use "AUTOMATIC" or leave unset. This is used
783	// to help determine the appropriate font for rendering burn-in captions.
784	FontScript FontScript
785
786	// A positive integer indicates the exact font size in points. Set to 0 for
787	// automatic font size selection. All burn-in and DVB-Sub font settings must match.
788	FontSize int32
789
790	// Specifies font outline color. This option is not valid for source captions that
791	// are either 608/embedded or teletext. These source settings are already
792	// pre-defined by the caption stream. All burn-in and DVB-Sub font settings must
793	// match.
794	OutlineColor BurninSubtitleOutlineColor
795
796	// Specifies font outline size in pixels. This option is not valid for source
797	// captions that are either 608/embedded or teletext. These source settings are
798	// already pre-defined by the caption stream. All burn-in and DVB-Sub font settings
799	// must match.
800	OutlineSize int32
801
802	// Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub
803	// font settings must match.
804	ShadowColor BurninSubtitleShadowColor
805
806	// Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving
807	// this parameter blank is equivalent to setting it to 0 (transparent). All burn-in
808	// and DVB-Sub font settings must match.
809	ShadowOpacity int32
810
811	// Specifies the horizontal offset of the shadow relative to the captions in
812	// pixels. A value of -2 would result in a shadow offset 2 pixels to the left. All
813	// burn-in and DVB-Sub font settings must match.
814	ShadowXOffset int32
815
816	// Specifies the vertical offset of the shadow relative to the captions in pixels.
817	// A value of -2 would result in a shadow offset 2 pixels above the text. All
818	// burn-in and DVB-Sub font settings must match.
819	ShadowYOffset int32
820
821	// Only applies to jobs with input captions in Teletext or STL formats. Specify
822	// whether the spacing between letters in your captions is set by the captions grid
823	// or varies depending on letter width. Choose fixed grid to conform to the spacing
824	// specified in the captions file more accurately. Choose proportional to make the
825	// text easier to read if the captions are closed caption.
826	TeletextSpacing BurninSubtitleTeletextSpacing
827
828	// Specifies the horizontal position of the caption relative to the left side of
829	// the output in pixels. A value of 10 would result in the captions starting 10
830	// pixels from the left of the output. If no explicit x_position is provided, the
831	// horizontal caption position will be determined by the alignment parameter. This
832	// option is not valid for source captions that are STL, 608/embedded or teletext.
833	// These source settings are already pre-defined by the caption stream. All burn-in
834	// and DVB-Sub font settings must match.
835	XPosition int32
836
837	// Specifies the vertical position of the caption relative to the top of the output
838	// in pixels. A value of 10 would result in the captions starting 10 pixels from
839	// the top of the output. If no explicit y_position is provided, the caption will
840	// be positioned towards the bottom of the output. This option is not valid for
841	// source captions that are STL, 608/embedded or teletext. These source settings
842	// are already pre-defined by the caption stream. All burn-in and DVB-Sub font
843	// settings must match.
844	YPosition int32
845}
846
847// This object holds groups of settings related to captions for one output. For
848// each output that has captions, include one instance of CaptionDescriptions.
849type CaptionDescription struct {
850
851	// Specifies which "Caption Selector":#inputs-caption_selector to use from each
852	// input when generating captions. The name should be of the format "Caption
853	// Selector ", which denotes that the Nth Caption Selector will be used from each
854	// input.
855	CaptionSelectorName *string
856
857	// Specify the language for this captions output track. For most captions output
858	// formats, the encoder puts this language information in the output captions
859	// metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses
860	// this language information when automatically selecting the font script for
861	// rendering the captions text. For all outputs, you can use an ISO 639-2 or ISO
862	// 639-3 code. For streaming outputs, you can also use any other code in the full
863	// RFC-5646 specification. Streaming outputs are those that are in one of the
864	// following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth
865	// Streaming.
866	CustomLanguageCode *string
867
868	// Settings related to one captions tab on the MediaConvert console. In your job
869	// JSON, an instance of captions DestinationSettings is equivalent to one captions
870	// tab in the console. Usually, one captions tab corresponds to one output captions
871	// track. Depending on your output captions format, one tab might correspond to a
872	// set of output captions tracks. For more information, see
873	// https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html.
874	DestinationSettings *CaptionDestinationSettings
875
876	// Specify the language of this captions output track. For most captions output
877	// formats, the encoder puts this language information in the output captions
878	// metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses
879	// this language information to choose the font language for rendering the captions
880	// text.
881	LanguageCode LanguageCode
882
883	// Specify a label for this set of output captions. For example, "English",
884	// "Director commentary", or "track_2". For streaming outputs, MediaConvert passes
885	// this information into destination manifests for display on the end-viewer's
886	// player device. For outputs in other output groups, the service ignores this
887	// setting.
888	LanguageDescription *string
889}
890
891// Caption Description for preset
892type CaptionDescriptionPreset struct {
893
894	// Specify the language for this captions output track. For most captions output
895	// formats, the encoder puts this language information in the output captions
896	// metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses
897	// this language information when automatically selecting the font script for
898	// rendering the captions text. For all outputs, you can use an ISO 639-2 or ISO
899	// 639-3 code. For streaming outputs, you can also use any other code in the full
900	// RFC-5646 specification. Streaming outputs are those that are in one of the
901	// following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth
902	// Streaming.
903	CustomLanguageCode *string
904
905	// Settings related to one captions tab on the MediaConvert console. In your job
906	// JSON, an instance of captions DestinationSettings is equivalent to one captions
907	// tab in the console. Usually, one captions tab corresponds to one output captions
908	// track. Depending on your output captions format, one tab might correspond to a
909	// set of output captions tracks. For more information, see
910	// https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html.
911	DestinationSettings *CaptionDestinationSettings
912
913	// Specify the language of this captions output track. For most captions output
914	// formats, the encoder puts this language information in the output captions
915	// metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses
916	// this language information to choose the font language for rendering the captions
917	// text.
918	LanguageCode LanguageCode
919
920	// Specify a label for this set of output captions. For example, "English",
921	// "Director commentary", or "track_2". For streaming outputs, MediaConvert passes
922	// this information into destination manifests for display on the end-viewer's
923	// player device. For outputs in other output groups, the service ignores this
924	// setting.
925	LanguageDescription *string
926}
927
928// Settings related to one captions tab on the MediaConvert console. In your job
929// JSON, an instance of captions DestinationSettings is equivalent to one captions
930// tab in the console. Usually, one captions tab corresponds to one output captions
931// track. Depending on your output captions format, one tab might correspond to a
932// set of output captions tracks. For more information, see
933// https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html.
934type CaptionDestinationSettings struct {
935
936	// Settings related to burn-in captions. Set up burn-in captions in the same output
937	// as your video. For more information, see
938	// https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html.
939	// When you work directly in your JSON job specification, include this object and
940	// any required children when you set destinationType to BURN_IN.
941	BurninDestinationSettings *BurninDestinationSettings
942
943	// Specify the format for this set of captions on this output. The default format
944	// is embedded without SCTE-20. Note that your choice of video output container
945	// constrains your choice of output captions format. For more information, see
946	// https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html.
947	// If you are using SCTE-20 and you want to create an output that complies with the
948	// SCTE-43 spec, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED). To create a
949	// non-compliant output where the embedded captions come first, choose Embedded
950	// plus SCTE-20 (EMBEDDED_PLUS_SCTE20).
951	DestinationType CaptionDestinationType
952
953	// Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same output
954	// as your video. For more information, see
955	// https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html.
956	// When you work directly in your JSON job specification, include this object and
957	// any required children when you set destinationType to DVB_SUB.
958	DvbSubDestinationSettings *DvbSubDestinationSettings
959
960	// Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or
961	// ancillary) captions. Set up embedded captions in the same output as your video.
962	// For more information, see
963	// https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html.
964	// When you work directly in your JSON job specification, include this object and
965	// any required children when you set destinationType to EMBEDDED,
966	// EMBEDDED_PLUS_SCTE20, or SCTE20_PLUS_EMBEDDED.
967	EmbeddedDestinationSettings *EmbeddedDestinationSettings
968
969	// Settings related to IMSC captions. IMSC is a sidecar format that holds captions
970	// in a file that is separate from the video container. Set up sidecar captions in
971	// the same output group, but different output from your video. For more
972	// information, see
973	// https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.
974	// When you work directly in your JSON job specification, include this object and
975	// any required children when you set destinationType to IMSC.
976	ImscDestinationSettings *ImscDestinationSettings
977
978	// Settings related to SCC captions. SCC is a sidecar format that holds captions in
979	// a file that is separate from the video container. Set up sidecar captions in the
980	// same output group, but different output from your video. For more information,
981	// see
982	// https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html.
983	// When you work directly in your JSON job specification, include this object and
984	// any required children when you set destinationType to SCC.
985	SccDestinationSettings *SccDestinationSettings
986
987	// Settings related to teletext captions. Set up teletext captions in the same
988	// output as your video. For more information, see
989	// https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html.
990	// When you work directly in your JSON job specification, include this object and
991	// any required children when you set destinationType to TELETEXT.
992	TeletextDestinationSettings *TeletextDestinationSettings
993
994	// Settings related to TTML captions. TTML is a sidecar format that holds captions
995	// in a file that is separate from the video container. Set up sidecar captions in
996	// the same output group, but different output from your video. For more
997	// information, see
998	// https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.
999	// When you work directly in your JSON job specification, include this object and
1000	// any required children when you set destinationType to TTML.
1001	TtmlDestinationSettings *TtmlDestinationSettings
1002
1003	// WEBVTT Destination Settings
1004	WebvttDestinationSettings *WebvttDestinationSettings
1005}
1006
1007// Use captions selectors to specify the captions data from your input that you use
1008// in your outputs. You can use up to 20 captions selectors per input.
1009type CaptionSelector struct {
1010
1011	// The specific language to extract from source, using the ISO 639-2 or ISO 639-3
1012	// three-letter language code. If input is SCTE-27, complete this field and/or PID
1013	// to select the caption language to extract. If input is DVB-Sub and output is
1014	// Burn-in or SMPTE-TT, complete this field and/or PID to select the caption
1015	// language to extract. If input is DVB-Sub that is being passed through, omit this
1016	// field (and PID field); there is no way to extract a specific language with
1017	// pass-through captions.
1018	CustomLanguageCode *string
1019
1020	// The specific language to extract from source. If input is SCTE-27, complete this
1021	// field and/or PID to select the caption language to extract. If input is DVB-Sub
1022	// and output is Burn-in or SMPTE-TT, complete this field and/or PID to select the
1023	// caption language to extract. If input is DVB-Sub that is being passed through,
1024	// omit this field (and PID field); there is no way to extract a specific language
1025	// with pass-through captions.
1026	LanguageCode LanguageCode
1027
1028	// If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file,
1029	// specify the URI of the input captions source file. If your input captions are
1030	// IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings.
1031	SourceSettings *CaptionSourceSettings
1032}
1033
1034// Ignore this setting unless your input captions format is SCC. To have the
1035// service compensate for differing frame rates between your input captions and
1036// input video, specify the frame rate of the captions file. Specify this value as
1037// a fraction, using the settings Framerate numerator (framerateNumerator) and
1038// Framerate denominator (framerateDenominator). For example, you might specify 24
1039// / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001
1040// for 29.97 fps.
1041type CaptionSourceFramerate struct {
1042
1043	// Specify the denominator of the fraction that represents the frame rate for the
1044	// setting Caption source frame rate (CaptionSourceFramerate). Use this setting
1045	// along with the setting Framerate numerator (framerateNumerator).
1046	FramerateDenominator int32
1047
1048	// Specify the numerator of the fraction that represents the frame rate for the
1049	// setting Caption source frame rate (CaptionSourceFramerate). Use this setting
1050	// along with the setting Framerate denominator (framerateDenominator).
1051	FramerateNumerator int32
1052}
1053
1054// If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file,
1055// specify the URI of the input captions source file. If your input captions are
1056// IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings.
1057type CaptionSourceSettings struct {
1058
1059	// Settings for ancillary captions source.
1060	AncillarySourceSettings *AncillarySourceSettings
1061
1062	// DVB Sub Source Settings
1063	DvbSubSourceSettings *DvbSubSourceSettings
1064
1065	// Settings for embedded captions Source
1066	EmbeddedSourceSettings *EmbeddedSourceSettings
1067
1068	// If your input captions are SCC, SMI, SRT, STL, TTML, WebVTT, or IMSC 1.1 in an
1069	// xml file, specify the URI of the input caption source file. If your caption
1070	// source is IMSC in an IMF package, use TrackSourceSettings instead of
1071	// FileSoureSettings.
1072	FileSourceSettings *FileSourceSettings
1073
1074	// Use Source (SourceType) to identify the format of your input captions. The
1075	// service cannot auto-detect caption format.
1076	SourceType CaptionSourceType
1077
1078	// Settings specific to Teletext caption sources, including Page number.
1079	TeletextSourceSettings *TeletextSourceSettings
1080
1081	// Settings specific to caption sources that are specified by track number.
1082	// Currently, this is only IMSC captions in an IMF package. If your caption source
1083	// is IMSC 1.1 in a separate xml file, use FileSourceSettings instead of
1084	// TrackSourceSettings.
1085	TrackSourceSettings *TrackSourceSettings
1086}
1087
1088// Channel mapping (ChannelMapping) contains the group of fields that hold the
1089// remixing value for each channel, in dB. Specify remix values to indicate how
1090// much of the content from your input audio channel you want in your output audio
1091// channels. Each instance of the InputChannels or InputChannelsFineTune array
1092// specifies these values for one output channel. Use one instance of this array
1093// for each output channel. In the console, each array corresponds to a column in
1094// the graphical depiction of the mapping matrix. The rows of the graphical matrix
1095// correspond to input channels. Valid values are within the range from -60 (mute)
1096// through 6. A setting of 0 passes the input channel unchanged to the output
1097// channel (no attenuation or amplification). Use InputChannels or
1098// InputChannelsFineTune to specify your remix values. Don't use both.
1099type ChannelMapping struct {
1100
1101	// In your JSON job specification, include one child of OutputChannels for each
1102	// audio channel that you want in your output. Each child should contain one
1103	// instance of InputChannels or InputChannelsFineTune.
1104	OutputChannels []OutputChannelMapping
1105}
1106
1107// Specify the details for each pair of HLS and DASH additional manifests that you
1108// want the service to generate for this CMAF output group. Each pair of manifests
1109// can reference a different subset of outputs in the group.
1110type CmafAdditionalManifest struct {
1111
1112	// Specify a name modifier that the service adds to the name of this manifest to
1113	// make it different from the file names of the other main manifests in the output
1114	// group. For example, say that the default main manifest for your HLS group is
1115	// film-name.m3u8. If you enter "-no-premium" for this setting, then the file name
1116	// the service generates for this top-level manifest is film-name-no-premium.m3u8.
1117	// For HLS output groups, specify a manifestNameModifier that is different from the
1118	// nameModifier of the output. The service uses the output name modifier to create
1119	// unique names for the individual variant manifests.
1120	ManifestNameModifier *string
1121
1122	// Specify the outputs that you want this additional top-level manifest to
1123	// reference.
1124	SelectedOutputs []string
1125}
1126
1127// Settings for CMAF encryption
1128type CmafEncryptionSettings struct {
1129
1130	// This is a 128-bit, 16-byte hex value represented by a 32-character text string.
1131	// If this parameter is not set then the Initialization Vector will follow the
1132	// segment number by default.
1133	ConstantInitializationVector *string
1134
1135	// Specify the encryption scheme that you want the service to use when encrypting
1136	// your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR).
1137	EncryptionMethod CmafEncryptionType
1138
1139	// When you use DRM with CMAF outputs, choose whether the service writes the
1140	// 128-bit encryption initialization vector in the HLS and DASH manifests.
1141	InitializationVectorInManifest CmafInitializationVectorInManifest
1142
1143	// If your output group type is CMAF, use these settings when doing DRM encryption
1144	// with a SPEKE-compliant key provider. If your output group type is HLS, DASH, or
1145	// Microsoft Smooth, use the SpekeKeyProvider settings instead.
1146	SpekeKeyProvider *SpekeKeyProviderCmaf
1147
1148	// Use these settings to set up encryption with a static key provider.
1149	StaticKeyProvider *StaticKeyProvider
1150
1151	// Specify whether your DRM encryption key is static or from a key provider that
1152	// follows the SPEKE standard. For more information about SPEKE, see
1153	// https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.
1154	Type CmafKeyProviderType
1155}
1156
1157// Settings related to your CMAF output package. For more information, see
1158// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When
1159// you work directly in your JSON job specification, include this object and any
1160// required children when you set Type, under OutputGroupSettings, to
1161// CMAF_GROUP_SETTINGS.
1162type CmafGroupSettings struct {
1163
1164	// By default, the service creates one top-level .m3u8 HLS manifest and one top
1165	// -level .mpd DASH manifest for each CMAF output group in your job. These default
1166	// manifests reference every output in the output group. To create additional
1167	// top-level manifests that reference a subset of the outputs in the output group,
1168	// specify a list of them here. For each additional manifest that you specify, the
1169	// service creates one HLS manifest and one DASH manifest.
1170	AdditionalManifests []CmafAdditionalManifest
1171
1172	// A partial URI prefix that will be put in the manifest file at the top level
1173	// BaseURL element. Can be used if streams are delivered from a different URL than
1174	// the manifest file.
1175	BaseUrl *string
1176
1177	// Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no
1178	// tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in
1179	// your video distribution set up. For example, use the Cache-Control http header.
1180	ClientCache CmafClientCache
1181
1182	// Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist
1183	// generation.
1184	CodecSpecification CmafCodecSpecification
1185
1186	// Use Destination (Destination) to specify the S3 output location and the output
1187	// filename base. Destination accepts format identifiers. If you do not specify the
1188	// base filename in the URI, the service will use the filename of the input file.
1189	// If your job has multiple inputs, the service uses the filename of the first
1190	// input file.
1191	Destination *string
1192
1193	// Settings associated with the destination. Will vary based on the type of
1194	// destination
1195	DestinationSettings *DestinationSettings
1196
1197	// DRM settings.
1198	Encryption *CmafEncryptionSettings
1199
1200	// Length of fragments to generate (in seconds). Fragment length must be compatible
1201	// with GOP size and Framerate. Note that fragments will end on the next keyframe
1202	// after this number of seconds, so actual fragment length may be longer. When Emit
1203	// Single File is checked, the fragmentation is internal to a single output file
1204	// and it does not cause the creation of many output files as in other output
1205	// types.
1206	FragmentLength int32
1207
1208	// When set to GZIP, compresses HLS playlist.
1209	ManifestCompression CmafManifestCompression
1210
1211	// Indicates whether the output manifest should use floating point values for
1212	// segment duration.
1213	ManifestDurationFormat CmafManifestDurationFormat
1214
1215	// Minimum time of initially buffered media that is needed to ensure smooth
1216	// playout.
1217	MinBufferTime int32
1218
1219	// Keep this setting at the default value of 0, unless you are troubleshooting a
1220	// problem with how devices play back the end of your video asset. If you know that
1221	// player devices are hanging on the final segment of your video because the length
1222	// of your final segment is too short, use this setting to specify a minimum final
1223	// segment length, in seconds. Choose a value that is greater than or equal to 1
1224	// and less than your segment length. When you specify a value for this setting,
1225	// the encoder will combine any final segment that is shorter than the length that
1226	// you specify with the previous segment. For example, your segment length is 3
1227	// seconds and your final segment is .5 seconds without a minimum final segment
1228	// length; when you set the minimum final segment length to 1, your final segment
1229	// is 3.5 seconds.
1230	MinFinalSegmentLength float64
1231
1232	// Specify whether your DASH profile is on-demand or main. When you choose Main
1233	// profile (MAIN_PROFILE), the service signals
1234	// urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you
1235	// choose On-demand (ON_DEMAND_PROFILE), the service signals
1236	// urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose
1237	// On-demand, you must also set the output group setting Segment control
1238	// (SegmentControl) to Single file (SINGLE_FILE).
1239	MpdProfile CmafMpdProfile
1240
1241	// Use this setting only when your output video stream has B-frames, which causes
1242	// the initial presentation time stamp (PTS) to be offset from the initial decode
1243	// time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps
1244	// in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you
1245	// want MediaConvert to use the initial PTS as the first time stamp in the
1246	// manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial
1247	// PTS in the video stream and instead write the initial time stamp as zero in the
1248	// manifest. For outputs that don't have B-frames, the time stamps in your DASH
1249	// manifests start at zero regardless of your choice here.
1250	PtsOffsetHandlingForBFrames CmafPtsOffsetHandlingForBFrames
1251
1252	// When set to SINGLE_FILE, a single output file is generated, which is internally
1253	// segmented using the Fragment Length and Segment Length. When set to
1254	// SEGMENTED_FILES, separate segment files will be created.
1255	SegmentControl CmafSegmentControl
1256
1257	// Use this setting to specify the length, in seconds, of each individual CMAF
1258	// segment. This value applies to the whole package; that is, to every output in
1259	// the output group. Note that segments end on the first keyframe after this number
1260	// of seconds, so the actual segment length might be slightly longer. If you set
1261	// Segment control (CmafSegmentControl) to single file, the service puts the
1262	// content of each output in a single file that has metadata that marks these
1263	// segments. If you set it to segmented files, the service creates multiple files
1264	// for each output, each with the content of one segment.
1265	SegmentLength int32
1266
1267	// Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag of
1268	// variant manifest.
1269	StreamInfResolution CmafStreamInfResolution
1270
1271	// When set to ENABLED, a DASH MPD manifest will be generated for this output.
1272	WriteDashManifest CmafWriteDASHManifest
1273
1274	// When set to ENABLED, an Apple HLS manifest will be generated for this output.
1275	WriteHlsManifest CmafWriteHLSManifest
1276
1277	// When you enable Precise segment duration in DASH manifests
1278	// (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment
1279	// durations. The segment duration information appears inside the SegmentTimeline
1280	// element, inside SegmentTemplate at the Representation level. When this feature
1281	// isn't enabled, the segment durations in your DASH manifest are approximate. The
1282	// segment duration information appears in the duration attribute of the
1283	// SegmentTemplate element.
1284	WriteSegmentTimelineInRepresentation CmafWriteSegmentTimelineInRepresentation
1285}
1286
1287// These settings relate to the fragmented MP4 container for the segments in your
1288// CMAF outputs.
1289type CmfcSettings struct {
1290
1291	// Specify this setting only when your output will be consumed by a downstream
1292	// repackaging workflow that is sensitive to very small duration differences
1293	// between video and audio. For this situation, choose Match video duration
1294	// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
1295	// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
1296	// MediaConvert pads the output audio streams with silence or trims them to ensure
1297	// that the total duration of each audio stream is at least as long as the total
1298	// duration of the video stream. After padding or trimming, the audio stream
1299	// duration is no more than one frame longer than the video stream. MediaConvert
1300	// applies audio padding or trimming only to the end of the last segment of the
1301	// output. For unsegmented outputs, MediaConvert adds padding only to the end of
1302	// the file. When you keep the default value, any minor discrepancies between audio
1303	// and video duration will depend on your output audio codec.
1304	AudioDuration CmfcAudioDuration
1305
1306	// Specify the audio rendition group for this audio rendition. Specify up to one
1307	// value for each audio output in your output group. This value appears in your HLS
1308	// parent manifest in the EXT-X-MEDIA tag of TYPE=AUDIO, as the value for the
1309	// GROUP-ID attribute. For example, if you specify "audio_aac_1" for Audio group
1310	// ID, it appears in your manifest like this:
1311	// #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio_aac_1". Related setting: To associate
1312	// the rendition group that this audio track belongs to with a video rendition,
1313	// include the same value that you provide here for that video output's setting
1314	// Audio rendition sets (audioRenditionSets).
1315	AudioGroupId *string
1316
1317	// List the audio rendition groups that you want included with this video
1318	// rendition. Use a comma-separated list. For example, say you want to include the
1319	// audio rendition groups that have the audio group IDs "audio_aac_1" and
1320	// "audio_dolby". Then you would specify this value: "audio_aac_1, audio_dolby".
1321	// Related setting: The rendition groups that you include in your comma-separated
1322	// list should all match values that you specify in the setting Audio group ID
1323	// (AudioGroupId) for audio renditions in the same output group as this video
1324	// rendition. Default behavior: If you don't specify anything here and for Audio
1325	// group ID, MediaConvert puts each audio variant in its own audio rendition group
1326	// and associates it with every video variant. Each value in your list appears in
1327	// your HLS parent manifest in the EXT-X-STREAM-INF tag as the value for the AUDIO
1328	// attribute. To continue the previous example, say that the file name for the
1329	// child manifest for your video rendition is "amazing_video_1.m3u8". Then, in your
1330	// parent manifest, each value will appear on separate lines, like this:
1331	// #EXT-X-STREAM-INF:AUDIO="audio_aac_1"... amazing_video_1.m3u8
1332	// #EXT-X-STREAM-INF:AUDIO="audio_dolby"... amazing_video_1.m3u8
1333	AudioRenditionSets *string
1334
1335	// Use this setting to control the values that MediaConvert puts in your HLS parent
1336	// playlist to control how the client player selects which audio track to play. The
1337	// other options for this setting determine the values that MediaConvert writes for
1338	// the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio
1339	// variant. For more information about these attributes, see the Apple
1340	// documentation article
1341	// https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist.
1342	// Choose Alternate audio, auto select, default
1343	// (ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT) to set DEFAULT=YES and AUTOSELECT=YES.
1344	// Choose this value for only one variant in your output group. Choose Alternate
1345	// audio, auto select, not default (ALTERNATE_AUDIO_AUTO_SELECT) to set DEFAULT=NO
1346	// and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO
1347	// and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert
1348	// defaults to Alternate audio, auto select, default. When there is more than one
1349	// variant in your output group, you must explicitly choose a value for this
1350	// setting.
1351	AudioTrackType CmfcAudioTrackType
1352
1353	// Specify whether to flag this audio track as descriptive video service (DVS) in
1354	// your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes the
1355	// parameter CHARACTERISTICS="public.accessibility.describes-video" in the
1356	// EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag
1357	// (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with
1358	// accessibility on Apple devices. For more information, see the Apple
1359	// documentation.
1360	DescriptiveVideoServiceFlag CmfcDescriptiveVideoServiceFlag
1361
1362	// Choose Include (INCLUDE) to have MediaConvert generate an HLS child manifest
1363	// that lists only the I-frames for this rendition, in addition to your regular
1364	// manifest for this rendition. You might use this manifest as part of a workflow
1365	// that creates preview functions for your video. MediaConvert adds both the
1366	// I-frame only child manifest and the regular child manifest to the parent
1367	// manifest. When you don't need the I-frame only child manifest, keep the default
1368	// value Exclude (EXCLUDE).
1369	IFrameOnlyManifest CmfcIFrameOnlyManifest
1370
1371	// Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT
1372	// to put SCTE-35 markers in this output at the insertion points that you specify
1373	// in an ESAM XML document. Provide the document in the setting SCC XML (sccXml).
1374	Scte35Esam CmfcScte35Esam
1375
1376	// Ignore this setting unless you have SCTE-35 markers in your input video file.
1377	// Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your
1378	// input to also appear in this output. Choose None (NONE) if you don't want those
1379	// SCTE-35 markers in this output.
1380	Scte35Source CmfcScte35Source
1381}
1382
1383// Settings for color correction.
1384type ColorCorrector struct {
1385
1386	// Brightness level.
1387	Brightness int32
1388
1389	// Specify the color space you want for this output. The service supports
1390	// conversion between HDR formats, between SDR formats, from SDR to HDR, and from
1391	// HDR to SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The
1392	// converted video has an HDR format, but visually appears the same as an
1393	// unconverted output. HDR to SDR conversion uses Elemental tone mapping technology
1394	// to approximate the outcome of manually regrading from HDR to SDR.
1395	ColorSpaceConversion ColorSpaceConversion
1396
1397	// Contrast level.
1398	Contrast int32
1399
1400	// Use these settings when you convert to the HDR 10 color space. Specify the SMPTE
1401	// ST 2086 Mastering Display Color Volume static metadata that you want signaled in
1402	// the output. These values don't affect the pixel values that are encoded in the
1403	// video stream. They are intended to help the downstream video player display
1404	// content in a way that reflects the intentions of the the content creator. When
1405	// you set Color space conversion (ColorSpaceConversion) to HDR 10 (FORCE_HDR10),
1406	// these settings are required. You must set values for Max frame average light
1407	// level (maxFrameAverageLightLevel) and Max content light level
1408	// (maxContentLightLevel); these settings don't have a default value. The default
1409	// values for the other HDR 10 metadata settings are defined by the P3D65 color
1410	// space. For more information about MediaConvert HDR jobs, see
1411	// https://docs.aws.amazon.com/console/mediaconvert/hdr.
1412	Hdr10Metadata *Hdr10Metadata
1413
1414	// Hue in degrees.
1415	Hue int32
1416
1417	// Saturation level.
1418	Saturation int32
1419}
1420
1421// Container specific settings.
1422type ContainerSettings struct {
1423
1424	// These settings relate to the fragmented MP4 container for the segments in your
1425	// CMAF outputs.
1426	CmfcSettings *CmfcSettings
1427
1428	// Container for this output. Some containers require a container settings object.
1429	// If not specified, the default object will be created.
1430	Container ContainerType
1431
1432	// Settings for F4v container
1433	F4vSettings *F4vSettings
1434
1435	// MPEG-2 TS container settings. These apply to outputs in a File output group when
1436	// the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). In
1437	// these assets, data is organized by the program map table (PMT). Each transport
1438	// stream program contains subsets of data, including audio, video, and metadata.
1439	// Each of these subsets of data has a numerical label called a packet identifier
1440	// (PID). Each transport stream program corresponds to one MediaConvert output. The
1441	// PMT lists the types of data in a program along with their PID. Downstream
1442	// systems and players use the program map table to look up the PID for each type
1443	// of data it accesses and then uses the PIDs to locate specific data within the
1444	// asset.
1445	M2tsSettings *M2tsSettings
1446
1447	// These settings relate to the MPEG-2 transport stream (MPEG2-TS) container for
1448	// the MPEG2-TS segments in your HLS outputs.
1449	M3u8Settings *M3u8Settings
1450
1451	// These settings relate to your QuickTime MOV output container.
1452	MovSettings *MovSettings
1453
1454	// These settings relate to your MP4 output container. You can create audio only
1455	// outputs with this container. For more information, see
1456	// https://docs.aws.amazon.com/mediaconvert/latest/ug/supported-codecs-containers-audio-only.html#output-codecs-and-containers-supported-for-audio-only.
1457	Mp4Settings *Mp4Settings
1458
1459	// These settings relate to the fragmented MP4 container for the segments in your
1460	// DASH outputs.
1461	MpdSettings *MpdSettings
1462
1463	// These settings relate to your MXF output container.
1464	MxfSettings *MxfSettings
1465}
1466
1467// Specify the details for each additional DASH manifest that you want the service
1468// to generate for this output group. Each manifest can reference a different
1469// subset of outputs in the group.
1470type DashAdditionalManifest struct {
1471
1472	// Specify a name modifier that the service adds to the name of this manifest to
1473	// make it different from the file names of the other main manifests in the output
1474	// group. For example, say that the default main manifest for your DASH group is
1475	// film-name.mpd. If you enter "-no-premium" for this setting, then the file name
1476	// the service generates for this top-level manifest is film-name-no-premium.mpd.
1477	ManifestNameModifier *string
1478
1479	// Specify the outputs that you want this additional top-level manifest to
1480	// reference.
1481	SelectedOutputs []string
1482}
1483
1484// Specifies DRM settings for DASH outputs.
1485type DashIsoEncryptionSettings struct {
1486
1487	// This setting can improve the compatibility of your output with video players on
1488	// obsolete devices. It applies only to DASH H.264 outputs with DRM encryption.
1489	// Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback
1490	// on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1). If you
1491	// choose Unencrypted SEI, for that output, the service will exclude the access
1492	// unit delimiter and will leave the SEI NAL units unencrypted.
1493	PlaybackDeviceCompatibility DashIsoPlaybackDeviceCompatibility
1494
1495	// If your output group type is HLS, DASH, or Microsoft Smooth, use these settings
1496	// when doing DRM encryption with a SPEKE-compliant key provider. If your output
1497	// group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
1498	SpekeKeyProvider *SpekeKeyProvider
1499}
1500
1501// Settings related to your DASH output package. For more information, see
1502// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When
1503// you work directly in your JSON job specification, include this object and any
1504// required children when you set Type, under OutputGroupSettings, to
1505// DASH_ISO_GROUP_SETTINGS.
1506type DashIsoGroupSettings struct {
1507
1508	// By default, the service creates one .mpd DASH manifest for each DASH ISO output
1509	// group in your job. This default manifest references every output in the output
1510	// group. To create additional DASH manifests that reference a subset of the
1511	// outputs in the output group, specify a list of them here.
1512	AdditionalManifests []DashAdditionalManifest
1513
1514	// Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or Atmos)
1515	// and your downstream workflow requires that your DASH manifest use the Dolby
1516	// channel configuration tag, rather than the MPEG one. For example, you might need
1517	// to use this to make dynamic ad insertion work. Specify which audio channel
1518	// configuration scheme ID URI MediaConvert writes in your DASH manifest. Keep the
1519	// default value, MPEG channel configuration (MPEG_CHANNEL_CONFIGURATION), to have
1520	// MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby
1521	// channel configuration (DOLBY_CHANNEL_CONFIGURATION) to have MediaConvert write
1522	// this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011.
1523	AudioChannelConfigSchemeIdUri DashIsoGroupAudioChannelConfigSchemeIdUri
1524
1525	// A partial URI prefix that will be put in the manifest (.mpd) file at the top
1526	// level BaseURL element. Can be used if streams are delivered from a different URL
1527	// than the manifest file.
1528	BaseUrl *string
1529
1530	// Use Destination (Destination) to specify the S3 output location and the output
1531	// filename base. Destination accepts format identifiers. If you do not specify the
1532	// base filename in the URI, the service will use the filename of the input file.
1533	// If your job has multiple inputs, the service uses the filename of the first
1534	// input file.
1535	Destination *string
1536
1537	// Settings associated with the destination. Will vary based on the type of
1538	// destination
1539	DestinationSettings *DestinationSettings
1540
1541	// DRM settings.
1542	Encryption *DashIsoEncryptionSettings
1543
1544	// Length of fragments to generate (in seconds). Fragment length must be compatible
1545	// with GOP size and Framerate. Note that fragments will end on the next keyframe
1546	// after this number of seconds, so actual fragment length may be longer. When Emit
1547	// Single File is checked, the fragmentation is internal to a single output file
1548	// and it does not cause the creation of many output files as in other output
1549	// types.
1550	FragmentLength int32
1551
1552	// Supports HbbTV specification as indicated
1553	HbbtvCompliance DashIsoHbbtvCompliance
1554
1555	// Minimum time of initially buffered media that is needed to ensure smooth
1556	// playout.
1557	MinBufferTime int32
1558
1559	// Keep this setting at the default value of 0, unless you are troubleshooting a
1560	// problem with how devices play back the end of your video asset. If you know that
1561	// player devices are hanging on the final segment of your video because the length
1562	// of your final segment is too short, use this setting to specify a minimum final
1563	// segment length, in seconds. Choose a value that is greater than or equal to 1
1564	// and less than your segment length. When you specify a value for this setting,
1565	// the encoder will combine any final segment that is shorter than the length that
1566	// you specify with the previous segment. For example, your segment length is 3
1567	// seconds and your final segment is .5 seconds without a minimum final segment
1568	// length; when you set the minimum final segment length to 1, your final segment
1569	// is 3.5 seconds.
1570	MinFinalSegmentLength float64
1571
1572	// Specify whether your DASH profile is on-demand or main. When you choose Main
1573	// profile (MAIN_PROFILE), the service signals
1574	// urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you
1575	// choose On-demand (ON_DEMAND_PROFILE), the service signals
1576	// urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose
1577	// On-demand, you must also set the output group setting Segment control
1578	// (SegmentControl) to Single file (SINGLE_FILE).
1579	MpdProfile DashIsoMpdProfile
1580
1581	// Use this setting only when your output video stream has B-frames, which causes
1582	// the initial presentation time stamp (PTS) to be offset from the initial decode
1583	// time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps
1584	// in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you
1585	// want MediaConvert to use the initial PTS as the first time stamp in the
1586	// manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial
1587	// PTS in the video stream and instead write the initial time stamp as zero in the
1588	// manifest. For outputs that don't have B-frames, the time stamps in your DASH
1589	// manifests start at zero regardless of your choice here.
1590	PtsOffsetHandlingForBFrames DashIsoPtsOffsetHandlingForBFrames
1591
1592	// When set to SINGLE_FILE, a single output file is generated, which is internally
1593	// segmented using the Fragment Length and Segment Length. When set to
1594	// SEGMENTED_FILES, separate segment files will be created.
1595	SegmentControl DashIsoSegmentControl
1596
1597	// Length of mpd segments to create (in seconds). Note that segments will end on
1598	// the next keyframe after this number of seconds, so actual segment length may be
1599	// longer. When Emit Single File is checked, the segmentation is internal to a
1600	// single output file and it does not cause the creation of many output files as in
1601	// other output types.
1602	SegmentLength int32
1603
1604	// If you get an HTTP error in the 400 range when you play back your DASH output,
1605	// enable this setting and run your transcoding job again. When you enable this
1606	// setting, the service writes precise segment durations in the DASH manifest. The
1607	// segment duration information appears inside the SegmentTimeline element, inside
1608	// SegmentTemplate at the Representation level. When you don't enable this setting,
1609	// the service writes approximate segment durations in your DASH manifest.
1610	WriteSegmentTimelineInRepresentation DashIsoWriteSegmentTimelineInRepresentation
1611}
1612
1613// Settings for deinterlacer
1614type Deinterlacer struct {
1615
1616	// Only applies when you set Deinterlacer (DeinterlaceMode) to Deinterlace
1617	// (DEINTERLACE) or Adaptive (ADAPTIVE). Motion adaptive interpolate (INTERPOLATE)
1618	// produces sharper pictures, while blend (BLEND) produces smoother motion. Use
1619	// (INTERPOLATE_TICKER) OR (BLEND_TICKER) if your source file includes a ticker,
1620	// such as a scrolling headline at the bottom of the frame.
1621	Algorithm DeinterlaceAlgorithm
1622
1623	// * When set to NORMAL (default), the deinterlacer does not convert frames that
1624	// are tagged in metadata as progressive. It will only convert those that are
1625	// tagged as some other type. - When set to FORCE_ALL_FRAMES, the deinterlacer
1626	// converts every frame to progressive - even those that are already tagged as
1627	// progressive. Turn Force mode on only if there is a good chance that the metadata
1628	// has tagged frames as progressive when they are not progressive. Do not turn on
1629	// otherwise; processing frames that are already progressive into progressive will
1630	// probably result in lower quality video.
1631	Control DeinterlacerControl
1632
1633	// Use Deinterlacer (DeinterlaceMode) to choose how the service will do
1634	// deinterlacing. Default is Deinterlace. - Deinterlace converts interlaced to
1635	// progressive. - Inverse telecine converts Hard Telecine 29.97i to progressive
1636	// 23.976p. - Adaptive auto-detects and converts to progressive.
1637	Mode DeinterlacerMode
1638}
1639
1640// Settings associated with the destination. Will vary based on the type of
1641// destination
1642type DestinationSettings struct {
1643
1644	// Settings associated with S3 destination
1645	S3Settings *S3DestinationSettings
1646}
1647
1648// With AWS Elemental MediaConvert, you can create profile 5 Dolby Vision outputs
1649// from MXF and IMF sources that contain mastering information as frame-interleaved
1650// Dolby Vision metadata.
1651type DolbyVision struct {
1652
1653	// Use these settings when you set DolbyVisionLevel6Mode to SPECIFY to override the
1654	// MaxCLL and MaxFALL values in your input with new values.
1655	L6Metadata *DolbyVisionLevel6Metadata
1656
1657	// Use Dolby Vision Mode to choose how the service will handle Dolby Vision MaxCLL
1658	// and MaxFALL properies.
1659	L6Mode DolbyVisionLevel6Mode
1660
1661	// In the current MediaConvert implementation, the Dolby Vision profile is always 5
1662	// (PROFILE_5). Therefore, all of your inputs must contain Dolby Vision frame
1663	// interleaved data.
1664	Profile DolbyVisionProfile
1665}
1666
1667// Use these settings when you set DolbyVisionLevel6Mode to SPECIFY to override the
1668// MaxCLL and MaxFALL values in your input with new values.
1669type DolbyVisionLevel6Metadata struct {
1670
1671	// Maximum Content Light Level. Static HDR metadata that corresponds to the
1672	// brightest pixel in the entire stream. Measured in nits.
1673	MaxCll int32
1674
1675	// Maximum Frame-Average Light Level. Static HDR metadata that corresponds to the
1676	// highest frame-average brightness in the entire stream. Measured in nits.
1677	MaxFall int32
1678}
1679
1680// Use these settings to insert a DVB Network Information Table (NIT) in the
1681// transport stream of this output. When you work directly in your JSON job
1682// specification, include this object only when your job has a transport stream
1683// output and the container settings contain the object M2tsSettings.
1684type DvbNitSettings struct {
1685
1686	// The numeric value placed in the Network Information Table (NIT).
1687	NetworkId int32
1688
1689	// The network name text placed in the network_name_descriptor inside the Network
1690	// Information Table. Maximum length is 256 characters.
1691	NetworkName *string
1692
1693	// The number of milliseconds between instances of this table in the output
1694	// transport stream.
1695	NitInterval int32
1696}
1697
1698// Use these settings to insert a DVB Service Description Table (SDT) in the
1699// transport stream of this output. When you work directly in your JSON job
1700// specification, include this object only when your job has a transport stream
1701// output and the container settings contain the object M2tsSettings.
1702type DvbSdtSettings struct {
1703
1704	// Selects method of inserting SDT information into output stream. "Follow input
1705	// SDT" copies SDT information from input stream to output stream. "Follow input
1706	// SDT if present" copies SDT information from input stream to output stream if SDT
1707	// information is present in the input, otherwise it will fall back on the
1708	// user-defined values. Enter "SDT Manually" means user will enter the SDT
1709	// information. "No SDT" means output stream will not contain SDT information.
1710	OutputSdt OutputSdt
1711
1712	// The number of milliseconds between instances of this table in the output
1713	// transport stream.
1714	SdtInterval int32
1715
1716	// The service name placed in the service_descriptor in the Service Description
1717	// Table. Maximum length is 256 characters.
1718	ServiceName *string
1719
1720	// The service provider name placed in the service_descriptor in the Service
1721	// Description Table. Maximum length is 256 characters.
1722	ServiceProviderName *string
1723}
1724
1725// Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same output
1726// as your video. For more information, see
1727// https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html.
1728// When you work directly in your JSON job specification, include this object and
1729// any required children when you set destinationType to DVB_SUB.
1730type DvbSubDestinationSettings struct {
1731
1732	// If no explicit x_position or y_position is provided, setting alignment to
1733	// centered will place the captions at the bottom center of the output. Similarly,
1734	// setting a left alignment will align captions to the bottom left of the output.
1735	// If x and y positions are given in conjunction with the alignment parameter, the
1736	// font will be justified (either left or centered) relative to those coordinates.
1737	// This option is not valid for source captions that are STL, 608/embedded or
1738	// teletext. These source settings are already pre-defined by the caption stream.
1739	// All burn-in and DVB-Sub font settings must match.
1740	Alignment DvbSubtitleAlignment
1741
1742	// Specifies the color of the rectangle behind the captions. All burn-in and
1743	// DVB-Sub font settings must match.
1744	BackgroundColor DvbSubtitleBackgroundColor
1745
1746	// Specifies the opacity of the background rectangle. 255 is opaque; 0 is
1747	// transparent. Leaving this parameter blank is equivalent to setting it to 0
1748	// (transparent). All burn-in and DVB-Sub font settings must match.
1749	BackgroundOpacity int32
1750
1751	// Specify how MediaConvert handles the display definition segment (DDS). Keep the
1752	// default, None (NONE), to exclude the DDS from this set of captions. Choose No
1753	// display window (NO_DISPLAY_WINDOW) to have MediaConvert include the DDS but not
1754	// include display window data. In this case, MediaConvert writes that information
1755	// to the page composition segment (PCS) instead. Choose Specify (SPECIFIED) to
1756	// have MediaConvert set up the display window based on the values that you specify
1757	// in related job settings. For video resolutions that are 576 pixels or smaller in
1758	// height, MediaConvert doesn't include the DDS, regardless of the value you choose
1759	// for DDS handling (ddsHandling). In this case, it doesn't write the display
1760	// window data to the PCS either. Related settings: Use the settings DDS
1761	// x-coordinate (ddsXCoordinate) and DDS y-coordinate (ddsYCoordinate) to specify
1762	// the offset between the top left corner of the display window and the top left
1763	// corner of the video frame. All burn-in and DVB-Sub font settings must match.
1764	DdsHandling DvbddsHandling
1765
1766	// Use this setting, along with DDS y-coordinate (ddsYCoordinate), to specify the
1767	// upper left corner of the display definition segment (DDS) display window. With
1768	// this setting, specify the distance, in pixels, between the left side of the
1769	// frame and the left side of the DDS display window. Keep the default value, 0, to
1770	// have MediaConvert automatically choose this offset. Related setting: When you
1771	// use this setting, you must set DDS handling (ddsHandling) to a value other than
1772	// None (NONE). MediaConvert uses these values to determine whether to write page
1773	// position data to the DDS or to the page composition segment (PCS). All burn-in
1774	// and DVB-Sub font settings must match.
1775	DdsXCoordinate int32
1776
1777	// Use this setting, along with DDS x-coordinate (ddsXCoordinate), to specify the
1778	// upper left corner of the display definition segment (DDS) display window. With
1779	// this setting, specify the distance, in pixels, between the top of the frame and
1780	// the top of the DDS display window. Keep the default value, 0, to have
1781	// MediaConvert automatically choose this offset. Related setting: When you use
1782	// this setting, you must set DDS handling (ddsHandling) to a value other than None
1783	// (NONE). MediaConvert uses these values to determine whether to write page
1784	// position data to the DDS or to the page composition segment (PCS). All burn-in
1785	// and DVB-Sub font settings must match.
1786	DdsYCoordinate int32
1787
1788	// Specifies the color of the burned-in captions. This option is not valid for
1789	// source captions that are STL, 608/embedded or teletext. These source settings
1790	// are already pre-defined by the caption stream. All burn-in and DVB-Sub font
1791	// settings must match.
1792	FontColor DvbSubtitleFontColor
1793
1794	// Specifies the opacity of the burned-in captions. 255 is opaque; 0 is
1795	// transparent. All burn-in and DVB-Sub font settings must match.
1796	FontOpacity int32
1797
1798	// Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and
1799	// DVB-Sub font settings must match.
1800	FontResolution int32
1801
1802	// Provide the font script, using an ISO 15924 script code, if the LanguageCode is
1803	// not sufficient for determining the script type. Where LanguageCode or
1804	// CustomLanguageCode is sufficient, use "AUTOMATIC" or leave unset. This is used
1805	// to help determine the appropriate font for rendering DVB-Sub captions.
1806	FontScript FontScript
1807
1808	// A positive integer indicates the exact font size in points. Set to 0 for
1809	// automatic font size selection. All burn-in and DVB-Sub font settings must match.
1810	FontSize int32
1811
1812	// Specify the height, in pixels, of this set of DVB-Sub captions. The default
1813	// value is 576 pixels. Related setting: When you use this setting, you must set
1814	// DDS handling (ddsHandling) to a value other than None (NONE). All burn-in and
1815	// DVB-Sub font settings must match.
1816	Height int32
1817
1818	// Specifies font outline color. This option is not valid for source captions that
1819	// are either 608/embedded or teletext. These source settings are already
1820	// pre-defined by the caption stream. All burn-in and DVB-Sub font settings must
1821	// match.
1822	OutlineColor DvbSubtitleOutlineColor
1823
1824	// Specifies font outline size in pixels. This option is not valid for source
1825	// captions that are either 608/embedded or teletext. These source settings are
1826	// already pre-defined by the caption stream. All burn-in and DVB-Sub font settings
1827	// must match.
1828	OutlineSize int32
1829
1830	// Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub
1831	// font settings must match.
1832	ShadowColor DvbSubtitleShadowColor
1833
1834	// Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving
1835	// this parameter blank is equivalent to setting it to 0 (transparent). All burn-in
1836	// and DVB-Sub font settings must match.
1837	ShadowOpacity int32
1838
1839	// Specifies the horizontal offset of the shadow relative to the captions in
1840	// pixels. A value of -2 would result in a shadow offset 2 pixels to the left. All
1841	// burn-in and DVB-Sub font settings must match.
1842	ShadowXOffset int32
1843
1844	// Specifies the vertical offset of the shadow relative to the captions in pixels.
1845	// A value of -2 would result in a shadow offset 2 pixels above the text. All
1846	// burn-in and DVB-Sub font settings must match.
1847	ShadowYOffset int32
1848
1849	// Specify whether your DVB subtitles are standard or for hearing impaired. Choose
1850	// hearing impaired if your subtitles include audio descriptions and dialogue.
1851	// Choose standard if your subtitles include only dialogue.
1852	SubtitlingType DvbSubtitlingType
1853
1854	// Only applies to jobs with input captions in Teletext or STL formats. Specify
1855	// whether the spacing between letters in your captions is set by the captions grid
1856	// or varies depending on letter width. Choose fixed grid to conform to the spacing
1857	// specified in the captions file more accurately. Choose proportional to make the
1858	// text easier to read if the captions are closed caption.
1859	TeletextSpacing DvbSubtitleTeletextSpacing
1860
1861	// Specify the width, in pixels, of this set of DVB-Sub captions. The default value
1862	// is 720 pixels. Related setting: When you use this setting, you must set DDS
1863	// handling (ddsHandling) to a value other than None (NONE). All burn-in and
1864	// DVB-Sub font settings must match.
1865	Width int32
1866
1867	// Specifies the horizontal position of the caption relative to the left side of
1868	// the output in pixels. A value of 10 would result in the captions starting 10
1869	// pixels from the left of the output. If no explicit x_position is provided, the
1870	// horizontal caption position will be determined by the alignment parameter. This
1871	// option is not valid for source captions that are STL, 608/embedded or teletext.
1872	// These source settings are already pre-defined by the caption stream. All burn-in
1873	// and DVB-Sub font settings must match.
1874	XPosition int32
1875
1876	// Specifies the vertical position of the caption relative to the top of the output
1877	// in pixels. A value of 10 would result in the captions starting 10 pixels from
1878	// the top of the output. If no explicit y_position is provided, the caption will
1879	// be positioned towards the bottom of the output. This option is not valid for
1880	// source captions that are STL, 608/embedded or teletext. These source settings
1881	// are already pre-defined by the caption stream. All burn-in and DVB-Sub font
1882	// settings must match.
1883	YPosition int32
1884}
1885
1886// DVB Sub Source Settings
1887type DvbSubSourceSettings struct {
1888
1889	// When using DVB-Sub with Burn-In or SMPTE-TT, use this PID for the source
1890	// content. Unused for DVB-Sub passthrough. All DVB-Sub content is passed through,
1891	// regardless of selectors.
1892	Pid int32
1893}
1894
1895// Use these settings to insert a DVB Time and Date Table (TDT) in the transport
1896// stream of this output. When you work directly in your JSON job specification,
1897// include this object only when your job has a transport stream output and the
1898// container settings contain the object M2tsSettings.
1899type DvbTdtSettings struct {
1900
1901	// The number of milliseconds between instances of this table in the output
1902	// transport stream.
1903	TdtInterval int32
1904}
1905
1906// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the
1907// value EAC3_ATMOS.
1908type Eac3AtmosSettings struct {
1909
1910	// Specify the average bitrate in bits per second. Valid values: 384k, 448k, 640k,
1911	// 768k
1912	Bitrate int32
1913
1914	// Specify the bitstream mode for the E-AC-3 stream that the encoder emits. For
1915	// more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex E).
1916	BitstreamMode Eac3AtmosBitstreamMode
1917
1918	// The coding mode for Dolby Digital Plus JOC (Atmos) is always 9.1.6
1919	// (CODING_MODE_9_1_6).
1920	CodingMode Eac3AtmosCodingMode
1921
1922	// Enable Dolby Dialogue Intelligence to adjust loudness based on dialogue
1923	// analysis.
1924	DialogueIntelligence Eac3AtmosDialogueIntelligence
1925
1926	// Specify the absolute peak level for a signal with dynamic range compression.
1927	DynamicRangeCompressionLine Eac3AtmosDynamicRangeCompressionLine
1928
1929	// Specify how the service limits the audio dynamic range when compressing the
1930	// audio.
1931	DynamicRangeCompressionRf Eac3AtmosDynamicRangeCompressionRf
1932
1933	// Specify a value for the following Dolby Atmos setting: Left only/Right only
1934	// center mix (Lo/Ro center). MediaConvert uses this value for downmixing. How the
1935	// service uses this value depends on the value that you choose for Stereo downmix
1936	// (Eac3AtmosStereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and
1937	// -6.0.
1938	LoRoCenterMixLevel float64
1939
1940	// Specify a value for the following Dolby Atmos setting: Left only/Right only
1941	// (Lo/Ro surround). MediaConvert uses this value for downmixing. How the service
1942	// uses this value depends on the value that you choose for Stereo downmix
1943	// (Eac3AtmosStereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The
1944	// value -60 mutes the channel.
1945	LoRoSurroundMixLevel float64
1946
1947	// Specify a value for the following Dolby Atmos setting: Left total/Right total
1948	// center mix (Lt/Rt center). MediaConvert uses this value for downmixing. How the
1949	// service uses this value depends on the value that you choose for Stereo downmix
1950	// (Eac3AtmosStereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and
1951	// -6.0.
1952	LtRtCenterMixLevel float64
1953
1954	// Specify a value for the following Dolby Atmos setting: Left total/Right total
1955	// surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. How
1956	// the service uses this value depends on the value that you choose for Stereo
1957	// downmix (Eac3AtmosStereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60.
1958	// The value -60 mutes the channel.
1959	LtRtSurroundMixLevel float64
1960
1961	// Choose how the service meters the loudness of your audio.
1962	MeteringMode Eac3AtmosMeteringMode
1963
1964	// This value is always 48000. It represents the sample rate in Hz.
1965	SampleRate int32
1966
1967	// Specify the percentage of audio content that must be speech before the encoder
1968	// uses the measured speech loudness as the overall program loudness.
1969	SpeechThreshold int32
1970
1971	// Choose how the service does stereo downmixing.
1972	StereoDownmix Eac3AtmosStereoDownmix
1973
1974	// Specify whether your input audio has an additional center rear surround channel
1975	// matrix encoded into your left and right surround channels.
1976	SurroundExMode Eac3AtmosSurroundExMode
1977}
1978
1979// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the
1980// value EAC3.
1981type Eac3Settings struct {
1982
1983	// If set to ATTENUATE_3_DB, applies a 3 dB attenuation to the surround channels.
1984	// Only used for 3/2 coding mode.
1985	AttenuationControl Eac3AttenuationControl
1986
1987	// Specify the average bitrate in bits per second. Valid bitrates depend on the
1988	// coding mode.
1989	Bitrate int32
1990
1991	// Specify the bitstream mode for the E-AC-3 stream that the encoder emits. For
1992	// more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex E).
1993	BitstreamMode Eac3BitstreamMode
1994
1995	// Dolby Digital Plus coding mode. Determines number of channels.
1996	CodingMode Eac3CodingMode
1997
1998	// Activates a DC highpass filter for all input channels.
1999	DcFilter Eac3DcFilter
2000
2001	// Sets the dialnorm for the output. If blank and input audio is Dolby Digital
2002	// Plus, dialnorm will be passed through.
2003	Dialnorm int32
2004
2005	// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
2006	// uses when encoding the metadata in the Dolby Digital stream for the line
2007	// operating mode. Related setting: When you use this setting, MediaConvert ignores
2008	// any value you provide for Dynamic range compression profile
2009	// (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC
2010	// operating modes and profiles, see the Dynamic Range Control chapter of the Dolby
2011	// Metadata Guide at
2012	// https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
2013	DynamicRangeCompressionLine Eac3DynamicRangeCompressionLine
2014
2015	// Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert
2016	// uses when encoding the metadata in the Dolby Digital stream for the RF operating
2017	// mode. Related setting: When you use this setting, MediaConvert ignores any value
2018	// you provide for Dynamic range compression profile
2019	// (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC
2020	// operating modes and profiles, see the Dynamic Range Control chapter of the Dolby
2021	// Metadata Guide at
2022	// https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
2023	DynamicRangeCompressionRf Eac3DynamicRangeCompressionRf
2024
2025	// When encoding 3/2 audio, controls whether the LFE channel is enabled
2026	LfeControl Eac3LfeControl
2027
2028	// Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only valid
2029	// with 3_2_LFE coding mode.
2030	LfeFilter Eac3LfeFilter
2031
2032	// Specify a value for the following Dolby Digital Plus setting: Left only/Right
2033	// only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. How
2034	// the service uses this value depends on the value that you choose for Stereo
2035	// downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5,
2036	// -6.0, and -60. The value -60 mutes the channel. This setting applies only if you
2037	// keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the
2038	// setting Coding mode (Eac3CodingMode). If you choose a different value for Coding
2039	// mode, the service ignores Left only/Right only center (loRoCenterMixLevel).
2040	LoRoCenterMixLevel float64
2041
2042	// Specify a value for the following Dolby Digital Plus setting: Left only/Right
2043	// only (Lo/Ro surround). MediaConvert uses this value for downmixing. How the
2044	// service uses this value depends on the value that you choose for Stereo downmix
2045	// (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value
2046	// -60 mutes the channel. This setting applies only if you keep the default value
2047	// of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode
2048	// (Eac3CodingMode). If you choose a different value for Coding mode, the service
2049	// ignores Left only/Right only surround (loRoSurroundMixLevel).
2050	LoRoSurroundMixLevel float64
2051
2052	// Specify a value for the following Dolby Digital Plus setting: Left total/Right
2053	// total center mix (Lt/Rt center). MediaConvert uses this value for downmixing.
2054	// How the service uses this value depends on the value that you choose for Stereo
2055	// downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5,
2056	// -6.0, and -60. The value -60 mutes the channel. This setting applies only if you
2057	// keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the
2058	// setting Coding mode (Eac3CodingMode). If you choose a different value for Coding
2059	// mode, the service ignores Left total/Right total center (ltRtCenterMixLevel).
2060	LtRtCenterMixLevel float64
2061
2062	// Specify a value for the following Dolby Digital Plus setting: Left total/Right
2063	// total surround mix (Lt/Rt surround). MediaConvert uses this value for
2064	// downmixing. How the service uses this value depends on the value that you choose
2065	// for Stereo downmix (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0,
2066	// and -60. The value -60 mutes the channel. This setting applies only if you keep
2067	// the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting
2068	// Coding mode (Eac3CodingMode). If you choose a different value for Coding mode,
2069	// the service ignores Left total/Right total surround (ltRtSurroundMixLevel).
2070	LtRtSurroundMixLevel float64
2071
2072	// When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+, or
2073	// DolbyE decoder that supplied this audio data. If audio was not supplied from one
2074	// of these streams, then the static metadata settings will be used.
2075	MetadataControl Eac3MetadataControl
2076
2077	// When set to WHEN_POSSIBLE, input DD+ audio will be passed through if it is
2078	// present on the input. this detection is dynamic over the life of the transcode.
2079	// Inputs that alternate between DD+ and non-DD+ content will have a consistent DD+
2080	// output as the system alternates between passthrough and encoding.
2081	PassthroughControl Eac3PassthroughControl
2082
2083	// Controls the amount of phase-shift applied to the surround channels. Only used
2084	// for 3/2 coding mode.
2085	PhaseControl Eac3PhaseControl
2086
2087	// This value is always 48000. It represents the sample rate in Hz.
2088	SampleRate int32
2089
2090	// Choose how the service does stereo downmixing. This setting only applies if you
2091	// keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the
2092	// setting Coding mode (Eac3CodingMode). If you choose a different value for Coding
2093	// mode, the service ignores Stereo downmix (Eac3StereoDownmix).
2094	StereoDownmix Eac3StereoDownmix
2095
2096	// When encoding 3/2 audio, sets whether an extra center back surround channel is
2097	// matrix encoded into the left and right surround channels.
2098	SurroundExMode Eac3SurroundExMode
2099
2100	// When encoding 2/0 audio, sets whether Dolby Surround is matrix encoded into the
2101	// two channels.
2102	SurroundMode Eac3SurroundMode
2103}
2104
2105// Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or
2106// ancillary) captions. Set up embedded captions in the same output as your video.
2107// For more information, see
2108// https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html.
2109// When you work directly in your JSON job specification, include this object and
2110// any required children when you set destinationType to EMBEDDED,
2111// EMBEDDED_PLUS_SCTE20, or SCTE20_PLUS_EMBEDDED.
2112type EmbeddedDestinationSettings struct {
2113
2114	// Ignore this setting unless your input captions are SCC format and your output
2115	// captions are embedded in the video stream. Specify a CC number for each captions
2116	// channel in this output. If you have two channels, choose CC numbers that aren't
2117	// in the same field. For example, choose 1 and 3. For more information, see
2118	// https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded.
2119	Destination608ChannelNumber int32
2120
2121	// Ignore this setting unless your input captions are SCC format and you want both
2122	// 608 and 708 captions embedded in your output stream. Optionally, specify the 708
2123	// service number for each output captions channel. Choose a different number for
2124	// each channel. To use this setting, also set Force 608 to 708 upconvert
2125	// (Convert608To708) to Upconvert (UPCONVERT) in your input captions selector
2126	// settings. If you choose to upconvert but don't specify a 708 service number,
2127	// MediaConvert uses the number that you specify for CC channel number
2128	// (destination608ChannelNumber) for the 708 service number. For more information,
2129	// see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded.
2130	Destination708ServiceNumber int32
2131}
2132
2133// Settings for embedded captions Source
2134type EmbeddedSourceSettings struct {
2135
2136	// Specify whether this set of input captions appears in your outputs in both 608
2137	// and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the
2138	// captions data in two ways: it passes the 608 data through using the 608
2139	// compatibility bytes fields of the 708 wrapper, and it also translates the 608
2140	// data into 708.
2141	Convert608To708 EmbeddedConvert608To708
2142
2143	// Specifies the 608/708 channel number within the video track from which to
2144	// extract captions. Unused for passthrough.
2145	Source608ChannelNumber int32
2146
2147	// Specifies the video track index used for extracting captions. The system only
2148	// supports one input video track, so this should always be set to '1'.
2149	Source608TrackNumber int32
2150
2151	// By default, the service terminates any unterminated captions at the end of each
2152	// input. If you want the caption to continue onto your next input, disable this
2153	// setting.
2154	TerminateCaptions EmbeddedTerminateCaptions
2155}
2156
2157// Describes an account-specific API endpoint.
2158type Endpoint struct {
2159
2160	// URL of endpoint
2161	Url *string
2162}
2163
2164// ESAM ManifestConfirmConditionNotification defined by OC-SP-ESAM-API-I03-131025.
2165type EsamManifestConfirmConditionNotification struct {
2166
2167	// Provide your ESAM ManifestConfirmConditionNotification XML document inside your
2168	// JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The
2169	// transcoder will use the Manifest Conditioning instructions in the message that
2170	// you supply.
2171	MccXml *string
2172}
2173
2174// Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion,
2175// you can ignore these settings.
2176type EsamSettings struct {
2177
2178	// Specifies an ESAM ManifestConfirmConditionNotification XML as per
2179	// OC-SP-ESAM-API-I03-131025. The transcoder uses the manifest conditioning
2180	// instructions that you provide in the setting MCC XML (mccXml).
2181	ManifestConfirmConditionNotification *EsamManifestConfirmConditionNotification
2182
2183	// Specifies the stream distance, in milliseconds, between the SCTE 35 messages
2184	// that the transcoder places and the splice points that they refer to. If the time
2185	// between the start of the asset and the SCTE-35 message is less than this value,
2186	// then the transcoder places the SCTE-35 marker at the beginning of the stream.
2187	ResponseSignalPreroll int32
2188
2189	// Specifies an ESAM SignalProcessingNotification XML as per
2190	// OC-SP-ESAM-API-I03-131025. The transcoder uses the signal processing
2191	// instructions that you provide in the setting SCC XML (sccXml).
2192	SignalProcessingNotification *EsamSignalProcessingNotification
2193}
2194
2195// ESAM SignalProcessingNotification data defined by OC-SP-ESAM-API-I03-131025.
2196type EsamSignalProcessingNotification struct {
2197
2198	// Provide your ESAM SignalProcessingNotification XML document inside your JSON job
2199	// settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The transcoder
2200	// will use the signal processing instructions in the message that you supply.
2201	// Provide your ESAM SignalProcessingNotification XML document inside your JSON job
2202	// settings. For your MPEG2-TS file outputs, if you want the service to place
2203	// SCTE-35 markers at the insertion points you specify in the XML document, you
2204	// must also enable SCTE-35 ESAM (scte35Esam). Note that you can either specify an
2205	// ESAM XML document or enable SCTE-35 passthrough. You can't do both.
2206	SccXml *string
2207}
2208
2209// Settings for F4v container
2210type F4vSettings struct {
2211
2212	// If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning of
2213	// the archive as required for progressive downloading. Otherwise it is placed
2214	// normally at the end.
2215	MoovPlacement F4vMoovPlacement
2216}
2217
2218// Settings related to your File output group. MediaConvert uses this group of
2219// settings to generate a single standalone file, rather than a streaming package.
2220// When you work directly in your JSON job specification, include this object and
2221// any required children when you set Type, under OutputGroupSettings, to
2222// FILE_GROUP_SETTINGS.
2223type FileGroupSettings struct {
2224
2225	// Use Destination (Destination) to specify the S3 output location and the output
2226	// filename base. Destination accepts format identifiers. If you do not specify the
2227	// base filename in the URI, the service will use the filename of the input file.
2228	// If your job has multiple inputs, the service uses the filename of the first
2229	// input file.
2230	Destination *string
2231
2232	// Settings associated with the destination. Will vary based on the type of
2233	// destination
2234	DestinationSettings *DestinationSettings
2235}
2236
2237// If your input captions are SCC, SMI, SRT, STL, TTML, WebVTT, or IMSC 1.1 in an
2238// xml file, specify the URI of the input caption source file. If your caption
2239// source is IMSC in an IMF package, use TrackSourceSettings instead of
2240// FileSoureSettings.
2241type FileSourceSettings struct {
2242
2243	// Specify whether this set of input captions appears in your outputs in both 608
2244	// and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the
2245	// captions data in two ways: it passes the 608 data through using the 608
2246	// compatibility bytes fields of the 708 wrapper, and it also translates the 608
2247	// data into 708.
2248	Convert608To708 FileSourceConvert608To708
2249
2250	// Ignore this setting unless your input captions format is SCC. To have the
2251	// service compensate for differing frame rates between your input captions and
2252	// input video, specify the frame rate of the captions file. Specify this value as
2253	// a fraction, using the settings Framerate numerator (framerateNumerator) and
2254	// Framerate denominator (framerateDenominator). For example, you might specify 24
2255	// / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001
2256	// for 29.97 fps.
2257	Framerate *CaptionSourceFramerate
2258
2259	// External caption file used for loading captions. Accepted file extensions are
2260	// 'scc', 'ttml', 'dfxp', 'stl', 'srt', 'xml', 'smi', and 'vtt'.
2261	SourceFile *string
2262
2263	// Specifies a time delta in seconds to offset the captions from the source file.
2264	TimeDelta int32
2265}
2266
2267// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the
2268// value FRAME_CAPTURE.
2269type FrameCaptureSettings struct {
2270
2271	// Frame capture will encode the first frame of the output stream, then one frame
2272	// every framerateDenominator/framerateNumerator seconds. For example, settings of
2273	// framerateNumerator = 1 and framerateDenominator = 3 (a rate of 1/3 frame per
2274	// second) will capture the first frame, then 1 frame every 3s. Files will be named
2275	// as filename.n.jpg where n is the 0-based sequence number of each Capture.
2276	FramerateDenominator int32
2277
2278	// Frame capture will encode the first frame of the output stream, then one frame
2279	// every framerateDenominator/framerateNumerator seconds. For example, settings of
2280	// framerateNumerator = 1 and framerateDenominator = 3 (a rate of 1/3 frame per
2281	// second) will capture the first frame, then 1 frame every 3s. Files will be named
2282	// as filename.NNNNNNN.jpg where N is the 0-based frame sequence number zero padded
2283	// to 7 decimal places.
2284	FramerateNumerator int32
2285
2286	// Maximum number of captures (encoded jpg output files).
2287	MaxCaptures int32
2288
2289	// JPEG Quality - a higher value equals higher quality.
2290	Quality int32
2291}
2292
2293// Settings for quality-defined variable bitrate encoding with the H.264 codec.
2294// Required when you set Rate control mode to QVBR. Not valid when you set Rate
2295// control mode to a value other than QVBR, or when you don't define Rate control
2296// mode.
2297type H264QvbrSettings struct {
2298
2299	// Use this setting only when Rate control mode is QVBR and Quality tuning level is
2300	// Multi-pass HQ. For Max average bitrate values suited to the complexity of your
2301	// input video, the service limits the average bitrate of the video part of this
2302	// output to the value that you choose. That is, the total size of the video
2303	// element is less than or equal to the value you set multiplied by the number of
2304	// seconds of encoded output.
2305	MaxAverageBitrate int32
2306
2307	// Required when you use QVBR rate control mode. That is, when you specify
2308	// qvbrSettings within h264Settings. Specify the general target quality level for
2309	// this output, from 1 to 10. Use higher numbers for greater quality. Level 10
2310	// results in nearly lossless compression. The quality level for most
2311	// broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value
2312	// between whole numbers, also provide a value for the setting
2313	// qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be
2314	// 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.
2315	QvbrQualityLevel int32
2316
2317	// Optional. Specify a value here to set the QVBR quality to a level that is
2318	// between whole numbers. For example, if you want your QVBR quality level to be
2319	// 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.
2320	// MediaConvert rounds your QVBR quality level to the nearest third of a whole
2321	// number. For example, if you set qvbrQualityLevel to 7 and you set
2322	// qvbrQualityLevelFineTune to .25, your actual QVBR quality level is 7.33.
2323	QvbrQualityLevelFineTune float64
2324}
2325
2326// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the
2327// value H_264.
2328type H264Settings struct {
2329
2330	// Keep the default value, Auto (AUTO), for this setting to have MediaConvert
2331	// automatically apply the best types of quantization for your video content. When
2332	// you want to apply your quantization settings manually, you must set
2333	// H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting to
2334	// specify the strength of any adaptive quantization filters that you enable. If
2335	// you don't want MediaConvert to do any adaptive quantization in this transcode,
2336	// set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF). Related
2337	// settings: The value that you choose here applies to the following settings:
2338	// H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and
2339	// H264TemporalAdaptiveQuantization.
2340	AdaptiveQuantization H264AdaptiveQuantization
2341
2342	// Specify the average bitrate in bits per second. Required for VBR and CBR. For MS
2343	// Smooth outputs, bitrates must be unique when rounded down to the nearest
2344	// multiple of 1000.
2345	Bitrate int32
2346
2347	// Specify an H.264 level that is consistent with your output video settings. If
2348	// you aren't sure what level to specify, choose Auto (AUTO).
2349	CodecLevel H264CodecLevel
2350
2351	// H.264 Profile. High 4:2:2 and 10-bit profiles are only available with the AVC-I
2352	// License.
2353	CodecProfile H264CodecProfile
2354
2355	// Choose Adaptive to improve subjective video quality for high-motion content.
2356	// This will cause the service to use fewer B-frames (which infer information based
2357	// on other frames) for high-motion portions of the video and more B-frames for
2358	// low-motion portions. The maximum number of B-frames is limited by the value you
2359	// provide for the setting B frames between reference frames
2360	// (numberBFramesBetweenReferenceFrames).
2361	DynamicSubGop H264DynamicSubGop
2362
2363	// Entropy encoding mode. Use CABAC (must be in Main or High profile) or CAVLC.
2364	EntropyEncoding H264EntropyEncoding
2365
2366	// Keep the default value, PAFF, to have MediaConvert use PAFF encoding for
2367	// interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding
2368	// and create separate interlaced fields.
2369	FieldEncoding H264FieldEncoding
2370
2371	// Only use this setting when you change the default value, AUTO, for the setting
2372	// H264AdaptiveQuantization. When you keep all defaults, excluding
2373	// H264AdaptiveQuantization and all other adaptive quantization from your JSON job
2374	// specification, MediaConvert automatically applies the best types of quantization
2375	// for your video content. When you set H264AdaptiveQuantization to a value other
2376	// than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled
2377	// (DISABLED). Change this value to Enabled (ENABLED) to reduce I-frame pop.
2378	// I-frame pop appears as a visual flicker that can arise when the encoder saves
2379	// bits by copying some macroblocks many times from frame to frame, and then
2380	// refreshes them at the I-frame. When you enable this setting, the encoder updates
2381	// these macroblocks slightly more often to smooth out the flicker. To manually
2382	// enable or disable H264FlickerAdaptiveQuantization, you must set Adaptive
2383	// quantization (H264AdaptiveQuantization) to a value other than AUTO.
2384	FlickerAdaptiveQuantization H264FlickerAdaptiveQuantization
2385
2386	// If you are using the console, use the Framerate setting to specify the frame
2387	// rate for this output. If you want to keep the same frame rate as the input
2388	// video, choose Follow source. If you want to do frame rate conversion, choose a
2389	// frame rate from the dropdown list or choose Custom. The framerates shown in the
2390	// dropdown list are decimal approximations of fractions. If you choose Custom,
2391	// specify your frame rate as a fraction. If you are creating your transcoding job
2392	// specification as a JSON file without the console, use FramerateControl to
2393	// specify which value the service uses for the frame rate for this output. Choose
2394	// INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the
2395	// input. Choose SPECIFIED if you want the service to use the frame rate you
2396	// specify in the settings FramerateNumerator and FramerateDenominator.
2397	FramerateControl H264FramerateControl
2398
2399	// Choose the method that you want MediaConvert to use when increasing or
2400	// decreasing the frame rate. We recommend using drop duplicate (DUPLICATE_DROP)
2401	// for numerically simple conversions, such as 60 fps to 30 fps. For numerically
2402	// complex conversions, you can use interpolate (INTERPOLATE) to avoid stutter.
2403	// This results in a smooth picture, but might introduce undesirable video
2404	// artifacts. For complex frame rate conversions, especially if your source video
2405	// has already been converted from its original cadence, use FrameFormer
2406	// (FRAMEFORMER) to do motion-compensated interpolation. FrameFormer chooses the
2407	// best conversion method frame by frame. Note that using FrameFormer increases the
2408	// transcoding time and incurs a significant add-on cost.
2409	FramerateConversionAlgorithm H264FramerateConversionAlgorithm
2410
2411	// When you use the API for transcode jobs that use frame rate conversion, specify
2412	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
2413	// FramerateDenominator to specify the denominator of this fraction. In this
2414	// example, use 1001 for the value of FramerateDenominator. When you use the
2415	// console for transcode jobs that use frame rate conversion, provide the value as
2416	// a decimal number for Framerate. In this example, specify 23.976.
2417	FramerateDenominator int32
2418
2419	// When you use the API for transcode jobs that use frame rate conversion, specify
2420	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
2421	// FramerateNumerator to specify the numerator of this fraction. In this example,
2422	// use 24000 for the value of FramerateNumerator. When you use the console for
2423	// transcode jobs that use frame rate conversion, provide the value as a decimal
2424	// number for Framerate. In this example, specify 23.976.
2425	FramerateNumerator int32
2426
2427	// If enable, use reference B frames for GOP structures that have B frames > 1.
2428	GopBReference H264GopBReference
2429
2430	// Frequency of closed GOPs. In streaming applications, it is recommended that this
2431	// be set to 1 so a decoder joining mid-stream will receive an IDR frame as quickly
2432	// as possible. Setting this value to 0 will break output segmenting.
2433	GopClosedCadence int32
2434
2435	// GOP Length (keyframe interval) in frames or seconds. Must be greater than zero.
2436	GopSize float64
2437
2438	// Indicates if the GOP Size in H264 is specified in frames or seconds. If seconds
2439	// the system will convert the GOP Size into a frame count at run time.
2440	GopSizeUnits H264GopSizeUnits
2441
2442	// Percentage of the buffer that should initially be filled (HRD buffer model).
2443	HrdBufferInitialFillPercentage int32
2444
2445	// Size of buffer (HRD buffer model) in bits. For example, enter five megabits as
2446	// 5000000.
2447	HrdBufferSize int32
2448
2449	// Choose the scan line type for the output. Keep the default value, Progressive
2450	// (PROGRESSIVE) to create a progressive output, regardless of the scan type of
2451	// your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
2452	// to create an output that's interlaced with the same field polarity throughout.
2453	// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom
2454	// (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the
2455	// source. For jobs that have multiple inputs, the output field polarity might
2456	// change over the course of the output. Follow behavior depends on the input scan
2457	// type. If the source is interlaced, the output will be interlaced with the same
2458	// polarity as the source. If the source is progressive, the output will be
2459	// interlaced with top field bottom field first, depending on which of the Follow
2460	// options you choose.
2461	InterlaceMode H264InterlaceMode
2462
2463	// Maximum bitrate in bits/second. For example, enter five megabits per second as
2464	// 5000000. Required when Rate control mode is QVBR.
2465	MaxBitrate int32
2466
2467	// Enforces separation between repeated (cadence) I-frames and I-frames inserted by
2468	// Scene Change Detection. If a scene change I-frame is within I-interval frames of
2469	// a cadence I-frame, the GOP is shrunk and/or stretched to the scene change
2470	// I-frame. GOP stretch requires enabling lookahead as well as setting I-interval.
2471	// The normal cadence resumes for the next GOP. This setting is only used when
2472	// Scene Change Detect is enabled. Note: Maximum GOP stretch = GOP size +
2473	// Min-I-interval - 1
2474	MinIInterval int32
2475
2476	// Number of B-frames between reference frames.
2477	NumberBFramesBetweenReferenceFrames int32
2478
2479	// Number of reference frames to use. The encoder may use more than requested if
2480	// using B-frames and/or interlaced encoding.
2481	NumberReferenceFrames int32
2482
2483	// Optional. Specify how the service determines the pixel aspect ratio (PAR) for
2484	// this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses
2485	// the PAR from your input video for your output. To specify a different PAR in the
2486	// console, choose any value other than Follow source. To specify a different PAR
2487	// by editing the JSON job specification, choose SPECIFIED. When you choose
2488	// SPECIFIED for this setting, you must also specify values for the parNumerator
2489	// and parDenominator settings.
2490	ParControl H264ParControl
2491
2492	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
2493	// console, this corresponds to any value other than Follow source. When you
2494	// specify an output pixel aspect ratio (PAR) that is different from your input
2495	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
2496	// widescreen, you would specify the ratio 40:33. In this example, the value for
2497	// parDenominator is 33.
2498	ParDenominator int32
2499
2500	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
2501	// console, this corresponds to any value other than Follow source. When you
2502	// specify an output pixel aspect ratio (PAR) that is different from your input
2503	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
2504	// widescreen, you would specify the ratio 40:33. In this example, the value for
2505	// parNumerator is 40.
2506	ParNumerator int32
2507
2508	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want
2509	// to trade off encoding speed for output video quality. The default behavior is
2510	// faster, lower quality, single-pass encoding.
2511	QualityTuningLevel H264QualityTuningLevel
2512
2513	// Settings for quality-defined variable bitrate encoding with the H.264 codec.
2514	// Required when you set Rate control mode to QVBR. Not valid when you set Rate
2515	// control mode to a value other than QVBR, or when you don't define Rate control
2516	// mode.
2517	QvbrSettings *H264QvbrSettings
2518
2519	// Use this setting to specify whether this output has a variable bitrate (VBR),
2520	// constant bitrate (CBR) or quality-defined variable bitrate (QVBR).
2521	RateControlMode H264RateControlMode
2522
2523	// Places a PPS header on each encoded picture, even if repeated.
2524	RepeatPps H264RepeatPps
2525
2526	// Use this setting for interlaced outputs, when your output frame rate is half of
2527	// your input frame rate. In this situation, choose Optimized interlacing
2528	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
2529	// case, each progressive frame from the input corresponds to an interlaced field
2530	// in the output. Keep the default value, Basic interlacing (INTERLACED), for all
2531	// other output frame rates. With basic interlacing, MediaConvert performs any
2532	// frame rate conversion first and then interlaces the frames. When you choose
2533	// Optimized interlacing and you set your output frame rate to a value that isn't
2534	// suitable for optimized interlacing, MediaConvert automatically falls back to
2535	// basic interlacing. Required settings: To use optimized interlacing, you must set
2536	// Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized
2537	// interlacing for hard telecine outputs. You must also set Interlace mode
2538	// (interlaceMode) to a value other than Progressive (PROGRESSIVE).
2539	ScanTypeConversionMode H264ScanTypeConversionMode
2540
2541	// Enable this setting to insert I-frames at scene changes that the service
2542	// automatically detects. This improves video quality and is enabled by default. If
2543	// this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for
2544	// further video quality improvement. For more information about QVBR, see
2545	// https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.
2546	SceneChangeDetect H264SceneChangeDetect
2547
2548	// Number of slices per picture. Must be less than or equal to the number of
2549	// macroblock rows for progressive pictures, and less than or equal to half the
2550	// number of macroblock rows for interlaced pictures.
2551	Slices int32
2552
2553	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
2554	// second (fps). Enable slow PAL to create a 25 fps output. When you enable slow
2555	// PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio
2556	// to keep it synchronized with the video. Note that enabling this setting will
2557	// slightly reduce the duration of your video. Required settings: You must also set
2558	// Framerate to 25. In your JSON job specification, set (framerateControl) to
2559	// (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.
2560	SlowPal H264SlowPal
2561
2562	// Ignore this setting unless you need to comply with a specification that requires
2563	// a specific value. If you don't have a specification requirement, we recommend
2564	// that you adjust the softness of your output by using a lower value for the
2565	// setting Sharpness (sharpness) or by enabling a noise reducer filter
2566	// (noiseReducerFilter). The Softness (softness) setting specifies the quantization
2567	// matrices that the encoder uses. Keep the default value, 0, for flat
2568	// quantization. Choose the value 1 or 16 to use the default JVT softening
2569	// quantization matricies from the H.264 specification. Choose a value from 17 to
2570	// 128 to use planar interpolation. Increasing values from 17 to 128 result in
2571	// increasing reduction of high-frequency data. The value 128 results in the
2572	// softest video.
2573	Softness int32
2574
2575	// Only use this setting when you change the default value, Auto (AUTO), for the
2576	// setting H264AdaptiveQuantization. When you keep all defaults, excluding
2577	// H264AdaptiveQuantization and all other adaptive quantization from your JSON job
2578	// specification, MediaConvert automatically applies the best types of quantization
2579	// for your video content. When you set H264AdaptiveQuantization to a value other
2580	// than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled
2581	// (ENABLED). Keep this default value to adjust quantization within each frame
2582	// based on spatial variation of content complexity. When you enable this feature,
2583	// the encoder uses fewer bits on areas that can sustain more distortion with no
2584	// noticeable visual degradation and uses more bits on areas where any small
2585	// distortion will be noticeable. For example, complex textured blocks are encoded
2586	// with fewer bits and smooth textured blocks are encoded with more bits. Enabling
2587	// this feature will almost always improve your video quality. Note, though, that
2588	// this feature doesn't take into account where the viewer's attention is likely to
2589	// be. If viewers are likely to be focusing their attention on a part of the screen
2590	// with a lot of complex texture, you might choose to set
2591	// H264SpatialAdaptiveQuantization to Disabled (DISABLED). Related setting: When
2592	// you enable spatial adaptive quantization, set the value for Adaptive
2593	// quantization (H264AdaptiveQuantization) depending on your content. For
2594	// homogeneous content, such as cartoons and video games, set it to Low. For
2595	// content with a wider variety of textures, set it to High or Higher. To manually
2596	// enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive
2597	// quantization (H264AdaptiveQuantization) to a value other than AUTO.
2598	SpatialAdaptiveQuantization H264SpatialAdaptiveQuantization
2599
2600	// Produces a bitstream compliant with SMPTE RP-2027.
2601	Syntax H264Syntax
2602
2603	// When you do frame rate conversion from 23.976 frames per second (fps) to 29.97
2604	// fps, and your output scan type is interlaced, you can optionally enable hard or
2605	// soft telecine to create a smoother picture. Hard telecine (HARD) produces a
2606	// 29.97i output. Soft telecine (SOFT) produces an output with a 23.976 output that
2607	// signals to the video player device to do the conversion during play back. When
2608	// you keep the default value, None (NONE), MediaConvert does a standard frame rate
2609	// conversion to 29.97 without doing anything with the field polarity to create a
2610	// smoother picture.
2611	Telecine H264Telecine
2612
2613	// Only use this setting when you change the default value, AUTO, for the setting
2614	// H264AdaptiveQuantization. When you keep all defaults, excluding
2615	// H264AdaptiveQuantization and all other adaptive quantization from your JSON job
2616	// specification, MediaConvert automatically applies the best types of quantization
2617	// for your video content. When you set H264AdaptiveQuantization to a value other
2618	// than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled
2619	// (ENABLED). Keep this default value to adjust quantization within each frame
2620	// based on temporal variation of content complexity. When you enable this feature,
2621	// the encoder uses fewer bits on areas of the frame that aren't moving and uses
2622	// more bits on complex objects with sharp edges that move a lot. For example, this
2623	// feature improves the readability of text tickers on newscasts and scoreboards on
2624	// sports matches. Enabling this feature will almost always improve your video
2625	// quality. Note, though, that this feature doesn't take into account where the
2626	// viewer's attention is likely to be. If viewers are likely to be focusing their
2627	// attention on a part of the screen that doesn't have moving objects with sharp
2628	// edges, such as sports athletes' faces, you might choose to set
2629	// H264TemporalAdaptiveQuantization to Disabled (DISABLED). Related setting: When
2630	// you enable temporal quantization, adjust the strength of the filter with the
2631	// setting Adaptive quantization (adaptiveQuantization). To manually enable or
2632	// disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization
2633	// (H264AdaptiveQuantization) to a value other than AUTO.
2634	TemporalAdaptiveQuantization H264TemporalAdaptiveQuantization
2635
2636	// Inserts timecode for each frame as 4 bytes of an unregistered SEI message.
2637	UnregisteredSeiTimecode H264UnregisteredSeiTimecode
2638}
2639
2640// Settings for quality-defined variable bitrate encoding with the H.265 codec.
2641// Required when you set Rate control mode to QVBR. Not valid when you set Rate
2642// control mode to a value other than QVBR, or when you don't define Rate control
2643// mode.
2644type H265QvbrSettings struct {
2645
2646	// Use this setting only when Rate control mode is QVBR and Quality tuning level is
2647	// Multi-pass HQ. For Max average bitrate values suited to the complexity of your
2648	// input video, the service limits the average bitrate of the video part of this
2649	// output to the value that you choose. That is, the total size of the video
2650	// element is less than or equal to the value you set multiplied by the number of
2651	// seconds of encoded output.
2652	MaxAverageBitrate int32
2653
2654	// Required when you use QVBR rate control mode. That is, when you specify
2655	// qvbrSettings within h265Settings. Specify the general target quality level for
2656	// this output, from 1 to 10. Use higher numbers for greater quality. Level 10
2657	// results in nearly lossless compression. The quality level for most
2658	// broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value
2659	// between whole numbers, also provide a value for the setting
2660	// qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be
2661	// 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.
2662	QvbrQualityLevel int32
2663
2664	// Optional. Specify a value here to set the QVBR quality to a level that is
2665	// between whole numbers. For example, if you want your QVBR quality level to be
2666	// 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33.
2667	// MediaConvert rounds your QVBR quality level to the nearest third of a whole
2668	// number. For example, if you set qvbrQualityLevel to 7 and you set
2669	// qvbrQualityLevelFineTune to .25, your actual QVBR quality level is 7.33.
2670	QvbrQualityLevelFineTune float64
2671}
2672
2673// Settings for H265 codec
2674type H265Settings struct {
2675
2676	// Specify the strength of any adaptive quantization filters that you enable. The
2677	// value that you choose here applies to the following settings: Flicker adaptive
2678	// quantization (flickerAdaptiveQuantization), Spatial adaptive quantization
2679	// (spatialAdaptiveQuantization), and Temporal adaptive quantization
2680	// (temporalAdaptiveQuantization).
2681	AdaptiveQuantization H265AdaptiveQuantization
2682
2683	// Enables Alternate Transfer Function SEI message for outputs using Hybrid Log
2684	// Gamma (HLG) Electro-Optical Transfer Function (EOTF).
2685	AlternateTransferFunctionSei H265AlternateTransferFunctionSei
2686
2687	// Specify the average bitrate in bits per second. Required for VBR and CBR. For MS
2688	// Smooth outputs, bitrates must be unique when rounded down to the nearest
2689	// multiple of 1000.
2690	Bitrate int32
2691
2692	// H.265 Level.
2693	CodecLevel H265CodecLevel
2694
2695	// Represents the Profile and Tier, per the HEVC (H.265) specification. Selections
2696	// are grouped as [Profile] / [Tier], so "Main/High" represents Main Profile with
2697	// High Tier. 4:2:2 profiles are only available with the HEVC 4:2:2 License.
2698	CodecProfile H265CodecProfile
2699
2700	// Choose Adaptive to improve subjective video quality for high-motion content.
2701	// This will cause the service to use fewer B-frames (which infer information based
2702	// on other frames) for high-motion portions of the video and more B-frames for
2703	// low-motion portions. The maximum number of B-frames is limited by the value you
2704	// provide for the setting B frames between reference frames
2705	// (numberBFramesBetweenReferenceFrames).
2706	DynamicSubGop H265DynamicSubGop
2707
2708	// Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears
2709	// as a visual flicker that can arise when the encoder saves bits by copying some
2710	// macroblocks many times from frame to frame, and then refreshes them at the
2711	// I-frame. When you enable this setting, the encoder updates these macroblocks
2712	// slightly more often to smooth out the flicker. This setting is disabled by
2713	// default. Related setting: In addition to enabling this setting, you must also
2714	// set adaptiveQuantization to a value other than Off (OFF).
2715	FlickerAdaptiveQuantization H265FlickerAdaptiveQuantization
2716
2717	// If you are using the console, use the Framerate setting to specify the frame
2718	// rate for this output. If you want to keep the same frame rate as the input
2719	// video, choose Follow source. If you want to do frame rate conversion, choose a
2720	// frame rate from the dropdown list or choose Custom. The framerates shown in the
2721	// dropdown list are decimal approximations of fractions. If you choose Custom,
2722	// specify your frame rate as a fraction. If you are creating your transcoding job
2723	// specification as a JSON file without the console, use FramerateControl to
2724	// specify which value the service uses for the frame rate for this output. Choose
2725	// INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the
2726	// input. Choose SPECIFIED if you want the service to use the frame rate you
2727	// specify in the settings FramerateNumerator and FramerateDenominator.
2728	FramerateControl H265FramerateControl
2729
2730	// Choose the method that you want MediaConvert to use when increasing or
2731	// decreasing the frame rate. We recommend using drop duplicate (DUPLICATE_DROP)
2732	// for numerically simple conversions, such as 60 fps to 30 fps. For numerically
2733	// complex conversions, you can use interpolate (INTERPOLATE) to avoid stutter.
2734	// This results in a smooth picture, but might introduce undesirable video
2735	// artifacts. For complex frame rate conversions, especially if your source video
2736	// has already been converted from its original cadence, use FrameFormer
2737	// (FRAMEFORMER) to do motion-compensated interpolation. FrameFormer chooses the
2738	// best conversion method frame by frame. Note that using FrameFormer increases the
2739	// transcoding time and incurs a significant add-on cost.
2740	FramerateConversionAlgorithm H265FramerateConversionAlgorithm
2741
2742	// When you use the API for transcode jobs that use frame rate conversion, specify
2743	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
2744	// FramerateDenominator to specify the denominator of this fraction. In this
2745	// example, use 1001 for the value of FramerateDenominator. When you use the
2746	// console for transcode jobs that use frame rate conversion, provide the value as
2747	// a decimal number for Framerate. In this example, specify 23.976.
2748	FramerateDenominator int32
2749
2750	// When you use the API for transcode jobs that use frame rate conversion, specify
2751	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
2752	// FramerateNumerator to specify the numerator of this fraction. In this example,
2753	// use 24000 for the value of FramerateNumerator. When you use the console for
2754	// transcode jobs that use frame rate conversion, provide the value as a decimal
2755	// number for Framerate. In this example, specify 23.976.
2756	FramerateNumerator int32
2757
2758	// If enable, use reference B frames for GOP structures that have B frames > 1.
2759	GopBReference H265GopBReference
2760
2761	// Frequency of closed GOPs. In streaming applications, it is recommended that this
2762	// be set to 1 so a decoder joining mid-stream will receive an IDR frame as quickly
2763	// as possible. Setting this value to 0 will break output segmenting.
2764	GopClosedCadence int32
2765
2766	// GOP Length (keyframe interval) in frames or seconds. Must be greater than zero.
2767	GopSize float64
2768
2769	// Indicates if the GOP Size in H265 is specified in frames or seconds. If seconds
2770	// the system will convert the GOP Size into a frame count at run time.
2771	GopSizeUnits H265GopSizeUnits
2772
2773	// Percentage of the buffer that should initially be filled (HRD buffer model).
2774	HrdBufferInitialFillPercentage int32
2775
2776	// Size of buffer (HRD buffer model) in bits. For example, enter five megabits as
2777	// 5000000.
2778	HrdBufferSize int32
2779
2780	// Choose the scan line type for the output. Keep the default value, Progressive
2781	// (PROGRESSIVE) to create a progressive output, regardless of the scan type of
2782	// your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
2783	// to create an output that's interlaced with the same field polarity throughout.
2784	// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom
2785	// (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the
2786	// source. For jobs that have multiple inputs, the output field polarity might
2787	// change over the course of the output. Follow behavior depends on the input scan
2788	// type. If the source is interlaced, the output will be interlaced with the same
2789	// polarity as the source. If the source is progressive, the output will be
2790	// interlaced with top field bottom field first, depending on which of the Follow
2791	// options you choose.
2792	InterlaceMode H265InterlaceMode
2793
2794	// Maximum bitrate in bits/second. For example, enter five megabits per second as
2795	// 5000000. Required when Rate control mode is QVBR.
2796	MaxBitrate int32
2797
2798	// Enforces separation between repeated (cadence) I-frames and I-frames inserted by
2799	// Scene Change Detection. If a scene change I-frame is within I-interval frames of
2800	// a cadence I-frame, the GOP is shrunk and/or stretched to the scene change
2801	// I-frame. GOP stretch requires enabling lookahead as well as setting I-interval.
2802	// The normal cadence resumes for the next GOP. This setting is only used when
2803	// Scene Change Detect is enabled. Note: Maximum GOP stretch = GOP size +
2804	// Min-I-interval - 1
2805	MinIInterval int32
2806
2807	// Number of B-frames between reference frames.
2808	NumberBFramesBetweenReferenceFrames int32
2809
2810	// Number of reference frames to use. The encoder may use more than requested if
2811	// using B-frames and/or interlaced encoding.
2812	NumberReferenceFrames int32
2813
2814	// Optional. Specify how the service determines the pixel aspect ratio (PAR) for
2815	// this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses
2816	// the PAR from your input video for your output. To specify a different PAR in the
2817	// console, choose any value other than Follow source. To specify a different PAR
2818	// by editing the JSON job specification, choose SPECIFIED. When you choose
2819	// SPECIFIED for this setting, you must also specify values for the parNumerator
2820	// and parDenominator settings.
2821	ParControl H265ParControl
2822
2823	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
2824	// console, this corresponds to any value other than Follow source. When you
2825	// specify an output pixel aspect ratio (PAR) that is different from your input
2826	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
2827	// widescreen, you would specify the ratio 40:33. In this example, the value for
2828	// parDenominator is 33.
2829	ParDenominator int32
2830
2831	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
2832	// console, this corresponds to any value other than Follow source. When you
2833	// specify an output pixel aspect ratio (PAR) that is different from your input
2834	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
2835	// widescreen, you would specify the ratio 40:33. In this example, the value for
2836	// parNumerator is 40.
2837	ParNumerator int32
2838
2839	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want
2840	// to trade off encoding speed for output video quality. The default behavior is
2841	// faster, lower quality, single-pass encoding.
2842	QualityTuningLevel H265QualityTuningLevel
2843
2844	// Settings for quality-defined variable bitrate encoding with the H.265 codec.
2845	// Required when you set Rate control mode to QVBR. Not valid when you set Rate
2846	// control mode to a value other than QVBR, or when you don't define Rate control
2847	// mode.
2848	QvbrSettings *H265QvbrSettings
2849
2850	// Use this setting to specify whether this output has a variable bitrate (VBR),
2851	// constant bitrate (CBR) or quality-defined variable bitrate (QVBR).
2852	RateControlMode H265RateControlMode
2853
2854	// Specify Sample Adaptive Offset (SAO) filter strength. Adaptive mode dynamically
2855	// selects best strength based on content
2856	SampleAdaptiveOffsetFilterMode H265SampleAdaptiveOffsetFilterMode
2857
2858	// Use this setting for interlaced outputs, when your output frame rate is half of
2859	// your input frame rate. In this situation, choose Optimized interlacing
2860	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
2861	// case, each progressive frame from the input corresponds to an interlaced field
2862	// in the output. Keep the default value, Basic interlacing (INTERLACED), for all
2863	// other output frame rates. With basic interlacing, MediaConvert performs any
2864	// frame rate conversion first and then interlaces the frames. When you choose
2865	// Optimized interlacing and you set your output frame rate to a value that isn't
2866	// suitable for optimized interlacing, MediaConvert automatically falls back to
2867	// basic interlacing. Required settings: To use optimized interlacing, you must set
2868	// Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized
2869	// interlacing for hard telecine outputs. You must also set Interlace mode
2870	// (interlaceMode) to a value other than Progressive (PROGRESSIVE).
2871	ScanTypeConversionMode H265ScanTypeConversionMode
2872
2873	// Enable this setting to insert I-frames at scene changes that the service
2874	// automatically detects. This improves video quality and is enabled by default. If
2875	// this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for
2876	// further video quality improvement. For more information about QVBR, see
2877	// https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.
2878	SceneChangeDetect H265SceneChangeDetect
2879
2880	// Number of slices per picture. Must be less than or equal to the number of
2881	// macroblock rows for progressive pictures, and less than or equal to half the
2882	// number of macroblock rows for interlaced pictures.
2883	Slices int32
2884
2885	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
2886	// second (fps). Enable slow PAL to create a 25 fps output. When you enable slow
2887	// PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio
2888	// to keep it synchronized with the video. Note that enabling this setting will
2889	// slightly reduce the duration of your video. Required settings: You must also set
2890	// Framerate to 25. In your JSON job specification, set (framerateControl) to
2891	// (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.
2892	SlowPal H265SlowPal
2893
2894	// Keep the default value, Enabled (ENABLED), to adjust quantization within each
2895	// frame based on spatial variation of content complexity. When you enable this
2896	// feature, the encoder uses fewer bits on areas that can sustain more distortion
2897	// with no noticeable visual degradation and uses more bits on areas where any
2898	// small distortion will be noticeable. For example, complex textured blocks are
2899	// encoded with fewer bits and smooth textured blocks are encoded with more bits.
2900	// Enabling this feature will almost always improve your video quality. Note,
2901	// though, that this feature doesn't take into account where the viewer's attention
2902	// is likely to be. If viewers are likely to be focusing their attention on a part
2903	// of the screen with a lot of complex texture, you might choose to disable this
2904	// feature. Related setting: When you enable spatial adaptive quantization, set the
2905	// value for Adaptive quantization (adaptiveQuantization) depending on your
2906	// content. For homogeneous content, such as cartoons and video games, set it to
2907	// Low. For content with a wider variety of textures, set it to High or Higher.
2908	SpatialAdaptiveQuantization H265SpatialAdaptiveQuantization
2909
2910	// This field applies only if the Streams > Advanced > Framerate (framerate) field
2911	// is set to 29.970. This field works with the Streams > Advanced > Preprocessors >
2912	// Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced
2913	// Mode field (interlace_mode) to identify the scan type for the output:
2914	// Progressive, Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i
2915	// output from 23.976 input. - Soft: produces 23.976; the player converts this
2916	// output to 29.97i.
2917	Telecine H265Telecine
2918
2919	// Keep the default value, Enabled (ENABLED), to adjust quantization within each
2920	// frame based on temporal variation of content complexity. When you enable this
2921	// feature, the encoder uses fewer bits on areas of the frame that aren't moving
2922	// and uses more bits on complex objects with sharp edges that move a lot. For
2923	// example, this feature improves the readability of text tickers on newscasts and
2924	// scoreboards on sports matches. Enabling this feature will almost always improve
2925	// your video quality. Note, though, that this feature doesn't take into account
2926	// where the viewer's attention is likely to be. If viewers are likely to be
2927	// focusing their attention on a part of the screen that doesn't have moving
2928	// objects with sharp edges, such as sports athletes' faces, you might choose to
2929	// disable this feature. Related setting: When you enable temporal quantization,
2930	// adjust the strength of the filter with the setting Adaptive quantization
2931	// (adaptiveQuantization).
2932	TemporalAdaptiveQuantization H265TemporalAdaptiveQuantization
2933
2934	// Enables temporal layer identifiers in the encoded bitstream. Up to 3 layers are
2935	// supported depending on GOP structure: I- and P-frames form one layer, reference
2936	// B-frames can form a second layer and non-reference b-frames can form a third
2937	// layer. Decoders can optionally decode only the lower temporal layers to generate
2938	// a lower frame rate output. For example, given a bitstream with temporal IDs and
2939	// with b-frames = 1 (i.e. IbPbPb display order), a decoder could decode all the
2940	// frames for full frame rate output or only the I and P frames (lowest temporal
2941	// layer) for a half frame rate output.
2942	TemporalIds H265TemporalIds
2943
2944	// Enable use of tiles, allowing horizontal as well as vertical subdivision of the
2945	// encoded pictures.
2946	Tiles H265Tiles
2947
2948	// Inserts timecode for each frame as 4 bytes of an unregistered SEI message.
2949	UnregisteredSeiTimecode H265UnregisteredSeiTimecode
2950
2951	// If the location of parameter set NAL units doesn't matter in your workflow,
2952	// ignore this setting. Use this setting only with CMAF or DASH outputs, or with
2953	// standalone file outputs in an MPEG-4 container (MP4 outputs). Choose HVC1 to
2954	// mark your output as HVC1. This makes your output compliant with the following
2955	// specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition.
2956	// For these outputs, the service stores parameter set NAL units in the sample
2957	// headers but not in the samples directly. For MP4 outputs, when you choose HVC1,
2958	// your output video might not work properly with some downstream systems and video
2959	// players. The service defaults to marking your output as HEV1. For these outputs,
2960	// the service writes parameter set NAL units directly into the samples.
2961	WriteMp4PackagingType H265WriteMp4PackagingType
2962}
2963
2964// Use these settings to specify static color calibration metadata, as defined by
2965// SMPTE ST 2086. These values don't affect the pixel values that are encoded in
2966// the video stream. They are intended to help the downstream video player display
2967// content in a way that reflects the intentions of the the content creator.
2968type Hdr10Metadata struct {
2969
2970	// HDR Master Display Information must be provided by a color grader, using color
2971	// grading tools. Range is 0 to 50,000, each increment represents 0.00002 in
2972	// CIE1931 color coordinate. Note that this setting is not for color correction.
2973	BluePrimaryX int32
2974
2975	// HDR Master Display Information must be provided by a color grader, using color
2976	// grading tools. Range is 0 to 50,000, each increment represents 0.00002 in
2977	// CIE1931 color coordinate. Note that this setting is not for color correction.
2978	BluePrimaryY int32
2979
2980	// HDR Master Display Information must be provided by a color grader, using color
2981	// grading tools. Range is 0 to 50,000, each increment represents 0.00002 in
2982	// CIE1931 color coordinate. Note that this setting is not for color correction.
2983	GreenPrimaryX int32
2984
2985	// HDR Master Display Information must be provided by a color grader, using color
2986	// grading tools. Range is 0 to 50,000, each increment represents 0.00002 in
2987	// CIE1931 color coordinate. Note that this setting is not for color correction.
2988	GreenPrimaryY int32
2989
2990	// Maximum light level among all samples in the coded video sequence, in units of
2991	// candelas per square meter. This setting doesn't have a default value; you must
2992	// specify a value that is suitable for the content.
2993	MaxContentLightLevel int32
2994
2995	// Maximum average light level of any frame in the coded video sequence, in units
2996	// of candelas per square meter. This setting doesn't have a default value; you
2997	// must specify a value that is suitable for the content.
2998	MaxFrameAverageLightLevel int32
2999
3000	// Nominal maximum mastering display luminance in units of of 0.0001 candelas per
3001	// square meter.
3002	MaxLuminance int32
3003
3004	// Nominal minimum mastering display luminance in units of of 0.0001 candelas per
3005	// square meter
3006	MinLuminance int32
3007
3008	// HDR Master Display Information must be provided by a color grader, using color
3009	// grading tools. Range is 0 to 50,000, each increment represents 0.00002 in
3010	// CIE1931 color coordinate. Note that this setting is not for color correction.
3011	RedPrimaryX int32
3012
3013	// HDR Master Display Information must be provided by a color grader, using color
3014	// grading tools. Range is 0 to 50,000, each increment represents 0.00002 in
3015	// CIE1931 color coordinate. Note that this setting is not for color correction.
3016	RedPrimaryY int32
3017
3018	// HDR Master Display Information must be provided by a color grader, using color
3019	// grading tools. Range is 0 to 50,000, each increment represents 0.00002 in
3020	// CIE1931 color coordinate. Note that this setting is not for color correction.
3021	WhitePointX int32
3022
3023	// HDR Master Display Information must be provided by a color grader, using color
3024	// grading tools. Range is 0 to 50,000, each increment represents 0.00002 in
3025	// CIE1931 color coordinate. Note that this setting is not for color correction.
3026	WhitePointY int32
3027}
3028
3029// Specify the details for each additional HLS manifest that you want the service
3030// to generate for this output group. Each manifest can reference a different
3031// subset of outputs in the group.
3032type HlsAdditionalManifest struct {
3033
3034	// Specify a name modifier that the service adds to the name of this manifest to
3035	// make it different from the file names of the other main manifests in the output
3036	// group. For example, say that the default main manifest for your HLS group is
3037	// film-name.m3u8. If you enter "-no-premium" for this setting, then the file name
3038	// the service generates for this top-level manifest is film-name-no-premium.m3u8.
3039	// For HLS output groups, specify a manifestNameModifier that is different from the
3040	// nameModifier of the output. The service uses the output name modifier to create
3041	// unique names for the individual variant manifests.
3042	ManifestNameModifier *string
3043
3044	// Specify the outputs that you want this additional top-level manifest to
3045	// reference.
3046	SelectedOutputs []string
3047}
3048
3049// Caption Language Mapping
3050type HlsCaptionLanguageMapping struct {
3051
3052	// Caption channel.
3053	CaptionChannel int32
3054
3055	// Specify the language for this captions channel, using the ISO 639-2 or ISO 639-3
3056	// three-letter language code
3057	CustomLanguageCode *string
3058
3059	// Specify the language, using the ISO 639-2 three-letter code listed at
3060	// https://www.loc.gov/standards/iso639-2/php/code_list.php.
3061	LanguageCode LanguageCode
3062
3063	// Caption language description.
3064	LanguageDescription *string
3065}
3066
3067// Settings for HLS encryption
3068type HlsEncryptionSettings struct {
3069
3070	// This is a 128-bit, 16-byte hex value represented by a 32-character text string.
3071	// If this parameter is not set then the Initialization Vector will follow the
3072	// segment number by default.
3073	ConstantInitializationVector *string
3074
3075	// Encrypts the segments with the given encryption scheme. Leave blank to disable.
3076	// Selecting 'Disabled' in the web interface also disables encryption.
3077	EncryptionMethod HlsEncryptionType
3078
3079	// The Initialization Vector is a 128-bit number used in conjunction with the key
3080	// for encrypting blocks. If set to INCLUDE, Initialization Vector is listed in the
3081	// manifest. Otherwise Initialization Vector is not in the manifest.
3082	InitializationVectorInManifest HlsInitializationVectorInManifest
3083
3084	// Enable this setting to insert the EXT-X-SESSION-KEY element into the master
3085	// playlist. This allows for offline Apple HLS FairPlay content protection.
3086	OfflineEncrypted HlsOfflineEncrypted
3087
3088	// If your output group type is HLS, DASH, or Microsoft Smooth, use these settings
3089	// when doing DRM encryption with a SPEKE-compliant key provider. If your output
3090	// group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
3091	SpekeKeyProvider *SpekeKeyProvider
3092
3093	// Use these settings to set up encryption with a static key provider.
3094	StaticKeyProvider *StaticKeyProvider
3095
3096	// Specify whether your DRM encryption key is static or from a key provider that
3097	// follows the SPEKE standard. For more information about SPEKE, see
3098	// https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.
3099	Type HlsKeyProviderType
3100}
3101
3102// Settings related to your HLS output package. For more information, see
3103// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When
3104// you work directly in your JSON job specification, include this object and any
3105// required children when you set Type, under OutputGroupSettings, to
3106// HLS_GROUP_SETTINGS.
3107type HlsGroupSettings struct {
3108
3109	// Choose one or more ad marker types to decorate your Apple HLS manifest. This
3110	// setting does not determine whether SCTE-35 markers appear in the outputs
3111	// themselves.
3112	AdMarkers []HlsAdMarkers
3113
3114	// By default, the service creates one top-level .m3u8 HLS manifest for each HLS
3115	// output group in your job. This default manifest references every output in the
3116	// output group. To create additional top-level manifests that reference a subset
3117	// of the outputs in the output group, specify a list of them here.
3118	AdditionalManifests []HlsAdditionalManifest
3119
3120	// Ignore this setting unless you are using FairPlay DRM with Verimatrix and you
3121	// encounter playback issues. Keep the default value, Include (INCLUDE), to output
3122	// audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only headers
3123	// from your audio segments.
3124	AudioOnlyHeader HlsAudioOnlyHeader
3125
3126	// A partial URI prefix that will be prepended to each output in the media .m3u8
3127	// file. Can be used if base manifest is delivered from a different URL than the
3128	// main .m3u8 file.
3129	BaseUrl *string
3130
3131	// Language to be used on Caption outputs
3132	CaptionLanguageMappings []HlsCaptionLanguageMapping
3133
3134	// Applies only to 608 Embedded output captions. Insert: Include CLOSED-CAPTIONS
3135	// lines in the manifest. Specify at least one language in the CC1 Language Code
3136	// field. One CLOSED-CAPTION line is added for each Language Code you specify. Make
3137	// sure to specify the languages in the order in which they appear in the original
3138	// source (if the source is embedded format) or the order of the caption selectors
3139	// (if the source is other than embedded). Otherwise, languages in the manifest
3140	// will not match up properly with the output captions. None: Include
3141	// CLOSED-CAPTIONS=NONE line in the manifest. Omit: Omit any CLOSED-CAPTIONS line
3142	// from the manifest.
3143	CaptionLanguageSetting HlsCaptionLanguageSetting
3144
3145	// Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no
3146	// tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in
3147	// your video distribution set up. For example, use the Cache-Control http header.
3148	ClientCache HlsClientCache
3149
3150	// Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist
3151	// generation.
3152	CodecSpecification HlsCodecSpecification
3153
3154	// Use Destination (Destination) to specify the S3 output location and the output
3155	// filename base. Destination accepts format identifiers. If you do not specify the
3156	// base filename in the URI, the service will use the filename of the input file.
3157	// If your job has multiple inputs, the service uses the filename of the first
3158	// input file.
3159	Destination *string
3160
3161	// Settings associated with the destination. Will vary based on the type of
3162	// destination
3163	DestinationSettings *DestinationSettings
3164
3165	// Indicates whether segments should be placed in subdirectories.
3166	DirectoryStructure HlsDirectoryStructure
3167
3168	// DRM settings.
3169	Encryption *HlsEncryptionSettings
3170
3171	// When set to GZIP, compresses HLS playlist.
3172	ManifestCompression HlsManifestCompression
3173
3174	// Indicates whether the output manifest should use floating point values for
3175	// segment duration.
3176	ManifestDurationFormat HlsManifestDurationFormat
3177
3178	// Keep this setting at the default value of 0, unless you are troubleshooting a
3179	// problem with how devices play back the end of your video asset. If you know that
3180	// player devices are hanging on the final segment of your video because the length
3181	// of your final segment is too short, use this setting to specify a minimum final
3182	// segment length, in seconds. Choose a value that is greater than or equal to 1
3183	// and less than your segment length. When you specify a value for this setting,
3184	// the encoder will combine any final segment that is shorter than the length that
3185	// you specify with the previous segment. For example, your segment length is 3
3186	// seconds and your final segment is .5 seconds without a minimum final segment
3187	// length; when you set the minimum final segment length to 1, your final segment
3188	// is 3.5 seconds.
3189	MinFinalSegmentLength float64
3190
3191	// When set, Minimum Segment Size is enforced by looking ahead and back within the
3192	// specified range for a nearby avail and extending the segment size if needed.
3193	MinSegmentLength int32
3194
3195	// Indicates whether the .m3u8 manifest file should be generated for this HLS
3196	// output group.
3197	OutputSelection HlsOutputSelection
3198
3199	// Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files. The
3200	// value is calculated as follows: either the program date and time are initialized
3201	// using the input timecode source, or the time is initialized using the input
3202	// timecode source and the date is initialized using the timestamp_offset.
3203	ProgramDateTime HlsProgramDateTime
3204
3205	// Period of insertion of EXT-X-PROGRAM-DATE-TIME entry, in seconds.
3206	ProgramDateTimePeriod int32
3207
3208	// When set to SINGLE_FILE, emits program as a single media resource (.ts) file,
3209	// uses #EXT-X-BYTERANGE tags to index segment for playback.
3210	SegmentControl HlsSegmentControl
3211
3212	// Length of MPEG-2 Transport Stream segments to create (in seconds). Note that
3213	// segments will end on the next keyframe after this number of seconds, so actual
3214	// segment length may be longer.
3215	SegmentLength int32
3216
3217	// Number of segments to write to a subdirectory before starting a new one.
3218	// directoryStructure must be SINGLE_DIRECTORY for this setting to have an effect.
3219	SegmentsPerSubdirectory int32
3220
3221	// Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag of
3222	// variant manifest.
3223	StreamInfResolution HlsStreamInfResolution
3224
3225	// Indicates ID3 frame that has the timecode.
3226	TimedMetadataId3Frame HlsTimedMetadataId3Frame
3227
3228	// Timed Metadata interval in seconds.
3229	TimedMetadataId3Period int32
3230
3231	// Provides an extra millisecond delta offset to fine tune the timestamps.
3232	TimestampDeltaMilliseconds int32
3233}
3234
3235// Settings for HLS output groups
3236type HlsSettings struct {
3237
3238	// Specifies the group to which the audio rendition belongs.
3239	AudioGroupId *string
3240
3241	// Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream
3242	// (M2TS) to create a file in an MPEG2-TS container. Keep the default value
3243	// Automatic (AUTOMATIC) to create an audio-only file in a raw container.
3244	// Regardless of the value that you specify here, if this output has video, the
3245	// service will place the output into an MPEG2-TS container.
3246	AudioOnlyContainer HlsAudioOnlyContainer
3247
3248	// List all the audio groups that are used with the video output stream. Input all
3249	// the audio GROUP-IDs that are associated to the video, separate by ','.
3250	AudioRenditionSets *string
3251
3252	// Four types of audio-only tracks are supported: Audio-Only Variant Stream The
3253	// client can play back this audio-only stream instead of video in low-bandwidth
3254	// scenarios. Represented as an EXT-X-STREAM-INF in the HLS manifest. Alternate
3255	// Audio, Auto Select, Default Alternate rendition that the client should try to
3256	// play back by default. Represented as an EXT-X-MEDIA in the HLS manifest with
3257	// DEFAULT=YES, AUTOSELECT=YES Alternate Audio, Auto Select, Not Default Alternate
3258	// rendition that the client may try to play back by default. Represented as an
3259	// EXT-X-MEDIA in the HLS manifest with DEFAULT=NO, AUTOSELECT=YES Alternate Audio,
3260	// not Auto Select Alternate rendition that the client will not try to play back by
3261	// default. Represented as an EXT-X-MEDIA in the HLS manifest with DEFAULT=NO,
3262	// AUTOSELECT=NO
3263	AudioTrackType HlsAudioTrackType
3264
3265	// Specify whether to flag this audio track as descriptive video service (DVS) in
3266	// your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes the
3267	// parameter CHARACTERISTICS="public.accessibility.describes-video" in the
3268	// EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag
3269	// (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with
3270	// accessibility on Apple devices. For more information, see the Apple
3271	// documentation.
3272	DescriptiveVideoServiceFlag HlsDescriptiveVideoServiceFlag
3273
3274	// Choose Include (INCLUDE) to have MediaConvert generate a child manifest that
3275	// lists only the I-frames for this rendition, in addition to your regular manifest
3276	// for this rendition. You might use this manifest as part of a workflow that
3277	// creates preview functions for your video. MediaConvert adds both the I-frame
3278	// only child manifest and the regular child manifest to the parent manifest. When
3279	// you don't need the I-frame only child manifest, keep the default value Exclude
3280	// (EXCLUDE).
3281	IFrameOnlyManifest HlsIFrameOnlyManifest
3282
3283	// Use this setting to add an identifying string to the filename of each segment.
3284	// The service adds this string between the name modifier and segment index number.
3285	// You can use format identifiers in the string. For more information, see
3286	// https://docs.aws.amazon.com/mediaconvert/latest/ug/using-variables-in-your-job-settings.html
3287	SegmentModifier *string
3288}
3289
3290// Optional. Configuration for a destination queue to which the job can hop once a
3291// customer-defined minimum wait time has passed.
3292type HopDestination struct {
3293
3294	// Optional. When you set up a job to use queue hopping, you can specify a
3295	// different relative priority for the job in the destination queue. If you don't
3296	// specify, the relative priority will remain the same as in the previous queue.
3297	Priority int32
3298
3299	// Optional unless the job is submitted on the default queue. When you set up a job
3300	// to use queue hopping, you can specify a destination queue. This queue cannot be
3301	// the original queue to which the job is submitted. If the original queue isn't
3302	// the default queue and you don't specify the destination queue, the job will move
3303	// to the default queue.
3304	Queue *string
3305
3306	// Required for setting up a job to use queue hopping. Minimum wait time in minutes
3307	// until the job can hop to the destination queue. Valid range is 1 to 1440
3308	// minutes, inclusive.
3309	WaitMinutes int32
3310}
3311
3312// To insert ID3 tags in your output, specify two values. Use ID3 tag (Id3) to
3313// specify the base 64 encoded string and use Timecode (TimeCode) to specify the
3314// time when the tag should be inserted. To insert multiple ID3 tags in your
3315// output, create multiple instances of ID3 insertion (Id3Insertion).
3316type Id3Insertion struct {
3317
3318	// Use ID3 tag (Id3) to provide a tag value in base64-encode format.
3319	Id3 *string
3320
3321	// Provide a Timecode (TimeCode) in HH:MM:SS:FF or HH:MM:SS;FF format.
3322	Timecode *string
3323}
3324
3325// Use the image inserter feature to include a graphic overlay on your video.
3326// Enable or disable this feature for each input or output individually. For more
3327// information, see
3328// https://docs.aws.amazon.com/mediaconvert/latest/ug/graphic-overlay.html. This
3329// setting is disabled by default.
3330type ImageInserter struct {
3331
3332	// Specify the images that you want to overlay on your video. The images must be
3333	// PNG or TGA files.
3334	InsertableImages []InsertableImage
3335}
3336
3337// Settings related to IMSC captions. IMSC is a sidecar format that holds captions
3338// in a file that is separate from the video container. Set up sidecar captions in
3339// the same output group, but different output from your video. For more
3340// information, see
3341// https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.
3342// When you work directly in your JSON job specification, include this object and
3343// any required children when you set destinationType to IMSC.
3344type ImscDestinationSettings struct {
3345
3346	// Keep this setting enabled to have MediaConvert use the font style and position
3347	// information from the captions source in the output. This option is available
3348	// only when your input captions are IMSC, SMPTE-TT, or TTML. Disable this setting
3349	// for simplified output captions.
3350	StylePassthrough ImscStylePassthrough
3351}
3352
3353// Use inputs to define the source files used in your transcoding job. For more
3354// information, see
3355// https://docs.aws.amazon.com/mediaconvert/latest/ug/specify-input-settings.html.
3356// You can use multiple video inputs to do input stitching. For more information,
3357// see
3358// https://docs.aws.amazon.com/mediaconvert/latest/ug/assembling-multiple-inputs-and-input-clips.html
3359type Input struct {
3360
3361	// Use audio selector groups to combine multiple sidecar audio inputs so that you
3362	// can assign them to a single output audio tab (AudioDescription). Note that, if
3363	// you're working with embedded audio, it's simpler to assign multiple input tracks
3364	// into a single audio selector rather than use an audio selector group.
3365	AudioSelectorGroups map[string]AudioSelectorGroup
3366
3367	// Use Audio selectors (AudioSelectors) to specify a track or set of tracks from
3368	// the input that you will use in your outputs. You can use multiple Audio
3369	// selectors per input.
3370	AudioSelectors map[string]AudioSelector
3371
3372	// Use captions selectors to specify the captions data from your input that you use
3373	// in your outputs. You can use up to 20 captions selectors per input.
3374	CaptionSelectors map[string]CaptionSelector
3375
3376	// Use Cropping selection (crop) to specify the video area that the service will
3377	// include in the output video frame. If you specify a value here, it will override
3378	// any value that you specify in the output setting Cropping selection (crop).
3379	Crop *Rectangle
3380
3381	// Enable Deblock (InputDeblockFilter) to produce smoother motion in the output.
3382	// Default is disabled. Only manually controllable for MPEG2 and uncompressed video
3383	// inputs.
3384	DeblockFilter InputDeblockFilter
3385
3386	// Settings for decrypting any input files that you encrypt before you upload them
3387	// to Amazon S3. MediaConvert can decrypt files only when you use AWS Key
3388	// Management Service (KMS) to encrypt the data key that you use to encrypt your
3389	// content.
3390	DecryptionSettings *InputDecryptionSettings
3391
3392	// Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is
3393	// disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs.
3394	DenoiseFilter InputDenoiseFilter
3395
3396	// Specify the source file for your transcoding job. You can use multiple inputs in
3397	// a single job. The service concatenates these inputs, in the order that you
3398	// specify them in the job, to create the outputs. If your input format is IMF,
3399	// specify your input by providing the path to your CPL. For example,
3400	// "s3://bucket/vf/cpl.xml". If the CPL is in an incomplete IMP, make sure to use
3401	// Supplemental IMPs (SupplementalImps) to specify any supplemental IMPs that
3402	// contain assets referenced by the CPL.
3403	FileInput *string
3404
3405	// Specify how the transcoding service applies the denoise and deblock filters. You
3406	// must also enable the filters separately, with Denoise (InputDenoiseFilter) and
3407	// Deblock (InputDeblockFilter). * Auto - The transcoding service determines
3408	// whether to apply filtering, depending on input type and quality. * Disable - The
3409	// input is not filtered. This is true even if you use the API to enable them in
3410	// (InputDeblockFilter) and (InputDeblockFilter). * Force - The input is filtered
3411	// regardless of input type.
3412	FilterEnable InputFilterEnable
3413
3414	// Use Filter strength (FilterStrength) to adjust the magnitude the input filter
3415	// settings (Deblock and Denoise). The range is -5 to 5. Default is 0.
3416	FilterStrength int32
3417
3418	// Enable the image inserter feature to include a graphic overlay on your video.
3419	// Enable or disable this feature for each input individually. This setting is
3420	// disabled by default.
3421	ImageInserter *ImageInserter
3422
3423	// (InputClippings) contains sets of start and end times that together specify a
3424	// portion of the input to be used in the outputs. If you provide only a start
3425	// time, the clip will be the entire input from that point to the end. If you
3426	// provide only an end time, it will be the entire input up to that point. When you
3427	// specify more than one input clip, the transcoding service creates the job
3428	// outputs by stringing the clips together in the order you specify them.
3429	InputClippings []InputClipping
3430
3431	// When you have a progressive segmented frame (PsF) input, use this setting to
3432	// flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore,
3433	// flagging your input as PsF results in better preservation of video quality when
3434	// you do deinterlacing and frame rate conversion. If you don't specify, the
3435	// default value is Auto (AUTO). Auto is the correct setting for all inputs that
3436	// are not PsF. Don't set this value to PsF when your input is interlaced. Doing so
3437	// creates horizontal interlacing artifacts.
3438	InputScanType InputScanType
3439
3440	// Use Selection placement (position) to define the video area in your output
3441	// frame. The area outside of the rectangle that you specify here is black. If you
3442	// specify a value here, it will override any value that you specify in the output
3443	// setting Selection placement (position). If you specify a value here, this will
3444	// override any AFD values in your input, even if you set Respond to AFD
3445	// (RespondToAfd) to Respond (RESPOND). If you specify a value here, this will
3446	// ignore anything that you specify for the setting Scaling Behavior
3447	// (scalingBehavior).
3448	Position *Rectangle
3449
3450	// Use Program (programNumber) to select a specific program from within a
3451	// multi-program transport stream. Note that Quad 4K is not currently supported.
3452	// Default is the first program within the transport stream. If the program you
3453	// specify doesn't exist, the transcoding service will use this default.
3454	ProgramNumber int32
3455
3456	// Set PSI control (InputPsiControl) for transport stream inputs to specify which
3457	// data the demux process to scans. * Ignore PSI - Scan all PIDs for audio and
3458	// video. * Use PSI - Scan only PSI data.
3459	PsiControl InputPsiControl
3460
3461	// Provide a list of any necessary supplemental IMPs. You need supplemental IMPs if
3462	// the CPL that you're using for your input is in an incomplete IMP. Specify either
3463	// the supplemental IMP directories with a trailing slash or the ASSETMAP.xml
3464	// files. For example ["s3://bucket/ov/", "s3://bucket/vf2/ASSETMAP.xml"]. You
3465	// don't need to specify the IMP that contains your input CPL, because the service
3466	// automatically detects it.
3467	SupplementalImps []string
3468
3469	// Use this Timecode source setting, located under the input settings
3470	// (InputTimecodeSource), to specify how the service counts input video frames.
3471	// This input frame count affects only the behavior of features that apply to a
3472	// single input at a time, such as input clipping and synchronizing some captions
3473	// formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video.
3474	// Choose Start at zero (ZEROBASED) to start the first frame at zero. Choose
3475	// Specified start (SPECIFIEDSTART) to start the first frame at the timecode that
3476	// you specify in the setting Start timecode (timecodeStart). If you don't specify
3477	// a value for Timecode source, the service will use Embedded by default. For more
3478	// information about timecodes, see
3479	// https://docs.aws.amazon.com/console/mediaconvert/timecode.
3480	TimecodeSource InputTimecodeSource
3481
3482	// Specify the timecode that you want the service to use for this input's initial
3483	// frame. To use this setting, you must set the Timecode source setting, located
3484	// under the input settings (InputTimecodeSource), to Specified start
3485	// (SPECIFIEDSTART). For more information about timecodes, see
3486	// https://docs.aws.amazon.com/console/mediaconvert/timecode.
3487	TimecodeStart *string
3488
3489	// Input video selectors contain the video settings for the input. Each of your
3490	// inputs can have up to one video selector.
3491	VideoSelector *VideoSelector
3492}
3493
3494// To transcode only portions of your input, include one input clip for each part
3495// of your input that you want in your output. All input clips that you specify
3496// will be included in every output of the job. For more information, see
3497// https://docs.aws.amazon.com/mediaconvert/latest/ug/assembling-multiple-inputs-and-input-clips.html.
3498type InputClipping struct {
3499
3500	// Set End timecode (EndTimecode) to the end of the portion of the input you are
3501	// clipping. The frame corresponding to the End timecode value is included in the
3502	// clip. Start timecode or End timecode may be left blank, but not both. Use the
3503	// format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is
3504	// the second, and FF is the frame number. When choosing this value, take into
3505	// account your setting for timecode source under input settings
3506	// (InputTimecodeSource). For example, if you have embedded timecodes that start at
3507	// 01:00:00:00 and you want your clip to end six minutes into the video, use
3508	// 01:06:00:00.
3509	EndTimecode *string
3510
3511	// Set Start timecode (StartTimecode) to the beginning of the portion of the input
3512	// you are clipping. The frame corresponding to the Start timecode value is
3513	// included in the clip. Start timecode or End timecode may be left blank, but not
3514	// both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the
3515	// minute, SS is the second, and FF is the frame number. When choosing this value,
3516	// take into account your setting for Input timecode source. For example, if you
3517	// have embedded timecodes that start at 01:00:00:00 and you want your clip to
3518	// begin five minutes into the video, use 01:05:00:00.
3519	StartTimecode *string
3520}
3521
3522// Settings for decrypting any input files that you encrypt before you upload them
3523// to Amazon S3. MediaConvert can decrypt files only when you use AWS Key
3524// Management Service (KMS) to encrypt the data key that you use to encrypt your
3525// content.
3526type InputDecryptionSettings struct {
3527
3528	// Specify the encryption mode that you used to encrypt your input files.
3529	DecryptionMode DecryptionMode
3530
3531	// Warning! Don't provide your encryption key in plaintext. Your job settings could
3532	// be intercepted, making your encrypted content vulnerable. Specify the encrypted
3533	// version of the data key that you used to encrypt your content. The data key must
3534	// be encrypted by AWS Key Management Service (KMS). The key can be 128, 192, or
3535	// 256 bits.
3536	EncryptedDecryptionKey *string
3537
3538	// Specify the initialization vector that you used when you encrypted your content
3539	// before uploading it to Amazon S3. You can use a 16-byte initialization vector
3540	// with any encryption mode. Or, you can use a 12-byte initialization vector with
3541	// GCM or CTR. MediaConvert accepts only initialization vectors that are
3542	// base64-encoded.
3543	InitializationVector *string
3544
3545	// Specify the AWS Region for AWS Key Management Service (KMS) that you used to
3546	// encrypt your data key, if that Region is different from the one you are using
3547	// for AWS Elemental MediaConvert.
3548	KmsKeyRegion *string
3549}
3550
3551// Specified video input in a template.
3552type InputTemplate struct {
3553
3554	// Use audio selector groups to combine multiple sidecar audio inputs so that you
3555	// can assign them to a single output audio tab (AudioDescription). Note that, if
3556	// you're working with embedded audio, it's simpler to assign multiple input tracks
3557	// into a single audio selector rather than use an audio selector group.
3558	AudioSelectorGroups map[string]AudioSelectorGroup
3559
3560	// Use Audio selectors (AudioSelectors) to specify a track or set of tracks from
3561	// the input that you will use in your outputs. You can use multiple Audio
3562	// selectors per input.
3563	AudioSelectors map[string]AudioSelector
3564
3565	// Use captions selectors to specify the captions data from your input that you use
3566	// in your outputs. You can use up to 20 captions selectors per input.
3567	CaptionSelectors map[string]CaptionSelector
3568
3569	// Use Cropping selection (crop) to specify the video area that the service will
3570	// include in the output video frame. If you specify a value here, it will override
3571	// any value that you specify in the output setting Cropping selection (crop).
3572	Crop *Rectangle
3573
3574	// Enable Deblock (InputDeblockFilter) to produce smoother motion in the output.
3575	// Default is disabled. Only manually controllable for MPEG2 and uncompressed video
3576	// inputs.
3577	DeblockFilter InputDeblockFilter
3578
3579	// Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is
3580	// disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs.
3581	DenoiseFilter InputDenoiseFilter
3582
3583	// Specify how the transcoding service applies the denoise and deblock filters. You
3584	// must also enable the filters separately, with Denoise (InputDenoiseFilter) and
3585	// Deblock (InputDeblockFilter). * Auto - The transcoding service determines
3586	// whether to apply filtering, depending on input type and quality. * Disable - The
3587	// input is not filtered. This is true even if you use the API to enable them in
3588	// (InputDeblockFilter) and (InputDeblockFilter). * Force - The input is filtered
3589	// regardless of input type.
3590	FilterEnable InputFilterEnable
3591
3592	// Use Filter strength (FilterStrength) to adjust the magnitude the input filter
3593	// settings (Deblock and Denoise). The range is -5 to 5. Default is 0.
3594	FilterStrength int32
3595
3596	// Enable the image inserter feature to include a graphic overlay on your video.
3597	// Enable or disable this feature for each input individually. This setting is
3598	// disabled by default.
3599	ImageInserter *ImageInserter
3600
3601	// (InputClippings) contains sets of start and end times that together specify a
3602	// portion of the input to be used in the outputs. If you provide only a start
3603	// time, the clip will be the entire input from that point to the end. If you
3604	// provide only an end time, it will be the entire input up to that point. When you
3605	// specify more than one input clip, the transcoding service creates the job
3606	// outputs by stringing the clips together in the order you specify them.
3607	InputClippings []InputClipping
3608
3609	// When you have a progressive segmented frame (PsF) input, use this setting to
3610	// flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore,
3611	// flagging your input as PsF results in better preservation of video quality when
3612	// you do deinterlacing and frame rate conversion. If you don't specify, the
3613	// default value is Auto (AUTO). Auto is the correct setting for all inputs that
3614	// are not PsF. Don't set this value to PsF when your input is interlaced. Doing so
3615	// creates horizontal interlacing artifacts.
3616	InputScanType InputScanType
3617
3618	// Use Selection placement (position) to define the video area in your output
3619	// frame. The area outside of the rectangle that you specify here is black. If you
3620	// specify a value here, it will override any value that you specify in the output
3621	// setting Selection placement (position). If you specify a value here, this will
3622	// override any AFD values in your input, even if you set Respond to AFD
3623	// (RespondToAfd) to Respond (RESPOND). If you specify a value here, this will
3624	// ignore anything that you specify for the setting Scaling Behavior
3625	// (scalingBehavior).
3626	Position *Rectangle
3627
3628	// Use Program (programNumber) to select a specific program from within a
3629	// multi-program transport stream. Note that Quad 4K is not currently supported.
3630	// Default is the first program within the transport stream. If the program you
3631	// specify doesn't exist, the transcoding service will use this default.
3632	ProgramNumber int32
3633
3634	// Set PSI control (InputPsiControl) for transport stream inputs to specify which
3635	// data the demux process to scans. * Ignore PSI - Scan all PIDs for audio and
3636	// video. * Use PSI - Scan only PSI data.
3637	PsiControl InputPsiControl
3638
3639	// Use this Timecode source setting, located under the input settings
3640	// (InputTimecodeSource), to specify how the service counts input video frames.
3641	// This input frame count affects only the behavior of features that apply to a
3642	// single input at a time, such as input clipping and synchronizing some captions
3643	// formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video.
3644	// Choose Start at zero (ZEROBASED) to start the first frame at zero. Choose
3645	// Specified start (SPECIFIEDSTART) to start the first frame at the timecode that
3646	// you specify in the setting Start timecode (timecodeStart). If you don't specify
3647	// a value for Timecode source, the service will use Embedded by default. For more
3648	// information about timecodes, see
3649	// https://docs.aws.amazon.com/console/mediaconvert/timecode.
3650	TimecodeSource InputTimecodeSource
3651
3652	// Specify the timecode that you want the service to use for this input's initial
3653	// frame. To use this setting, you must set the Timecode source setting, located
3654	// under the input settings (InputTimecodeSource), to Specified start
3655	// (SPECIFIEDSTART). For more information about timecodes, see
3656	// https://docs.aws.amazon.com/console/mediaconvert/timecode.
3657	TimecodeStart *string
3658
3659	// Input video selectors contain the video settings for the input. Each of your
3660	// inputs can have up to one video selector.
3661	VideoSelector *VideoSelector
3662}
3663
3664// These settings apply to a specific graphic overlay. You can include multiple
3665// overlays in your job.
3666type InsertableImage struct {
3667
3668	// Specify the time, in milliseconds, for the image to remain on the output video.
3669	// This duration includes fade-in time but not fade-out time.
3670	Duration int32
3671
3672	// Specify the length of time, in milliseconds, between the Start time that you
3673	// specify for the image insertion and the time that the image appears at full
3674	// opacity. Full opacity is the level that you specify for the opacity setting. If
3675	// you don't specify a value for Fade-in, the image will appear abruptly at the
3676	// overlay start time.
3677	FadeIn int32
3678
3679	// Specify the length of time, in milliseconds, between the end of the time that
3680	// you have specified for the image overlay Duration and when the overlaid image
3681	// has faded to total transparency. If you don't specify a value for Fade-out, the
3682	// image will disappear abruptly at the end of the inserted image duration.
3683	FadeOut int32
3684
3685	// Specify the height of the inserted image in pixels. If you specify a value
3686	// that's larger than the video resolution height, the service will crop your
3687	// overlaid image to fit. To use the native height of the image, keep this setting
3688	// blank.
3689	Height int32
3690
3691	// Specify the HTTP, HTTPS, or Amazon S3 location of the image that you want to
3692	// overlay on the video. Use a PNG or TGA file.
3693	ImageInserterInput *string
3694
3695	// Specify the distance, in pixels, between the inserted image and the left edge of
3696	// the video frame. Required for any image overlay that you specify.
3697	ImageX int32
3698
3699	// Specify the distance, in pixels, between the overlaid image and the top edge of
3700	// the video frame. Required for any image overlay that you specify.
3701	ImageY int32
3702
3703	// Specify how overlapping inserted images appear. Images with higher values for
3704	// Layer appear on top of images with lower values for Layer.
3705	Layer int32
3706
3707	// Use Opacity (Opacity) to specify how much of the underlying video shows through
3708	// the inserted image. 0 is transparent and 100 is fully opaque. Default is 50.
3709	Opacity int32
3710
3711	// Specify the timecode of the frame that you want the overlay to first appear on.
3712	// This must be in timecode (HH:MM:SS:FF or HH:MM:SS;FF) format. Remember to take
3713	// into account your timecode source settings.
3714	StartTime *string
3715
3716	// Specify the width of the inserted image in pixels. If you specify a value that's
3717	// larger than the video resolution width, the service will crop your overlaid
3718	// image to fit. To use the native width of the image, keep this setting blank.
3719	Width int32
3720}
3721
3722// Each job converts an input file into an output file or files. For more
3723// information, see the User Guide at
3724// https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
3725type Job struct {
3726
3727	// The IAM role you use for creating this job. For details about permissions, see
3728	// the User Guide topic at the User Guide at
3729	// https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html
3730	//
3731	// This member is required.
3732	Role *string
3733
3734	// JobSettings contains all the transcode settings for a job.
3735	//
3736	// This member is required.
3737	Settings *JobSettings
3738
3739	// Accelerated transcoding can significantly speed up jobs with long, visually
3740	// complex content.
3741	AccelerationSettings *AccelerationSettings
3742
3743	// Describes whether the current job is running with accelerated transcoding. For
3744	// jobs that have Acceleration (AccelerationMode) set to DISABLED,
3745	// AccelerationStatus is always NOT_APPLICABLE. For jobs that have Acceleration
3746	// (AccelerationMode) set to ENABLED or PREFERRED, AccelerationStatus is one of the
3747	// other states. AccelerationStatus is IN_PROGRESS initially, while the service
3748	// determines whether the input files and job settings are compatible with
3749	// accelerated transcoding. If they are, AcclerationStatus is ACCELERATED. If your
3750	// input files and job settings aren't compatible with accelerated transcoding, the
3751	// service either fails your job or runs it without accelerated transcoding,
3752	// depending on how you set Acceleration (AccelerationMode). When the service runs
3753	// your job without accelerated transcoding, AccelerationStatus is NOT_ACCELERATED.
3754	AccelerationStatus AccelerationStatus
3755
3756	// An identifier for this resource that is unique within all of AWS.
3757	Arn *string
3758
3759	// The tag type that AWS Billing and Cost Management will use to sort your AWS
3760	// Elemental MediaConvert costs on any billing report that you set up.
3761	BillingTagsSource BillingTagsSource
3762
3763	// The time, in Unix epoch format in seconds, when the job got created.
3764	CreatedAt *time.Time
3765
3766	// A job's phase can be PROBING, TRANSCODING OR UPLOADING
3767	CurrentPhase JobPhase
3768
3769	// Error code for the job
3770	ErrorCode int32
3771
3772	// Error message of Job
3773	ErrorMessage *string
3774
3775	// Optional list of hop destinations.
3776	HopDestinations []HopDestination
3777
3778	// A portion of the job's ARN, unique within your AWS Elemental MediaConvert
3779	// resources
3780	Id *string
3781
3782	// An estimate of how far your job has progressed. This estimate is shown as a
3783	// percentage of the total time from when your job leaves its queue to when your
3784	// output files appear in your output Amazon S3 bucket. AWS Elemental MediaConvert
3785	// provides jobPercentComplete in CloudWatch STATUS_UPDATE events and in the
3786	// response to GetJob and ListJobs requests. The jobPercentComplete estimate is
3787	// reliable for the following input containers: Quicktime, Transport Stream, MP4,
3788	// and MXF. For some jobs, the service can't provide information about job
3789	// progress. In those cases, jobPercentComplete returns a null value.
3790	JobPercentComplete int32
3791
3792	// The job template that the job is created from, if it is created from a job
3793	// template.
3794	JobTemplate *string
3795
3796	// Provides messages from the service about jobs that you have already successfully
3797	// submitted.
3798	Messages *JobMessages
3799
3800	// List of output group details
3801	OutputGroupDetails []OutputGroupDetail
3802
3803	// Relative priority on the job.
3804	Priority int32
3805
3806	// When you create a job, you can specify a queue to send it to. If you don't
3807	// specify, the job will go to the default queue. For more about queues, see the
3808	// User Guide topic at
3809	// https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html
3810	Queue *string
3811
3812	// The job's queue hopping history.
3813	QueueTransitions []QueueTransition
3814
3815	// The number of times that the service automatically attempted to process your job
3816	// after encountering an error.
3817	RetryCount int32
3818
3819	// Enable this setting when you run a test job to estimate how many reserved
3820	// transcoding slots (RTS) you need. When this is enabled, MediaConvert runs your
3821	// job from an on-demand queue with similar performance to what you will see with
3822	// one RTS in a reserved queue. This setting is disabled by default.
3823	SimulateReservedQueue SimulateReservedQueue
3824
3825	// A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR.
3826	Status JobStatus
3827
3828	// Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch
3829	// Events. Set the interval, in seconds, between status updates. MediaConvert sends
3830	// an update at this interval from the time the service begins processing your job
3831	// to the time it completes the transcode or encounters an error.
3832	StatusUpdateInterval StatusUpdateInterval
3833
3834	// Information about when jobs are submitted, started, and finished is specified in
3835	// Unix epoch format in seconds.
3836	Timing *Timing
3837
3838	// User-defined metadata that you want to associate with an MediaConvert job. You
3839	// specify metadata in key/value pairs.
3840	UserMetadata map[string]string
3841}
3842
3843// Provides messages from the service about jobs that you have already successfully
3844// submitted.
3845type JobMessages struct {
3846
3847	// List of messages that are informational only and don't indicate a problem with
3848	// your job.
3849	Info []string
3850
3851	// List of messages that warn about conditions that might cause your job not to run
3852	// or to fail.
3853	Warning []string
3854}
3855
3856// JobSettings contains all the transcode settings for a job.
3857type JobSettings struct {
3858
3859	// When specified, this offset (in milliseconds) is added to the input Ad Avail PTS
3860	// time.
3861	AdAvailOffset int32
3862
3863	// Settings for ad avail blanking. Video can be blanked or overlaid with an image,
3864	// and audio muted during SCTE-35 triggered ad avails.
3865	AvailBlanking *AvailBlanking
3866
3867	// Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion,
3868	// you can ignore these settings.
3869	Esam *EsamSettings
3870
3871	// Use Inputs (inputs) to define source file used in the transcode job. There can
3872	// be multiple inputs add in a job. These inputs will be concantenated together to
3873	// create the output.
3874	Inputs []Input
3875
3876	// Use these settings only when you use Kantar watermarking. Specify the values
3877	// that MediaConvert uses to generate and place Kantar watermarks in your output
3878	// audio. These settings apply to every output in your job. In addition to
3879	// specifying these values, you also need to store your Kantar credentials in AWS
3880	// Secrets Manager. For more information, see
3881	// https://docs.aws.amazon.com/mediaconvert/latest/ug/kantar-watermarking.html.
3882	KantarWatermark *KantarWatermarkSettings
3883
3884	// Overlay motion graphics on top of your video. The motion graphics that you
3885	// specify here appear on all outputs in all output groups. For more information,
3886	// see
3887	// https://docs.aws.amazon.com/mediaconvert/latest/ug/motion-graphic-overlay.html.
3888	MotionImageInserter *MotionImageInserter
3889
3890	// Settings for your Nielsen configuration. If you don't do Nielsen measurement and
3891	// analytics, ignore these settings. When you enable Nielsen configuration
3892	// (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs
3893	// in the job. To enable Nielsen configuration programmatically, include an
3894	// instance of nielsenConfiguration in your JSON job specification. Even if you
3895	// don't include any children of nielsenConfiguration, you still enable the
3896	// setting.
3897	NielsenConfiguration *NielsenConfiguration
3898
3899	// Ignore these settings unless you are using Nielsen non-linear watermarking.
3900	// Specify the values that MediaConvert uses to generate and place Nielsen
3901	// watermarks in your output audio. In addition to specifying these values, you
3902	// also need to set up your cloud TIC server. These settings apply to every output
3903	// in your job. The MediaConvert implementation is currently with the following
3904	// Nielsen versions: Nielsen Watermark SDK Version 5.2.1 Nielsen NLM Watermark
3905	// Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0]
3906	NielsenNonLinearWatermark *NielsenNonLinearWatermarkSettings
3907
3908	// (OutputGroups) contains one group of settings for each set of outputs that share
3909	// a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF,
3910	// and no container) are grouped in a single output group as well. Required in
3911	// (OutputGroups) is a group of settings that apply to the whole group. This
3912	// required object depends on the value you set for (Type) under
3913	// (OutputGroups)>(OutputGroupSettings). Type, settings object pairs are as
3914	// follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS,
3915	// HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings *
3916	// MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS,
3917	// CmafGroupSettings
3918	OutputGroups []OutputGroup
3919
3920	// These settings control how the service handles timecodes throughout the job.
3921	// These settings don't affect input clipping.
3922	TimecodeConfig *TimecodeConfig
3923
3924	// Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags in
3925	// any HLS outputs. To include timed metadata, you must enable it here, enable it
3926	// in each output container, and specify tags and timecodes in ID3 insertion
3927	// (Id3Insertion) objects.
3928	TimedMetadataInsertion *TimedMetadataInsertion
3929}
3930
3931// A job template is a pre-made set of encoding instructions that you can use to
3932// quickly create a job.
3933type JobTemplate struct {
3934
3935	// A name you create for each job template. Each name must be unique within your
3936	// account.
3937	//
3938	// This member is required.
3939	Name *string
3940
3941	// JobTemplateSettings contains all the transcode settings saved in the template
3942	// that will be applied to jobs created from it.
3943	//
3944	// This member is required.
3945	Settings *JobTemplateSettings
3946
3947	// Accelerated transcoding can significantly speed up jobs with long, visually
3948	// complex content.
3949	AccelerationSettings *AccelerationSettings
3950
3951	// An identifier for this resource that is unique within all of AWS.
3952	Arn *string
3953
3954	// An optional category you create to organize your job templates.
3955	Category *string
3956
3957	// The timestamp in epoch seconds for Job template creation.
3958	CreatedAt *time.Time
3959
3960	// An optional description you create for each job template.
3961	Description *string
3962
3963	// Optional list of hop destinations.
3964	HopDestinations []HopDestination
3965
3966	// The timestamp in epoch seconds when the Job template was last updated.
3967	LastUpdated *time.Time
3968
3969	// Relative priority on the job.
3970	Priority int32
3971
3972	// Optional. The queue that jobs created from this template are assigned to. If you
3973	// don't specify this, jobs will go to the default queue.
3974	Queue *string
3975
3976	// Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch
3977	// Events. Set the interval, in seconds, between status updates. MediaConvert sends
3978	// an update at this interval from the time the service begins processing your job
3979	// to the time it completes the transcode or encounters an error.
3980	StatusUpdateInterval StatusUpdateInterval
3981
3982	// A job template can be of two types: system or custom. System or built-in job
3983	// templates can't be modified or deleted by the user.
3984	Type Type
3985}
3986
3987// JobTemplateSettings contains all the transcode settings saved in the template
3988// that will be applied to jobs created from it.
3989type JobTemplateSettings struct {
3990
3991	// When specified, this offset (in milliseconds) is added to the input Ad Avail PTS
3992	// time.
3993	AdAvailOffset int32
3994
3995	// Settings for ad avail blanking. Video can be blanked or overlaid with an image,
3996	// and audio muted during SCTE-35 triggered ad avails.
3997	AvailBlanking *AvailBlanking
3998
3999	// Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion,
4000	// you can ignore these settings.
4001	Esam *EsamSettings
4002
4003	// Use Inputs (inputs) to define the source file used in the transcode job. There
4004	// can only be one input in a job template. Using the API, you can include multiple
4005	// inputs when referencing a job template.
4006	Inputs []InputTemplate
4007
4008	// Use these settings only when you use Kantar watermarking. Specify the values
4009	// that MediaConvert uses to generate and place Kantar watermarks in your output
4010	// audio. These settings apply to every output in your job. In addition to
4011	// specifying these values, you also need to store your Kantar credentials in AWS
4012	// Secrets Manager. For more information, see
4013	// https://docs.aws.amazon.com/mediaconvert/latest/ug/kantar-watermarking.html.
4014	KantarWatermark *KantarWatermarkSettings
4015
4016	// Overlay motion graphics on top of your video. The motion graphics that you
4017	// specify here appear on all outputs in all output groups. For more information,
4018	// see
4019	// https://docs.aws.amazon.com/mediaconvert/latest/ug/motion-graphic-overlay.html.
4020	MotionImageInserter *MotionImageInserter
4021
4022	// Settings for your Nielsen configuration. If you don't do Nielsen measurement and
4023	// analytics, ignore these settings. When you enable Nielsen configuration
4024	// (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs
4025	// in the job. To enable Nielsen configuration programmatically, include an
4026	// instance of nielsenConfiguration in your JSON job specification. Even if you
4027	// don't include any children of nielsenConfiguration, you still enable the
4028	// setting.
4029	NielsenConfiguration *NielsenConfiguration
4030
4031	// Ignore these settings unless you are using Nielsen non-linear watermarking.
4032	// Specify the values that MediaConvert uses to generate and place Nielsen
4033	// watermarks in your output audio. In addition to specifying these values, you
4034	// also need to set up your cloud TIC server. These settings apply to every output
4035	// in your job. The MediaConvert implementation is currently with the following
4036	// Nielsen versions: Nielsen Watermark SDK Version 5.2.1 Nielsen NLM Watermark
4037	// Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0]
4038	NielsenNonLinearWatermark *NielsenNonLinearWatermarkSettings
4039
4040	// (OutputGroups) contains one group of settings for each set of outputs that share
4041	// a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF,
4042	// and no container) are grouped in a single output group as well. Required in
4043	// (OutputGroups) is a group of settings that apply to the whole group. This
4044	// required object depends on the value you set for (Type) under
4045	// (OutputGroups)>(OutputGroupSettings). Type, settings object pairs are as
4046	// follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS,
4047	// HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings *
4048	// MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS,
4049	// CmafGroupSettings
4050	OutputGroups []OutputGroup
4051
4052	// These settings control how the service handles timecodes throughout the job.
4053	// These settings don't affect input clipping.
4054	TimecodeConfig *TimecodeConfig
4055
4056	// Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags in
4057	// any HLS outputs. To include timed metadata, you must enable it here, enable it
4058	// in each output container, and specify tags and timecodes in ID3 insertion
4059	// (Id3Insertion) objects.
4060	TimedMetadataInsertion *TimedMetadataInsertion
4061}
4062
4063// Use these settings only when you use Kantar watermarking. Specify the values
4064// that MediaConvert uses to generate and place Kantar watermarks in your output
4065// audio. These settings apply to every output in your job. In addition to
4066// specifying these values, you also need to store your Kantar credentials in AWS
4067// Secrets Manager. For more information, see
4068// https://docs.aws.amazon.com/mediaconvert/latest/ug/kantar-watermarking.html.
4069type KantarWatermarkSettings struct {
4070
4071	// Provide an audio channel name from your Kantar audio license.
4072	ChannelName *string
4073
4074	// Specify a unique identifier for Kantar to use for this piece of content.
4075	ContentReference *string
4076
4077	// Provide the name of the AWS Secrets Manager secret where your Kantar credentials
4078	// are stored. Note that your MediaConvert service role must provide access to this
4079	// secret. For more information, see
4080	// https://docs.aws.amazon.com/mediaconvert/latest/ug/granting-permissions-for-mediaconvert-to-access-secrets-manager-secret.html.
4081	// For instructions on creating a secret, see
4082	// https://docs.aws.amazon.com/secretsmanager/latest/userguide/tutorials_basic.html,
4083	// in the AWS Secrets Manager User Guide.
4084	CredentialsSecretName *string
4085
4086	// Optional. Specify an offset, in whole seconds, from the start of your output and
4087	// the beginning of the watermarking. When you don't specify an offset, Kantar
4088	// defaults to zero.
4089	FileOffset float64
4090
4091	// Provide your Kantar license ID number. You should get this number from Kantar.
4092	KantarLicenseId int32
4093
4094	// Provide the HTTPS endpoint to the Kantar server. You should get this endpoint
4095	// from Kantar.
4096	KantarServerUrl *string
4097
4098	// Optional. Specify the Amazon S3 bucket where you want MediaConvert to store your
4099	// Kantar watermark XML logs. When you don't specify a bucket, MediaConvert doesn't
4100	// save these logs. Note that your MediaConvert service role must provide access to
4101	// this location. For more information, see
4102	// https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html
4103	LogDestination *string
4104
4105	// You can optionally use this field to specify the first timestamp that Kantar
4106	// embeds during watermarking. Kantar suggests that you be very cautious when using
4107	// this Kantar feature, and that you use it only on channels that are managed
4108	// specifically for use with this feature by your Audience Measurement Operator.
4109	// For more information about this feature, contact Kantar technical support.
4110	Metadata3 *string
4111
4112	// Additional metadata that MediaConvert sends to Kantar. Maximum length is 50
4113	// characters.
4114	Metadata4 *string
4115
4116	// Additional metadata that MediaConvert sends to Kantar. Maximum length is 50
4117	// characters.
4118	Metadata5 *string
4119
4120	// Additional metadata that MediaConvert sends to Kantar. Maximum length is 50
4121	// characters.
4122	Metadata6 *string
4123
4124	// Additional metadata that MediaConvert sends to Kantar. Maximum length is 50
4125	// characters.
4126	Metadata7 *string
4127
4128	// Additional metadata that MediaConvert sends to Kantar. Maximum length is 50
4129	// characters.
4130	Metadata8 *string
4131}
4132
4133// Settings for SCTE-35 signals from ESAM. Include this in your job settings to put
4134// SCTE-35 markers in your HLS and transport stream outputs at the insertion points
4135// that you specify in an ESAM XML document. Provide the document in the setting
4136// SCC XML (sccXml).
4137type M2tsScte35Esam struct {
4138
4139	// Packet Identifier (PID) of the SCTE-35 stream in the transport stream generated
4140	// by ESAM.
4141	Scte35EsamPid int32
4142}
4143
4144// MPEG-2 TS container settings. These apply to outputs in a File output group when
4145// the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). In
4146// these assets, data is organized by the program map table (PMT). Each transport
4147// stream program contains subsets of data, including audio, video, and metadata.
4148// Each of these subsets of data has a numerical label called a packet identifier
4149// (PID). Each transport stream program corresponds to one MediaConvert output. The
4150// PMT lists the types of data in a program along with their PID. Downstream
4151// systems and players use the program map table to look up the PID for each type
4152// of data it accesses and then uses the PIDs to locate specific data within the
4153// asset.
4154type M2tsSettings struct {
4155
4156	// Selects between the DVB and ATSC buffer models for Dolby Digital audio.
4157	AudioBufferModel M2tsAudioBufferModel
4158
4159	// Specify this setting only when your output will be consumed by a downstream
4160	// repackaging workflow that is sensitive to very small duration differences
4161	// between video and audio. For this situation, choose Match video duration
4162	// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
4163	// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
4164	// MediaConvert pads the output audio streams with silence or trims them to ensure
4165	// that the total duration of each audio stream is at least as long as the total
4166	// duration of the video stream. After padding or trimming, the audio stream
4167	// duration is no more than one frame longer than the video stream. MediaConvert
4168	// applies audio padding or trimming only to the end of the last segment of the
4169	// output. For unsegmented outputs, MediaConvert adds padding only to the end of
4170	// the file. When you keep the default value, any minor discrepancies between audio
4171	// and video duration will depend on your output audio codec.
4172	AudioDuration M2tsAudioDuration
4173
4174	// The number of audio frames to insert for each PES packet.
4175	AudioFramesPerPes int32
4176
4177	// Specify the packet identifiers (PIDs) for any elementary audio streams you
4178	// include in this output. Specify multiple PIDs as a JSON array. Default is the
4179	// range 482-492.
4180	AudioPids []int32
4181
4182	// Specify the output bitrate of the transport stream in bits per second. Setting
4183	// to 0 lets the muxer automatically determine the appropriate bitrate. Other
4184	// common values are 3750000, 7500000, and 15000000.
4185	Bitrate int32
4186
4187	// Controls what buffer model to use for accurate interleaving. If set to
4188	// MULTIPLEX, use multiplex buffer model. If set to NONE, this can lead to lower
4189	// latency, but low-memory devices may not be able to play back the stream without
4190	// interruptions.
4191	BufferModel M2tsBufferModel
4192
4193	// Use these settings to insert a DVB Network Information Table (NIT) in the
4194	// transport stream of this output. When you work directly in your JSON job
4195	// specification, include this object only when your job has a transport stream
4196	// output and the container settings contain the object M2tsSettings.
4197	DvbNitSettings *DvbNitSettings
4198
4199	// Use these settings to insert a DVB Service Description Table (SDT) in the
4200	// transport stream of this output. When you work directly in your JSON job
4201	// specification, include this object only when your job has a transport stream
4202	// output and the container settings contain the object M2tsSettings.
4203	DvbSdtSettings *DvbSdtSettings
4204
4205	// Specify the packet identifiers (PIDs) for DVB subtitle data included in this
4206	// output. Specify multiple PIDs as a JSON array. Default is the range 460-479.
4207	DvbSubPids []int32
4208
4209	// Use these settings to insert a DVB Time and Date Table (TDT) in the transport
4210	// stream of this output. When you work directly in your JSON job specification,
4211	// include this object only when your job has a transport stream output and the
4212	// container settings contain the object M2tsSettings.
4213	DvbTdtSettings *DvbTdtSettings
4214
4215	// Specify the packet identifier (PID) for DVB teletext data you include in this
4216	// output. Default is 499.
4217	DvbTeletextPid int32
4218
4219	// When set to VIDEO_AND_FIXED_INTERVALS, audio EBP markers will be added to
4220	// partitions 3 and 4. The interval between these additional markers will be fixed,
4221	// and will be slightly shorter than the video EBP marker interval. When set to
4222	// VIDEO_INTERVAL, these additional markers will not be inserted. Only applicable
4223	// when EBP segmentation markers are is selected (segmentationMarkers is EBP or
4224	// EBP_LEGACY).
4225	EbpAudioInterval M2tsEbpAudioInterval
4226
4227	// Selects which PIDs to place EBP markers on. They can either be placed only on
4228	// the video PID, or on both the video PID and all audio PIDs. Only applicable when
4229	// EBP segmentation markers are is selected (segmentationMarkers is EBP or
4230	// EBP_LEGACY).
4231	EbpPlacement M2tsEbpPlacement
4232
4233	// Controls whether to include the ES Rate field in the PES header.
4234	EsRateInPes M2tsEsRateInPes
4235
4236	// Keep the default value (DEFAULT) unless you know that your audio EBP markers are
4237	// incorrectly appearing before your video EBP markers. To correct this problem,
4238	// set this value to Force (FORCE).
4239	ForceTsVideoEbpOrder M2tsForceTsVideoEbpOrder
4240
4241	// The length, in seconds, of each fragment. Only used with EBP markers.
4242	FragmentTime float64
4243
4244	// Specify the maximum time, in milliseconds, between Program Clock References
4245	// (PCRs) inserted into the transport stream.
4246	MaxPcrInterval int32
4247
4248	// When set, enforces that Encoder Boundary Points do not come within the specified
4249	// time interval of each other by looking ahead at input video. If another EBP is
4250	// going to come in within the specified time interval, the current EBP is not
4251	// emitted, and the segment is "stretched" to the next marker. The lookahead value
4252	// does not add latency to the system. The Live Event must be configured elsewhere
4253	// to create sufficient latency to make the lookahead accurate.
4254	MinEbpInterval int32
4255
4256	// If INSERT, Nielsen inaudible tones for media tracking will be detected in the
4257	// input audio and an equivalent ID3 tag will be inserted in the output.
4258	NielsenId3 M2tsNielsenId3
4259
4260	// Value in bits per second of extra null packets to insert into the transport
4261	// stream. This can be used if a downstream encryption system requires periodic
4262	// null packets.
4263	NullPacketBitrate float64
4264
4265	// The number of milliseconds between instances of this table in the output
4266	// transport stream.
4267	PatInterval int32
4268
4269	// When set to PCR_EVERY_PES_PACKET, a Program Clock Reference value is inserted
4270	// for every Packetized Elementary Stream (PES) header. This is effective only when
4271	// the PCR PID is the same as the video or audio elementary stream.
4272	PcrControl M2tsPcrControl
4273
4274	// Specify the packet identifier (PID) for the program clock reference (PCR) in
4275	// this output. If you do not specify a value, the service will use the value for
4276	// Video PID (VideoPid).
4277	PcrPid int32
4278
4279	// Specify the number of milliseconds between instances of the program map table
4280	// (PMT) in the output transport stream.
4281	PmtInterval int32
4282
4283	// Specify the packet identifier (PID) for the program map table (PMT) itself.
4284	// Default is 480.
4285	PmtPid int32
4286
4287	// Specify the packet identifier (PID) of the private metadata stream. Default is
4288	// 503.
4289	PrivateMetadataPid int32
4290
4291	// Use Program number (programNumber) to specify the program number used in the
4292	// program map table (PMT) for this output. Default is 1. Program numbers and
4293	// program map tables are parts of MPEG-2 transport stream containers, used for
4294	// organizing data.
4295	ProgramNumber int32
4296
4297	// When set to CBR, inserts null packets into transport stream to fill specified
4298	// bitrate. When set to VBR, the bitrate setting acts as the maximum bitrate, but
4299	// the output will not be padded up to that bitrate.
4300	RateMode M2tsRateMode
4301
4302	// Include this in your job settings to put SCTE-35 markers in your HLS and
4303	// transport stream outputs at the insertion points that you specify in an ESAM XML
4304	// document. Provide the document in the setting SCC XML (sccXml).
4305	Scte35Esam *M2tsScte35Esam
4306
4307	// Specify the packet identifier (PID) of the SCTE-35 stream in the transport
4308	// stream.
4309	Scte35Pid int32
4310
4311	// For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you
4312	// want SCTE-35 markers that appear in your input to also appear in this output.
4313	// Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35
4314	// markers from an ESAM XML document-- Choose None (NONE). Also provide the ESAM
4315	// XML as a string in the setting Signal processing notification XML (sccXml). Also
4316	// enable ESAM SCTE-35 (include the property scte35Esam).
4317	Scte35Source M2tsScte35Source
4318
4319	// Inserts segmentation markers at each segmentation_time period. rai_segstart sets
4320	// the Random Access Indicator bit in the adaptation field. rai_adapt sets the RAI
4321	// bit and adds the current timecode in the private data bytes. psi_segstart
4322	// inserts PAT and PMT tables at the start of segments. ebp adds Encoder Boundary
4323	// Point information to the adaptation field as per OpenCable specification
4324	// OC-SP-EBP-I01-130118. ebp_legacy adds Encoder Boundary Point information to the
4325	// adaptation field using a legacy proprietary format.
4326	SegmentationMarkers M2tsSegmentationMarkers
4327
4328	// The segmentation style parameter controls how segmentation markers are inserted
4329	// into the transport stream. With avails, it is possible that segments may be
4330	// truncated, which can influence where future segmentation markers are inserted.
4331	// When a segmentation style of "reset_cadence" is selected and a segment is
4332	// truncated due to an avail, we will reset the segmentation cadence. This means
4333	// the subsequent segment will have a duration of of $segmentation_time seconds.
4334	// When a segmentation style of "maintain_cadence" is selected and a segment is
4335	// truncated due to an avail, we will not reset the segmentation cadence. This
4336	// means the subsequent segment will likely be truncated as well. However, all
4337	// segments after that will have a duration of $segmentation_time seconds. Note
4338	// that EBP lookahead is a slight exception to this rule.
4339	SegmentationStyle M2tsSegmentationStyle
4340
4341	// Specify the length, in seconds, of each segment. Required unless markers is set
4342	// to none.
4343	SegmentationTime float64
4344
4345	// Specify the packet identifier (PID) for timed metadata in this output. Default
4346	// is 502.
4347	TimedMetadataPid int32
4348
4349	// Specify the ID for the transport stream itself in the program map table for this
4350	// output. Transport stream IDs and program map tables are parts of MPEG-2
4351	// transport stream containers, used for organizing data.
4352	TransportStreamId int32
4353
4354	// Specify the packet identifier (PID) of the elementary video stream in the
4355	// transport stream.
4356	VideoPid int32
4357}
4358
4359// These settings relate to the MPEG-2 transport stream (MPEG2-TS) container for
4360// the MPEG2-TS segments in your HLS outputs.
4361type M3u8Settings struct {
4362
4363	// Specify this setting only when your output will be consumed by a downstream
4364	// repackaging workflow that is sensitive to very small duration differences
4365	// between video and audio. For this situation, choose Match video duration
4366	// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
4367	// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
4368	// MediaConvert pads the output audio streams with silence or trims them to ensure
4369	// that the total duration of each audio stream is at least as long as the total
4370	// duration of the video stream. After padding or trimming, the audio stream
4371	// duration is no more than one frame longer than the video stream. MediaConvert
4372	// applies audio padding or trimming only to the end of the last segment of the
4373	// output. For unsegmented outputs, MediaConvert adds padding only to the end of
4374	// the file. When you keep the default value, any minor discrepancies between audio
4375	// and video duration will depend on your output audio codec.
4376	AudioDuration M3u8AudioDuration
4377
4378	// The number of audio frames to insert for each PES packet.
4379	AudioFramesPerPes int32
4380
4381	// Packet Identifier (PID) of the elementary audio stream(s) in the transport
4382	// stream. Multiple values are accepted, and can be entered in ranges and/or by
4383	// comma separation.
4384	AudioPids []int32
4385
4386	// Specify the maximum time, in milliseconds, between Program Clock References
4387	// (PCRs) inserted into the transport stream.
4388	MaxPcrInterval int32
4389
4390	// If INSERT, Nielsen inaudible tones for media tracking will be detected in the
4391	// input audio and an equivalent ID3 tag will be inserted in the output.
4392	NielsenId3 M3u8NielsenId3
4393
4394	// The number of milliseconds between instances of this table in the output
4395	// transport stream.
4396	PatInterval int32
4397
4398	// When set to PCR_EVERY_PES_PACKET a Program Clock Reference value is inserted for
4399	// every Packetized Elementary Stream (PES) header. This parameter is effective
4400	// only when the PCR PID is the same as the video or audio elementary stream.
4401	PcrControl M3u8PcrControl
4402
4403	// Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport
4404	// stream. When no value is given, the encoder will assign the same value as the
4405	// Video PID.
4406	PcrPid int32
4407
4408	// The number of milliseconds between instances of this table in the output
4409	// transport stream.
4410	PmtInterval int32
4411
4412	// Packet Identifier (PID) for the Program Map Table (PMT) in the transport stream.
4413	PmtPid int32
4414
4415	// Packet Identifier (PID) of the private metadata stream in the transport stream.
4416	PrivateMetadataPid int32
4417
4418	// The value of the program number field in the Program Map Table.
4419	ProgramNumber int32
4420
4421	// Packet Identifier (PID) of the SCTE-35 stream in the transport stream.
4422	Scte35Pid int32
4423
4424	// For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you
4425	// want SCTE-35 markers that appear in your input to also appear in this output.
4426	// Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35
4427	// markers from an ESAM XML document-- Choose None (NONE) if you don't want
4428	// manifest conditioning. Choose Passthrough (PASSTHROUGH) and choose Ad markers
4429	// (adMarkers) if you do want manifest conditioning. In both cases, also provide
4430	// the ESAM XML as a string in the setting Signal processing notification XML
4431	// (sccXml).
4432	Scte35Source M3u8Scte35Source
4433
4434	// Applies only to HLS outputs. Use this setting to specify whether the service
4435	// inserts the ID3 timed metadata from the input in this output.
4436	TimedMetadata TimedMetadata
4437
4438	// Packet Identifier (PID) of the timed metadata stream in the transport stream.
4439	TimedMetadataPid int32
4440
4441	// The value of the transport stream ID field in the Program Map Table.
4442	TransportStreamId int32
4443
4444	// Packet Identifier (PID) of the elementary video stream in the transport stream.
4445	VideoPid int32
4446}
4447
4448// Overlay motion graphics on top of your video. The motion graphics that you
4449// specify here appear on all outputs in all output groups. For more information,
4450// see
4451// https://docs.aws.amazon.com/mediaconvert/latest/ug/motion-graphic-overlay.html.
4452type MotionImageInserter struct {
4453
4454	// If your motion graphic asset is a .mov file, keep this setting unspecified. If
4455	// your motion graphic asset is a series of .png files, specify the frame rate of
4456	// the overlay in frames per second, as a fraction. For example, specify 24 fps as
4457	// 24/1. Make sure that the number of images in your series matches the frame rate
4458	// and your intended overlay duration. For example, if you want a 30-second overlay
4459	// at 30 fps, you should have 900 .png images. This overlay frame rate doesn't need
4460	// to match the frame rate of the underlying video.
4461	Framerate *MotionImageInsertionFramerate
4462
4463	// Specify the .mov file or series of .png files that you want to overlay on your
4464	// video. For .png files, provide the file name of the first file in the series.
4465	// Make sure that the names of the .png files end with sequential numbers that
4466	// specify the order that they are played in. For example, overlay_000.png,
4467	// overlay_001.png, overlay_002.png, and so on. The sequence must start at zero,
4468	// and each image file name must have the same number of digits. Pad your initial
4469	// file names with enough zeros to complete the sequence. For example, if the first
4470	// image is overlay_0.png, there can be only 10 images in the sequence, with the
4471	// last image being overlay_9.png. But if the first image is overlay_00.png, there
4472	// can be 100 images in the sequence.
4473	Input *string
4474
4475	// Choose the type of motion graphic asset that you are providing for your overlay.
4476	// You can choose either a .mov file or a series of .png files.
4477	InsertionMode MotionImageInsertionMode
4478
4479	// Use Offset to specify the placement of your motion graphic overlay on the video
4480	// frame. Specify in pixels, from the upper-left corner of the frame. If you don't
4481	// specify an offset, the service scales your overlay to the full size of the
4482	// frame. Otherwise, the service inserts the overlay at its native resolution and
4483	// scales the size up or down with any video scaling.
4484	Offset *MotionImageInsertionOffset
4485
4486	// Specify whether your motion graphic overlay repeats on a loop or plays only
4487	// once.
4488	Playback MotionImagePlayback
4489
4490	// Specify when the motion overlay begins. Use timecode format (HH:MM:SS:FF or
4491	// HH:MM:SS;FF). Make sure that the timecode you provide here takes into account
4492	// how you have set up your timecode configuration under both job settings and
4493	// input settings. The simplest way to do that is to set both to start at 0. If you
4494	// need to set up your job to follow timecodes embedded in your source that don't
4495	// start at zero, make sure that you specify a start time that is after the first
4496	// embedded timecode. For more information, see
4497	// https://docs.aws.amazon.com/mediaconvert/latest/ug/setting-up-timecode.html Find
4498	// job-wide and input timecode configuration settings in your JSON job settings
4499	// specification at settings>timecodeConfig>source and
4500	// settings>inputs>timecodeSource.
4501	StartTime *string
4502}
4503
4504// For motion overlays that don't have a built-in frame rate, specify the frame
4505// rate of the overlay in frames per second, as a fraction. For example, specify 24
4506// fps as 24/1. The overlay frame rate doesn't need to match the frame rate of the
4507// underlying video.
4508type MotionImageInsertionFramerate struct {
4509
4510	// The bottom of the fraction that expresses your overlay frame rate. For example,
4511	// if your frame rate is 24 fps, set this value to 1.
4512	FramerateDenominator int32
4513
4514	// The top of the fraction that expresses your overlay frame rate. For example, if
4515	// your frame rate is 24 fps, set this value to 24.
4516	FramerateNumerator int32
4517}
4518
4519// Specify the offset between the upper-left corner of the video frame and the top
4520// left corner of the overlay.
4521type MotionImageInsertionOffset struct {
4522
4523	// Set the distance, in pixels, between the overlay and the left edge of the video
4524	// frame.
4525	ImageX int32
4526
4527	// Set the distance, in pixels, between the overlay and the top edge of the video
4528	// frame.
4529	ImageY int32
4530}
4531
4532// These settings relate to your QuickTime MOV output container.
4533type MovSettings struct {
4534
4535	// When enabled, include 'clap' atom if appropriate for the video output settings.
4536	ClapAtom MovClapAtom
4537
4538	// When enabled, file composition times will start at zero, composition times in
4539	// the 'ctts' (composition time to sample) box for B-frames will be negative, and a
4540	// 'cslg' (composition shift least greatest) box will be included per 14496-1
4541	// amendment 1. This improves compatibility with Apple players and tools.
4542	CslgAtom MovCslgAtom
4543
4544	// When set to XDCAM, writes MPEG2 video streams into the QuickTime file using
4545	// XDCAM fourcc codes. This increases compatibility with Apple editors and players,
4546	// but may decrease compatibility with other players. Only applicable when the
4547	// video codec is MPEG2.
4548	Mpeg2FourCCControl MovMpeg2FourCCControl
4549
4550	// To make this output compatible with Omenon, keep the default value, OMNEON.
4551	// Unless you need Omneon compatibility, set this value to NONE. When you keep the
4552	// default value, OMNEON, MediaConvert increases the length of the edit list atom.
4553	// This might cause file rejections when a recipient of the output file doesn't
4554	// expct this extra padding.
4555	PaddingControl MovPaddingControl
4556
4557	// Always keep the default value (SELF_CONTAINED) for this setting.
4558	Reference MovReference
4559}
4560
4561// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the
4562// value MP2.
4563type Mp2Settings struct {
4564
4565	// Specify the average bitrate in bits per second.
4566	Bitrate int32
4567
4568	// Set Channels to specify the number of channels in this output audio track.
4569	// Choosing Mono in the console will give you 1 output channel; choosing Stereo
4570	// will give you 2. In the API, valid values are 1 and 2.
4571	Channels int32
4572
4573	// Sample rate in hz.
4574	SampleRate int32
4575}
4576
4577// Required when you set Codec, under AudioDescriptions>CodecSettings, to the value
4578// MP3.
4579type Mp3Settings struct {
4580
4581	// Specify the average bitrate in bits per second.
4582	Bitrate int32
4583
4584	// Specify the number of channels in this output audio track. Choosing Mono on the
4585	// console gives you 1 output channel; choosing Stereo gives you 2. In the API,
4586	// valid values are 1 and 2.
4587	Channels int32
4588
4589	// Specify whether the service encodes this MP3 audio output with a constant
4590	// bitrate (CBR) or a variable bitrate (VBR).
4591	RateControlMode Mp3RateControlMode
4592
4593	// Sample rate in hz.
4594	SampleRate int32
4595
4596	// Required when you set Bitrate control mode (rateControlMode) to VBR. Specify the
4597	// audio quality of this MP3 output from 0 (highest quality) to 9 (lowest quality).
4598	VbrQuality int32
4599}
4600
4601// These settings relate to your MP4 output container. You can create audio only
4602// outputs with this container. For more information, see
4603// https://docs.aws.amazon.com/mediaconvert/latest/ug/supported-codecs-containers-audio-only.html#output-codecs-and-containers-supported-for-audio-only.
4604type Mp4Settings struct {
4605
4606	// Specify this setting only when your output will be consumed by a downstream
4607	// repackaging workflow that is sensitive to very small duration differences
4608	// between video and audio. For this situation, choose Match video duration
4609	// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
4610	// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
4611	// MediaConvert pads the output audio streams with silence or trims them to ensure
4612	// that the total duration of each audio stream is at least as long as the total
4613	// duration of the video stream. After padding or trimming, the audio stream
4614	// duration is no more than one frame longer than the video stream. MediaConvert
4615	// applies audio padding or trimming only to the end of the last segment of the
4616	// output. For unsegmented outputs, MediaConvert adds padding only to the end of
4617	// the file. When you keep the default value, any minor discrepancies between audio
4618	// and video duration will depend on your output audio codec.
4619	AudioDuration CmfcAudioDuration
4620
4621	// When enabled, file composition times will start at zero, composition times in
4622	// the 'ctts' (composition time to sample) box for B-frames will be negative, and a
4623	// 'cslg' (composition shift least greatest) box will be included per 14496-1
4624	// amendment 1. This improves compatibility with Apple players and tools.
4625	CslgAtom Mp4CslgAtom
4626
4627	// Ignore this setting unless compliance to the CTTS box version specification
4628	// matters in your workflow. Specify a value of 1 to set your CTTS box version to 1
4629	// and make your output compliant with the specification. When you specify a value
4630	// of 1, you must also set CSLG atom (cslgAtom) to the value INCLUDE. Keep the
4631	// default value 0 to set your CTTS box version to 0. This can provide backward
4632	// compatibility for some players and packagers.
4633	CttsVersion int32
4634
4635	// Inserts a free-space box immediately after the moov box.
4636	FreeSpaceBox Mp4FreeSpaceBox
4637
4638	// If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning of
4639	// the archive as required for progressive downloading. Otherwise it is placed
4640	// normally at the end.
4641	MoovPlacement Mp4MoovPlacement
4642
4643	// Overrides the "Major Brand" field in the output file. Usually not necessary to
4644	// specify.
4645	Mp4MajorBrand *string
4646}
4647
4648// These settings relate to the fragmented MP4 container for the segments in your
4649// DASH outputs.
4650type MpdSettings struct {
4651
4652	// Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH
4653	// manifest with elements for embedded 608 captions. This markup isn't generally
4654	// required, but some video players require it to discover and play embedded 608
4655	// captions. Keep the default value, Exclude (EXCLUDE), to leave these elements
4656	// out. When you enable this setting, this is the markup that MediaConvert includes
4657	// in your manifest:
4658	AccessibilityCaptionHints MpdAccessibilityCaptionHints
4659
4660	// Specify this setting only when your output will be consumed by a downstream
4661	// repackaging workflow that is sensitive to very small duration differences
4662	// between video and audio. For this situation, choose Match video duration
4663	// (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default
4664	// codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration,
4665	// MediaConvert pads the output audio streams with silence or trims them to ensure
4666	// that the total duration of each audio stream is at least as long as the total
4667	// duration of the video stream. After padding or trimming, the audio stream
4668	// duration is no more than one frame longer than the video stream. MediaConvert
4669	// applies audio padding or trimming only to the end of the last segment of the
4670	// output. For unsegmented outputs, MediaConvert adds padding only to the end of
4671	// the file. When you keep the default value, any minor discrepancies between audio
4672	// and video duration will depend on your output audio codec.
4673	AudioDuration MpdAudioDuration
4674
4675	// Use this setting only in DASH output groups that include sidecar TTML or IMSC
4676	// captions. You specify sidecar captions in a separate output from your audio and
4677	// video. Choose Raw (RAW) for captions in a single XML file in a raw container.
4678	// Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in XML format contained
4679	// within fragmented MP4 files. This set of fragmented MP4 files is separate from
4680	// your video and audio fragmented MP4 files.
4681	CaptionContainerType MpdCaptionContainerType
4682
4683	// Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT
4684	// to put SCTE-35 markers in this output at the insertion points that you specify
4685	// in an ESAM XML document. Provide the document in the setting SCC XML (sccXml).
4686	Scte35Esam MpdScte35Esam
4687
4688	// Ignore this setting unless you have SCTE-35 markers in your input video file.
4689	// Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your
4690	// input to also appear in this output. Choose None (NONE) if you don't want those
4691	// SCTE-35 markers in this output.
4692	Scte35Source MpdScte35Source
4693}
4694
4695// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the
4696// value MPEG2.
4697type Mpeg2Settings struct {
4698
4699	// Specify the strength of any adaptive quantization filters that you enable. The
4700	// value that you choose here applies to the following settings: Spatial adaptive
4701	// quantization (spatialAdaptiveQuantization), and Temporal adaptive quantization
4702	// (temporalAdaptiveQuantization).
4703	AdaptiveQuantization Mpeg2AdaptiveQuantization
4704
4705	// Specify the average bitrate in bits per second. Required for VBR and CBR. For MS
4706	// Smooth outputs, bitrates must be unique when rounded down to the nearest
4707	// multiple of 1000.
4708	Bitrate int32
4709
4710	// Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output.
4711	CodecLevel Mpeg2CodecLevel
4712
4713	// Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output.
4714	CodecProfile Mpeg2CodecProfile
4715
4716	// Choose Adaptive to improve subjective video quality for high-motion content.
4717	// This will cause the service to use fewer B-frames (which infer information based
4718	// on other frames) for high-motion portions of the video and more B-frames for
4719	// low-motion portions. The maximum number of B-frames is limited by the value you
4720	// provide for the setting B frames between reference frames
4721	// (numberBFramesBetweenReferenceFrames).
4722	DynamicSubGop Mpeg2DynamicSubGop
4723
4724	// If you are using the console, use the Framerate setting to specify the frame
4725	// rate for this output. If you want to keep the same frame rate as the input
4726	// video, choose Follow source. If you want to do frame rate conversion, choose a
4727	// frame rate from the dropdown list or choose Custom. The framerates shown in the
4728	// dropdown list are decimal approximations of fractions. If you choose Custom,
4729	// specify your frame rate as a fraction. If you are creating your transcoding job
4730	// specification as a JSON file without the console, use FramerateControl to
4731	// specify which value the service uses for the frame rate for this output. Choose
4732	// INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the
4733	// input. Choose SPECIFIED if you want the service to use the frame rate you
4734	// specify in the settings FramerateNumerator and FramerateDenominator.
4735	FramerateControl Mpeg2FramerateControl
4736
4737	// Choose the method that you want MediaConvert to use when increasing or
4738	// decreasing the frame rate. We recommend using drop duplicate (DUPLICATE_DROP)
4739	// for numerically simple conversions, such as 60 fps to 30 fps. For numerically
4740	// complex conversions, you can use interpolate (INTERPOLATE) to avoid stutter.
4741	// This results in a smooth picture, but might introduce undesirable video
4742	// artifacts. For complex frame rate conversions, especially if your source video
4743	// has already been converted from its original cadence, use FrameFormer
4744	// (FRAMEFORMER) to do motion-compensated interpolation. FrameFormer chooses the
4745	// best conversion method frame by frame. Note that using FrameFormer increases the
4746	// transcoding time and incurs a significant add-on cost.
4747	FramerateConversionAlgorithm Mpeg2FramerateConversionAlgorithm
4748
4749	// When you use the API for transcode jobs that use frame rate conversion, specify
4750	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
4751	// FramerateDenominator to specify the denominator of this fraction. In this
4752	// example, use 1001 for the value of FramerateDenominator. When you use the
4753	// console for transcode jobs that use frame rate conversion, provide the value as
4754	// a decimal number for Framerate. In this example, specify 23.976.
4755	FramerateDenominator int32
4756
4757	// When you use the API for transcode jobs that use frame rate conversion, specify
4758	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
4759	// FramerateNumerator to specify the numerator of this fraction. In this example,
4760	// use 24000 for the value of FramerateNumerator. When you use the console for
4761	// transcode jobs that use frame rate conversion, provide the value as a decimal
4762	// number for Framerate. In this example, specify 23.976.
4763	FramerateNumerator int32
4764
4765	// Frequency of closed GOPs. In streaming applications, it is recommended that this
4766	// be set to 1 so a decoder joining mid-stream will receive an IDR frame as quickly
4767	// as possible. Setting this value to 0 will break output segmenting.
4768	GopClosedCadence int32
4769
4770	// GOP Length (keyframe interval) in frames or seconds. Must be greater than zero.
4771	GopSize float64
4772
4773	// Indicates if the GOP Size in MPEG2 is specified in frames or seconds. If seconds
4774	// the system will convert the GOP Size into a frame count at run time.
4775	GopSizeUnits Mpeg2GopSizeUnits
4776
4777	// Percentage of the buffer that should initially be filled (HRD buffer model).
4778	HrdBufferInitialFillPercentage int32
4779
4780	// Size of buffer (HRD buffer model) in bits. For example, enter five megabits as
4781	// 5000000.
4782	HrdBufferSize int32
4783
4784	// Choose the scan line type for the output. Keep the default value, Progressive
4785	// (PROGRESSIVE) to create a progressive output, regardless of the scan type of
4786	// your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
4787	// to create an output that's interlaced with the same field polarity throughout.
4788	// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom
4789	// (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the
4790	// source. For jobs that have multiple inputs, the output field polarity might
4791	// change over the course of the output. Follow behavior depends on the input scan
4792	// type. If the source is interlaced, the output will be interlaced with the same
4793	// polarity as the source. If the source is progressive, the output will be
4794	// interlaced with top field bottom field first, depending on which of the Follow
4795	// options you choose.
4796	InterlaceMode Mpeg2InterlaceMode
4797
4798	// Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision for
4799	// intra-block DC coefficients. If you choose the value auto, the service will
4800	// automatically select the precision based on the per-frame compression ratio.
4801	IntraDcPrecision Mpeg2IntraDcPrecision
4802
4803	// Maximum bitrate in bits/second. For example, enter five megabits per second as
4804	// 5000000.
4805	MaxBitrate int32
4806
4807	// Enforces separation between repeated (cadence) I-frames and I-frames inserted by
4808	// Scene Change Detection. If a scene change I-frame is within I-interval frames of
4809	// a cadence I-frame, the GOP is shrunk and/or stretched to the scene change
4810	// I-frame. GOP stretch requires enabling lookahead as well as setting I-interval.
4811	// The normal cadence resumes for the next GOP. This setting is only used when
4812	// Scene Change Detect is enabled. Note: Maximum GOP stretch = GOP size +
4813	// Min-I-interval - 1
4814	MinIInterval int32
4815
4816	// Number of B-frames between reference frames.
4817	NumberBFramesBetweenReferenceFrames int32
4818
4819	// Optional. Specify how the service determines the pixel aspect ratio (PAR) for
4820	// this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses
4821	// the PAR from your input video for your output. To specify a different PAR in the
4822	// console, choose any value other than Follow source. To specify a different PAR
4823	// by editing the JSON job specification, choose SPECIFIED. When you choose
4824	// SPECIFIED for this setting, you must also specify values for the parNumerator
4825	// and parDenominator settings.
4826	ParControl Mpeg2ParControl
4827
4828	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
4829	// console, this corresponds to any value other than Follow source. When you
4830	// specify an output pixel aspect ratio (PAR) that is different from your input
4831	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
4832	// widescreen, you would specify the ratio 40:33. In this example, the value for
4833	// parDenominator is 33.
4834	ParDenominator int32
4835
4836	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
4837	// console, this corresponds to any value other than Follow source. When you
4838	// specify an output pixel aspect ratio (PAR) that is different from your input
4839	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
4840	// widescreen, you would specify the ratio 40:33. In this example, the value for
4841	// parNumerator is 40.
4842	ParNumerator int32
4843
4844	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want
4845	// to trade off encoding speed for output video quality. The default behavior is
4846	// faster, lower quality, single-pass encoding.
4847	QualityTuningLevel Mpeg2QualityTuningLevel
4848
4849	// Use Rate control mode (Mpeg2RateControlMode) to specify whether the bitrate is
4850	// variable (vbr) or constant (cbr).
4851	RateControlMode Mpeg2RateControlMode
4852
4853	// Use this setting for interlaced outputs, when your output frame rate is half of
4854	// your input frame rate. In this situation, choose Optimized interlacing
4855	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
4856	// case, each progressive frame from the input corresponds to an interlaced field
4857	// in the output. Keep the default value, Basic interlacing (INTERLACED), for all
4858	// other output frame rates. With basic interlacing, MediaConvert performs any
4859	// frame rate conversion first and then interlaces the frames. When you choose
4860	// Optimized interlacing and you set your output frame rate to a value that isn't
4861	// suitable for optimized interlacing, MediaConvert automatically falls back to
4862	// basic interlacing. Required settings: To use optimized interlacing, you must set
4863	// Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized
4864	// interlacing for hard telecine outputs. You must also set Interlace mode
4865	// (interlaceMode) to a value other than Progressive (PROGRESSIVE).
4866	ScanTypeConversionMode Mpeg2ScanTypeConversionMode
4867
4868	// Enable this setting to insert I-frames at scene changes that the service
4869	// automatically detects. This improves video quality and is enabled by default.
4870	SceneChangeDetect Mpeg2SceneChangeDetect
4871
4872	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
4873	// second (fps). Enable slow PAL to create a 25 fps output. When you enable slow
4874	// PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio
4875	// to keep it synchronized with the video. Note that enabling this setting will
4876	// slightly reduce the duration of your video. Required settings: You must also set
4877	// Framerate to 25. In your JSON job specification, set (framerateControl) to
4878	// (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.
4879	SlowPal Mpeg2SlowPal
4880
4881	// Ignore this setting unless you need to comply with a specification that requires
4882	// a specific value. If you don't have a specification requirement, we recommend
4883	// that you adjust the softness of your output by using a lower value for the
4884	// setting Sharpness (sharpness) or by enabling a noise reducer filter
4885	// (noiseReducerFilter). The Softness (softness) setting specifies the quantization
4886	// matrices that the encoder uses. Keep the default value, 0, to use the AWS
4887	// Elemental default matrices. Choose a value from 17 to 128 to use planar
4888	// interpolation. Increasing values from 17 to 128 result in increasing reduction
4889	// of high-frequency data. The value 128 results in the softest video.
4890	Softness int32
4891
4892	// Keep the default value, Enabled (ENABLED), to adjust quantization within each
4893	// frame based on spatial variation of content complexity. When you enable this
4894	// feature, the encoder uses fewer bits on areas that can sustain more distortion
4895	// with no noticeable visual degradation and uses more bits on areas where any
4896	// small distortion will be noticeable. For example, complex textured blocks are
4897	// encoded with fewer bits and smooth textured blocks are encoded with more bits.
4898	// Enabling this feature will almost always improve your video quality. Note,
4899	// though, that this feature doesn't take into account where the viewer's attention
4900	// is likely to be. If viewers are likely to be focusing their attention on a part
4901	// of the screen with a lot of complex texture, you might choose to disable this
4902	// feature. Related setting: When you enable spatial adaptive quantization, set the
4903	// value for Adaptive quantization (adaptiveQuantization) depending on your
4904	// content. For homogeneous content, such as cartoons and video games, set it to
4905	// Low. For content with a wider variety of textures, set it to High or Higher.
4906	SpatialAdaptiveQuantization Mpeg2SpatialAdaptiveQuantization
4907
4908	// Specify whether this output's video uses the D10 syntax. Keep the default value
4909	// to not use the syntax. Related settings: When you choose D10 (D_10) for your MXF
4910	// profile (profile), you must also set this value to to D10 (D_10).
4911	Syntax Mpeg2Syntax
4912
4913	// When you do frame rate conversion from 23.976 frames per second (fps) to 29.97
4914	// fps, and your output scan type is interlaced, you can optionally enable hard or
4915	// soft telecine to create a smoother picture. Hard telecine (HARD) produces a
4916	// 29.97i output. Soft telecine (SOFT) produces an output with a 23.976 output that
4917	// signals to the video player device to do the conversion during play back. When
4918	// you keep the default value, None (NONE), MediaConvert does a standard frame rate
4919	// conversion to 29.97 without doing anything with the field polarity to create a
4920	// smoother picture.
4921	Telecine Mpeg2Telecine
4922
4923	// Keep the default value, Enabled (ENABLED), to adjust quantization within each
4924	// frame based on temporal variation of content complexity. When you enable this
4925	// feature, the encoder uses fewer bits on areas of the frame that aren't moving
4926	// and uses more bits on complex objects with sharp edges that move a lot. For
4927	// example, this feature improves the readability of text tickers on newscasts and
4928	// scoreboards on sports matches. Enabling this feature will almost always improve
4929	// your video quality. Note, though, that this feature doesn't take into account
4930	// where the viewer's attention is likely to be. If viewers are likely to be
4931	// focusing their attention on a part of the screen that doesn't have moving
4932	// objects with sharp edges, such as sports athletes' faces, you might choose to
4933	// disable this feature. Related setting: When you enable temporal quantization,
4934	// adjust the strength of the filter with the setting Adaptive quantization
4935	// (adaptiveQuantization).
4936	TemporalAdaptiveQuantization Mpeg2TemporalAdaptiveQuantization
4937}
4938
4939// Specify the details for each additional Microsoft Smooth Streaming manifest that
4940// you want the service to generate for this output group. Each manifest can
4941// reference a different subset of outputs in the group.
4942type MsSmoothAdditionalManifest struct {
4943
4944	// Specify a name modifier that the service adds to the name of this manifest to
4945	// make it different from the file names of the other main manifests in the output
4946	// group. For example, say that the default main manifest for your Microsoft Smooth
4947	// group is film-name.ismv. If you enter "-no-premium" for this setting, then the
4948	// file name the service generates for this top-level manifest is
4949	// film-name-no-premium.ismv.
4950	ManifestNameModifier *string
4951
4952	// Specify the outputs that you want this additional top-level manifest to
4953	// reference.
4954	SelectedOutputs []string
4955}
4956
4957// If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify the
4958// value SpekeKeyProvider.
4959type MsSmoothEncryptionSettings struct {
4960
4961	// If your output group type is HLS, DASH, or Microsoft Smooth, use these settings
4962	// when doing DRM encryption with a SPEKE-compliant key provider. If your output
4963	// group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
4964	SpekeKeyProvider *SpekeKeyProvider
4965}
4966
4967// Settings related to your Microsoft Smooth Streaming output package. For more
4968// information, see
4969// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When
4970// you work directly in your JSON job specification, include this object and any
4971// required children when you set Type, under OutputGroupSettings, to
4972// MS_SMOOTH_GROUP_SETTINGS.
4973type MsSmoothGroupSettings struct {
4974
4975	// By default, the service creates one .ism Microsoft Smooth Streaming manifest for
4976	// each Microsoft Smooth Streaming output group in your job. This default manifest
4977	// references every output in the output group. To create additional manifests that
4978	// reference a subset of the outputs in the output group, specify a list of them
4979	// here.
4980	AdditionalManifests []MsSmoothAdditionalManifest
4981
4982	// COMBINE_DUPLICATE_STREAMS combines identical audio encoding settings across a
4983	// Microsoft Smooth output group into a single audio stream.
4984	AudioDeduplication MsSmoothAudioDeduplication
4985
4986	// Use Destination (Destination) to specify the S3 output location and the output
4987	// filename base. Destination accepts format identifiers. If you do not specify the
4988	// base filename in the URI, the service will use the filename of the input file.
4989	// If your job has multiple inputs, the service uses the filename of the first
4990	// input file.
4991	Destination *string
4992
4993	// Settings associated with the destination. Will vary based on the type of
4994	// destination
4995	DestinationSettings *DestinationSettings
4996
4997	// If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify the
4998	// value SpekeKeyProvider.
4999	Encryption *MsSmoothEncryptionSettings
5000
5001	// Use Fragment length (FragmentLength) to specify the mp4 fragment sizes in
5002	// seconds. Fragment length must be compatible with GOP size and frame rate.
5003	FragmentLength int32
5004
5005	// Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding format
5006	// for the server and client manifest. Valid options are utf8 and utf16.
5007	ManifestEncoding MsSmoothManifestEncoding
5008}
5009
5010// These settings relate to your MXF output container.
5011type MxfSettings struct {
5012
5013	// Optional. When you have AFD signaling set up in your output video stream, use
5014	// this setting to choose whether to also include it in the MXF wrapper. Choose
5015	// Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper. Choose Copy
5016	// from video stream (COPY_FROM_VIDEO) to copy the AFD values from the video stream
5017	// for this output to the MXF wrapper. Regardless of which option you choose, the
5018	// AFD values remain in the video stream. Related settings: To set up your output
5019	// to include or exclude AFD values, see AfdSignaling, under VideoDescription. On
5020	// the console, find AFD signaling under the output's video encoding settings.
5021	AfdSignaling MxfAfdSignaling
5022
5023	// Specify the MXF profile, also called shim, for this output. When you choose
5024	// Auto, MediaConvert chooses a profile based on the video codec and resolution.
5025	// For a list of codecs supported with each MXF profile, see
5026	// https://docs.aws.amazon.com/mediaconvert/latest/ug/codecs-supported-with-each-mxf-profile.html.
5027	// For more information about the automatic selection behavior, see
5028	// https://docs.aws.amazon.com/mediaconvert/latest/ug/default-automatic-selection-of-mxf-profiles.html.
5029	Profile MxfProfile
5030}
5031
5032// For forensic video watermarking, MediaConvert supports Nagra NexGuard File
5033// Marker watermarking. MediaConvert supports both PreRelease Content (NGPR/G2) and
5034// OTT Streaming workflows.
5035type NexGuardFileMarkerSettings struct {
5036
5037	// Use the base64 license string that Nagra provides you. Enter it directly in your
5038	// JSON job specification or in the console. Required when you include Nagra
5039	// NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in your job.
5040	License *string
5041
5042	// Specify the payload ID that you want associated with this output. Valid values
5043	// vary depending on your Nagra NexGuard forensic watermarking workflow. Required
5044	// when you include Nagra NexGuard File Marker watermarking
5045	// (NexGuardWatermarkingSettings) in your job. For PreRelease Content (NGPR/G2),
5046	// specify an integer from 1 through 4,194,303. You must generate a unique ID for
5047	// each asset you watermark, and keep a record of which ID you have assigned to
5048	// each asset. Neither Nagra nor MediaConvert keep track of the relationship
5049	// between output files and your IDs. For OTT Streaming, create two adaptive
5050	// bitrate (ABR) stacks for each asset. Do this by setting up two output groups.
5051	// For one output group, set the value of Payload ID (payload) to 0 in every
5052	// output. For the other output group, set Payload ID (payload) to 1 in every
5053	// output.
5054	Payload int32
5055
5056	// Enter one of the watermarking preset strings that Nagra provides you. Required
5057	// when you include Nagra NexGuard File Marker watermarking
5058	// (NexGuardWatermarkingSettings) in your job.
5059	Preset *string
5060
5061	// Optional. Ignore this setting unless Nagra support directs you to specify a
5062	// value. When you don't specify a value here, the Nagra NexGuard library uses its
5063	// default value.
5064	Strength WatermarkingStrength
5065}
5066
5067// Settings for your Nielsen configuration. If you don't do Nielsen measurement and
5068// analytics, ignore these settings. When you enable Nielsen configuration
5069// (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs
5070// in the job. To enable Nielsen configuration programmatically, include an
5071// instance of nielsenConfiguration in your JSON job specification. Even if you
5072// don't include any children of nielsenConfiguration, you still enable the
5073// setting.
5074type NielsenConfiguration struct {
5075
5076	// Nielsen has discontinued the use of breakout code functionality. If you must
5077	// include this property, set the value to zero.
5078	BreakoutCode int32
5079
5080	// Use Distributor ID (DistributorID) to specify the distributor ID that is
5081	// assigned to your organization by Neilsen.
5082	DistributorId *string
5083}
5084
5085// Ignore these settings unless you are using Nielsen non-linear watermarking.
5086// Specify the values that MediaConvert uses to generate and place Nielsen
5087// watermarks in your output audio. In addition to specifying these values, you
5088// also need to set up your cloud TIC server. These settings apply to every output
5089// in your job. The MediaConvert implementation is currently with the following
5090// Nielsen versions: Nielsen Watermark SDK Version 5.2.1 Nielsen NLM Watermark
5091// Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0]
5092type NielsenNonLinearWatermarkSettings struct {
5093
5094	// Choose the type of Nielsen watermarks that you want in your outputs. When you
5095	// choose NAES 2 and NW (NAES2_AND_NW), you must provide a value for the setting
5096	// SID (sourceId). When you choose CBET (CBET), you must provide a value for the
5097	// setting CSID (cbetSourceId). When you choose NAES 2, NW, and CBET
5098	// (NAES2_AND_NW_AND_CBET), you must provide values for both of these settings.
5099	ActiveWatermarkProcess NielsenActiveWatermarkProcessType
5100
5101	// Optional. Use this setting when you want the service to include an ADI file in
5102	// the Nielsen metadata .zip file. To provide an ADI file, store it in Amazon S3
5103	// and provide a URL to it here. The URL should be in the following format:
5104	// S3://bucket/path/ADI-file. For more information about the metadata .zip file,
5105	// see the setting Metadata destination (metadataDestination).
5106	AdiFilename *string
5107
5108	// Use the asset ID that you provide to Nielsen to uniquely identify this asset.
5109	// Required for all Nielsen non-linear watermarking.
5110	AssetId *string
5111
5112	// Use the asset name that you provide to Nielsen for this asset. Required for all
5113	// Nielsen non-linear watermarking.
5114	AssetName *string
5115
5116	// Use the CSID that Nielsen provides to you. This CBET source ID should be unique
5117	// to your Nielsen account but common to all of your output assets that have CBET
5118	// watermarking. Required when you choose a value for the setting Watermark types
5119	// (ActiveWatermarkProcess) that includes CBET.
5120	CbetSourceId *string
5121
5122	// Optional. If this asset uses an episode ID with Nielsen, provide it here.
5123	EpisodeId *string
5124
5125	// Specify the Amazon S3 location where you want MediaConvert to save your Nielsen
5126	// non-linear metadata .zip file. This Amazon S3 bucket must be in the same Region
5127	// as the one where you do your MediaConvert transcoding. If you want to include an
5128	// ADI file in this .zip file, use the setting ADI file (adiFilename) to specify
5129	// it. MediaConvert delivers the Nielsen metadata .zip files only to your metadata
5130	// destination Amazon S3 bucket. It doesn't deliver the .zip files to Nielsen. You
5131	// are responsible for delivering the metadata .zip files to Nielsen.
5132	MetadataDestination *string
5133
5134	// Use the SID that Nielsen provides to you. This source ID should be unique to
5135	// your Nielsen account but common to all of your output assets. Required for all
5136	// Nielsen non-linear watermarking. This ID should be unique to your Nielsen
5137	// account but common to all of your output assets. Required for all Nielsen
5138	// non-linear watermarking.
5139	SourceId int32
5140
5141	// Required. Specify whether your source content already contains Nielsen
5142	// non-linear watermarks. When you set this value to Watermarked (WATERMARKED), the
5143	// service fails the job. Nielsen requires that you add non-linear watermarking to
5144	// only clean content that doesn't already have non-linear Nielsen watermarks.
5145	SourceWatermarkStatus NielsenSourceWatermarkStatusType
5146
5147	// Specify the endpoint for the TIC server that you have deployed and configured in
5148	// the AWS Cloud. Required for all Nielsen non-linear watermarking. MediaConvert
5149	// can't connect directly to a TIC server. Instead, you must use API Gateway to
5150	// provide a RESTful interface between MediaConvert and a TIC server that you
5151	// deploy in your AWS account. For more information on deploying a TIC server in
5152	// your AWS account and the required API Gateway, contact Nielsen support.
5153	TicServerUrl *string
5154
5155	// To create assets that have the same TIC values in each audio track, keep the
5156	// default value Share TICs (SAME_TICS_PER_TRACK). To create assets that have
5157	// unique TIC values for each audio track, choose Use unique TICs
5158	// (RESERVE_UNIQUE_TICS_PER_TRACK).
5159	UniqueTicPerAudioTrack NielsenUniqueTicPerAudioTrackType
5160}
5161
5162// Enable the Noise reducer (NoiseReducer) feature to remove noise from your video
5163// output if necessary. Enable or disable this feature for each output
5164// individually. This setting is disabled by default. When you enable Noise reducer
5165// (NoiseReducer), you must also select a value for Noise reducer filter
5166// (NoiseReducerFilter).
5167type NoiseReducer struct {
5168
5169	// Use Noise reducer filter (NoiseReducerFilter) to select one of the following
5170	// spatial image filtering functions. To use this setting, you must also enable
5171	// Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing noise.
5172	// * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution
5173	// filtering. * Conserve does min/max noise reduction. * Spatial does
5174	// frequency-domain filtering based on JND principles. * Temporal optimizes video
5175	// quality for complex motion.
5176	Filter NoiseReducerFilter
5177
5178	// Settings for a noise reducer filter
5179	FilterSettings *NoiseReducerFilterSettings
5180
5181	// Noise reducer filter settings for spatial filter.
5182	SpatialFilterSettings *NoiseReducerSpatialFilterSettings
5183
5184	// Noise reducer filter settings for temporal filter.
5185	TemporalFilterSettings *NoiseReducerTemporalFilterSettings
5186}
5187
5188// Settings for a noise reducer filter
5189type NoiseReducerFilterSettings struct {
5190
5191	// Relative strength of noise reducing filter. Higher values produce stronger
5192	// filtering.
5193	Strength int32
5194}
5195
5196// Noise reducer filter settings for spatial filter.
5197type NoiseReducerSpatialFilterSettings struct {
5198
5199	// Specify strength of post noise reduction sharpening filter, with 0 disabling the
5200	// filter and 3 enabling it at maximum strength.
5201	PostFilterSharpenStrength int32
5202
5203	// The speed of the filter, from -2 (lower speed) to 3 (higher speed), with 0 being
5204	// the nominal value.
5205	Speed int32
5206
5207	// Relative strength of noise reducing filter. Higher values produce stronger
5208	// filtering.
5209	Strength int32
5210}
5211
5212// Noise reducer filter settings for temporal filter.
5213type NoiseReducerTemporalFilterSettings struct {
5214
5215	// Use Aggressive mode for content that has complex motion. Higher values produce
5216	// stronger temporal filtering. This filters highly complex scenes more
5217	// aggressively and creates better VQ for low bitrate outputs.
5218	AggressiveMode int32
5219
5220	// Optional. When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), you
5221	// can use this setting to apply sharpening. The default behavior, Auto (AUTO),
5222	// allows the transcoder to determine whether to apply filtering, depending on
5223	// input type and quality. When you set Noise reducer to Temporal, your output
5224	// bandwidth is reduced. When Post temporal sharpening is also enabled, that
5225	// bandwidth reduction is smaller.
5226	PostTemporalSharpening NoiseFilterPostTemporalSharpening
5227
5228	// The speed of the filter (higher number is faster). Low setting reduces bit rate
5229	// at the cost of transcode time, high setting improves transcode time at the cost
5230	// of bit rate.
5231	Speed int32
5232
5233	// Specify the strength of the noise reducing filter on this output. Higher values
5234	// produce stronger filtering. We recommend the following value ranges, depending
5235	// on the result that you want: * 0-2 for complexity reduction with minimal
5236	// sharpness loss * 2-8 for complexity reduction with image preservation * 8-16 for
5237	// a high level of complexity reduction
5238	Strength int32
5239}
5240
5241// Required when you set Codec, under AudioDescriptions>CodecSettings, to the value
5242// OPUS.
5243type OpusSettings struct {
5244
5245	// Optional. Specify the average bitrate in bits per second. Valid values are
5246	// multiples of 8000, from 32000 through 192000. The default value is 96000, which
5247	// we recommend for quality and bandwidth.
5248	Bitrate int32
5249
5250	// Specify the number of channels in this output audio track. Choosing Mono on the
5251	// console gives you 1 output channel; choosing Stereo gives you 2. In the API,
5252	// valid values are 1 and 2.
5253	Channels int32
5254
5255	// Optional. Sample rate in hz. Valid values are 16000, 24000, and 48000. The
5256	// default value is 48000.
5257	SampleRate int32
5258}
5259
5260// Each output in your job is a collection of settings that describes how you want
5261// MediaConvert to encode a single output file or stream. For more information, see
5262// https://docs.aws.amazon.com/mediaconvert/latest/ug/create-outputs.html.
5263type Output struct {
5264
5265	// (AudioDescriptions) contains groups of audio encoding settings organized by
5266	// audio codec. Include one instance of (AudioDescriptions) per output.
5267	// (AudioDescriptions) can contain multiple groups of encoding settings.
5268	AudioDescriptions []AudioDescription
5269
5270	// (CaptionDescriptions) contains groups of captions settings. For each output that
5271	// has captions, include one instance of (CaptionDescriptions).
5272	// (CaptionDescriptions) can contain multiple groups of captions settings.
5273	CaptionDescriptions []CaptionDescription
5274
5275	// Container specific settings.
5276	ContainerSettings *ContainerSettings
5277
5278	// Use Extension (Extension) to specify the file extension for outputs in File
5279	// output groups. If you do not specify a value, the service will use default
5280	// extensions by container type as follows * MPEG-2 transport stream, m2ts *
5281	// Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container,
5282	// webm * No Container, the service will use codec extensions (e.g. AAC, H265,
5283	// H265, AC3)
5284	Extension *string
5285
5286	// Use Name modifier (NameModifier) to have the service add a string to the end of
5287	// each output filename. You specify the base filename as part of your destination
5288	// URI. When you create multiple outputs in the same output group, Name modifier
5289	// (NameModifier) is required. Name modifier also accepts format identifiers. For
5290	// DASH ISO outputs, if you use the format identifiers $Number$ or $Time$ in one
5291	// output, you must use them in the same way in all outputs of the output group.
5292	NameModifier *string
5293
5294	// Specific settings for this type of output.
5295	OutputSettings *OutputSettings
5296
5297	// Use Preset (Preset) to specify a preset for your transcoding settings. Provide
5298	// the system or custom preset name. You can specify either Preset (Preset) or
5299	// Container settings (ContainerSettings), but not both.
5300	Preset *string
5301
5302	// VideoDescription contains a group of video encoding settings. The specific video
5303	// settings depend on the video codec that you choose for the property codec.
5304	// Include one instance of VideoDescription per output.
5305	VideoDescription *VideoDescription
5306}
5307
5308// OutputChannel mapping settings.
5309type OutputChannelMapping struct {
5310
5311	// Use this setting to specify your remix values when they are integers, such as
5312	// -10, 0, or 4.
5313	InputChannels []int32
5314
5315	// Use this setting to specify your remix values when they have a decimal
5316	// component, such as -10.312, 0.08, or 4.9. MediaConvert rounds your remixing
5317	// values to the nearest thousandth.
5318	InputChannelsFineTune []float64
5319}
5320
5321// Details regarding output
5322type OutputDetail struct {
5323
5324	// Duration in milliseconds
5325	DurationInMs int32
5326
5327	// Contains details about the output's video stream
5328	VideoDetails *VideoDetail
5329}
5330
5331// Group of outputs
5332type OutputGroup struct {
5333
5334	// Use automated encoding to have MediaConvert choose your encoding settings for
5335	// you, based on characteristics of your input video.
5336	AutomatedEncodingSettings *AutomatedEncodingSettings
5337
5338	// Use Custom Group Name (CustomName) to specify a name for the output group. This
5339	// value is displayed on the console and can make your job settings JSON more
5340	// human-readable. It does not affect your outputs. Use up to twelve characters
5341	// that are either letters, numbers, spaces, or underscores.
5342	CustomName *string
5343
5344	// Name of the output group
5345	Name *string
5346
5347	// Output Group settings, including type
5348	OutputGroupSettings *OutputGroupSettings
5349
5350	// This object holds groups of encoding settings, one group of settings per output.
5351	Outputs []Output
5352}
5353
5354// Contains details about the output groups specified in the job settings.
5355type OutputGroupDetail struct {
5356
5357	// Details about the output
5358	OutputDetails []OutputDetail
5359}
5360
5361// Output Group settings, including type
5362type OutputGroupSettings struct {
5363
5364	// Settings related to your CMAF output package. For more information, see
5365	// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When
5366	// you work directly in your JSON job specification, include this object and any
5367	// required children when you set Type, under OutputGroupSettings, to
5368	// CMAF_GROUP_SETTINGS.
5369	CmafGroupSettings *CmafGroupSettings
5370
5371	// Settings related to your DASH output package. For more information, see
5372	// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When
5373	// you work directly in your JSON job specification, include this object and any
5374	// required children when you set Type, under OutputGroupSettings, to
5375	// DASH_ISO_GROUP_SETTINGS.
5376	DashIsoGroupSettings *DashIsoGroupSettings
5377
5378	// Settings related to your File output group. MediaConvert uses this group of
5379	// settings to generate a single standalone file, rather than a streaming package.
5380	// When you work directly in your JSON job specification, include this object and
5381	// any required children when you set Type, under OutputGroupSettings, to
5382	// FILE_GROUP_SETTINGS.
5383	FileGroupSettings *FileGroupSettings
5384
5385	// Settings related to your HLS output package. For more information, see
5386	// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When
5387	// you work directly in your JSON job specification, include this object and any
5388	// required children when you set Type, under OutputGroupSettings, to
5389	// HLS_GROUP_SETTINGS.
5390	HlsGroupSettings *HlsGroupSettings
5391
5392	// Settings related to your Microsoft Smooth Streaming output package. For more
5393	// information, see
5394	// https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When
5395	// you work directly in your JSON job specification, include this object and any
5396	// required children when you set Type, under OutputGroupSettings, to
5397	// MS_SMOOTH_GROUP_SETTINGS.
5398	MsSmoothGroupSettings *MsSmoothGroupSettings
5399
5400	// Type of output group (File group, Apple HLS, DASH ISO, Microsoft Smooth
5401	// Streaming, CMAF)
5402	Type OutputGroupType
5403}
5404
5405// Specific settings for this type of output.
5406type OutputSettings struct {
5407
5408	// Settings for HLS output groups
5409	HlsSettings *HlsSettings
5410}
5411
5412// If you work with a third party video watermarking partner, use the group of
5413// settings that correspond with your watermarking partner to include watermarks in
5414// your output.
5415type PartnerWatermarking struct {
5416
5417	// For forensic video watermarking, MediaConvert supports Nagra NexGuard File
5418	// Marker watermarking. MediaConvert supports both PreRelease Content (NGPR/G2) and
5419	// OTT Streaming workflows.
5420	NexguardFileMarkerSettings *NexGuardFileMarkerSettings
5421}
5422
5423// A preset is a collection of preconfigured media conversion settings that you
5424// want MediaConvert to apply to the output during the conversion process.
5425type Preset struct {
5426
5427	// A name you create for each preset. Each name must be unique within your account.
5428	//
5429	// This member is required.
5430	Name *string
5431
5432	// Settings for preset
5433	//
5434	// This member is required.
5435	Settings *PresetSettings
5436
5437	// An identifier for this resource that is unique within all of AWS.
5438	Arn *string
5439
5440	// An optional category you create to organize your presets.
5441	Category *string
5442
5443	// The timestamp in epoch seconds for preset creation.
5444	CreatedAt *time.Time
5445
5446	// An optional description you create for each preset.
5447	Description *string
5448
5449	// The timestamp in epoch seconds when the preset was last updated.
5450	LastUpdated *time.Time
5451
5452	// A preset can be of two types: system or custom. System or built-in preset can't
5453	// be modified or deleted by the user.
5454	Type Type
5455}
5456
5457// Settings for preset
5458type PresetSettings struct {
5459
5460	// (AudioDescriptions) contains groups of audio encoding settings organized by
5461	// audio codec. Include one instance of (AudioDescriptions) per output.
5462	// (AudioDescriptions) can contain multiple groups of encoding settings.
5463	AudioDescriptions []AudioDescription
5464
5465	// This object holds groups of settings related to captions for one output. For
5466	// each output that has captions, include one instance of CaptionDescriptions.
5467	CaptionDescriptions []CaptionDescriptionPreset
5468
5469	// Container specific settings.
5470	ContainerSettings *ContainerSettings
5471
5472	// VideoDescription contains a group of video encoding settings. The specific video
5473	// settings depend on the video codec that you choose for the property codec.
5474	// Include one instance of VideoDescription per output.
5475	VideoDescription *VideoDescription
5476}
5477
5478// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the
5479// value PRORES.
5480type ProresSettings struct {
5481
5482	// Use Profile (ProResCodecProfile) to specify the type of Apple ProRes codec to
5483	// use for this output.
5484	CodecProfile ProresCodecProfile
5485
5486	// If you are using the console, use the Framerate setting to specify the frame
5487	// rate for this output. If you want to keep the same frame rate as the input
5488	// video, choose Follow source. If you want to do frame rate conversion, choose a
5489	// frame rate from the dropdown list or choose Custom. The framerates shown in the
5490	// dropdown list are decimal approximations of fractions. If you choose Custom,
5491	// specify your frame rate as a fraction. If you are creating your transcoding job
5492	// specification as a JSON file without the console, use FramerateControl to
5493	// specify which value the service uses for the frame rate for this output. Choose
5494	// INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the
5495	// input. Choose SPECIFIED if you want the service to use the frame rate you
5496	// specify in the settings FramerateNumerator and FramerateDenominator.
5497	FramerateControl ProresFramerateControl
5498
5499	// Choose the method that you want MediaConvert to use when increasing or
5500	// decreasing the frame rate. We recommend using drop duplicate (DUPLICATE_DROP)
5501	// for numerically simple conversions, such as 60 fps to 30 fps. For numerically
5502	// complex conversions, you can use interpolate (INTERPOLATE) to avoid stutter.
5503	// This results in a smooth picture, but might introduce undesirable video
5504	// artifacts. For complex frame rate conversions, especially if your source video
5505	// has already been converted from its original cadence, use FrameFormer
5506	// (FRAMEFORMER) to do motion-compensated interpolation. FrameFormer chooses the
5507	// best conversion method frame by frame. Note that using FrameFormer increases the
5508	// transcoding time and incurs a significant add-on cost.
5509	FramerateConversionAlgorithm ProresFramerateConversionAlgorithm
5510
5511	// When you use the API for transcode jobs that use frame rate conversion, specify
5512	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
5513	// FramerateDenominator to specify the denominator of this fraction. In this
5514	// example, use 1001 for the value of FramerateDenominator. When you use the
5515	// console for transcode jobs that use frame rate conversion, provide the value as
5516	// a decimal number for Framerate. In this example, specify 23.976.
5517	FramerateDenominator int32
5518
5519	// When you use the API for transcode jobs that use frame rate conversion, specify
5520	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
5521	// FramerateNumerator to specify the numerator of this fraction. In this example,
5522	// use 24000 for the value of FramerateNumerator. When you use the console for
5523	// transcode jobs that use frame rate conversion, provide the value as a decimal
5524	// number for Framerate. In this example, specify 23.976.
5525	FramerateNumerator int32
5526
5527	// Choose the scan line type for the output. Keep the default value, Progressive
5528	// (PROGRESSIVE) to create a progressive output, regardless of the scan type of
5529	// your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD)
5530	// to create an output that's interlaced with the same field polarity throughout.
5531	// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom
5532	// (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the
5533	// source. For jobs that have multiple inputs, the output field polarity might
5534	// change over the course of the output. Follow behavior depends on the input scan
5535	// type. If the source is interlaced, the output will be interlaced with the same
5536	// polarity as the source. If the source is progressive, the output will be
5537	// interlaced with top field bottom field first, depending on which of the Follow
5538	// options you choose.
5539	InterlaceMode ProresInterlaceMode
5540
5541	// Optional. Specify how the service determines the pixel aspect ratio (PAR) for
5542	// this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses
5543	// the PAR from your input video for your output. To specify a different PAR in the
5544	// console, choose any value other than Follow source. To specify a different PAR
5545	// by editing the JSON job specification, choose SPECIFIED. When you choose
5546	// SPECIFIED for this setting, you must also specify values for the parNumerator
5547	// and parDenominator settings.
5548	ParControl ProresParControl
5549
5550	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
5551	// console, this corresponds to any value other than Follow source. When you
5552	// specify an output pixel aspect ratio (PAR) that is different from your input
5553	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
5554	// widescreen, you would specify the ratio 40:33. In this example, the value for
5555	// parDenominator is 33.
5556	ParDenominator int32
5557
5558	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
5559	// console, this corresponds to any value other than Follow source. When you
5560	// specify an output pixel aspect ratio (PAR) that is different from your input
5561	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
5562	// widescreen, you would specify the ratio 40:33. In this example, the value for
5563	// parNumerator is 40.
5564	ParNumerator int32
5565
5566	// Use this setting for interlaced outputs, when your output frame rate is half of
5567	// your input frame rate. In this situation, choose Optimized interlacing
5568	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
5569	// case, each progressive frame from the input corresponds to an interlaced field
5570	// in the output. Keep the default value, Basic interlacing (INTERLACED), for all
5571	// other output frame rates. With basic interlacing, MediaConvert performs any
5572	// frame rate conversion first and then interlaces the frames. When you choose
5573	// Optimized interlacing and you set your output frame rate to a value that isn't
5574	// suitable for optimized interlacing, MediaConvert automatically falls back to
5575	// basic interlacing. Required settings: To use optimized interlacing, you must set
5576	// Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized
5577	// interlacing for hard telecine outputs. You must also set Interlace mode
5578	// (interlaceMode) to a value other than Progressive (PROGRESSIVE).
5579	ScanTypeConversionMode ProresScanTypeConversionMode
5580
5581	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
5582	// second (fps). Enable slow PAL to create a 25 fps output. When you enable slow
5583	// PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio
5584	// to keep it synchronized with the video. Note that enabling this setting will
5585	// slightly reduce the duration of your video. Required settings: You must also set
5586	// Framerate to 25. In your JSON job specification, set (framerateControl) to
5587	// (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.
5588	SlowPal ProresSlowPal
5589
5590	// When you do frame rate conversion from 23.976 frames per second (fps) to 29.97
5591	// fps, and your output scan type is interlaced, you can optionally enable hard
5592	// telecine (HARD) to create a smoother picture. When you keep the default value,
5593	// None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without
5594	// doing anything with the field polarity to create a smoother picture.
5595	Telecine ProresTelecine
5596}
5597
5598// You can use queues to manage the resources that are available to your AWS
5599// account for running multiple transcoding jobs at the same time. If you don't
5600// specify a queue, the service sends all jobs through the default queue. For more
5601// information, see
5602// https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-queues.html.
5603type Queue struct {
5604
5605	// A name that you create for each queue. Each name must be unique within your
5606	// account.
5607	//
5608	// This member is required.
5609	Name *string
5610
5611	// An identifier for this resource that is unique within all of AWS.
5612	Arn *string
5613
5614	// The timestamp in epoch seconds for when you created the queue.
5615	CreatedAt *time.Time
5616
5617	// An optional description that you create for each queue.
5618	Description *string
5619
5620	// The timestamp in epoch seconds for when you most recently updated the queue.
5621	LastUpdated *time.Time
5622
5623	// Specifies whether the pricing plan for the queue is on-demand or reserved. For
5624	// on-demand, you pay per minute, billed in increments of .01 minute. For reserved,
5625	// you pay for the transcoding capacity of the entire queue, regardless of how much
5626	// or how little you use it. Reserved pricing requires a 12-month commitment.
5627	PricingPlan PricingPlan
5628
5629	// The estimated number of jobs with a PROGRESSING status.
5630	ProgressingJobsCount int32
5631
5632	// Details about the pricing plan for your reserved queue. Required for reserved
5633	// queues and not applicable to on-demand queues.
5634	ReservationPlan *ReservationPlan
5635
5636	// Queues can be ACTIVE or PAUSED. If you pause a queue, the service won't begin
5637	// processing jobs in that queue. Jobs that are running when you pause the queue
5638	// continue to run until they finish or result in an error.
5639	Status QueueStatus
5640
5641	// The estimated number of jobs with a SUBMITTED status.
5642	SubmittedJobsCount int32
5643
5644	// Specifies whether this on-demand queue is system or custom. System queues are
5645	// built in. You can't modify or delete system queues. You can create and modify
5646	// custom queues.
5647	Type Type
5648}
5649
5650// Description of the source and destination queues between which the job has
5651// moved, along with the timestamp of the move
5652type QueueTransition struct {
5653
5654	// The queue that the job was on after the transition.
5655	DestinationQueue *string
5656
5657	// The queue that the job was on before the transition.
5658	SourceQueue *string
5659
5660	// The time, in Unix epoch format, that the job moved from the source queue to the
5661	// destination queue.
5662	Timestamp *time.Time
5663}
5664
5665// Use Rectangle to identify a specific area of the video frame.
5666type Rectangle struct {
5667
5668	// Height of rectangle in pixels. Specify only even numbers.
5669	Height int32
5670
5671	// Width of rectangle in pixels. Specify only even numbers.
5672	Width int32
5673
5674	// The distance, in pixels, between the rectangle and the left edge of the video
5675	// frame. Specify only even numbers.
5676	X int32
5677
5678	// The distance, in pixels, between the rectangle and the top edge of the video
5679	// frame. Specify only even numbers.
5680	Y int32
5681}
5682
5683// Use Manual audio remixing (RemixSettings) to adjust audio levels for each audio
5684// channel in each output of your job. With audio remixing, you can output more or
5685// fewer audio channels than your input audio source provides.
5686type RemixSettings struct {
5687
5688	// Channel mapping (ChannelMapping) contains the group of fields that hold the
5689	// remixing value for each channel, in dB. Specify remix values to indicate how
5690	// much of the content from your input audio channel you want in your output audio
5691	// channels. Each instance of the InputChannels or InputChannelsFineTune array
5692	// specifies these values for one output channel. Use one instance of this array
5693	// for each output channel. In the console, each array corresponds to a column in
5694	// the graphical depiction of the mapping matrix. The rows of the graphical matrix
5695	// correspond to input channels. Valid values are within the range from -60 (mute)
5696	// through 6. A setting of 0 passes the input channel unchanged to the output
5697	// channel (no attenuation or amplification). Use InputChannels or
5698	// InputChannelsFineTune to specify your remix values. Don't use both.
5699	ChannelMapping *ChannelMapping
5700
5701	// Specify the number of audio channels from your input that you want to use in
5702	// your output. With remixing, you might combine or split the data in these
5703	// channels, so the number of channels in your final output might be different. If
5704	// you are doing both input channel mapping and output channel mapping, the number
5705	// of output channels in your input mapping must be the same as the number of input
5706	// channels in your output mapping.
5707	ChannelsIn int32
5708
5709	// Specify the number of channels in this output after remixing. Valid values: 1,
5710	// 2, 4, 6, 8... 64. (1 and even numbers to 64.) If you are doing both input
5711	// channel mapping and output channel mapping, the number of output channels in
5712	// your input mapping must be the same as the number of input channels in your
5713	// output mapping.
5714	ChannelsOut int32
5715}
5716
5717// Details about the pricing plan for your reserved queue. Required for reserved
5718// queues and not applicable to on-demand queues.
5719type ReservationPlan struct {
5720
5721	// The length of the term of your reserved queue pricing plan commitment.
5722	Commitment Commitment
5723
5724	// The timestamp in epoch seconds for when the current pricing plan term for this
5725	// reserved queue expires.
5726	ExpiresAt *time.Time
5727
5728	// The timestamp in epoch seconds for when you set up the current pricing plan for
5729	// this reserved queue.
5730	PurchasedAt *time.Time
5731
5732	// Specifies whether the term of your reserved queue pricing plan is automatically
5733	// extended (AUTO_RENEW) or expires (EXPIRE) at the end of the term.
5734	RenewalType RenewalType
5735
5736	// Specifies the number of reserved transcode slots (RTS) for this queue. The
5737	// number of RTS determines how many jobs the queue can process in parallel; each
5738	// RTS can process one job at a time. When you increase this number, you extend
5739	// your existing commitment with a new 12-month commitment for a larger number of
5740	// RTS. The new commitment begins when you purchase the additional capacity. You
5741	// can't decrease the number of RTS in your reserved queue.
5742	ReservedSlots int32
5743
5744	// Specifies whether the pricing plan for your reserved queue is ACTIVE or EXPIRED.
5745	Status ReservationPlanStatus
5746}
5747
5748// Details about the pricing plan for your reserved queue. Required for reserved
5749// queues and not applicable to on-demand queues.
5750type ReservationPlanSettings struct {
5751
5752	// The length of the term of your reserved queue pricing plan commitment.
5753	//
5754	// This member is required.
5755	Commitment Commitment
5756
5757	// Specifies whether the term of your reserved queue pricing plan is automatically
5758	// extended (AUTO_RENEW) or expires (EXPIRE) at the end of the term. When your term
5759	// is auto renewed, you extend your commitment by 12 months from the auto renew
5760	// date. You can cancel this commitment.
5761	//
5762	// This member is required.
5763	RenewalType RenewalType
5764
5765	// Specifies the number of reserved transcode slots (RTS) for this queue. The
5766	// number of RTS determines how many jobs the queue can process in parallel; each
5767	// RTS can process one job at a time. You can't decrease the number of RTS in your
5768	// reserved queue. You can increase the number of RTS by extending your existing
5769	// commitment with a new 12-month commitment for the larger number. The new
5770	// commitment begins when you purchase the additional capacity. You can't cancel
5771	// your commitment or revert to your original commitment after you increase the
5772	// capacity.
5773	//
5774	// This member is required.
5775	ReservedSlots int32
5776}
5777
5778// The Amazon Resource Name (ARN) and tags for an AWS Elemental MediaConvert
5779// resource.
5780type ResourceTags struct {
5781
5782	// The Amazon Resource Name (ARN) of the resource.
5783	Arn *string
5784
5785	// The tags for the resource.
5786	Tags map[string]string
5787}
5788
5789// Optional. Have MediaConvert automatically apply Amazon S3 access control for the
5790// outputs in this output group. When you don't use this setting, S3 automatically
5791// applies the default access control list PRIVATE.
5792type S3DestinationAccessControl struct {
5793
5794	// Choose an Amazon S3 canned ACL for MediaConvert to apply to this output.
5795	CannedAcl S3ObjectCannedAcl
5796}
5797
5798// Settings associated with S3 destination
5799type S3DestinationSettings struct {
5800
5801	// Optional. Have MediaConvert automatically apply Amazon S3 access control for the
5802	// outputs in this output group. When you don't use this setting, S3 automatically
5803	// applies the default access control list PRIVATE.
5804	AccessControl *S3DestinationAccessControl
5805
5806	// Settings for how your job outputs are encrypted as they are uploaded to Amazon
5807	// S3.
5808	Encryption *S3EncryptionSettings
5809}
5810
5811// Settings for how your job outputs are encrypted as they are uploaded to Amazon
5812// S3.
5813type S3EncryptionSettings struct {
5814
5815	// Specify how you want your data keys managed. AWS uses data keys to encrypt your
5816	// content. AWS also encrypts the data keys themselves, using a customer master key
5817	// (CMK), and then stores the encrypted data keys alongside your encrypted content.
5818	// Use this setting to specify which AWS service manages the CMK. For simplest set
5819	// up, choose Amazon S3 (SERVER_SIDE_ENCRYPTION_S3). If you want your master key to
5820	// be managed by AWS Key Management Service (KMS), choose AWS KMS
5821	// (SERVER_SIDE_ENCRYPTION_KMS). By default, when you choose AWS KMS, KMS uses the
5822	// AWS managed customer master key (CMK) associated with Amazon S3 to encrypt your
5823	// data keys. You can optionally choose to specify a different, customer managed
5824	// CMK. Do so by specifying the Amazon Resource Name (ARN) of the key for the
5825	// setting KMS ARN (kmsKeyArn).
5826	EncryptionType S3ServerSideEncryptionType
5827
5828	// Optionally, specify the customer master key (CMK) that you want to use to
5829	// encrypt the data key that AWS uses to encrypt your output content. Enter the
5830	// Amazon Resource Name (ARN) of the CMK. To use this setting, you must also set
5831	// Server-side encryption (S3ServerSideEncryptionType) to AWS KMS
5832	// (SERVER_SIDE_ENCRYPTION_KMS). If you set Server-side encryption to AWS KMS but
5833	// don't specify a CMK here, AWS uses the AWS managed CMK associated with Amazon
5834	// S3.
5835	KmsKeyArn *string
5836}
5837
5838// Settings related to SCC captions. SCC is a sidecar format that holds captions in
5839// a file that is separate from the video container. Set up sidecar captions in the
5840// same output group, but different output from your video. For more information,
5841// see
5842// https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html.
5843// When you work directly in your JSON job specification, include this object and
5844// any required children when you set destinationType to SCC.
5845type SccDestinationSettings struct {
5846
5847	// Set Framerate (SccDestinationFramerate) to make sure that the captions and the
5848	// video are synchronized in the output. Specify a frame rate that matches the
5849	// frame rate of the associated video. If the video frame rate is 29.97, choose
5850	// 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has
5851	// video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97
5852	// non-dropframe (FRAMERATE_29_97_NON_DROPFRAME).
5853	Framerate SccDestinationFramerate
5854}
5855
5856// If your output group type is HLS, DASH, or Microsoft Smooth, use these settings
5857// when doing DRM encryption with a SPEKE-compliant key provider. If your output
5858// group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
5859type SpekeKeyProvider struct {
5860
5861	// If you want your key provider to encrypt the content keys that it provides to
5862	// MediaConvert, set up a certificate with a master key using AWS Certificate
5863	// Manager. Specify the certificate's Amazon Resource Name (ARN) here.
5864	CertificateArn *string
5865
5866	// Specify the resource ID that your SPEKE-compliant key provider uses to identify
5867	// this content.
5868	ResourceId *string
5869
5870	// Relates to SPEKE implementation. DRM system identifiers. DASH output groups
5871	// support a max of two system ids. Other group types support one system id. See
5872	// https://dashif.org/identifiers/content_protection/ for more details.
5873	SystemIds []string
5874
5875	// Specify the URL to the key server that your SPEKE-compliant DRM key provider
5876	// uses to provide keys for encrypting your content.
5877	Url *string
5878}
5879
5880// If your output group type is CMAF, use these settings when doing DRM encryption
5881// with a SPEKE-compliant key provider. If your output group type is HLS, DASH, or
5882// Microsoft Smooth, use the SpekeKeyProvider settings instead.
5883type SpekeKeyProviderCmaf struct {
5884
5885	// If you want your key provider to encrypt the content keys that it provides to
5886	// MediaConvert, set up a certificate with a master key using AWS Certificate
5887	// Manager. Specify the certificate's Amazon Resource Name (ARN) here.
5888	CertificateArn *string
5889
5890	// Specify the DRM system IDs that you want signaled in the DASH manifest that
5891	// MediaConvert creates as part of this CMAF package. The DASH manifest can
5892	// currently signal up to three system IDs. For more information, see
5893	// https://dashif.org/identifiers/content_protection/.
5894	DashSignaledSystemIds []string
5895
5896	// Specify the DRM system ID that you want signaled in the HLS manifest that
5897	// MediaConvert creates as part of this CMAF package. The HLS manifest can
5898	// currently signal only one system ID. For more information, see
5899	// https://dashif.org/identifiers/content_protection/.
5900	HlsSignaledSystemIds []string
5901
5902	// Specify the resource ID that your SPEKE-compliant key provider uses to identify
5903	// this content.
5904	ResourceId *string
5905
5906	// Specify the URL to the key server that your SPEKE-compliant DRM key provider
5907	// uses to provide keys for encrypting your content.
5908	Url *string
5909}
5910
5911// Use these settings to set up encryption with a static key provider.
5912type StaticKeyProvider struct {
5913
5914	// Relates to DRM implementation. Sets the value of the KEYFORMAT attribute. Must
5915	// be 'identity' or a reverse DNS string. May be omitted to indicate an implicit
5916	// value of 'identity'.
5917	KeyFormat *string
5918
5919	// Relates to DRM implementation. Either a single positive integer version value or
5920	// a slash delimited list of version values (1/2/3).
5921	KeyFormatVersions *string
5922
5923	// Relates to DRM implementation. Use a 32-character hexidecimal string to specify
5924	// Key Value (StaticKeyValue).
5925	StaticKeyValue *string
5926
5927	// Relates to DRM implementation. The location of the license server used for
5928	// protecting content.
5929	Url *string
5930}
5931
5932// Settings related to teletext captions. Set up teletext captions in the same
5933// output as your video. For more information, see
5934// https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html.
5935// When you work directly in your JSON job specification, include this object and
5936// any required children when you set destinationType to TELETEXT.
5937type TeletextDestinationSettings struct {
5938
5939	// Set pageNumber to the Teletext page number for the destination captions for this
5940	// output. This value must be a three-digit hexadecimal string; strings ending in
5941	// -FF are invalid. If you are passing through the entire set of Teletext data, do
5942	// not use this field.
5943	PageNumber *string
5944
5945	// Specify the page types for this Teletext page. If you don't specify a value
5946	// here, the service sets the page type to the default value Subtitle
5947	// (PAGE_TYPE_SUBTITLE). If you pass through the entire set of Teletext data, don't
5948	// use this field. When you pass through a set of Teletext pages, your output has
5949	// the same page types as your input.
5950	PageTypes []TeletextPageType
5951}
5952
5953// Settings specific to Teletext caption sources, including Page number.
5954type TeletextSourceSettings struct {
5955
5956	// Use Page Number (PageNumber) to specify the three-digit hexadecimal page number
5957	// that will be used for Teletext captions. Do not use this setting if you are
5958	// passing through teletext from the input source to output.
5959	PageNumber *string
5960}
5961
5962// Settings for burning the output timecode and specified prefix into the output.
5963type TimecodeBurnin struct {
5964
5965	// Use Font Size (FontSize) to set the font size of any burned-in timecode. Valid
5966	// values are 10, 16, 32, 48.
5967	FontSize int32
5968
5969	// Use Position (Position) under under Timecode burn-in (TimecodeBurnIn) to specify
5970	// the location the burned-in timecode on output video.
5971	Position TimecodeBurninPosition
5972
5973	// Use Prefix (Prefix) to place ASCII characters before any burned-in timecode. For
5974	// example, a prefix of "EZ-" will result in the timecode "EZ-00:00:00:00". Provide
5975	// either the characters themselves or the ASCII code equivalents. The supported
5976	// range of characters is 0x20 through 0x7e. This includes letters, numbers, and
5977	// all special characters represented on a standard English keyboard.
5978	Prefix *string
5979}
5980
5981// These settings control how the service handles timecodes throughout the job.
5982// These settings don't affect input clipping.
5983type TimecodeConfig struct {
5984
5985	// If you use an editing platform that relies on an anchor timecode, use Anchor
5986	// Timecode (Anchor) to specify a timecode that will match the input video frame to
5987	// the output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or
5988	// (HH:MM:SS;FF). This setting ignores frame rate conversion. System behavior for
5989	// Anchor Timecode varies depending on your setting for Source (TimecodeSource). *
5990	// If Source (TimecodeSource) is set to Specified Start (SPECIFIEDSTART), the first
5991	// input frame is the specified value in Start Timecode (Start). Anchor Timecode
5992	// (Anchor) and Start Timecode (Start) are used calculate output timecode. * If
5993	// Source (TimecodeSource) is set to Start at 0 (ZEROBASED) the first frame is
5994	// 00:00:00:00. * If Source (TimecodeSource) is set to Embedded (EMBEDDED), the
5995	// first frame is the timecode value on the first input frame of the input.
5996	Anchor *string
5997
5998	// Use Source (TimecodeSource) to set how timecodes are handled within this job. To
5999	// make sure that your video, audio, captions, and markers are synchronized and
6000	// that time-based features, such as image inserter, work correctly, choose the
6001	// Timecode source option that matches your assets. All timecodes are in a 24-hour
6002	// format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) - Use the timecode
6003	// that is in the input video. If no embedded timecode is in the source, the
6004	// service will use Start at 0 (ZEROBASED) instead. * Start at 0 (ZEROBASED) - Set
6005	// the timecode of the initial frame to 00:00:00:00. * Specified Start
6006	// (SPECIFIEDSTART) - Set the timecode of the initial frame to a value other than
6007	// zero. You use Start timecode (Start) to provide this value.
6008	Source TimecodeSource
6009
6010	// Only use when you set Source (TimecodeSource) to Specified start
6011	// (SPECIFIEDSTART). Use Start timecode (Start) to specify the timecode for the
6012	// initial frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or
6013	// (HH:MM:SS;FF).
6014	Start *string
6015
6016	// Only applies to outputs that support program-date-time stamp. Use Timestamp
6017	// offset (TimestampOffset) to overwrite the timecode date without affecting the
6018	// time and frame number. Provide the new date as a string in the format
6019	// "yyyy-mm-dd". To use Time stamp offset, you must also enable Insert
6020	// program-date-time (InsertProgramDateTime) in the output settings. For example,
6021	// if the date part of your timecodes is 2002-1-25 and you want to change it to one
6022	// year later, set Timestamp offset (TimestampOffset) to 2003-1-25.
6023	TimestampOffset *string
6024}
6025
6026// Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags in
6027// any HLS outputs. To include timed metadata, you must enable it here, enable it
6028// in each output container, and specify tags and timecodes in ID3 insertion
6029// (Id3Insertion) objects.
6030type TimedMetadataInsertion struct {
6031
6032	// Id3Insertions contains the array of Id3Insertion instances.
6033	Id3Insertions []Id3Insertion
6034}
6035
6036// Information about when jobs are submitted, started, and finished is specified in
6037// Unix epoch format in seconds.
6038type Timing struct {
6039
6040	// The time, in Unix epoch format, that the transcoding job finished
6041	FinishTime *time.Time
6042
6043	// The time, in Unix epoch format, that transcoding for the job began.
6044	StartTime *time.Time
6045
6046	// The time, in Unix epoch format, that you submitted the job.
6047	SubmitTime *time.Time
6048}
6049
6050// Settings specific to caption sources that are specified by track number.
6051// Currently, this is only IMSC captions in an IMF package. If your caption source
6052// is IMSC 1.1 in a separate xml file, use FileSourceSettings instead of
6053// TrackSourceSettings.
6054type TrackSourceSettings struct {
6055
6056	// Use this setting to select a single captions track from a source. Track numbers
6057	// correspond to the order in the captions source file. For IMF sources, track
6058	// numbering is based on the order that the captions appear in the CPL. For
6059	// example, use 1 to select the captions asset that is listed first in the CPL. To
6060	// include more than one captions track in your job outputs, create multiple input
6061	// captions selectors. Specify one track per selector.
6062	TrackNumber int32
6063}
6064
6065// Settings related to TTML captions. TTML is a sidecar format that holds captions
6066// in a file that is separate from the video container. Set up sidecar captions in
6067// the same output group, but different output from your video. For more
6068// information, see
6069// https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.
6070// When you work directly in your JSON job specification, include this object and
6071// any required children when you set destinationType to TTML.
6072type TtmlDestinationSettings struct {
6073
6074	// Pass through style and position information from a TTML-like input source (TTML,
6075	// IMSC, SMPTE-TT) to the TTML output.
6076	StylePassthrough TtmlStylePassthrough
6077}
6078
6079// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the
6080// value VC3
6081type Vc3Settings struct {
6082
6083	// If you are using the console, use the Framerate setting to specify the frame
6084	// rate for this output. If you want to keep the same frame rate as the input
6085	// video, choose Follow source. If you want to do frame rate conversion, choose a
6086	// frame rate from the dropdown list or choose Custom. The framerates shown in the
6087	// dropdown list are decimal approximations of fractions. If you choose Custom,
6088	// specify your frame rate as a fraction. If you are creating your transcoding job
6089	// specification as a JSON file without the console, use FramerateControl to
6090	// specify which value the service uses for the frame rate for this output. Choose
6091	// INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the
6092	// input. Choose SPECIFIED if you want the service to use the frame rate you
6093	// specify in the settings FramerateNumerator and FramerateDenominator.
6094	FramerateControl Vc3FramerateControl
6095
6096	// Choose the method that you want MediaConvert to use when increasing or
6097	// decreasing the frame rate. We recommend using drop duplicate (DUPLICATE_DROP)
6098	// for numerically simple conversions, such as 60 fps to 30 fps. For numerically
6099	// complex conversions, you can use interpolate (INTERPOLATE) to avoid stutter.
6100	// This results in a smooth picture, but might introduce undesirable video
6101	// artifacts. For complex frame rate conversions, especially if your source video
6102	// has already been converted from its original cadence, use FrameFormer
6103	// (FRAMEFORMER) to do motion-compensated interpolation. FrameFormer chooses the
6104	// best conversion method frame by frame. Note that using FrameFormer increases the
6105	// transcoding time and incurs a significant add-on cost.
6106	FramerateConversionAlgorithm Vc3FramerateConversionAlgorithm
6107
6108	// When you use the API for transcode jobs that use frame rate conversion, specify
6109	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
6110	// FramerateDenominator to specify the denominator of this fraction. In this
6111	// example, use 1001 for the value of FramerateDenominator. When you use the
6112	// console for transcode jobs that use frame rate conversion, provide the value as
6113	// a decimal number for Framerate. In this example, specify 23.976.
6114	FramerateDenominator int32
6115
6116	// When you use the API for transcode jobs that use frame rate conversion, specify
6117	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
6118	// FramerateNumerator to specify the numerator of this fraction. In this example,
6119	// use 24000 for the value of FramerateNumerator. When you use the console for
6120	// transcode jobs that use frame rate conversion, provide the value as a decimal
6121	// number for Framerate. In this example, specify 23.976.
6122	FramerateNumerator int32
6123
6124	// Optional. Choose the scan line type for this output. If you don't specify a
6125	// value, MediaConvert will create a progressive output.
6126	InterlaceMode Vc3InterlaceMode
6127
6128	// Use this setting for interlaced outputs, when your output frame rate is half of
6129	// your input frame rate. In this situation, choose Optimized interlacing
6130	// (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this
6131	// case, each progressive frame from the input corresponds to an interlaced field
6132	// in the output. Keep the default value, Basic interlacing (INTERLACED), for all
6133	// other output frame rates. With basic interlacing, MediaConvert performs any
6134	// frame rate conversion first and then interlaces the frames. When you choose
6135	// Optimized interlacing and you set your output frame rate to a value that isn't
6136	// suitable for optimized interlacing, MediaConvert automatically falls back to
6137	// basic interlacing. Required settings: To use optimized interlacing, you must set
6138	// Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized
6139	// interlacing for hard telecine outputs. You must also set Interlace mode
6140	// (interlaceMode) to a value other than Progressive (PROGRESSIVE).
6141	ScanTypeConversionMode Vc3ScanTypeConversionMode
6142
6143	// Ignore this setting unless your input frame rate is 23.976 or 24 frames per
6144	// second (fps). Enable slow PAL to create a 25 fps output by relabeling the video
6145	// frames and resampling your audio. Note that enabling this setting will slightly
6146	// reduce the duration of your video. Related settings: You must also set Framerate
6147	// to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED),
6148	// (framerateNumerator) to 25 and (framerateDenominator) to 1.
6149	SlowPal Vc3SlowPal
6150
6151	// When you do frame rate conversion from 23.976 frames per second (fps) to 29.97
6152	// fps, and your output scan type is interlaced, you can optionally enable hard
6153	// telecine (HARD) to create a smoother picture. When you keep the default value,
6154	// None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without
6155	// doing anything with the field polarity to create a smoother picture.
6156	Telecine Vc3Telecine
6157
6158	// Specify the VC3 class to choose the quality characteristics for this output. VC3
6159	// class, together with the settings Framerate (framerateNumerator and
6160	// framerateDenominator) and Resolution (height and width), determine your output
6161	// bitrate. For example, say that your video resolution is 1920x1080 and your
6162	// framerate is 29.97. Then Class 145 (CLASS_145) gives you an output with a
6163	// bitrate of approximately 145 Mbps and Class 220 (CLASS_220) gives you and output
6164	// with a bitrate of approximately 220 Mbps. VC3 class also specifies the color bit
6165	// depth of your output.
6166	Vc3Class Vc3Class
6167}
6168
6169// Video codec settings, (CodecSettings) under (VideoDescription), contains the
6170// group of settings related to video encoding. The settings in this group vary
6171// depending on the value that you choose for Video codec (Codec). For each codec
6172// enum that you choose, define the corresponding settings object. The following
6173// lists the codec enum, settings object pairs. * AV1, Av1Settings * AVC_INTRA,
6174// AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings *
6175// H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VC3,
6176// Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings
6177type VideoCodecSettings struct {
6178
6179	// Required when you set Codec, under VideoDescription>CodecSettings to the value
6180	// AV1.
6181	Av1Settings *Av1Settings
6182
6183	// Required when you choose AVC-Intra for your output video codec. For more
6184	// information about the AVC-Intra settings, see the relevant specification. For
6185	// detailed information about SD and HD in AVC-Intra, see
6186	// https://ieeexplore.ieee.org/document/7290936. For information about 4K/2K in
6187	// AVC-Intra, see https://pro-av.panasonic.net/en/avc-ultra/AVC-ULTRAoverview.pdf.
6188	AvcIntraSettings *AvcIntraSettings
6189
6190	// Specifies the video codec. This must be equal to one of the enum values defined
6191	// by the object VideoCodec.
6192	Codec VideoCodec
6193
6194	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the
6195	// value FRAME_CAPTURE.
6196	FrameCaptureSettings *FrameCaptureSettings
6197
6198	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the
6199	// value H_264.
6200	H264Settings *H264Settings
6201
6202	// Settings for H265 codec
6203	H265Settings *H265Settings
6204
6205	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the
6206	// value MPEG2.
6207	Mpeg2Settings *Mpeg2Settings
6208
6209	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the
6210	// value PRORES.
6211	ProresSettings *ProresSettings
6212
6213	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the
6214	// value VC3
6215	Vc3Settings *Vc3Settings
6216
6217	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the
6218	// value VP8.
6219	Vp8Settings *Vp8Settings
6220
6221	// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the
6222	// value VP9.
6223	Vp9Settings *Vp9Settings
6224}
6225
6226// Settings related to video encoding of your output. The specific video settings
6227// depend on the video codec that you choose. When you work directly in your JSON
6228// job specification, include one instance of Video description (VideoDescription)
6229// per output.
6230type VideoDescription struct {
6231
6232	// This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert AFD
6233	// signaling (AfdSignaling) to specify whether the service includes AFD values in
6234	// the output video data and what those values are. * Choose None to remove all AFD
6235	// values from this output. * Choose Fixed to ignore input AFD values and instead
6236	// encode the value specified in the job. * Choose Auto to calculate output AFD
6237	// values based on the input AFD scaler data.
6238	AfdSignaling AfdSignaling
6239
6240	// The anti-alias filter is automatically applied to all outputs. The service no
6241	// longer accepts the value DISABLED for AntiAlias. If you specify that in your
6242	// job, the service will ignore the setting.
6243	AntiAlias AntiAlias
6244
6245	// Video codec settings, (CodecSettings) under (VideoDescription), contains the
6246	// group of settings related to video encoding. The settings in this group vary
6247	// depending on the value that you choose for Video codec (Codec). For each codec
6248	// enum that you choose, define the corresponding settings object. The following
6249	// lists the codec enum, settings object pairs. * AV1, Av1Settings * AVC_INTRA,
6250	// AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings *
6251	// H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VC3,
6252	// Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings
6253	CodecSettings *VideoCodecSettings
6254
6255	// Choose Insert (INSERT) for this setting to include color metadata in this
6256	// output. Choose Ignore (IGNORE) to exclude color metadata from this output. If
6257	// you don't specify a value, the service sets this to Insert by default.
6258	ColorMetadata ColorMetadata
6259
6260	// Use Cropping selection (crop) to specify the video area that the service will
6261	// include in the output video frame.
6262	Crop *Rectangle
6263
6264	// Applies only to 29.97 fps outputs. When this feature is enabled, the service
6265	// will use drop-frame timecode on outputs. If it is not possible to use drop-frame
6266	// timecode, the system will fall back to non-drop-frame. This setting is enabled
6267	// by default when Timecode insertion (TimecodeInsertion) is enabled.
6268	DropFrameTimecode DropFrameTimecode
6269
6270	// Applies only if you set AFD Signaling(AfdSignaling) to Fixed (FIXED). Use Fixed
6271	// (FixedAfd) to specify a four-bit AFD value which the service will write on all
6272	// frames of this video output.
6273	FixedAfd int32
6274
6275	// Use the Height (Height) setting to define the video resolution height for this
6276	// output. Specify in pixels. If you don't provide a value here, the service will
6277	// use the input height.
6278	Height int32
6279
6280	// Use Selection placement (position) to define the video area in your output
6281	// frame. The area outside of the rectangle that you specify here is black.
6282	Position *Rectangle
6283
6284	// Use Respond to AFD (RespondToAfd) to specify how the service changes the video
6285	// itself in response to AFD values in the input. * Choose Respond to clip the
6286	// input video frame according to the AFD value, input display aspect ratio, and
6287	// output display aspect ratio. * Choose Passthrough to include the input AFD
6288	// values. Do not choose this when AfdSignaling is set to (NONE). A preferred
6289	// implementation of this workflow is to set RespondToAfd to (NONE) and set
6290	// AfdSignaling to (AUTO). * Choose None to remove all input AFD values from this
6291	// output.
6292	RespondToAfd RespondToAfd
6293
6294	// Specify how the service handles outputs that have a different aspect ratio from
6295	// the input aspect ratio. Choose Stretch to output (STRETCH_TO_OUTPUT) to have the
6296	// service stretch your video image to fit. Keep the setting Default (DEFAULT) to
6297	// have the service letterbox your video instead. This setting overrides any value
6298	// that you specify for the setting Selection placement (position) in this output.
6299	ScalingBehavior ScalingBehavior
6300
6301	// Use Sharpness (Sharpness) setting to specify the strength of anti-aliasing. This
6302	// setting changes the width of the anti-alias filter kernel used for scaling.
6303	// Sharpness only applies if your output resolution is different from your input
6304	// resolution. 0 is the softest setting, 100 the sharpest, and 50 recommended for
6305	// most content.
6306	Sharpness int32
6307
6308	// Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode
6309	// insertion when the input frame rate is identical to the output frame rate. To
6310	// include timecodes in this output, set Timecode insertion
6311	// (VideoTimecodeInsertion) to PIC_TIMING_SEI. To leave them out, set it to
6312	// DISABLED. Default is DISABLED. When the service inserts timecodes in an output,
6313	// by default, it uses any embedded timecodes from the input. If none are present,
6314	// the service will set the timecode for the first output frame to zero. To change
6315	// this default behavior, adjust the settings under Timecode configuration
6316	// (TimecodeConfig). In the console, these settings are located under Job > Job
6317	// settings > Timecode configuration. Note - Timecode source under input settings
6318	// (InputTimecodeSource) does not affect the timecodes that are inserted in the
6319	// output. Source under Job settings > Timecode configuration (TimecodeSource)
6320	// does.
6321	TimecodeInsertion VideoTimecodeInsertion
6322
6323	// Find additional transcoding features under Preprocessors (VideoPreprocessors).
6324	// Enable the features at each output individually. These features are disabled by
6325	// default.
6326	VideoPreprocessors *VideoPreprocessor
6327
6328	// Use Width (Width) to define the video resolution width, in pixels, for this
6329	// output. If you don't provide a value here, the service will use the input width.
6330	Width int32
6331}
6332
6333// Contains details about the output's video stream
6334type VideoDetail struct {
6335
6336	// Height in pixels for the output
6337	HeightInPx int32
6338
6339	// Width in pixels for the output
6340	WidthInPx int32
6341}
6342
6343// Find additional transcoding features under Preprocessors (VideoPreprocessors).
6344// Enable the features at each output individually. These features are disabled by
6345// default.
6346type VideoPreprocessor struct {
6347
6348	// Use these settings to convert the color space or to modify properties such as
6349	// hue and contrast for this output. For more information, see
6350	// https://docs.aws.amazon.com/mediaconvert/latest/ug/converting-the-color-space.html.
6351	ColorCorrector *ColorCorrector
6352
6353	// Use the deinterlacer to produce smoother motion and a clearer picture. For more
6354	// information, see
6355	// https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-scan-type.html.
6356	Deinterlacer *Deinterlacer
6357
6358	// Enable Dolby Vision feature to produce Dolby Vision compatible video output.
6359	DolbyVision *DolbyVision
6360
6361	// Enable the Image inserter (ImageInserter) feature to include a graphic overlay
6362	// on your video. Enable or disable this feature for each output individually. This
6363	// setting is disabled by default.
6364	ImageInserter *ImageInserter
6365
6366	// Enable the Noise reducer (NoiseReducer) feature to remove noise from your video
6367	// output if necessary. Enable or disable this feature for each output
6368	// individually. This setting is disabled by default.
6369	NoiseReducer *NoiseReducer
6370
6371	// If you work with a third party video watermarking partner, use the group of
6372	// settings that correspond with your watermarking partner to include watermarks in
6373	// your output.
6374	PartnerWatermarking *PartnerWatermarking
6375
6376	// Settings for burning the output timecode and specified prefix into the output.
6377	TimecodeBurnin *TimecodeBurnin
6378}
6379
6380// Input video selectors contain the video settings for the input. Each of your
6381// inputs can have up to one video selector.
6382type VideoSelector struct {
6383
6384	// Ignore this setting unless this input is a QuickTime animation with an alpha
6385	// channel. Use this setting to create separate Key and Fill outputs. In each
6386	// output, specify which part of the input MediaConvert uses. Leave this setting at
6387	// the default value DISCARD to delete the alpha channel and preserve the video.
6388	// Set it to REMAP_TO_LUMA to delete the video and map the alpha channel to the
6389	// luma channel of your outputs.
6390	AlphaBehavior AlphaBehavior
6391
6392	// If your input video has accurate color space metadata, or if you don't know
6393	// about color space, leave this set to the default value Follow (FOLLOW). The
6394	// service will automatically detect your input color space. If your input video
6395	// has metadata indicating the wrong color space, specify the accurate color space
6396	// here. If your input video is HDR 10 and the SMPTE ST 2086 Mastering Display
6397	// Color Volume static metadata isn't present in your video stream, or if that
6398	// metadata is present but not accurate, choose Force HDR 10 (FORCE_HDR10) here and
6399	// specify correct values in the input HDR 10 metadata (Hdr10Metadata) settings.
6400	// For more information about MediaConvert HDR jobs, see
6401	// https://docs.aws.amazon.com/console/mediaconvert/hdr.
6402	ColorSpace ColorSpace
6403
6404	// There are two sources for color metadata, the input file and the job input
6405	// settings Color space (ColorSpace) and HDR master display information
6406	// settings(Hdr10Metadata). The Color space usage setting determines which takes
6407	// precedence. Choose Force (FORCE) to use color metadata from the input job
6408	// settings. If you don't specify values for those settings, the service defaults
6409	// to using metadata from your input. FALLBACK - Choose Fallback (FALLBACK) to use
6410	// color metadata from the source when it is present. If there's no color metadata
6411	// in your input file, the service defaults to using values you specify in the
6412	// input settings.
6413	ColorSpaceUsage ColorSpaceUsage
6414
6415	// Use these settings to provide HDR 10 metadata that is missing or inaccurate in
6416	// your input video. Appropriate values vary depending on the input video and must
6417	// be provided by a color grader. The color grader generates these values during
6418	// the HDR 10 mastering process. The valid range for each of these settings is 0 to
6419	// 50,000. Each increment represents 0.00002 in CIE1931 color coordinate. Related
6420	// settings - When you specify these values, you must also set Color space
6421	// (ColorSpace) to HDR 10 (HDR10). To specify whether the the values you specify
6422	// here take precedence over the values in the metadata of your input file, set
6423	// Color space usage (ColorSpaceUsage). To specify whether color metadata is
6424	// included in an output, set Color metadata (ColorMetadata). For more information
6425	// about MediaConvert HDR jobs, see
6426	// https://docs.aws.amazon.com/console/mediaconvert/hdr.
6427	Hdr10Metadata *Hdr10Metadata
6428
6429	// Use PID (Pid) to select specific video data from an input file. Specify this
6430	// value as an integer; the system automatically converts it to the hexidecimal
6431	// value. For example, 257 selects PID 0x101. A PID, or packet identifier, is an
6432	// identifier for a set of data in an MPEG-2 transport stream container.
6433	Pid int32
6434
6435	// Selects a specific program from within a multi-program transport stream. Note
6436	// that Quad 4K is not currently supported.
6437	ProgramNumber int32
6438
6439	// Use Rotate (InputRotate) to specify how the service rotates your video. You can
6440	// choose automatic rotation or specify a rotation. You can specify a clockwise
6441	// rotation of 0, 90, 180, or 270 degrees. If your input video container is .mov or
6442	// .mp4 and your input has rotation metadata, you can choose Automatic to have the
6443	// service rotate your video according to the rotation specified in the metadata.
6444	// The rotation must be within one degree of 90, 180, or 270 degrees. If the
6445	// rotation metadata specifies any other rotation, the service will default to no
6446	// rotation. By default, the service does no rotation, even if your input video has
6447	// rotation metadata. The service doesn't pass through rotation metadata.
6448	Rotate InputRotate
6449
6450	// Use this setting when your input video codec is AVC-Intra. Ignore this setting
6451	// for all other inputs. If the sample range metadata in your input video is
6452	// accurate, or if you don't know about sample range, keep the default value,
6453	// Follow (FOLLOW), for this setting. When you do, the service automatically
6454	// detects your input sample range. If your input video has metadata indicating the
6455	// wrong sample range, specify the accurate sample range here. When you do,
6456	// MediaConvert ignores any sample range information in the input metadata.
6457	// Regardless of whether MediaConvert uses the input sample range or the sample
6458	// range that you specify, MediaConvert uses the sample range for transcoding and
6459	// also writes it to the output metadata.
6460	SampleRange InputSampleRange
6461}
6462
6463// Required when you set Codec, under AudioDescriptions>CodecSettings, to the value
6464// Vorbis.
6465type VorbisSettings struct {
6466
6467	// Optional. Specify the number of channels in this output audio track. Choosing
6468	// Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In
6469	// the API, valid values are 1 and 2. The default value is 2.
6470	Channels int32
6471
6472	// Optional. Specify the audio sample rate in Hz. Valid values are 22050, 32000,
6473	// 44100, and 48000. The default value is 48000.
6474	SampleRate int32
6475
6476	// Optional. Specify the variable audio quality of this Vorbis output from -1
6477	// (lowest quality, ~45 kbit/s) to 10 (highest quality, ~500 kbit/s). The default
6478	// value is 4 (~128 kbit/s). Values 5 and 6 are approximately 160 and 192 kbit/s,
6479	// respectively.
6480	VbrQuality int32
6481}
6482
6483// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the
6484// value VP8.
6485type Vp8Settings struct {
6486
6487	// Target bitrate in bits/second. For example, enter five megabits per second as
6488	// 5000000.
6489	Bitrate int32
6490
6491	// If you are using the console, use the Framerate setting to specify the frame
6492	// rate for this output. If you want to keep the same frame rate as the input
6493	// video, choose Follow source. If you want to do frame rate conversion, choose a
6494	// frame rate from the dropdown list or choose Custom. The framerates shown in the
6495	// dropdown list are decimal approximations of fractions. If you choose Custom,
6496	// specify your frame rate as a fraction. If you are creating your transcoding job
6497	// specification as a JSON file without the console, use FramerateControl to
6498	// specify which value the service uses for the frame rate for this output. Choose
6499	// INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the
6500	// input. Choose SPECIFIED if you want the service to use the frame rate you
6501	// specify in the settings FramerateNumerator and FramerateDenominator.
6502	FramerateControl Vp8FramerateControl
6503
6504	// Choose the method that you want MediaConvert to use when increasing or
6505	// decreasing the frame rate. We recommend using drop duplicate (DUPLICATE_DROP)
6506	// for numerically simple conversions, such as 60 fps to 30 fps. For numerically
6507	// complex conversions, you can use interpolate (INTERPOLATE) to avoid stutter.
6508	// This results in a smooth picture, but might introduce undesirable video
6509	// artifacts. For complex frame rate conversions, especially if your source video
6510	// has already been converted from its original cadence, use FrameFormer
6511	// (FRAMEFORMER) to do motion-compensated interpolation. FrameFormer chooses the
6512	// best conversion method frame by frame. Note that using FrameFormer increases the
6513	// transcoding time and incurs a significant add-on cost.
6514	FramerateConversionAlgorithm Vp8FramerateConversionAlgorithm
6515
6516	// When you use the API for transcode jobs that use frame rate conversion, specify
6517	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
6518	// FramerateDenominator to specify the denominator of this fraction. In this
6519	// example, use 1001 for the value of FramerateDenominator. When you use the
6520	// console for transcode jobs that use frame rate conversion, provide the value as
6521	// a decimal number for Framerate. In this example, specify 23.976.
6522	FramerateDenominator int32
6523
6524	// When you use the API for transcode jobs that use frame rate conversion, specify
6525	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
6526	// FramerateNumerator to specify the numerator of this fraction. In this example,
6527	// use 24000 for the value of FramerateNumerator. When you use the console for
6528	// transcode jobs that use frame rate conversion, provide the value as a decimal
6529	// number for Framerate. In this example, specify 23.976.
6530	FramerateNumerator int32
6531
6532	// GOP Length (keyframe interval) in frames. Must be greater than zero.
6533	GopSize float64
6534
6535	// Optional. Size of buffer (HRD buffer model) in bits. For example, enter five
6536	// megabits as 5000000.
6537	HrdBufferSize int32
6538
6539	// Ignore this setting unless you set qualityTuningLevel to MULTI_PASS. Optional.
6540	// Specify the maximum bitrate in bits/second. For example, enter five megabits per
6541	// second as 5000000. The default behavior uses twice the target bitrate as the
6542	// maximum bitrate.
6543	MaxBitrate int32
6544
6545	// Optional. Specify how the service determines the pixel aspect ratio (PAR) for
6546	// this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses
6547	// the PAR from your input video for your output. To specify a different PAR in the
6548	// console, choose any value other than Follow source. To specify a different PAR
6549	// by editing the JSON job specification, choose SPECIFIED. When you choose
6550	// SPECIFIED for this setting, you must also specify values for the parNumerator
6551	// and parDenominator settings.
6552	ParControl Vp8ParControl
6553
6554	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
6555	// console, this corresponds to any value other than Follow source. When you
6556	// specify an output pixel aspect ratio (PAR) that is different from your input
6557	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
6558	// widescreen, you would specify the ratio 40:33. In this example, the value for
6559	// parDenominator is 33.
6560	ParDenominator int32
6561
6562	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
6563	// console, this corresponds to any value other than Follow source. When you
6564	// specify an output pixel aspect ratio (PAR) that is different from your input
6565	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
6566	// widescreen, you would specify the ratio 40:33. In this example, the value for
6567	// parNumerator is 40.
6568	ParNumerator int32
6569
6570	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want
6571	// to trade off encoding speed for output video quality. The default behavior is
6572	// faster, lower quality, multi-pass encoding.
6573	QualityTuningLevel Vp8QualityTuningLevel
6574
6575	// With the VP8 codec, you can use only the variable bitrate (VBR) rate control
6576	// mode.
6577	RateControlMode Vp8RateControlMode
6578}
6579
6580// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the
6581// value VP9.
6582type Vp9Settings struct {
6583
6584	// Target bitrate in bits/second. For example, enter five megabits per second as
6585	// 5000000.
6586	Bitrate int32
6587
6588	// If you are using the console, use the Framerate setting to specify the frame
6589	// rate for this output. If you want to keep the same frame rate as the input
6590	// video, choose Follow source. If you want to do frame rate conversion, choose a
6591	// frame rate from the dropdown list or choose Custom. The framerates shown in the
6592	// dropdown list are decimal approximations of fractions. If you choose Custom,
6593	// specify your frame rate as a fraction. If you are creating your transcoding job
6594	// specification as a JSON file without the console, use FramerateControl to
6595	// specify which value the service uses for the frame rate for this output. Choose
6596	// INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the
6597	// input. Choose SPECIFIED if you want the service to use the frame rate you
6598	// specify in the settings FramerateNumerator and FramerateDenominator.
6599	FramerateControl Vp9FramerateControl
6600
6601	// Choose the method that you want MediaConvert to use when increasing or
6602	// decreasing the frame rate. We recommend using drop duplicate (DUPLICATE_DROP)
6603	// for numerically simple conversions, such as 60 fps to 30 fps. For numerically
6604	// complex conversions, you can use interpolate (INTERPOLATE) to avoid stutter.
6605	// This results in a smooth picture, but might introduce undesirable video
6606	// artifacts. For complex frame rate conversions, especially if your source video
6607	// has already been converted from its original cadence, use FrameFormer
6608	// (FRAMEFORMER) to do motion-compensated interpolation. FrameFormer chooses the
6609	// best conversion method frame by frame. Note that using FrameFormer increases the
6610	// transcoding time and incurs a significant add-on cost.
6611	FramerateConversionAlgorithm Vp9FramerateConversionAlgorithm
6612
6613	// When you use the API for transcode jobs that use frame rate conversion, specify
6614	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
6615	// FramerateDenominator to specify the denominator of this fraction. In this
6616	// example, use 1001 for the value of FramerateDenominator. When you use the
6617	// console for transcode jobs that use frame rate conversion, provide the value as
6618	// a decimal number for Framerate. In this example, specify 23.976.
6619	FramerateDenominator int32
6620
6621	// When you use the API for transcode jobs that use frame rate conversion, specify
6622	// the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use
6623	// FramerateNumerator to specify the numerator of this fraction. In this example,
6624	// use 24000 for the value of FramerateNumerator. When you use the console for
6625	// transcode jobs that use frame rate conversion, provide the value as a decimal
6626	// number for Framerate. In this example, specify 23.976.
6627	FramerateNumerator int32
6628
6629	// GOP Length (keyframe interval) in frames. Must be greater than zero.
6630	GopSize float64
6631
6632	// Size of buffer (HRD buffer model) in bits. For example, enter five megabits as
6633	// 5000000.
6634	HrdBufferSize int32
6635
6636	// Ignore this setting unless you set qualityTuningLevel to MULTI_PASS. Optional.
6637	// Specify the maximum bitrate in bits/second. For example, enter five megabits per
6638	// second as 5000000. The default behavior uses twice the target bitrate as the
6639	// maximum bitrate.
6640	MaxBitrate int32
6641
6642	// Optional. Specify how the service determines the pixel aspect ratio for this
6643	// output. The default behavior is to use the same pixel aspect ratio as your input
6644	// video.
6645	ParControl Vp9ParControl
6646
6647	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
6648	// console, this corresponds to any value other than Follow source. When you
6649	// specify an output pixel aspect ratio (PAR) that is different from your input
6650	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
6651	// widescreen, you would specify the ratio 40:33. In this example, the value for
6652	// parDenominator is 33.
6653	ParDenominator int32
6654
6655	// Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the
6656	// console, this corresponds to any value other than Follow source. When you
6657	// specify an output pixel aspect ratio (PAR) that is different from your input
6658	// video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC
6659	// widescreen, you would specify the ratio 40:33. In this example, the value for
6660	// parNumerator is 40.
6661	ParNumerator int32
6662
6663	// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want
6664	// to trade off encoding speed for output video quality. The default behavior is
6665	// faster, lower quality, multi-pass encoding.
6666	QualityTuningLevel Vp9QualityTuningLevel
6667
6668	// With the VP9 codec, you can use only the variable bitrate (VBR) rate control
6669	// mode.
6670	RateControlMode Vp9RateControlMode
6671}
6672
6673// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the
6674// value WAV.
6675type WavSettings struct {
6676
6677	// Specify Bit depth (BitDepth), in bits per sample, to choose the encoding quality
6678	// for this audio track.
6679	BitDepth int32
6680
6681	// Specify the number of channels in this output audio track. Valid values are 1
6682	// and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64.
6683	Channels int32
6684
6685	// The service defaults to using RIFF for WAV outputs. If your output audio is
6686	// likely to exceed 4 GB in file size, or if you otherwise need the extended
6687	// support of the RF64 format, set your output WAV file format to RF64.
6688	Format WavFormat
6689
6690	// Sample rate in Hz.
6691	SampleRate int32
6692}
6693
6694// WEBVTT Destination Settings
6695type WebvttDestinationSettings struct {
6696
6697	// Choose Enabled (ENABLED) to have MediaConvert use the font style, color, and
6698	// position information from the captions source in the input. Keep the default
6699	// value, Disabled (DISABLED), for simplified output captions.
6700	StylePassthrough WebvttStylePassthrough
6701}
6702