// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/cloud/dialogflow/v2/audio_config.proto

package dialogflow

import (
	fmt "fmt"
	math "math"

	proto "github.com/golang/protobuf/proto"
	duration "github.com/golang/protobuf/ptypes/duration"
	_ "google.golang.org/genproto/googleapis/api/annotations"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

// Audio encoding of the audio content sent in the conversational query request.
// Refer to the
// [Cloud Speech API
// documentation](https://cloud.google.com/speech-to-text/docs/basics) for more
// details.
type AudioEncoding int32

const (
	// Not specified.
	AudioEncoding_AUDIO_ENCODING_UNSPECIFIED AudioEncoding = 0
	// Uncompressed 16-bit signed little-endian samples (Linear PCM).
	AudioEncoding_AUDIO_ENCODING_LINEAR_16 AudioEncoding = 1
	// [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio
	// Codec) is the recommended encoding because it is lossless (therefore
	// recognition is not compromised) and requires only about half the
	// bandwidth of `LINEAR16`. `FLAC` stream encoding supports 16-bit and
	// 24-bit samples; however, not all fields in `STREAMINFO` are supported.
	AudioEncoding_AUDIO_ENCODING_FLAC AudioEncoding = 2
	// 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
	AudioEncoding_AUDIO_ENCODING_MULAW AudioEncoding = 3
	// Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
	AudioEncoding_AUDIO_ENCODING_AMR AudioEncoding = 4
	// Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
	AudioEncoding_AUDIO_ENCODING_AMR_WB AudioEncoding = 5
	// Opus encoded audio frames in Ogg container
	// ([OggOpus](https://wiki.xiph.org/OggOpus)).
	// `sample_rate_hertz` must be 16000.
	AudioEncoding_AUDIO_ENCODING_OGG_OPUS AudioEncoding = 6
	// Although the use of lossy encodings is not recommended, if a very low
	// bitrate encoding is required, `OGG_OPUS` is highly preferred over
	// Speex encoding. The [Speex](https://speex.org/) encoding supported by
	// Dialogflow API has a header byte in each block, as in MIME type
	// `audio/x-speex-with-header-byte`.
	// It is a variant of the RTP Speex encoding defined in
	// [RFC 5574](https://tools.ietf.org/html/rfc5574).
	// The stream is a sequence of blocks, one block per RTP packet. Each block
	// starts with a byte containing the length of the block, in bytes, followed
	// by one or more frames of Speex data, padded to an integral number of
	// bytes (octets) as specified in RFC 5574. In other words, each RTP header
	// is replaced with a single byte containing the block length. Only Speex
	// wideband is supported. `sample_rate_hertz` must be 16000.
	AudioEncoding_AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE AudioEncoding = 7
)

var AudioEncoding_name = map[int32]string{
	0: "AUDIO_ENCODING_UNSPECIFIED",
	1: "AUDIO_ENCODING_LINEAR_16",
	2: "AUDIO_ENCODING_FLAC",
	3: "AUDIO_ENCODING_MULAW",
	4: "AUDIO_ENCODING_AMR",
	5: "AUDIO_ENCODING_AMR_WB",
	6: "AUDIO_ENCODING_OGG_OPUS",
	7: "AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE",
}

var AudioEncoding_value = map[string]int32{
	"AUDIO_ENCODING_UNSPECIFIED":            0,
	"AUDIO_ENCODING_LINEAR_16":              1,
	"AUDIO_ENCODING_FLAC":                   2,
	"AUDIO_ENCODING_MULAW":                  3,
	"AUDIO_ENCODING_AMR":                    4,
	"AUDIO_ENCODING_AMR_WB":                 5,
	"AUDIO_ENCODING_OGG_OPUS":               6,
	"AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE": 7,
}

func (x AudioEncoding) String() string {
	return proto.EnumName(AudioEncoding_name, int32(x))
}

func (AudioEncoding) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_3ff9be2146363af6, []int{0}
}
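
// NOTE: exampleAudioEncodingNames is an illustrative sketch added alongside
// the generated code; it is not part of the generated API. It shows how the
// AudioEncoding_name and AudioEncoding_value maps (and the String method,
// which uses proto.EnumName under the hood) translate between numeric enum
// values and their proto names.
func exampleAudioEncodingNames() {
	enc := AudioEncoding_AUDIO_ENCODING_FLAC

	// String() resolves the numeric value through AudioEncoding_name.
	fmt.Println(enc.String()) // "AUDIO_ENCODING_FLAC"

	// The reverse lookup goes through AudioEncoding_value.
	if v, ok := AudioEncoding_value["AUDIO_ENCODING_LINEAR_16"]; ok {
		fmt.Println(AudioEncoding(v) == AudioEncoding_AUDIO_ENCODING_LINEAR_16) // true
	}
}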

// Variant of the specified [Speech model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
//
// See the [Cloud Speech
// documentation](https://cloud.google.com/speech-to-text/docs/enhanced-models)
// for which models have different variants. For example, the "phone_call" model
// has both a standard and an enhanced variant. When you use an enhanced model,
// you will generally receive higher quality results than for a standard model.
type SpeechModelVariant int32

const (
	// No model variant specified. In this case Dialogflow defaults to
	// USE_BEST_AVAILABLE.
	SpeechModelVariant_SPEECH_MODEL_VARIANT_UNSPECIFIED SpeechModelVariant = 0
	// Use the best available variant of the [Speech
	// model][InputAudioConfig.model] that the caller is eligible for.
	//
	// Please see the [Dialogflow
	// docs](https://cloud.google.com/dialogflow/docs/data-logging) for
	// how to make your project eligible for enhanced models.
	SpeechModelVariant_USE_BEST_AVAILABLE SpeechModelVariant = 1
	// Use standard model variant even if an enhanced model is available. See the
	// [Cloud Speech
	// documentation](https://cloud.google.com/speech-to-text/docs/enhanced-models)
	// for details about enhanced models.
	SpeechModelVariant_USE_STANDARD SpeechModelVariant = 2
	// Use an enhanced model variant:
	//
	// * If an enhanced variant does not exist for the given
	//   [model][google.cloud.dialogflow.v2.InputAudioConfig.model] and request language, Dialogflow falls
	//   back to the standard variant.
	//
	//   The [Cloud Speech
	//   documentation](https://cloud.google.com/speech-to-text/docs/enhanced-models)
	//   describes which models have enhanced variants.
	//
	// * If the API caller isn't eligible for enhanced models, Dialogflow returns
	//   an error. Please see the [Dialogflow
	//   docs](https://cloud.google.com/dialogflow/docs/data-logging)
	//   for how to make your project eligible.
	SpeechModelVariant_USE_ENHANCED SpeechModelVariant = 3
)

var SpeechModelVariant_name = map[int32]string{
	0: "SPEECH_MODEL_VARIANT_UNSPECIFIED",
	1: "USE_BEST_AVAILABLE",
	2: "USE_STANDARD",
	3: "USE_ENHANCED",
}

var SpeechModelVariant_value = map[string]int32{
	"SPEECH_MODEL_VARIANT_UNSPECIFIED": 0,
	"USE_BEST_AVAILABLE":               1,
	"USE_STANDARD":                     2,
	"USE_ENHANCED":                     3,
}

func (x SpeechModelVariant) String() string {
	return proto.EnumName(SpeechModelVariant_name, int32(x))
}

func (SpeechModelVariant) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_3ff9be2146363af6, []int{1}
}

// Gender of the voice as described in
// [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice).
type SsmlVoiceGender int32

const (
	// An unspecified gender, which means that the client doesn't care which
	// gender the selected voice will have.
	SsmlVoiceGender_SSML_VOICE_GENDER_UNSPECIFIED SsmlVoiceGender = 0
	// A male voice.
	SsmlVoiceGender_SSML_VOICE_GENDER_MALE SsmlVoiceGender = 1
	// A female voice.
	SsmlVoiceGender_SSML_VOICE_GENDER_FEMALE SsmlVoiceGender = 2
	// A gender-neutral voice.
	SsmlVoiceGender_SSML_VOICE_GENDER_NEUTRAL SsmlVoiceGender = 3
)

var SsmlVoiceGender_name = map[int32]string{
	0: "SSML_VOICE_GENDER_UNSPECIFIED",
	1: "SSML_VOICE_GENDER_MALE",
	2: "SSML_VOICE_GENDER_FEMALE",
	3: "SSML_VOICE_GENDER_NEUTRAL",
}

var SsmlVoiceGender_value = map[string]int32{
	"SSML_VOICE_GENDER_UNSPECIFIED": 0,
	"SSML_VOICE_GENDER_MALE":        1,
	"SSML_VOICE_GENDER_FEMALE":      2,
	"SSML_VOICE_GENDER_NEUTRAL":     3,
}

func (x SsmlVoiceGender) String() string {
	return proto.EnumName(SsmlVoiceGender_name, int32(x))
}

func (SsmlVoiceGender) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_3ff9be2146363af6, []int{2}
}

// Audio encoding of the output audio format in Text-To-Speech.
type OutputAudioEncoding int32

const (
	// Not specified.
	OutputAudioEncoding_OUTPUT_AUDIO_ENCODING_UNSPECIFIED OutputAudioEncoding = 0
	// Uncompressed 16-bit signed little-endian samples (Linear PCM).
	// Audio content returned as LINEAR16 also contains a WAV header.
	OutputAudioEncoding_OUTPUT_AUDIO_ENCODING_LINEAR_16 OutputAudioEncoding = 1
	// MP3 audio at 32kbps.
	OutputAudioEncoding_OUTPUT_AUDIO_ENCODING_MP3 OutputAudioEncoding = 2
	// Opus encoded audio wrapped in an ogg container. The result will be a
	// file which can be played natively on Android, and in browsers (at least
	// Chrome and Firefox). The quality of the encoding is considerably higher
	// than MP3 while using approximately the same bitrate.
	OutputAudioEncoding_OUTPUT_AUDIO_ENCODING_OGG_OPUS OutputAudioEncoding = 3
)

var OutputAudioEncoding_name = map[int32]string{
	0: "OUTPUT_AUDIO_ENCODING_UNSPECIFIED",
	1: "OUTPUT_AUDIO_ENCODING_LINEAR_16",
	2: "OUTPUT_AUDIO_ENCODING_MP3",
	3: "OUTPUT_AUDIO_ENCODING_OGG_OPUS",
}

var OutputAudioEncoding_value = map[string]int32{
	"OUTPUT_AUDIO_ENCODING_UNSPECIFIED": 0,
	"OUTPUT_AUDIO_ENCODING_LINEAR_16":   1,
	"OUTPUT_AUDIO_ENCODING_MP3":         2,
	"OUTPUT_AUDIO_ENCODING_OGG_OPUS":    3,
}

func (x OutputAudioEncoding) String() string {
	return proto.EnumName(OutputAudioEncoding_name, int32(x))
}

func (OutputAudioEncoding) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_3ff9be2146363af6, []int{3}
}

// Hints for the speech recognizer to help with recognition in a specific
// conversation state.
type SpeechContext struct {
	// Optional. A list of strings containing words and phrases that the speech
	// recognizer should recognize with higher likelihood.
	//
	// This list can be used to:
	// * improve accuracy for words and phrases you expect the user to say,
	//   e.g. typical commands for your Dialogflow agent
	// * add additional words to the speech recognizer vocabulary
	// * ...
	//
	// See the [Cloud Speech
	// documentation](https://cloud.google.com/speech-to-text/quotas) for usage
	// limits.
	Phrases []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
	// Optional. Boost for this context compared to other contexts:
	// * If the boost is positive, Dialogflow will increase the probability that
	//   the phrases in this context are recognized over similar sounding phrases.
	// * If the boost is unspecified or non-positive, Dialogflow will not apply
	//   any boost.
	//
	// Dialogflow recommends that you use boosts in the range (0, 20] and that you
	// find a value that fits your use case with binary search.
	Boost                float32  `protobuf:"fixed32,2,opt,name=boost,proto3" json:"boost,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *SpeechContext) Reset()         { *m = SpeechContext{} }
func (m *SpeechContext) String() string { return proto.CompactTextString(m) }
func (*SpeechContext) ProtoMessage()    {}
func (*SpeechContext) Descriptor() ([]byte, []int) {
	return fileDescriptor_3ff9be2146363af6, []int{0}
}

func (m *SpeechContext) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechContext.Unmarshal(m, b)
}
func (m *SpeechContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechContext.Marshal(b, m, deterministic)
}
func (m *SpeechContext) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechContext.Merge(m, src)
}
func (m *SpeechContext) XXX_Size() int {
	return xxx_messageInfo_SpeechContext.Size(m)
}
func (m *SpeechContext) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechContext.DiscardUnknown(m)
}

var xxx_messageInfo_SpeechContext proto.InternalMessageInfo

func (m *SpeechContext) GetPhrases() []string {
	if m != nil {
		return m.Phrases
	}
	return nil
}

func (m *SpeechContext) GetBoost() float32 {
	if m != nil {
		return m.Boost
	}
	return 0
}
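
// NOTE: exampleSpeechContext is an illustrative sketch added alongside the
// generated code, not generated output. It builds a SpeechContext with a few
// phrases and a moderate boost (the field comments above recommend a value in
// the range (0, 20]); the phrase strings are hypothetical. It also shows that
// the generated getters are nil-safe.
func exampleSpeechContext() {
	ctx := &SpeechContext{
		Phrases: []string{"check my order", "talk to an agent"},
		Boost:   10,
	}
	fmt.Println(ctx.GetPhrases(), ctx.GetBoost())

	// Getters guard against nil receivers and return zero values instead.
	var nilCtx *SpeechContext
	fmt.Println(nilCtx.GetPhrases(), nilCtx.GetBoost()) // prints "[] 0"
}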

// Information for a word recognized by the speech recognizer.
type SpeechWordInfo struct {
	// The word this info is for.
	Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
	// Time offset relative to the beginning of the audio that corresponds to the
	// start of the spoken word. This is an experimental feature and the accuracy
	// of the time offset can vary.
	StartOffset *duration.Duration `protobuf:"bytes,1,opt,name=start_offset,json=startOffset,proto3" json:"start_offset,omitempty"`
	// Time offset relative to the beginning of the audio that corresponds to the
	// end of the spoken word. This is an experimental feature and the accuracy of
	// the time offset can vary.
	EndOffset *duration.Duration `protobuf:"bytes,2,opt,name=end_offset,json=endOffset,proto3" json:"end_offset,omitempty"`
	// The Speech confidence between 0.0 and 1.0 for this word. A higher number
	// indicates an estimated greater likelihood that the recognized word is
	// correct. The default of 0.0 is a sentinel value indicating that confidence
	// was not set.
	//
	// This field is not guaranteed to be fully stable over time for the same
	// audio input. Users should also not rely on it to always be provided.
	Confidence           float32  `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *SpeechWordInfo) Reset()         { *m = SpeechWordInfo{} }
func (m *SpeechWordInfo) String() string { return proto.CompactTextString(m) }
func (*SpeechWordInfo) ProtoMessage()    {}
func (*SpeechWordInfo) Descriptor() ([]byte, []int) {
	return fileDescriptor_3ff9be2146363af6, []int{1}
}

func (m *SpeechWordInfo) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechWordInfo.Unmarshal(m, b)
}
func (m *SpeechWordInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechWordInfo.Marshal(b, m, deterministic)
}
func (m *SpeechWordInfo) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechWordInfo.Merge(m, src)
}
func (m *SpeechWordInfo) XXX_Size() int {
	return xxx_messageInfo_SpeechWordInfo.Size(m)
}
func (m *SpeechWordInfo) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechWordInfo.DiscardUnknown(m)
}

var xxx_messageInfo_SpeechWordInfo proto.InternalMessageInfo

func (m *SpeechWordInfo) GetWord() string {
	if m != nil {
		return m.Word
	}
	return ""
}

func (m *SpeechWordInfo) GetStartOffset() *duration.Duration {
	if m != nil {
		return m.StartOffset
	}
	return nil
}

func (m *SpeechWordInfo) GetEndOffset() *duration.Duration {
	if m != nil {
		return m.EndOffset
	}
	return nil
}

func (m *SpeechWordInfo) GetConfidence() float32 {
	if m != nil {
		return m.Confidence
	}
	return 0
}
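
// NOTE: exampleSpeechWordInfoOffsets is an illustrative sketch added alongside
// the generated code, not generated output. It shows how the duration-typed
// start and end offsets of a SpeechWordInfo can be read; the word and values
// are hypothetical.
func exampleSpeechWordInfoOffsets() {
	info := &SpeechWordInfo{
		Word:        "hello",
		StartOffset: &duration.Duration{Seconds: 1, Nanos: 200000000}, // 1.2s into the audio
		EndOffset:   &duration.Duration{Seconds: 1, Nanos: 700000000}, // 1.7s into the audio
		Confidence:  0.92,
	}

	// Convert the proto Duration offsets into seconds for display.
	start := info.GetStartOffset()
	end := info.GetEndOffset()
	startSec := float64(start.GetSeconds()) + float64(start.GetNanos())/1e9
	endSec := float64(end.GetSeconds()) + float64(end.GetNanos())/1e9
	fmt.Printf("%q spans %.1fs to %.1fs (confidence %.2f)\n",
		info.GetWord(), startSec, endSec, info.GetConfidence())
}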

// Instructs the speech recognizer how to process the audio content.
type InputAudioConfig struct {
	// Required. Audio encoding of the audio content to process.
	AudioEncoding AudioEncoding `protobuf:"varint,1,opt,name=audio_encoding,json=audioEncoding,proto3,enum=google.cloud.dialogflow.v2.AudioEncoding" json:"audio_encoding,omitempty"`
	// Required. Sample rate (in Hertz) of the audio content sent in the query.
	// Refer to
	// [Cloud Speech API
	// documentation](https://cloud.google.com/speech-to-text/docs/basics) for
	// more details.
	SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
	// Required. The language of the supplied audio. Dialogflow does not do
	// translations. See [Language
	// Support](https://cloud.google.com/dialogflow/docs/reference/language)
	// for a list of the currently supported language codes. Note that queries in
	// the same session do not necessarily need to specify the same language.
	LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
	// If `true`, Dialogflow returns [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
	// [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult] with information about the recognized speech
	// words, e.g. start and end time offsets. If false or unspecified, Speech
	// doesn't return any word-level information.
	EnableWordInfo bool `protobuf:"varint,13,opt,name=enable_word_info,json=enableWordInfo,proto3" json:"enable_word_info,omitempty"`
	// A list of strings containing words and phrases that the speech
	// recognizer should recognize with higher likelihood.
	//
	// See [the Cloud Speech
	// documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
	// for more details.
	//
	// This field is deprecated. Please use [speech_contexts]() instead. If you
	// specify both [phrase_hints]() and [speech_contexts](), Dialogflow will
	// treat the [phrase_hints]() as a single additional [SpeechContext]().
	PhraseHints []string `protobuf:"bytes,4,rep,name=phrase_hints,json=phraseHints,proto3" json:"phrase_hints,omitempty"` // Deprecated: Do not use.
	// Context information to assist speech recognition.
	//
	// See [the Cloud Speech
	// documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
	// for more details.
	SpeechContexts []*SpeechContext `protobuf:"bytes,11,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"`
	// Which Speech model to select for the given request. Select the
	// model best suited to your domain to get best results. If a model is not
	// explicitly specified, then we auto-select a model based on the parameters
	// in the InputAudioConfig.
	// If enhanced speech model is enabled for the agent and an enhanced
	// version of the specified model for the language does not exist, then the
	// speech is recognized using the standard version of the specified model.
	// Refer to
	// [Cloud Speech API
	// documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
	// for more details.
	Model string `protobuf:"bytes,7,opt,name=model,proto3" json:"model,omitempty"`
	// Which variant of the [Speech model][google.cloud.dialogflow.v2.InputAudioConfig.model] to use.
	ModelVariant SpeechModelVariant `protobuf:"varint,10,opt,name=model_variant,json=modelVariant,proto3,enum=google.cloud.dialogflow.v2.SpeechModelVariant" json:"model_variant,omitempty"`
	// If `false` (default), recognition does not cease until the
	// client closes the stream.
	// If `true`, the recognizer will detect a single spoken utterance in input
	// audio. Recognition ceases when it detects the audio's voice has
	// stopped or paused. In this case, once a detected intent is received, the
	// client should close the stream and start a new request with a new stream as
	// needed.
	// Note: This setting is relevant only for streaming methods.
	// Note: When specified, InputAudioConfig.single_utterance takes precedence
	// over StreamingDetectIntentRequest.single_utterance.
	SingleUtterance      bool     `protobuf:"varint,8,opt,name=single_utterance,json=singleUtterance,proto3" json:"single_utterance,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *InputAudioConfig) Reset()         { *m = InputAudioConfig{} }
func (m *InputAudioConfig) String() string { return proto.CompactTextString(m) }
func (*InputAudioConfig) ProtoMessage()    {}
func (*InputAudioConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_3ff9be2146363af6, []int{2}
}

func (m *InputAudioConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_InputAudioConfig.Unmarshal(m, b)
}
func (m *InputAudioConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_InputAudioConfig.Marshal(b, m, deterministic)
}
func (m *InputAudioConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_InputAudioConfig.Merge(m, src)
}
func (m *InputAudioConfig) XXX_Size() int {
	return xxx_messageInfo_InputAudioConfig.Size(m)
}
func (m *InputAudioConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_InputAudioConfig.DiscardUnknown(m)
}

var xxx_messageInfo_InputAudioConfig proto.InternalMessageInfo

func (m *InputAudioConfig) GetAudioEncoding() AudioEncoding {
	if m != nil {
		return m.AudioEncoding
	}
	return AudioEncoding_AUDIO_ENCODING_UNSPECIFIED
}

func (m *InputAudioConfig) GetSampleRateHertz() int32 {
	if m != nil {
		return m.SampleRateHertz
	}
	return 0
}

func (m *InputAudioConfig) GetLanguageCode() string {
	if m != nil {
		return m.LanguageCode
	}
	return ""
}

func (m *InputAudioConfig) GetEnableWordInfo() bool {
	if m != nil {
		return m.EnableWordInfo
	}
	return false
}

// Deprecated: Do not use.
func (m *InputAudioConfig) GetPhraseHints() []string {
	if m != nil {
		return m.PhraseHints
	}
	return nil
}

func (m *InputAudioConfig) GetSpeechContexts() []*SpeechContext {
	if m != nil {
		return m.SpeechContexts
	}
	return nil
}

func (m *InputAudioConfig) GetModel() string {
	if m != nil {
		return m.Model
	}
	return ""
}

func (m *InputAudioConfig) GetModelVariant() SpeechModelVariant {
	if m != nil {
		return m.ModelVariant
	}
	return SpeechModelVariant_SPEECH_MODEL_VARIANT_UNSPECIFIED
}

func (m *InputAudioConfig) GetSingleUtterance() bool {
	if m != nil {
		return m.SingleUtterance
	}
	return false
}
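
// NOTE: exampleInputAudioConfig is an illustrative sketch added alongside the
// generated code, not generated output. It assembles an InputAudioConfig the
// way a detect-intent style request would carry it: LINEAR16 audio at 16 kHz,
// English, word-level info enabled, one SpeechContext, and the enhanced model
// variant. All concrete values are assumptions for illustration only.
func exampleInputAudioConfig() {
	cfg := &InputAudioConfig{
		AudioEncoding:   AudioEncoding_AUDIO_ENCODING_LINEAR_16,
		SampleRateHertz: 16000,
		LanguageCode:    "en-US",
		EnableWordInfo:  true,
		SpeechContexts: []*SpeechContext{
			{Phrases: []string{"Dialogflow"}, Boost: 5},
		},
		ModelVariant:    SpeechModelVariant_USE_ENHANCED,
		SingleUtterance: true,
	}

	// The generated getters mirror the struct fields.
	fmt.Println(cfg.GetAudioEncoding(), cfg.GetSampleRateHertz(), cfg.GetLanguageCode())

	// proto.Marshal produces the wire-format bytes that a request would carry.
	if b, err := proto.Marshal(cfg); err == nil {
		fmt.Println("encoded size:", len(b))
	}
}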

// Description of which voice to use for speech synthesis.
type VoiceSelectionParams struct {
	// Optional. The name of the voice. If not set, the service will choose a
	// voice based on the other parameters such as language_code and
	// [ssml_gender][google.cloud.dialogflow.v2.VoiceSelectionParams.ssml_gender].
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Optional. The preferred gender of the voice. If not set, the service will
	// choose a voice based on the other parameters such as language_code and
	// [name][google.cloud.dialogflow.v2.VoiceSelectionParams.name]. Note that this is only a preference, not a requirement. If a
	// voice of the appropriate gender is not available, the synthesizer should
	// substitute a voice with a different gender rather than failing the request.
	SsmlGender           SsmlVoiceGender `protobuf:"varint,2,opt,name=ssml_gender,json=ssmlGender,proto3,enum=google.cloud.dialogflow.v2.SsmlVoiceGender" json:"ssml_gender,omitempty"`
	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
	XXX_unrecognized     []byte          `json:"-"`
	XXX_sizecache        int32           `json:"-"`
}

func (m *VoiceSelectionParams) Reset()         { *m = VoiceSelectionParams{} }
func (m *VoiceSelectionParams) String() string { return proto.CompactTextString(m) }
func (*VoiceSelectionParams) ProtoMessage()    {}
func (*VoiceSelectionParams) Descriptor() ([]byte, []int) {
	return fileDescriptor_3ff9be2146363af6, []int{3}
}

func (m *VoiceSelectionParams) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_VoiceSelectionParams.Unmarshal(m, b)
}
func (m *VoiceSelectionParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_VoiceSelectionParams.Marshal(b, m, deterministic)
}
func (m *VoiceSelectionParams) XXX_Merge(src proto.Message) {
	xxx_messageInfo_VoiceSelectionParams.Merge(m, src)
}
func (m *VoiceSelectionParams) XXX_Size() int {
	return xxx_messageInfo_VoiceSelectionParams.Size(m)
}
func (m *VoiceSelectionParams) XXX_DiscardUnknown() {
	xxx_messageInfo_VoiceSelectionParams.DiscardUnknown(m)
}

var xxx_messageInfo_VoiceSelectionParams proto.InternalMessageInfo

func (m *VoiceSelectionParams) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}

func (m *VoiceSelectionParams) GetSsmlGender() SsmlVoiceGender {
	if m != nil {
		return m.SsmlGender
	}
	return SsmlVoiceGender_SSML_VOICE_GENDER_UNSPECIFIED
}
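
// NOTE: exampleVoiceSelectionParams is an illustrative sketch added alongside
// the generated code, not generated output. Either field may be left at its
// zero value; as the field comments above describe, the service then chooses
// a voice based on the remaining parameters.
func exampleVoiceSelectionParams() {
	voice := &VoiceSelectionParams{
		// Name is optional; leaving it empty lets the service pick a voice.
		SsmlGender: SsmlVoiceGender_SSML_VOICE_GENDER_FEMALE,
	}
	fmt.Println(voice.GetName() == "", voice.GetSsmlGender()) // true SSML_VOICE_GENDER_FEMALE
}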

// Configuration of how speech should be synthesized.
type SynthesizeSpeechConfig struct {
	// Optional. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
	// native speed supported by the specific voice. 2.0 is twice as fast, and
	// 0.5 is half as fast. If unset (0.0), defaults to the native 1.0 speed. Any
	// other values < 0.25 or > 4.0 will return an error.
	SpeakingRate float64 `protobuf:"fixed64,1,opt,name=speaking_rate,json=speakingRate,proto3" json:"speaking_rate,omitempty"`
	// Optional. Speaking pitch, in the range [-20.0, 20.0]. 20 means increase 20
	// semitones from the original pitch. -20 means decrease 20 semitones from the
	// original pitch.
	Pitch float64 `protobuf:"fixed64,2,opt,name=pitch,proto3" json:"pitch,omitempty"`
	// Optional. Volume gain (in dB) of the normal native volume supported by the
	// specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of
	// 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB)
	// will play at approximately half the amplitude of the normal native signal
	// amplitude. A value of +6.0 (dB) will play at approximately twice the
	// amplitude of the normal native signal amplitude. We strongly recommend not
	// to exceed +10 (dB) as there's usually no effective increase in loudness for
	// any value greater than that.
	VolumeGainDb float64 `protobuf:"fixed64,3,opt,name=volume_gain_db,json=volumeGainDb,proto3" json:"volume_gain_db,omitempty"`
	// Optional. An identifier which selects 'audio effects' profiles that are
	// applied on (post synthesized) text to speech. Effects are applied on top of
	// each other in the order they are given.
	EffectsProfileId []string `protobuf:"bytes,5,rep,name=effects_profile_id,json=effectsProfileId,proto3" json:"effects_profile_id,omitempty"`
	// Optional. The desired voice of the synthesized audio.
	Voice                *VoiceSelectionParams `protobuf:"bytes,4,opt,name=voice,proto3" json:"voice,omitempty"`
	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
	XXX_unrecognized     []byte                `json:"-"`
	XXX_sizecache        int32                 `json:"-"`
}

func (m *SynthesizeSpeechConfig) Reset()         { *m = SynthesizeSpeechConfig{} }
func (m *SynthesizeSpeechConfig) String() string { return proto.CompactTextString(m) }
func (*SynthesizeSpeechConfig) ProtoMessage()    {}
func (*SynthesizeSpeechConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_3ff9be2146363af6, []int{4}
}

func (m *SynthesizeSpeechConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SynthesizeSpeechConfig.Unmarshal(m, b)
}
func (m *SynthesizeSpeechConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SynthesizeSpeechConfig.Marshal(b, m, deterministic)
}
func (m *SynthesizeSpeechConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SynthesizeSpeechConfig.Merge(m, src)
}
func (m *SynthesizeSpeechConfig) XXX_Size() int {
	return xxx_messageInfo_SynthesizeSpeechConfig.Size(m)
}
func (m *SynthesizeSpeechConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_SynthesizeSpeechConfig.DiscardUnknown(m)
}

var xxx_messageInfo_SynthesizeSpeechConfig proto.InternalMessageInfo

func (m *SynthesizeSpeechConfig) GetSpeakingRate() float64 {
	if m != nil {
		return m.SpeakingRate
	}
	return 0
}

func (m *SynthesizeSpeechConfig) GetPitch() float64 {
	if m != nil {
		return m.Pitch
	}
	return 0
}

func (m *SynthesizeSpeechConfig) GetVolumeGainDb() float64 {
	if m != nil {
		return m.VolumeGainDb
	}
	return 0
}

func (m *SynthesizeSpeechConfig) GetEffectsProfileId() []string {
	if m != nil {
		return m.EffectsProfileId
	}
	return nil
}

func (m *SynthesizeSpeechConfig) GetVoice() *VoiceSelectionParams {
	if m != nil {
		return m.Voice
	}
	return nil
}
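
// NOTE: exampleSynthesizeSpeechConfig is an illustrative sketch added
// alongside the generated code, not generated output. The numeric values stay
// inside the documented ranges: speaking rate in [0.25, 4.0], pitch in
// [-20.0, 20.0], and volume gain in [-96.0, 16.0] dB.
func exampleSynthesizeSpeechConfig() {
	speech := &SynthesizeSpeechConfig{
		SpeakingRate: 1.1,  // slightly faster than the native speed
		Pitch:        -2.0, // two semitones below the original pitch
		VolumeGainDb: 0.0,  // normal native amplitude
		Voice: &VoiceSelectionParams{
			SsmlGender: SsmlVoiceGender_SSML_VOICE_GENDER_NEUTRAL,
		},
	}
	fmt.Println(speech.GetSpeakingRate(), speech.GetVoice().GetSsmlGender())
}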

// Instructs the speech synthesizer on how to generate the output audio content.
// If this audio config is supplied in a request, it overrides all existing
// text-to-speech settings applied to the agent.
type OutputAudioConfig struct {
	// Required. Audio encoding of the synthesized audio content.
	AudioEncoding OutputAudioEncoding `protobuf:"varint,1,opt,name=audio_encoding,json=audioEncoding,proto3,enum=google.cloud.dialogflow.v2.OutputAudioEncoding" json:"audio_encoding,omitempty"`
	// The synthesis sample rate (in hertz) for this audio. If not
	// provided, then the synthesizer will use the default sample rate based on
	// the audio encoding. If this is different from the voice's natural sample
	// rate, then the synthesizer will honor this request by converting to the
	// desired sample rate (which might result in worse audio quality).
	SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
	// Configuration of how speech should be synthesized.
	SynthesizeSpeechConfig *SynthesizeSpeechConfig `protobuf:"bytes,3,opt,name=synthesize_speech_config,json=synthesizeSpeechConfig,proto3" json:"synthesize_speech_config,omitempty"`
	XXX_NoUnkeyedLiteral   struct{}                `json:"-"`
	XXX_unrecognized       []byte                  `json:"-"`
	XXX_sizecache          int32                   `json:"-"`
}

func (m *OutputAudioConfig) Reset()         { *m = OutputAudioConfig{} }
func (m *OutputAudioConfig) String() string { return proto.CompactTextString(m) }
func (*OutputAudioConfig) ProtoMessage()    {}
func (*OutputAudioConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_3ff9be2146363af6, []int{5}
}

func (m *OutputAudioConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_OutputAudioConfig.Unmarshal(m, b)
}
func (m *OutputAudioConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_OutputAudioConfig.Marshal(b, m, deterministic)
}
func (m *OutputAudioConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_OutputAudioConfig.Merge(m, src)
}
func (m *OutputAudioConfig) XXX_Size() int {
	return xxx_messageInfo_OutputAudioConfig.Size(m)
}
func (m *OutputAudioConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_OutputAudioConfig.DiscardUnknown(m)
}

var xxx_messageInfo_OutputAudioConfig proto.InternalMessageInfo

func (m *OutputAudioConfig) GetAudioEncoding() OutputAudioEncoding {
	if m != nil {
		return m.AudioEncoding
	}
	return OutputAudioEncoding_OUTPUT_AUDIO_ENCODING_UNSPECIFIED
}

func (m *OutputAudioConfig) GetSampleRateHertz() int32 {
	if m != nil {
		return m.SampleRateHertz
	}
	return 0
}

func (m *OutputAudioConfig) GetSynthesizeSpeechConfig() *SynthesizeSpeechConfig {
	if m != nil {
		return m.SynthesizeSpeechConfig
	}
	return nil
}
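
// NOTE: exampleOutputAudioConfig is an illustrative sketch added alongside the
// generated code, not generated output. It combines the pieces above into an
// OutputAudioConfig: OGG_OPUS output, an explicit sample rate, and a nested
// SynthesizeSpeechConfig. Leaving SampleRateHertz at 0 would let the
// synthesizer fall back to the encoding's default rate, per the field comment.
func exampleOutputAudioConfig() {
	out := &OutputAudioConfig{
		AudioEncoding:   OutputAudioEncoding_OUTPUT_AUDIO_ENCODING_OGG_OPUS,
		SampleRateHertz: 24000,
		SynthesizeSpeechConfig: &SynthesizeSpeechConfig{
			SpeakingRate: 1.0,
		},
	}
	fmt.Println(out.GetAudioEncoding(), out.GetSampleRateHertz())
	fmt.Println(out.GetSynthesizeSpeechConfig().GetSpeakingRate())
}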

func init() {
	proto.RegisterEnum("google.cloud.dialogflow.v2.AudioEncoding", AudioEncoding_name, AudioEncoding_value)
	proto.RegisterEnum("google.cloud.dialogflow.v2.SpeechModelVariant", SpeechModelVariant_name, SpeechModelVariant_value)
	proto.RegisterEnum("google.cloud.dialogflow.v2.SsmlVoiceGender", SsmlVoiceGender_name, SsmlVoiceGender_value)
	proto.RegisterEnum("google.cloud.dialogflow.v2.OutputAudioEncoding", OutputAudioEncoding_name, OutputAudioEncoding_value)
	proto.RegisterType((*SpeechContext)(nil), "google.cloud.dialogflow.v2.SpeechContext")
	proto.RegisterType((*SpeechWordInfo)(nil), "google.cloud.dialogflow.v2.SpeechWordInfo")
	proto.RegisterType((*InputAudioConfig)(nil), "google.cloud.dialogflow.v2.InputAudioConfig")
	proto.RegisterType((*VoiceSelectionParams)(nil), "google.cloud.dialogflow.v2.VoiceSelectionParams")
	proto.RegisterType((*SynthesizeSpeechConfig)(nil), "google.cloud.dialogflow.v2.SynthesizeSpeechConfig")
	proto.RegisterType((*OutputAudioConfig)(nil), "google.cloud.dialogflow.v2.OutputAudioConfig")
}

func init() {
	proto.RegisterFile("google/cloud/dialogflow/v2/audio_config.proto", fileDescriptor_3ff9be2146363af6)
}

var fileDescriptor_3ff9be2146363af6 = []byte{
	// 1151 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdb, 0x6e, 0xdb, 0x46,
	0x13, 0xfe, 0x49, 0xd9, 0x49, 0x3c, 0x3a, 0x98, 0xd9, 0xe4, 0x77, 0x68, 0x37, 0x71, 0x14, 0x25,
	0x01, 0x14, 0xb7, 0x95, 0x5a, 0x05, 0x28, 0x0a, 0xb4, 0x40, 0x41, 0x89, 0xb4, 0x2c, 0x40, 0x27,
	0xac, 0x0e, 0xee, 0xe1, 0x62, 0xb1, 0x12, 0x97, 0x14, 0x51, 0x6a, 0x57, 0x20, 0x29, 0x25, 0xcd,
	0x23, 0x14, 0xe8, 0x33, 0x14, 0x28, 0x7a, 0xd5, 0x67, 0xe8, 0xc3, 0xf4, 0x0d, 0xda, 0xcb, 0x02,
	0xbd, 0x29, 0xb8, 0x94, 0x6c, 0x45, 0x56, 0x94, 0x8b, 0xde, 0xed, 0xcc, 0x37, 0x33, 0x3b, 0x3b,
	0xdf, 0x37, 0x20, 0xe1, 0x63, 0x57, 0x08, 0xd7, 0x67, 0xe5, 0xb1, 0x2f, 0xe6, 0x76, 0xd9, 0xf6,
	0xa8, 0x2f, 0x5c, 0xc7, 0x17, 0xaf, 0xca, 0x8b, 0x4a, 0x99, 0xce, 0x6d, 0x4f, 0x90, 0xb1, 0xe0,
	0x8e, 0xe7, 0x96, 0x66, 0x81, 0x88, 0x04, 0x3a, 0x49, 0xc2, 0x4b, 0x32, 0xbc, 0x74, 0x1d, 0x5e,
	0x5a, 0x54, 0x4e, 0x1e, 0x2f, 0x4b, 0xd1, 0x99, 0x57, 0x76, 0x3c, 0xe6, 0xdb, 0x64, 0xc4, 0x26,
	0x74, 0xe1, 0x89, 0x20, 0x49, 0x3e, 0x39, 0x5e, 0x0b, 0x08, 0x58, 0x28, 0xe6, 0xc1, 0x98, 0x2d,
	0xa1, 0xd3, 0x25, 0x24, 0xad, 0xd1, 0xdc, 0x29, 0xdb, 0xf3, 0x80, 0x46, 0x9e, 0xe0, 0x4b, 0xfc,
	0xe1, 0x5a, 0x2a, 0xe5, 0x5c, 0x44, 0x12, 0x0c, 0x13, 0xb4, 0xf0, 0x15, 0x64, 0x7b, 0x33, 0xc6,
	0xc6, 0x93, 0x9a, 0xe0, 0x11, 0x7b, 0x1d, 0x21, 0x1d, 0x6e, 0xcf, 0x26, 0x01, 0x0d, 0x59, 0xa8,
	0x2b, 0xf9, 0x54, 0xf1, 0x00, 0xaf, 0x4c, 0x74, 0x1f, 0xf6, 0x47, 0x42, 0x84, 0x91, 0xae, 0xe6,
	0x95, 0xa2, 0x8a, 0x13, 0xa3, 0xf0, 0xbb, 0x02, 0xb9, 0xa4, 0xc2, 0xa5, 0x08, 0xec, 0x06, 0x77,
	0x04, 0x42, 0xb0, 0xf7, 0x4a, 0x04, 0xb6, 0x9e, 0xca, 0x2b, 0xc5, 0x03, 0x2c, 0xcf, 0xe8, 0x4b,
	0xc8, 0x84, 0x11, 0x0d, 0x22, 0x22, 0x1c, 0x27, 0x64, 0x91, 0xae, 0xe4, 0x95, 0x62, 0xba, 0x72,
	0x5c, 0x5a, 0x0e, 0x65, 0xd5, 0x7c, 0xc9, 0x5c, 0x36, 0x8f, 0xd3, 0x32, 0xbc, 0x23, 0xa3, 0xd1,
	0xe7, 0x00, 0x8c, 0xdb, 0xab, 0x5c, 0xf5, 0x7d, 0xb9, 0x07, 0x8c, 0xdb, 0xcb, 0xcc, 0x53, 0x00,
	0xc9, 0x82, 0xcd, 0xf8, 0x98, 0xe9, 0x7b, 0xb2, 0xf3, 0x35, 0x4f, 0xe1, 0xaf, 0x14, 0x68, 0x0d,
	0x3e, 0x9b, 0x47, 0x46, 0xcc, 0x58, 0x4d, 0x12, 0x86, 0xba, 0x90, 0x4b, 0x08, 0x64, 0x7c, 0x2c,
	0x6c, 0x8f, 0xbb, 0xb2, 0xdd, 0x5c, 0xe5, 0x45, 0xe9, 0xdd, 0x1c, 0x96, 0x64, 0x01, 0x6b, 0x99,
	0x80, 0xb3, 0x74, 0xdd, 0x44, 0x67, 0x70, 0x37, 0xa4, 0xd3, 0x99, 0xcf, 0x48, 0x40, 0x23, 0x46,
	0x26, 0x2c, 0x88, 0xde, 0xc8, 0x77, 0xec, 0xe3, 0xc3, 0x04, 0xc0, 0x34, 0x62, 0x17, 0xb1, 0x1b,
	0x3d, 0x85, 0xac, 0x4f, 0xb9, 0x3b, 0xa7, 0x2e, 0x23, 0x63, 0x61, 0xb3, 0xe5, 0x1c, 0x33, 0x2b,
	0x67, 0x4d, 0xd8, 0x0c, 0x15, 0x41, 0x63, 0x9c, 0x8e, 0x7c, 0x46, 0xe2, 0xf1, 0x12, 0x8f, 0x3b,
	0x42, 0xcf, 0xe6, 0x95, 0xe2, 0x1d, 0x9c, 0x4b, 0xfc, 0x57, 0x6c, 0x3c, 0x87, 0x4c, 0xc2, 0x20,
	0x99, 0x78, 0x3c, 0x0a, 0xf5, 0xbd, 0x98, 0xd5, 0xaa, 0xaa, 0x2b, 0x38, 0x9d, 0xf8, 0x2f, 0x62,
	0x37, 0xc2, 0x70, 0x18, 0x4a, 0x1a, 0x63, 0xd5, 0xc6, 0x4a, 0x08, 0xf5, 0x74, 0x3e, 0x55, 0x4c,
	0xef, 0x7e, 0xf4, 0x5b, 0xda, 0xc1, 0xb9, 0x70, 0xdd, 0x94, 0x8a, 0x99, 0x0a, 0x9b, 0xf9, 0xfa,
	0x6d, 0xf9, 0x82, 0xc4, 0x40, 0x3d, 0xc8, 0xca, 0x03, 0x59, 0xd0, 0xc0, 0xa3, 0x3c, 0xd2, 0x41,
	0x0e, 0xb7, 0xf4, 0xfe, 0x7b, 0x5a, 0x71, 0xda, 0x30, 0xc9, 0xc2, 0x99, 0xe9, 0x9a, 0x85, 0x5e,
	0x80, 0x16, 0x7a, 0xdc, 0xf5, 0x19, 0x99, 0x47, 0x11, 0x0b, 0x68, 0xcc, 0xf6, 0x1d, 0x39, 0x8f,
	0xc3, 0xc4, 0x3f, 0x58, 0xb9, 0x0b, 0xaf, 0xe1, 0xfe, 0x50, 0x78, 0x63, 0xd6, 0x63, 0x3e, 0x1b,
	0xc7, 0x7a, 0xe9, 0xd2, 0x80, 0x4e, 0xc3, 0x58, 0xb6, 0x9c, 0x4e, 0x99, 0xe4, 0xfa, 0x00, 0xcb,
	0x33, 0x6a, 0x42, 0x3a, 0x0c, 0xa7, 0x3e, 0x71, 0x19, 0xb7, 0x59, 0x20, 0x19, 0xcb, 0x55, 0x3e,
	0xdc, 0xd9, 0x69, 0x38, 0xf5, 0x65, 0xf9, 0xba, 0x4c, 0xc1, 0x10, 0xe7, 0x27, 0xe7, 0xc2, 0x9f,
	0x0a, 0x1c, 0xf5, 0x7e, 0xe0, 0xd1, 0x84, 0x85, 0xde, 0x1b, 0x76, 0x35, 0xbb, 0x58, 0x72, 0x4f,
	0x21, 0x1b, 0xce, 0x18, 0xfd, 0xde, 0xe3, 0xae, 0x94, 0x88, 0xec, 0x42, 0xc1, 0x99, 0x95, 0x33,
	0x96, 0x47, 0x3c, 0xcf, 0x99, 0x17, 0x8d, 0x27, 0xb2, 0x0f, 0x05, 0x27, 0x06, 0x7a, 0x06, 0xb9,
	0x85, 0xf0, 0xe7, 0x53, 0x46, 0x5c, 0xea, 0x71, 0x62, 0x8f, 0xa4, 0x60, 0x14, 0x9c, 0x49, 0xbc,
	0x75, 0xea, 0x71, 0x73, 0x84, 0x3e, 0x02, 0xc4, 0x1c, 0x87, 0x8d, 0xa3, 0x90, 0xcc, 0x02, 0xe1,
	0x78, 0x3e, 0x23, 0x9e, 0xad, 0xef, 0xcb, 0x15, 0xd7, 0x96, 0x48, 0x37, 0x01, 0x1a, 0x36, 0x3a,
	0x87, 0xfd, 0x45, 0xfc, 0x08, 0xb9, 0x31, 0xe9, 0xca, 0x27, 0xbb, 0x5e, 0xbc, 0x6d, 0x98, 0x38,
	0x49, 0x2f, 0xfc, 0xa8, 0xc2, 0xdd, 0xce, 0x3c, 0xda, 0xd8, 0xaf, 0xef, 0xde, 0xb1, 0x5f, 0xe5,
	0x5d, 0xd7, 0xac, 0x95, 0x59, 0xad, 0x55, 0x35, 0xf5, 0x87, 0xa1, 0xfe, 0x97, 0x55, 0xf3, 0x41,
	0x0f, 0xaf, 0xf8, 0x20, 0xd7, 0xfa, 0x77, 0x3c, 0x57, 0x0e, 0x31, 0x5d, 0xa9, 0xec, 0xe4, 0x7a,
	0x2b, 0x97, 0xf8, 0x28, 0xdc, 0xea, 0x3f, 0xfb, 0x47, 0x81, 0xec, 0x5b, 0xfd, 0xa3, 0x53, 0x38,
	0x31, 0x06, 0x66, 0xa3, 0x43, 0xac, 0x76, 0xad, 0x63, 0x36, 0xda, 0x75, 0x32, 0x68, 0xf7, 0xba,
	0x56, 0xad, 0x71, 0xde, 0xb0, 0x4c, 0xed, 0x7f, 0xe8, 0x21, 0xe8, 0x1b, 0x78, 0xb3, 0xd1, 0xb6,
	0x0c, 0x4c, 0x3e, 0xfd, 0x4c, 0x53, 0xd0, 0x03, 0xb8, 0xb7, 0x81, 0x9e, 0x37, 0x8d, 0x9a, 0xa6,
	0x22, 0x1d, 0xee, 0x6f, 0x00, 0xad, 0x41, 0xd3, 0xb8, 0xd4, 0x52, 0xe8, 0x08, 0xd0, 0x06, 0x62,
	0xb4, 0xb0, 0xb6, 0x87, 0x8e, 0xe1, 0xff, 0x37, 0xfd, 0xe4, 0xb2, 0xaa, 0xed, 0xa3, 0x0f, 0xe0,
	0xc1, 0x06, 0xd4, 0xa9, 0xd7, 0x49, 0xa7, 0x3b, 0xe8, 0x69, 0xb7, 0xd0, 0x0b, 0x78, 0xbe, 0x01,
	0xf6, 0xba, 0x96, 0xf5, 0x35, 0xb9, 0x6c, 0xf4, 0x2f, 0xc8, 0x85, 0x65, 0x98, 0x16, 0x26, 0xd5,
	0x6f, 0xfa, 0x96, 0x76, 0xfb, 0x6c, 0x01, 0xe8, 0xe6, 0x16, 0xa3, 0x67, 0x90, 0x8f, 0x33, 0x6a,
	0x17, 0xa4, 0xd5, 0x31, 0xad, 0x26, 0x19, 0x1a, 0xb8, 0x61, 0xb4, 0xfb, 0x1b, 0x73, 0x38, 0x02,
	0x34, 0xe8, 0x59, 0xa4, 0x6a, 0xf5, 0xfa, 0xc4, 0x18, 0x1a, 0x8d, 0xa6, 0x51, 0x6d, 0x5a, 0x9a,
	0x82, 0x34, 0xc8, 0xc4, 0xfe, 0x5e, 0xdf, 0x68, 0x9b, 0x06, 0x36, 0x35, 0x75, 0xe5, 0xb1, 0xda,
	0x17, 0x46, 0xbb, 0x66, 0x99, 0x5a, 0xea, 0xec, 0x27, 0x05, 0x0e, 0x37, 0x96, 0x12, 0x3d, 0x81,
	0x47, 0xbd, 0x5e, 0xab, 0x49, 0x86, 0x9d, 0x46, 0xcd, 0x22, 0x75, 0xab, 0x1d, 0xf7, 0xf9, 0xf6,
	0x95, 0x27, 0x70, 0x74, 0x33, 0xa4, 0x65, 0xc8, 0x6b, 0x1f, 0x82, 0x7e, 0x13, 0x3b, 0xb7, 0x24,
	0xaa, 0xa2, 0x47, 0x70, 0x7c, 0x13, 0x6d, 0x5b, 0x83, 0x3e, 0x36, 0x9a, 0x5a, 0xea, 0xec, 0x57,
	0x05, 0xee, 0x6d, 0xd1, 0x32, 0x7a, 0x0e, 0x4f, 0x3a, 0x83, 0x7e, 0x77, 0xd0, 0x27, 0x3b, 0x25,
	0xf1, 0x14, 0x1e, 0x6f, 0x0f, 0x5b, 0x57, 0xc6, 0x23, 0x38, 0xde, 0x1e, 0xd4, 0xea, 0xbe, 0xd4,
	0x54, 0x54, 0x80, 0xd3, 0xed, 0xf0, 0x15, 0xb3, 0xa9, 0xea, 0xcf, 0x0a, 0x9c, 0x8e, 0xc5, 0x74,
	0x87, 0xfc, 0xab, 0xda, 0xda, 0x4e, 0x77, 0xe3, 0x8f, 0x70, 0x57, 0xf9, 0xd6, 0x5c, 0xc6, 0xbb,
	0x22, 0xfe, 0x5c, 0x95, 0x44, 0xe0, 0x96, 0x5d, 0xc6, 0xe5, 0x27, 0xba, 0x9c, 0x40, 0x74, 0xe6,
	0x85, 0xdb, 0xfe, 0x99, 0xbe, 0xb8, 0xb6, 0xfe, 0x56, 0x94, 0x5f, 0x54, 0xd5, 0x3c, 0xff, 0x4d,
	0x3d, 0xa9, 0x27, 0xe5, 0x6a, 0xf2, 0x7a, 0xf3, 0xfa, 0xfa, 0x61, 0x65, 0x74, 0x4b, 0x56, 0x7d,
	0xf9, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0xfa, 0xbf, 0x1c, 0x88, 0x09, 0x00, 0x00,
}