1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google/cloud/speech/v1p1beta1/cloud_speech.proto
3
4package speech // import "google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1"
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9import _ "github.com/golang/protobuf/ptypes/any"
10import duration "github.com/golang/protobuf/ptypes/duration"
11import timestamp "github.com/golang/protobuf/ptypes/timestamp"
12import _ "google.golang.org/genproto/googleapis/api/annotations"
13import longrunning "google.golang.org/genproto/googleapis/longrunning"
14import status "google.golang.org/genproto/googleapis/rpc/status"
15
16import (
17	context "golang.org/x/net/context"
18	grpc "google.golang.org/grpc"
19)
20
// Reference imports to suppress "imported and not used" errors when the
// generated code below happens not to reference one of these packages.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
31
32// The encoding of the audio data sent in the request.
33//
34// All encodings support only 1 channel (mono) audio.
35//
36// For best results, the audio source should be captured and transmitted using
37// a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
38// recognition can be reduced if lossy codecs are used to capture or transmit
39// audio, particularly if background noise is present. Lossy codecs include
40// `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, and `SPEEX_WITH_HEADER_BYTE`.
41//
42// The `FLAC` and `WAV` audio file formats include a header that describes the
43// included audio content. You can request recognition for `WAV` files that
44// contain either `LINEAR16` or `MULAW` encoded audio.
45// If you send `FLAC` or `WAV` audio file format in
46// your request, you do not need to specify an `AudioEncoding`; the audio
47// encoding format is determined from the file header. If you specify
// an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
49// encoding configuration must match the encoding described in the audio
50// header; otherwise the request returns an
51// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error code.
type RecognitionConfig_AudioEncoding int32

const (
	// Not specified.
	RecognitionConfig_ENCODING_UNSPECIFIED RecognitionConfig_AudioEncoding = 0
	// Uncompressed 16-bit signed little-endian samples (Linear PCM).
	RecognitionConfig_LINEAR16 RecognitionConfig_AudioEncoding = 1
	// `FLAC` (Free Lossless Audio
	// Codec) is the recommended encoding because it is
	// lossless--therefore recognition is not compromised--and
	// requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
	// encoding supports 16-bit and 24-bit samples, however, not all fields in
	// `STREAMINFO` are supported.
	RecognitionConfig_FLAC RecognitionConfig_AudioEncoding = 2
	// 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
	RecognitionConfig_MULAW RecognitionConfig_AudioEncoding = 3
	// Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
	RecognitionConfig_AMR RecognitionConfig_AudioEncoding = 4
	// Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
	RecognitionConfig_AMR_WB RecognitionConfig_AudioEncoding = 5
	// Opus encoded audio frames in Ogg container
	// ([OggOpus](https://wiki.xiph.org/OggOpus)).
	// `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
	RecognitionConfig_OGG_OPUS RecognitionConfig_AudioEncoding = 6
	// Although the use of lossy encodings is not recommended, if a very low
	// bitrate encoding is required, `OGG_OPUS` is highly preferred over
	// Speex encoding. The [Speex](https://speex.org/) encoding supported by
	// Cloud Speech API has a header byte in each block, as in MIME type
	// `audio/x-speex-with-header-byte`.
	// It is a variant of the RTP Speex encoding defined in
	// [RFC 5574](https://tools.ietf.org/html/rfc5574).
	// The stream is a sequence of blocks, one block per RTP packet. Each block
	// starts with a byte containing the length of the block, in bytes, followed
	// by one or more frames of Speex data, padded to an integral number of
	// bytes (octets) as specified in RFC 5574. In other words, each RTP header
	// is replaced with a single byte containing the block length. Only Speex
	// wideband is supported. `sample_rate_hertz` must be 16000.
	RecognitionConfig_SPEEX_WITH_HEADER_BYTE RecognitionConfig_AudioEncoding = 7
)

// RecognitionConfig_AudioEncoding_name maps each enum value to its proto
// identifier.
var RecognitionConfig_AudioEncoding_name = map[int32]string{
	0: "ENCODING_UNSPECIFIED",
	1: "LINEAR16",
	2: "FLAC",
	3: "MULAW",
	4: "AMR",
	5: "AMR_WB",
	6: "OGG_OPUS",
	7: "SPEEX_WITH_HEADER_BYTE",
}

// RecognitionConfig_AudioEncoding_value is the inverse of the _name map,
// mapping proto identifiers back to their numeric values.
var RecognitionConfig_AudioEncoding_value = map[string]int32{
	"ENCODING_UNSPECIFIED":   0,
	"LINEAR16":               1,
	"FLAC":                   2,
	"MULAW":                  3,
	"AMR":                    4,
	"AMR_WB":                 5,
	"OGG_OPUS":               6,
	"SPEEX_WITH_HEADER_BYTE": 7,
}

// String returns the proto identifier of x (e.g. "FLAC"), resolved through
// the _name map by proto.EnumName.
func (x RecognitionConfig_AudioEncoding) String() string {
	return proto.EnumName(RecognitionConfig_AudioEncoding_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor of this proto file
// and the index path ({4, 0}) locating this enum within it.
func (RecognitionConfig_AudioEncoding) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{4, 0}
}
119
120// Use case categories that the audio recognition request can be described
121// by.
type RecognitionMetadata_InteractionType int32

const (
	// Use case is either unknown or is something other than one of the other
	// values below.
	RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED RecognitionMetadata_InteractionType = 0
	// Multiple people in a conversation or discussion. For example in a
	// meeting with two or more people actively participating. Typically
	// all the primary people speaking would be in the same room (if not,
	// see PHONE_CALL)
	RecognitionMetadata_DISCUSSION RecognitionMetadata_InteractionType = 1
	// One or more persons lecturing or presenting to others, mostly
	// uninterrupted.
	RecognitionMetadata_PRESENTATION RecognitionMetadata_InteractionType = 2
	// A phone-call or video-conference in which two or more people, who are
	// not in the same room, are actively participating.
	RecognitionMetadata_PHONE_CALL RecognitionMetadata_InteractionType = 3
	// A recorded message intended for another person to listen to.
	RecognitionMetadata_VOICEMAIL RecognitionMetadata_InteractionType = 4
	// Professionally produced audio (e.g. TV show, podcast).
	RecognitionMetadata_PROFESSIONALLY_PRODUCED RecognitionMetadata_InteractionType = 5
	// Transcribe spoken questions and queries into text.
	RecognitionMetadata_VOICE_SEARCH RecognitionMetadata_InteractionType = 6
	// Transcribe voice commands, such as for controlling a device.
	RecognitionMetadata_VOICE_COMMAND RecognitionMetadata_InteractionType = 7
	// Transcribe speech to text to create a written document, such as a
	// text-message, email or report.
	RecognitionMetadata_DICTATION RecognitionMetadata_InteractionType = 8
)

// RecognitionMetadata_InteractionType_name maps each enum value to its proto
// identifier.
var RecognitionMetadata_InteractionType_name = map[int32]string{
	0: "INTERACTION_TYPE_UNSPECIFIED",
	1: "DISCUSSION",
	2: "PRESENTATION",
	3: "PHONE_CALL",
	4: "VOICEMAIL",
	5: "PROFESSIONALLY_PRODUCED",
	6: "VOICE_SEARCH",
	7: "VOICE_COMMAND",
	8: "DICTATION",
}

// RecognitionMetadata_InteractionType_value is the inverse of the _name map,
// mapping proto identifiers back to their numeric values.
var RecognitionMetadata_InteractionType_value = map[string]int32{
	"INTERACTION_TYPE_UNSPECIFIED": 0,
	"DISCUSSION":                   1,
	"PRESENTATION":                 2,
	"PHONE_CALL":                   3,
	"VOICEMAIL":                    4,
	"PROFESSIONALLY_PRODUCED":      5,
	"VOICE_SEARCH":                 6,
	"VOICE_COMMAND":                7,
	"DICTATION":                    8,
}

// String returns the proto identifier of x, resolved through the _name map
// by proto.EnumName.
func (x RecognitionMetadata_InteractionType) String() string {
	return proto.EnumName(RecognitionMetadata_InteractionType_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor of this proto file
// and the index path ({5, 0}) locating this enum within it.
func (RecognitionMetadata_InteractionType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{5, 0}
}
181
182// Enumerates the types of capture settings describing an audio file.
type RecognitionMetadata_MicrophoneDistance int32

const (
	// Audio type is not known.
	RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED RecognitionMetadata_MicrophoneDistance = 0
	// The audio was captured from a closely placed microphone. E.g. phone,
	// dictaphone, or handheld microphone. Generally if the speaker is within
	// 1 meter of the microphone.
	RecognitionMetadata_NEARFIELD RecognitionMetadata_MicrophoneDistance = 1
	// The speaker is within 3 meters of the microphone.
	RecognitionMetadata_MIDFIELD RecognitionMetadata_MicrophoneDistance = 2
	// The speaker is more than 3 meters away from the microphone.
	RecognitionMetadata_FARFIELD RecognitionMetadata_MicrophoneDistance = 3
)

// RecognitionMetadata_MicrophoneDistance_name maps each enum value to its
// proto identifier.
var RecognitionMetadata_MicrophoneDistance_name = map[int32]string{
	0: "MICROPHONE_DISTANCE_UNSPECIFIED",
	1: "NEARFIELD",
	2: "MIDFIELD",
	3: "FARFIELD",
}

// RecognitionMetadata_MicrophoneDistance_value is the inverse of the _name
// map, mapping proto identifiers back to their numeric values.
var RecognitionMetadata_MicrophoneDistance_value = map[string]int32{
	"MICROPHONE_DISTANCE_UNSPECIFIED": 0,
	"NEARFIELD":                       1,
	"MIDFIELD":                        2,
	"FARFIELD":                        3,
}

// String returns the proto identifier of x, resolved through the _name map
// by proto.EnumName.
func (x RecognitionMetadata_MicrophoneDistance) String() string {
	return proto.EnumName(RecognitionMetadata_MicrophoneDistance_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor of this proto file
// and the index path ({5, 1}) locating this enum within it.
func (RecognitionMetadata_MicrophoneDistance) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{5, 1}
}
217
218// The original media the speech was recorded on.
type RecognitionMetadata_OriginalMediaType int32

const (
	// Unknown original media type.
	RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED RecognitionMetadata_OriginalMediaType = 0
	// The speech data is an audio recording.
	RecognitionMetadata_AUDIO RecognitionMetadata_OriginalMediaType = 1
	// The speech data originally recorded on a video.
	RecognitionMetadata_VIDEO RecognitionMetadata_OriginalMediaType = 2
)

// RecognitionMetadata_OriginalMediaType_name maps each enum value to its
// proto identifier.
var RecognitionMetadata_OriginalMediaType_name = map[int32]string{
	0: "ORIGINAL_MEDIA_TYPE_UNSPECIFIED",
	1: "AUDIO",
	2: "VIDEO",
}

// RecognitionMetadata_OriginalMediaType_value is the inverse of the _name
// map, mapping proto identifiers back to their numeric values.
var RecognitionMetadata_OriginalMediaType_value = map[string]int32{
	"ORIGINAL_MEDIA_TYPE_UNSPECIFIED": 0,
	"AUDIO": 1,
	"VIDEO": 2,
}

// String returns the proto identifier of x, resolved through the _name map
// by proto.EnumName.
func (x RecognitionMetadata_OriginalMediaType) String() string {
	return proto.EnumName(RecognitionMetadata_OriginalMediaType_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor of this proto file
// and the index path ({5, 2}) locating this enum within it.
func (RecognitionMetadata_OriginalMediaType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{5, 2}
}
247
248// The type of device the speech was recorded with.
type RecognitionMetadata_RecordingDeviceType int32

const (
	// The recording device is unknown.
	RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED RecognitionMetadata_RecordingDeviceType = 0
	// Speech was recorded on a smartphone.
	RecognitionMetadata_SMARTPHONE RecognitionMetadata_RecordingDeviceType = 1
	// Speech was recorded using a personal computer or tablet.
	RecognitionMetadata_PC RecognitionMetadata_RecordingDeviceType = 2
	// Speech was recorded over a phone line.
	RecognitionMetadata_PHONE_LINE RecognitionMetadata_RecordingDeviceType = 3
	// Speech was recorded in a vehicle.
	RecognitionMetadata_VEHICLE RecognitionMetadata_RecordingDeviceType = 4
	// Speech was recorded outdoors.
	RecognitionMetadata_OTHER_OUTDOOR_DEVICE RecognitionMetadata_RecordingDeviceType = 5
	// Speech was recorded indoors.
	RecognitionMetadata_OTHER_INDOOR_DEVICE RecognitionMetadata_RecordingDeviceType = 6
)

// RecognitionMetadata_RecordingDeviceType_name maps each enum value to its
// proto identifier.
var RecognitionMetadata_RecordingDeviceType_name = map[int32]string{
	0: "RECORDING_DEVICE_TYPE_UNSPECIFIED",
	1: "SMARTPHONE",
	2: "PC",
	3: "PHONE_LINE",
	4: "VEHICLE",
	5: "OTHER_OUTDOOR_DEVICE",
	6: "OTHER_INDOOR_DEVICE",
}

// RecognitionMetadata_RecordingDeviceType_value is the inverse of the _name
// map, mapping proto identifiers back to their numeric values.
var RecognitionMetadata_RecordingDeviceType_value = map[string]int32{
	"RECORDING_DEVICE_TYPE_UNSPECIFIED": 0,
	"SMARTPHONE":                        1,
	"PC":                                2,
	"PHONE_LINE":                        3,
	"VEHICLE":                           4,
	"OTHER_OUTDOOR_DEVICE":              5,
	"OTHER_INDOOR_DEVICE":               6,
}

// String returns the proto identifier of x, resolved through the _name map
// by proto.EnumName.
func (x RecognitionMetadata_RecordingDeviceType) String() string {
	return proto.EnumName(RecognitionMetadata_RecordingDeviceType_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor of this proto file
// and the index path ({5, 3}) locating this enum within it.
func (RecognitionMetadata_RecordingDeviceType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{5, 3}
}
293
294// Indicates the type of speech event.
type StreamingRecognizeResponse_SpeechEventType int32

const (
	// No speech event specified.
	StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED StreamingRecognizeResponse_SpeechEventType = 0
	// This event indicates that the server has detected the end of the user's
	// speech utterance and expects no additional speech. Therefore, the server
	// will not process additional audio (although it may subsequently return
	// additional results). The client should stop sending additional audio
	// data, half-close the gRPC connection, and wait for any additional results
	// until the server closes the gRPC connection. This event is only sent if
	// `single_utterance` was set to `true`, and is not used otherwise.
	StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE StreamingRecognizeResponse_SpeechEventType = 1
)

// StreamingRecognizeResponse_SpeechEventType_name maps each enum value to
// its proto identifier.
var StreamingRecognizeResponse_SpeechEventType_name = map[int32]string{
	0: "SPEECH_EVENT_UNSPECIFIED",
	1: "END_OF_SINGLE_UTTERANCE",
}

// StreamingRecognizeResponse_SpeechEventType_value is the inverse of the
// _name map, mapping proto identifiers back to their numeric values.
var StreamingRecognizeResponse_SpeechEventType_value = map[string]int32{
	"SPEECH_EVENT_UNSPECIFIED": 0,
	"END_OF_SINGLE_UTTERANCE":  1,
}

// String returns the proto identifier of x, resolved through the _name map
// by proto.EnumName.
func (x StreamingRecognizeResponse_SpeechEventType) String() string {
	return proto.EnumName(StreamingRecognizeResponse_SpeechEventType_name, int32(x))
}

// EnumDescriptor returns the serialized file descriptor of this proto file
// and the index path ({11, 0}) locating this enum within it.
func (StreamingRecognizeResponse_SpeechEventType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{11, 0}
}
325
326// The top-level message sent by the client for the `Recognize` method.
type RecognizeRequest struct {
	// *Required* Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// *Required* The audio data to be recognized.
	Audio                *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
	// Internal bookkeeping fields used by the proto runtime; not part of the
	// message's API surface.
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

// Reset clears the message to its zero state.
func (m *RecognizeRequest) Reset()         { *m = RecognizeRequest{} }

// String renders the message in the proto compact text format.
func (m *RecognizeRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags *RecognizeRequest as a protobuf message.
func (*RecognizeRequest) ProtoMessage()    {}

// Descriptor returns the serialized file descriptor of this proto file and
// the index ({0}) of this message within it.
func (*RecognizeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{0}
}

// XXX_Unmarshal decodes b into m via the table-driven proto runtime.
func (m *RecognizeRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognizeRequest.Unmarshal(m, b)
}

// XXX_Marshal appends the wire encoding of m to b.
func (m *RecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognizeRequest.Marshal(b, m, deterministic)
}

// XXX_Merge merges the fields of src into dst.
func (dst *RecognizeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognizeRequest.Merge(dst, src)
}

// XXX_Size returns the wire-encoded size of m in bytes.
func (m *RecognizeRequest) XXX_Size() int {
	return xxx_messageInfo_RecognizeRequest.Size(m)
}

// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *RecognizeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognizeRequest.DiscardUnknown(m)
}

// xxx_messageInfo_RecognizeRequest holds the proto runtime state used by the
// XXX_* methods above.
var xxx_messageInfo_RecognizeRequest proto.InternalMessageInfo
361
362func (m *RecognizeRequest) GetConfig() *RecognitionConfig {
363	if m != nil {
364		return m.Config
365	}
366	return nil
367}
368
369func (m *RecognizeRequest) GetAudio() *RecognitionAudio {
370	if m != nil {
371		return m.Audio
372	}
373	return nil
374}
375
376// The top-level message sent by the client for the `LongRunningRecognize`
377// method.
type LongRunningRecognizeRequest struct {
	// *Required* Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// *Required* The audio data to be recognized.
	Audio                *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
	// Internal bookkeeping fields used by the proto runtime; not part of the
	// message's API surface.
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

// Reset clears the message to its zero state.
func (m *LongRunningRecognizeRequest) Reset()         { *m = LongRunningRecognizeRequest{} }

// String renders the message in the proto compact text format.
func (m *LongRunningRecognizeRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags *LongRunningRecognizeRequest as a protobuf message.
func (*LongRunningRecognizeRequest) ProtoMessage()    {}

// Descriptor returns the serialized file descriptor of this proto file and
// the index ({1}) of this message within it.
func (*LongRunningRecognizeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{1}
}

// XXX_Unmarshal decodes b into m via the table-driven proto runtime.
func (m *LongRunningRecognizeRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LongRunningRecognizeRequest.Unmarshal(m, b)
}

// XXX_Marshal appends the wire encoding of m to b.
func (m *LongRunningRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LongRunningRecognizeRequest.Marshal(b, m, deterministic)
}

// XXX_Merge merges the fields of src into dst.
func (dst *LongRunningRecognizeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LongRunningRecognizeRequest.Merge(dst, src)
}

// XXX_Size returns the wire-encoded size of m in bytes.
func (m *LongRunningRecognizeRequest) XXX_Size() int {
	return xxx_messageInfo_LongRunningRecognizeRequest.Size(m)
}

// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *LongRunningRecognizeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_LongRunningRecognizeRequest.DiscardUnknown(m)
}

// xxx_messageInfo_LongRunningRecognizeRequest holds the proto runtime state
// used by the XXX_* methods above.
var xxx_messageInfo_LongRunningRecognizeRequest proto.InternalMessageInfo
412
413func (m *LongRunningRecognizeRequest) GetConfig() *RecognitionConfig {
414	if m != nil {
415		return m.Config
416	}
417	return nil
418}
419
420func (m *LongRunningRecognizeRequest) GetAudio() *RecognitionAudio {
421	if m != nil {
422		return m.Audio
423	}
424	return nil
425}
426
427// The top-level message sent by the client for the `StreamingRecognize` method.
428// Multiple `StreamingRecognizeRequest` messages are sent. The first message
429// must contain a `streaming_config` message and must not contain `audio` data.
430// All subsequent messages must contain `audio` data and must not contain a
431// `streaming_config` message.
type StreamingRecognizeRequest struct {
	// The streaming request, which is either a streaming config or audio content.
	//
	// Types that are valid to be assigned to StreamingRequest:
	//	*StreamingRecognizeRequest_StreamingConfig
	//	*StreamingRecognizeRequest_AudioContent
	StreamingRequest     isStreamingRecognizeRequest_StreamingRequest `protobuf_oneof:"streaming_request"`
	// Internal bookkeeping fields used by the proto runtime; not part of the
	// message's API surface.
	XXX_NoUnkeyedLiteral struct{}                                     `json:"-"`
	XXX_unrecognized     []byte                                       `json:"-"`
	XXX_sizecache        int32                                        `json:"-"`
}

// Reset clears the message to its zero state.
func (m *StreamingRecognizeRequest) Reset()         { *m = StreamingRecognizeRequest{} }

// String renders the message in the proto compact text format.
func (m *StreamingRecognizeRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags *StreamingRecognizeRequest as a protobuf message.
func (*StreamingRecognizeRequest) ProtoMessage()    {}

// Descriptor returns the serialized file descriptor of this proto file and
// the index ({2}) of this message within it.
func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{2}
}

// XXX_Unmarshal decodes b into m via the table-driven proto runtime.
func (m *StreamingRecognizeRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognizeRequest.Unmarshal(m, b)
}

// XXX_Marshal appends the wire encoding of m to b.
func (m *StreamingRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognizeRequest.Marshal(b, m, deterministic)
}

// XXX_Merge merges the fields of src into dst.
func (dst *StreamingRecognizeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognizeRequest.Merge(dst, src)
}

// XXX_Size returns the wire-encoded size of m in bytes.
func (m *StreamingRecognizeRequest) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognizeRequest.Size(m)
}

// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *StreamingRecognizeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognizeRequest.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognizeRequest holds the proto runtime state
// used by the XXX_* methods above.
var xxx_messageInfo_StreamingRecognizeRequest proto.InternalMessageInfo

// isStreamingRecognizeRequest_StreamingRequest is implemented by every
// wrapper type that can populate the streaming_request oneof.
type isStreamingRecognizeRequest_StreamingRequest interface {
	isStreamingRecognizeRequest_StreamingRequest()
}

// StreamingRecognizeRequest_StreamingConfig wraps the streaming_config
// variant (field 1) of the streaming_request oneof.
type StreamingRecognizeRequest_StreamingConfig struct {
	StreamingConfig *StreamingRecognitionConfig `protobuf:"bytes,1,opt,name=streaming_config,json=streamingConfig,proto3,oneof"`
}

// StreamingRecognizeRequest_AudioContent wraps the audio_content variant
// (field 2) of the streaming_request oneof.
type StreamingRecognizeRequest_AudioContent struct {
	AudioContent []byte `protobuf:"bytes,2,opt,name=audio_content,json=audioContent,proto3,oneof"`
}

func (*StreamingRecognizeRequest_StreamingConfig) isStreamingRecognizeRequest_StreamingRequest() {}

func (*StreamingRecognizeRequest_AudioContent) isStreamingRecognizeRequest_StreamingRequest() {}

// GetStreamingRequest returns whichever oneof wrapper is currently set, or
// nil when the receiver is nil or the oneof is unset.
func (m *StreamingRecognizeRequest) GetStreamingRequest() isStreamingRecognizeRequest_StreamingRequest {
	if m != nil {
		return m.StreamingRequest
	}
	return nil
}
490
491func (m *StreamingRecognizeRequest) GetStreamingConfig() *StreamingRecognitionConfig {
492	if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_StreamingConfig); ok {
493		return x.StreamingConfig
494	}
495	return nil
496}
497
498func (m *StreamingRecognizeRequest) GetAudioContent() []byte {
499	if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_AudioContent); ok {
500		return x.AudioContent
501	}
502	return nil
503}
504
// XXX_OneofFuncs is for the internal use of the proto package. It returns
// the marshal, unmarshal, and size hooks for the streaming_request oneof,
// plus the wrapper types that can populate it.
func (*StreamingRecognizeRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _StreamingRecognizeRequest_OneofMarshaler, _StreamingRecognizeRequest_OneofUnmarshaler, _StreamingRecognizeRequest_OneofSizer, []interface{}{
		(*StreamingRecognizeRequest_StreamingConfig)(nil),
		(*StreamingRecognizeRequest_AudioContent)(nil),
	}
}
512
// _StreamingRecognizeRequest_OneofMarshaler writes whichever variant of the
// streaming_request oneof is set into b, preceded by its field key. A nil
// oneof emits nothing; an unknown wrapper type is an error.
func _StreamingRecognizeRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*StreamingRecognizeRequest)
	// streaming_request
	switch x := m.StreamingRequest.(type) {
	case *StreamingRecognizeRequest_StreamingConfig:
		// Field 1, length-delimited: key = field_number<<3 | wire_type.
		b.EncodeVarint(1<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.StreamingConfig); err != nil {
			return err
		}
	case *StreamingRecognizeRequest_AudioContent:
		// Field 2, length-delimited raw bytes.
		b.EncodeVarint(2<<3 | proto.WireBytes)
		b.EncodeRawBytes(x.AudioContent)
	case nil:
		// Oneof not set; nothing to encode.
	default:
		return fmt.Errorf("StreamingRecognizeRequest.StreamingRequest has unexpected type %T", x)
	}
	return nil
}
531
// _StreamingRecognizeRequest_OneofUnmarshaler decodes a single field of the
// streaming_request oneof from b into msg. It reports true when the tag
// belongs to this oneof (even on a wire-type mismatch, which returns
// ErrInternalBadWireType) and false when the tag is not handled here.
func _StreamingRecognizeRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*StreamingRecognizeRequest)
	switch tag {
	case 1: // streaming_request.streaming_config
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(StreamingRecognitionConfig)
		err := b.DecodeMessage(msg)
		m.StreamingRequest = &StreamingRecognizeRequest_StreamingConfig{msg}
		return true, err
	case 2: // streaming_request.audio_content
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeRawBytes(true)
		m.StreamingRequest = &StreamingRecognizeRequest_AudioContent{x}
		return true, err
	default:
		return false, nil
	}
}
554
// _StreamingRecognizeRequest_OneofSizer returns the encoded size in bytes of
// whichever variant of the streaming_request oneof is set (key + length
// prefix + payload); an unset oneof contributes zero bytes.
func _StreamingRecognizeRequest_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*StreamingRecognizeRequest)
	// streaming_request
	switch x := m.StreamingRequest.(type) {
	case *StreamingRecognizeRequest_StreamingConfig:
		s := proto.Size(x.StreamingConfig)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case *StreamingRecognizeRequest_AudioContent:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.AudioContent)))
		n += len(x.AudioContent)
	case nil:
		// Oneof not set; contributes no bytes.
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
574
575// Provides information to the recognizer that specifies how to process the
576// request.
type StreamingRecognitionConfig struct {
	// *Required* Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// *Optional* If `false` or omitted, the recognizer will perform continuous
	// recognition (continuing to wait for and process audio even if the user
	// pauses speaking) until the client closes the input stream (gRPC API) or
	// until the maximum time limit has been reached. May return multiple
	// `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
	//
	// If `true`, the recognizer will detect a single spoken utterance. When it
	// detects that the user has paused or stopped speaking, it will return an
	// `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
	// more than one `StreamingRecognitionResult` with the `is_final` flag set to
	// `true`.
	SingleUtterance bool `protobuf:"varint,2,opt,name=single_utterance,json=singleUtterance,proto3" json:"single_utterance,omitempty"`
	// *Optional* If `true`, interim results (tentative hypotheses) may be
	// returned as they become available (these interim results are indicated with
	// the `is_final=false` flag).
	// If `false` or omitted, only `is_final=true` result(s) are returned.
	InterimResults       bool     `protobuf:"varint,3,opt,name=interim_results,json=interimResults,proto3" json:"interim_results,omitempty"`
	// Internal bookkeeping fields used by the proto runtime; not part of the
	// message's API surface.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset clears the message to its zero state.
func (m *StreamingRecognitionConfig) Reset()         { *m = StreamingRecognitionConfig{} }

// String renders the message in the proto compact text format.
func (m *StreamingRecognitionConfig) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags *StreamingRecognitionConfig as a protobuf message.
func (*StreamingRecognitionConfig) ProtoMessage()    {}

// Descriptor returns the serialized file descriptor of this proto file and
// the index ({3}) of this message within it.
func (*StreamingRecognitionConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{3}
}

// XXX_Unmarshal decodes b into m via the table-driven proto runtime.
func (m *StreamingRecognitionConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognitionConfig.Unmarshal(m, b)
}

// XXX_Marshal appends the wire encoding of m to b.
func (m *StreamingRecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognitionConfig.Marshal(b, m, deterministic)
}

// XXX_Merge merges the fields of src into dst.
func (dst *StreamingRecognitionConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognitionConfig.Merge(dst, src)
}

// XXX_Size returns the wire-encoded size of m in bytes.
func (m *StreamingRecognitionConfig) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognitionConfig.Size(m)
}

// XXX_DiscardUnknown drops any unrecognized fields retained on m.
func (m *StreamingRecognitionConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognitionConfig.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognitionConfig holds the proto runtime state
// used by the XXX_* methods above.
var xxx_messageInfo_StreamingRecognitionConfig proto.InternalMessageInfo
626
627func (m *StreamingRecognitionConfig) GetConfig() *RecognitionConfig {
628	if m != nil {
629		return m.Config
630	}
631	return nil
632}
633
634func (m *StreamingRecognitionConfig) GetSingleUtterance() bool {
635	if m != nil {
636		return m.SingleUtterance
637	}
638	return false
639}
640
641func (m *StreamingRecognitionConfig) GetInterimResults() bool {
642	if m != nil {
643		return m.InterimResults
644	}
645	return false
646}
647
648// Provides information to the recognizer that specifies how to process the
649// request.
650type RecognitionConfig struct {
651	// Encoding of audio data sent in all `RecognitionAudio` messages.
652	// This field is optional for `FLAC` and `WAV` audio files and required
653	// for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
654	Encoding RecognitionConfig_AudioEncoding `protobuf:"varint,1,opt,name=encoding,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionConfig_AudioEncoding" json:"encoding,omitempty"`
655	// Sample rate in Hertz of the audio data sent in all
656	// `RecognitionAudio` messages. Valid values are: 8000-48000.
657	// 16000 is optimal. For best results, set the sampling rate of the audio
658	// source to 16000 Hz. If that's not possible, use the native sample rate of
659	// the audio source (instead of re-sampling).
660	// This field is optional for `FLAC` and `WAV` audio files and required
661	// for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
662	SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
663	// *Optional* The number of channels in the input audio data.
664	// ONLY set this for MULTI-CHANNEL recognition.
665	// Valid values for LINEAR16 and FLAC are `1`-`8`.
666	// Valid values for OGG_OPUS are '1'-'254'.
667	// Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
668	// If `0` or omitted, defaults to one channel (mono).
669	// NOTE: We only recognize the first channel by default.
670	// To perform independent recognition on each channel set
671	// enable_separate_recognition_per_channel to 'true'.
672	AudioChannelCount int32 `protobuf:"varint,7,opt,name=audio_channel_count,json=audioChannelCount,proto3" json:"audio_channel_count,omitempty"`
673	// This needs to be set to ‘true’ explicitly and audio_channel_count > 1
674	// to get each channel recognized separately. The recognition result will
675	// contain a channel_tag field to state which channel that result belongs to.
676	// If this is not ‘true’, we will only recognize the first channel.
677	// NOTE: The request is also billed cumulatively for all channels recognized:
678	//     (audio_channel_count times the audio length)
679	EnableSeparateRecognitionPerChannel bool `protobuf:"varint,12,opt,name=enable_separate_recognition_per_channel,json=enableSeparateRecognitionPerChannel,proto3" json:"enable_separate_recognition_per_channel,omitempty"`
680	// *Required* The language of the supplied audio as a
681	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
682	// Example: "en-US".
683	// See [Language Support](https://cloud.google.com/speech/docs/languages)
684	// for a list of the currently supported language codes.
685	LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
686	// *Optional* A list of up to 3 additional
687	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
688	// listing possible alternative languages of the supplied audio.
689	// See [Language Support](https://cloud.google.com/speech/docs/languages)
690	// for a list of the currently supported language codes.
691	// If alternative languages are listed, recognition result will contain
692	// recognition in the most likely language detected including the main
693	// language_code. The recognition result will include the language tag
694	// of the language detected in the audio.
695	// NOTE: This feature is only supported for Voice Command and Voice Search
696	// use cases and performance may vary for other use cases (e.g., phone call
697	// transcription).
698	AlternativeLanguageCodes []string `protobuf:"bytes,18,rep,name=alternative_language_codes,json=alternativeLanguageCodes,proto3" json:"alternative_language_codes,omitempty"`
699	// *Optional* Maximum number of recognition hypotheses to be returned.
700	// Specifically, the maximum number of `SpeechRecognitionAlternative` messages
701	// within each `SpeechRecognitionResult`.
702	// The server may return fewer than `max_alternatives`.
703	// Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
704	// one. If omitted, will return a maximum of one.
705	MaxAlternatives int32 `protobuf:"varint,4,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
706	// *Optional* If set to `true`, the server will attempt to filter out
707	// profanities, replacing all but the initial character in each filtered word
708	// with asterisks, e.g. "f***". If set to `false` or omitted, profanities
709	// won't be filtered out.
710	ProfanityFilter bool `protobuf:"varint,5,opt,name=profanity_filter,json=profanityFilter,proto3" json:"profanity_filter,omitempty"`
711	// *Optional* A means to provide context to assist the speech recognition.
712	SpeechContexts []*SpeechContext `protobuf:"bytes,6,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"`
713	// *Optional* If `true`, the top result includes a list of words and
714	// the start and end time offsets (timestamps) for those words. If
715	// `false`, no word-level time offset information is returned. The default is
716	// `false`.
717	EnableWordTimeOffsets bool `protobuf:"varint,8,opt,name=enable_word_time_offsets,json=enableWordTimeOffsets,proto3" json:"enable_word_time_offsets,omitempty"`
718	// *Optional* If `true`, the top result includes a list of words and the
719	// confidence for those words. If `false`, no word-level confidence
720	// information is returned. The default is `false`.
721	EnableWordConfidence bool `protobuf:"varint,15,opt,name=enable_word_confidence,json=enableWordConfidence,proto3" json:"enable_word_confidence,omitempty"`
722	// *Optional* If 'true', adds punctuation to recognition result hypotheses.
723	// This feature is only available in select languages. Setting this for
724	// requests in other languages has no effect at all.
725	// The default 'false' value does not add punctuation to result hypotheses.
726	// NOTE: "This is currently offered as an experimental service, complimentary
727	// to all users. In the future this may be exclusively available as a
728	// premium feature."
729	EnableAutomaticPunctuation bool `protobuf:"varint,11,opt,name=enable_automatic_punctuation,json=enableAutomaticPunctuation,proto3" json:"enable_automatic_punctuation,omitempty"`
730	// *Optional* If 'true', enables speaker detection for each recognized word in
731	// the top alternative of the recognition result using a speaker_tag provided
732	// in the WordInfo.
733	// Note: When this is true, we send all the words from the beginning of the
734	// audio for the top alternative in every consecutive responses.
735	// This is done in order to improve our speaker tags as our models learn to
736	// identify the speakers in the conversation over time.
737	EnableSpeakerDiarization bool `protobuf:"varint,16,opt,name=enable_speaker_diarization,json=enableSpeakerDiarization,proto3" json:"enable_speaker_diarization,omitempty"`
738	// *Optional*
739	// If set, specifies the estimated number of speakers in the conversation.
740	// If not set, defaults to '2'.
741	// Ignored unless enable_speaker_diarization is set to true."
742	DiarizationSpeakerCount int32 `protobuf:"varint,17,opt,name=diarization_speaker_count,json=diarizationSpeakerCount,proto3" json:"diarization_speaker_count,omitempty"`
743	// *Optional* Metadata regarding this request.
744	Metadata *RecognitionMetadata `protobuf:"bytes,9,opt,name=metadata,proto3" json:"metadata,omitempty"`
745	// *Optional* Which model to select for the given request. Select the model
746	// best suited to your domain to get best results. If a model is not
747	// explicitly specified, then we auto-select a model based on the parameters
748	// in the RecognitionConfig.
749	// <table>
750	//   <tr>
751	//     <td><b>Model</b></td>
752	//     <td><b>Description</b></td>
753	//   </tr>
754	//   <tr>
755	//     <td><code>command_and_search</code></td>
756	//     <td>Best for short queries such as voice commands or voice search.</td>
757	//   </tr>
758	//   <tr>
759	//     <td><code>phone_call</code></td>
760	//     <td>Best for audio that originated from a phone call (typically
761	//     recorded at an 8khz sampling rate).</td>
762	//   </tr>
763	//   <tr>
764	//     <td><code>video</code></td>
765	//     <td>Best for audio that originated from from video or includes multiple
766	//         speakers. Ideally the audio is recorded at a 16khz or greater
767	//         sampling rate. This is a premium model that costs more than the
768	//         standard rate.</td>
769	//   </tr>
770	//   <tr>
771	//     <td><code>default</code></td>
772	//     <td>Best for audio that is not one of the specific audio models.
773	//         For example, long-form audio. Ideally the audio is high-fidelity,
774	//         recorded at a 16khz or greater sampling rate.</td>
775	//   </tr>
776	// </table>
777	Model string `protobuf:"bytes,13,opt,name=model,proto3" json:"model,omitempty"`
778	// *Optional* Set to true to use an enhanced model for speech recognition.
779	// You must also set the `model` field to a valid, enhanced model. If
780	// `use_enhanced` is set to true and the `model` field is not set, then
781	// `use_enhanced` is ignored. If `use_enhanced` is true and an enhanced
782	// version of the specified model does not exist, then the speech is
783	// recognized using the standard version of the specified model.
784	//
785	// Enhanced speech models require that you opt-in to the audio logging using
786	// instructions in the [alpha documentation](/speech/data-sharing). If you set
787	// `use_enhanced` to true and you have not enabled audio logging, then you
788	// will receive an error.
789	UseEnhanced          bool     `protobuf:"varint,14,opt,name=use_enhanced,json=useEnhanced,proto3" json:"use_enhanced,omitempty"`
790	XXX_NoUnkeyedLiteral struct{} `json:"-"`
791	XXX_unrecognized     []byte   `json:"-"`
792	XXX_sizecache        int32    `json:"-"`
793}
794
// Reset restores m to the zero-valued message.
func (m *RecognitionConfig) Reset()         { *m = RecognitionConfig{} }

// String renders the message in the proto compact text format.
func (m *RecognitionConfig) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks *RecognitionConfig as a proto.Message.
func (*RecognitionConfig) ProtoMessage()    {}

// Descriptor returns the raw file-descriptor bytes and the index path of this
// message within that file (message index 4).
func (*RecognitionConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{4}
}

// XXX_Unmarshal is an internal hook for the proto runtime; it delegates to the
// shared message-info table.
func (m *RecognitionConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionConfig.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook for the proto runtime.
func (m *RecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionConfig.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook that merges src into dst.
func (dst *RecognitionConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionConfig.Merge(dst, src)
}

// XXX_Size is an internal hook reporting the encoded size of m.
func (m *RecognitionConfig) XXX_Size() int {
	return xxx_messageInfo_RecognitionConfig.Size(m)
}

// XXX_DiscardUnknown is an internal hook that drops unrecognized fields.
func (m *RecognitionConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionConfig.DiscardUnknown(m)
}

// xxx_messageInfo_RecognitionConfig caches marshaling state shared by the
// XXX_* hooks above.
var xxx_messageInfo_RecognitionConfig proto.InternalMessageInfo
818
819func (m *RecognitionConfig) GetEncoding() RecognitionConfig_AudioEncoding {
820	if m != nil {
821		return m.Encoding
822	}
823	return RecognitionConfig_ENCODING_UNSPECIFIED
824}
825
826func (m *RecognitionConfig) GetSampleRateHertz() int32 {
827	if m != nil {
828		return m.SampleRateHertz
829	}
830	return 0
831}
832
833func (m *RecognitionConfig) GetAudioChannelCount() int32 {
834	if m != nil {
835		return m.AudioChannelCount
836	}
837	return 0
838}
839
840func (m *RecognitionConfig) GetEnableSeparateRecognitionPerChannel() bool {
841	if m != nil {
842		return m.EnableSeparateRecognitionPerChannel
843	}
844	return false
845}
846
847func (m *RecognitionConfig) GetLanguageCode() string {
848	if m != nil {
849		return m.LanguageCode
850	}
851	return ""
852}
853
854func (m *RecognitionConfig) GetAlternativeLanguageCodes() []string {
855	if m != nil {
856		return m.AlternativeLanguageCodes
857	}
858	return nil
859}
860
861func (m *RecognitionConfig) GetMaxAlternatives() int32 {
862	if m != nil {
863		return m.MaxAlternatives
864	}
865	return 0
866}
867
868func (m *RecognitionConfig) GetProfanityFilter() bool {
869	if m != nil {
870		return m.ProfanityFilter
871	}
872	return false
873}
874
875func (m *RecognitionConfig) GetSpeechContexts() []*SpeechContext {
876	if m != nil {
877		return m.SpeechContexts
878	}
879	return nil
880}
881
882func (m *RecognitionConfig) GetEnableWordTimeOffsets() bool {
883	if m != nil {
884		return m.EnableWordTimeOffsets
885	}
886	return false
887}
888
889func (m *RecognitionConfig) GetEnableWordConfidence() bool {
890	if m != nil {
891		return m.EnableWordConfidence
892	}
893	return false
894}
895
896func (m *RecognitionConfig) GetEnableAutomaticPunctuation() bool {
897	if m != nil {
898		return m.EnableAutomaticPunctuation
899	}
900	return false
901}
902
903func (m *RecognitionConfig) GetEnableSpeakerDiarization() bool {
904	if m != nil {
905		return m.EnableSpeakerDiarization
906	}
907	return false
908}
909
910func (m *RecognitionConfig) GetDiarizationSpeakerCount() int32 {
911	if m != nil {
912		return m.DiarizationSpeakerCount
913	}
914	return 0
915}
916
917func (m *RecognitionConfig) GetMetadata() *RecognitionMetadata {
918	if m != nil {
919		return m.Metadata
920	}
921	return nil
922}
923
924func (m *RecognitionConfig) GetModel() string {
925	if m != nil {
926		return m.Model
927	}
928	return ""
929}
930
931func (m *RecognitionConfig) GetUseEnhanced() bool {
932	if m != nil {
933		return m.UseEnhanced
934	}
935	return false
936}
937
// Description of audio data to be recognized.
type RecognitionMetadata struct {
	// The use case most closely describing the audio content to be recognized.
	InteractionType RecognitionMetadata_InteractionType `protobuf:"varint,1,opt,name=interaction_type,json=interactionType,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_InteractionType" json:"interaction_type,omitempty"`
	// The industry vertical to which this speech recognition request most
	// closely applies. This is most indicative of the topics contained
	// in the audio.  Use the 6-digit NAICS code to identify the industry
	// vertical - see https://www.naics.com/search/.
	IndustryNaicsCodeOfAudio uint32 `protobuf:"varint,3,opt,name=industry_naics_code_of_audio,json=industryNaicsCodeOfAudio,proto3" json:"industry_naics_code_of_audio,omitempty"`
	// The audio type that most closely describes the audio being recognized.
	MicrophoneDistance RecognitionMetadata_MicrophoneDistance `protobuf:"varint,4,opt,name=microphone_distance,json=microphoneDistance,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_MicrophoneDistance" json:"microphone_distance,omitempty"`
	// The original media the speech was recorded on.
	OriginalMediaType RecognitionMetadata_OriginalMediaType `protobuf:"varint,5,opt,name=original_media_type,json=originalMediaType,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_OriginalMediaType" json:"original_media_type,omitempty"`
	// The type of device the speech was recorded with.
	RecordingDeviceType RecognitionMetadata_RecordingDeviceType `protobuf:"varint,6,opt,name=recording_device_type,json=recordingDeviceType,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_RecordingDeviceType" json:"recording_device_type,omitempty"`
	// The device used to make the recording.  Examples 'Nexus 5X' or
	// 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
	// 'Cardioid Microphone'.
	RecordingDeviceName string `protobuf:"bytes,7,opt,name=recording_device_name,json=recordingDeviceName,proto3" json:"recording_device_name,omitempty"`
	// Mime type of the original audio file.  For example `audio/m4a`,
	// `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
	// A list of possible audio mime types is maintained at
	// http://www.iana.org/assignments/media-types/media-types.xhtml#audio
	OriginalMimeType string `protobuf:"bytes,8,opt,name=original_mime_type,json=originalMimeType,proto3" json:"original_mime_type,omitempty"`
	// Obfuscated (privacy-protected) ID of the user, to identify number of
	// unique users using the service.
	ObfuscatedId int64 `protobuf:"varint,9,opt,name=obfuscated_id,json=obfuscatedId,proto3" json:"obfuscated_id,omitempty"`
	// Description of the content. Eg. "Recordings of federal supreme court
	// hearings from 2012".
	AudioTopic string `protobuf:"bytes,10,opt,name=audio_topic,json=audioTopic,proto3" json:"audio_topic,omitempty"`
	// The XXX_* fields below are internal bookkeeping used by the proto
	// runtime; do not access them directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
972
// Reset restores m to the zero-valued message.
func (m *RecognitionMetadata) Reset()         { *m = RecognitionMetadata{} }

// String renders the message in the proto compact text format.
func (m *RecognitionMetadata) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks *RecognitionMetadata as a proto.Message.
func (*RecognitionMetadata) ProtoMessage()    {}

// Descriptor returns the raw file-descriptor bytes and this message's index
// path within it (message index 5).
func (*RecognitionMetadata) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{5}
}

// XXX_Unmarshal is an internal hook for the proto runtime.
func (m *RecognitionMetadata) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionMetadata.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook for the proto runtime.
func (m *RecognitionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionMetadata.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook that merges src into dst.
func (dst *RecognitionMetadata) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionMetadata.Merge(dst, src)
}

// XXX_Size is an internal hook reporting the encoded size of m.
func (m *RecognitionMetadata) XXX_Size() int {
	return xxx_messageInfo_RecognitionMetadata.Size(m)
}

// XXX_DiscardUnknown is an internal hook that drops unrecognized fields.
func (m *RecognitionMetadata) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionMetadata.DiscardUnknown(m)
}

// xxx_messageInfo_RecognitionMetadata caches marshaling state shared by the
// XXX_* hooks above.
var xxx_messageInfo_RecognitionMetadata proto.InternalMessageInfo
996
997func (m *RecognitionMetadata) GetInteractionType() RecognitionMetadata_InteractionType {
998	if m != nil {
999		return m.InteractionType
1000	}
1001	return RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED
1002}
1003
1004func (m *RecognitionMetadata) GetIndustryNaicsCodeOfAudio() uint32 {
1005	if m != nil {
1006		return m.IndustryNaicsCodeOfAudio
1007	}
1008	return 0
1009}
1010
1011func (m *RecognitionMetadata) GetMicrophoneDistance() RecognitionMetadata_MicrophoneDistance {
1012	if m != nil {
1013		return m.MicrophoneDistance
1014	}
1015	return RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED
1016}
1017
1018func (m *RecognitionMetadata) GetOriginalMediaType() RecognitionMetadata_OriginalMediaType {
1019	if m != nil {
1020		return m.OriginalMediaType
1021	}
1022	return RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED
1023}
1024
1025func (m *RecognitionMetadata) GetRecordingDeviceType() RecognitionMetadata_RecordingDeviceType {
1026	if m != nil {
1027		return m.RecordingDeviceType
1028	}
1029	return RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED
1030}
1031
1032func (m *RecognitionMetadata) GetRecordingDeviceName() string {
1033	if m != nil {
1034		return m.RecordingDeviceName
1035	}
1036	return ""
1037}
1038
1039func (m *RecognitionMetadata) GetOriginalMimeType() string {
1040	if m != nil {
1041		return m.OriginalMimeType
1042	}
1043	return ""
1044}
1045
1046func (m *RecognitionMetadata) GetObfuscatedId() int64 {
1047	if m != nil {
1048		return m.ObfuscatedId
1049	}
1050	return 0
1051}
1052
1053func (m *RecognitionMetadata) GetAudioTopic() string {
1054	if m != nil {
1055		return m.AudioTopic
1056	}
1057	return ""
1058}
1059
// Provides "hints" to the speech recognizer to favor specific words and phrases
// in the results.
type SpeechContext struct {
	// *Optional* A list of strings containing words and phrases "hints" so that
	// the speech recognition is more likely to recognize them. This can be used
	// to improve the accuracy for specific words and phrases, for example, if
	// specific commands are typically spoken by the user. This can also be used
	// to add additional words to the vocabulary of the recognizer. See
	// [usage limits](https://cloud.google.com/speech/limits#content).
	Phrases []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
	// The XXX_* fields below are internal bookkeeping used by the proto
	// runtime; do not access them directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
1074
// Reset restores m to the zero-valued message.
func (m *SpeechContext) Reset()         { *m = SpeechContext{} }

// String renders the message in the proto compact text format.
func (m *SpeechContext) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks *SpeechContext as a proto.Message.
func (*SpeechContext) ProtoMessage()    {}

// Descriptor returns the raw file-descriptor bytes and this message's index
// path within it (message index 6).
func (*SpeechContext) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{6}
}

// XXX_Unmarshal is an internal hook for the proto runtime.
func (m *SpeechContext) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechContext.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook for the proto runtime.
func (m *SpeechContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechContext.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook that merges src into dst.
func (dst *SpeechContext) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechContext.Merge(dst, src)
}

// XXX_Size is an internal hook reporting the encoded size of m.
func (m *SpeechContext) XXX_Size() int {
	return xxx_messageInfo_SpeechContext.Size(m)
}

// XXX_DiscardUnknown is an internal hook that drops unrecognized fields.
func (m *SpeechContext) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechContext.DiscardUnknown(m)
}

// xxx_messageInfo_SpeechContext caches marshaling state shared by the XXX_*
// hooks above.
var xxx_messageInfo_SpeechContext proto.InternalMessageInfo
1098
1099func (m *SpeechContext) GetPhrases() []string {
1100	if m != nil {
1101		return m.Phrases
1102	}
1103	return nil
1104}
1105
// Contains audio data in the encoding specified in the `RecognitionConfig`.
// Either `content` or `uri` must be supplied. Supplying both or neither
// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See
// [audio limits](https://cloud.google.com/speech/limits#content).
type RecognitionAudio struct {
	// The audio source, which is either inline content or a Google Cloud
	// Storage uri.
	//
	// Types that are valid to be assigned to AudioSource:
	//	*RecognitionAudio_Content
	//	*RecognitionAudio_Uri
	AudioSource isRecognitionAudio_AudioSource `protobuf_oneof:"audio_source"`
	// The XXX_* fields below are internal bookkeeping used by the proto
	// runtime; do not access them directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
1122
// Reset restores m to the zero-valued message.
func (m *RecognitionAudio) Reset()         { *m = RecognitionAudio{} }

// String renders the message in the proto compact text format.
func (m *RecognitionAudio) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks *RecognitionAudio as a proto.Message.
func (*RecognitionAudio) ProtoMessage()    {}

// Descriptor returns the raw file-descriptor bytes and this message's index
// path within it (message index 7).
func (*RecognitionAudio) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{7}
}

// XXX_Unmarshal is an internal hook for the proto runtime.
func (m *RecognitionAudio) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionAudio.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook for the proto runtime.
func (m *RecognitionAudio) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionAudio.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook that merges src into dst.
func (dst *RecognitionAudio) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionAudio.Merge(dst, src)
}

// XXX_Size is an internal hook reporting the encoded size of m.
func (m *RecognitionAudio) XXX_Size() int {
	return xxx_messageInfo_RecognitionAudio.Size(m)
}

// XXX_DiscardUnknown is an internal hook that drops unrecognized fields.
func (m *RecognitionAudio) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionAudio.DiscardUnknown(m)
}

// xxx_messageInfo_RecognitionAudio caches marshaling state shared by the
// XXX_* hooks above.
var xxx_messageInfo_RecognitionAudio proto.InternalMessageInfo
1146
// isRecognitionAudio_AudioSource is the sealed interface satisfied by every
// wrapper type that can populate the audio_source oneof.
type isRecognitionAudio_AudioSource interface {
	isRecognitionAudio_AudioSource()
}

// RecognitionAudio_Content wraps inline audio bytes (oneof field 1).
type RecognitionAudio_Content struct {
	Content []byte `protobuf:"bytes,1,opt,name=content,proto3,oneof"`
}

// RecognitionAudio_Uri wraps a Google Cloud Storage URI (oneof field 2).
type RecognitionAudio_Uri struct {
	Uri string `protobuf:"bytes,2,opt,name=uri,proto3,oneof"`
}

func (*RecognitionAudio_Content) isRecognitionAudio_AudioSource() {}

func (*RecognitionAudio_Uri) isRecognitionAudio_AudioSource() {}
1162
1163func (m *RecognitionAudio) GetAudioSource() isRecognitionAudio_AudioSource {
1164	if m != nil {
1165		return m.AudioSource
1166	}
1167	return nil
1168}
1169
1170func (m *RecognitionAudio) GetContent() []byte {
1171	if x, ok := m.GetAudioSource().(*RecognitionAudio_Content); ok {
1172		return x.Content
1173	}
1174	return nil
1175}
1176
1177func (m *RecognitionAudio) GetUri() string {
1178	if x, ok := m.GetAudioSource().(*RecognitionAudio_Uri); ok {
1179		return x.Uri
1180	}
1181	return ""
1182}
1183
// XXX_OneofFuncs is for the internal use of the proto package.
// It hands the runtime the marshal, unmarshal, and size hooks for the
// audio_source oneof, plus the set of wrapper types that may populate it.
func (*RecognitionAudio) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _RecognitionAudio_OneofMarshaler, _RecognitionAudio_OneofUnmarshaler, _RecognitionAudio_OneofSizer, []interface{}{
		(*RecognitionAudio_Content)(nil),
		(*RecognitionAudio_Uri)(nil),
	}
}
1191
// _RecognitionAudio_OneofMarshaler writes whichever audio_source wrapper is
// populated into b using the proto wire format; an unset oneof emits nothing.
func _RecognitionAudio_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*RecognitionAudio)
	// audio_source
	switch x := m.AudioSource.(type) {
	case *RecognitionAudio_Content:
		// Tag for field 1, wire type 2 (length-delimited).
		b.EncodeVarint(1<<3 | proto.WireBytes)
		b.EncodeRawBytes(x.Content)
	case *RecognitionAudio_Uri:
		// Tag for field 2, wire type 2 (length-delimited).
		b.EncodeVarint(2<<3 | proto.WireBytes)
		b.EncodeStringBytes(x.Uri)
	case nil:
		// Oneof not set: nothing to encode.
	default:
		return fmt.Errorf("RecognitionAudio.AudioSource has unexpected type %T", x)
	}
	return nil
}
1208
// _RecognitionAudio_OneofUnmarshaler decodes one audio_source field from b.
// It reports (true, err) when the tag belongs to this oneof (the wire type
// must be length-delimited) and (false, nil) for tags it does not own.
func _RecognitionAudio_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*RecognitionAudio)
	switch tag {
	case 1: // audio_source.content
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeRawBytes(true)
		m.AudioSource = &RecognitionAudio_Content{x}
		return true, err
	case 2: // audio_source.uri
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeStringBytes()
		m.AudioSource = &RecognitionAudio_Uri{x}
		return true, err
	default:
		return false, nil
	}
}
1230
// _RecognitionAudio_OneofSizer returns the encoded byte size of the populated
// audio_source wrapper: a one-byte tag (field numbers 1 and 2 fit in a single
// byte), a varint length prefix, and the payload itself.
func _RecognitionAudio_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*RecognitionAudio)
	// audio_source
	switch x := m.AudioSource.(type) {
	case *RecognitionAudio_Content:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.Content)))
		n += len(x.Content)
	case *RecognitionAudio_Uri:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.Uri)))
		n += len(x.Uri)
	case nil:
		// Oneof not set: contributes no bytes.
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
1249
// The only message returned to the client by the `Recognize` method. It
// contains the result as zero or more sequential `SpeechRecognitionResult`
// messages.
type RecognizeResponse struct {
	// Output only. Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	// The XXX_* fields below are internal bookkeeping used by the proto
	// runtime; do not access them directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
1261
// Reset restores m to the zero-valued message.
func (m *RecognizeResponse) Reset()         { *m = RecognizeResponse{} }

// String renders the message in the proto compact text format.
func (m *RecognizeResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks *RecognizeResponse as a proto.Message.
func (*RecognizeResponse) ProtoMessage()    {}

// Descriptor returns the raw file-descriptor bytes and this message's index
// path within it (message index 8).
func (*RecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{8}
}

// XXX_Unmarshal is an internal hook for the proto runtime.
func (m *RecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognizeResponse.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook for the proto runtime.
func (m *RecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognizeResponse.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook that merges src into dst.
func (dst *RecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognizeResponse.Merge(dst, src)
}

// XXX_Size is an internal hook reporting the encoded size of m.
func (m *RecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_RecognizeResponse.Size(m)
}

// XXX_DiscardUnknown is an internal hook that drops unrecognized fields.
func (m *RecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognizeResponse.DiscardUnknown(m)
}

// xxx_messageInfo_RecognizeResponse caches marshaling state shared by the
// XXX_* hooks above.
var xxx_messageInfo_RecognizeResponse proto.InternalMessageInfo
1285
1286func (m *RecognizeResponse) GetResults() []*SpeechRecognitionResult {
1287	if m != nil {
1288		return m.Results
1289	}
1290	return nil
1291}
1292
// The only message returned to the client by the `LongRunningRecognize` method.
// It contains the result as zero or more sequential `SpeechRecognitionResult`
// messages. It is included in the `result.response` field of the `Operation`
// returned by the `GetOperation` call of the `google::longrunning::Operations`
// service.
type LongRunningRecognizeResponse struct {
	// Output only. Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	// The XXX_* fields below are internal bookkeeping used by the proto
	// runtime; do not access them directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
1306
// Reset restores m to the zero-valued message.
func (m *LongRunningRecognizeResponse) Reset()         { *m = LongRunningRecognizeResponse{} }

// String renders the message in the proto compact text format.
func (m *LongRunningRecognizeResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks *LongRunningRecognizeResponse as a proto.Message.
func (*LongRunningRecognizeResponse) ProtoMessage()    {}

// Descriptor returns the raw file-descriptor bytes and this message's index
// path within it (message index 9).
func (*LongRunningRecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{9}
}

// XXX_Unmarshal is an internal hook for the proto runtime.
func (m *LongRunningRecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LongRunningRecognizeResponse.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook for the proto runtime.
func (m *LongRunningRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LongRunningRecognizeResponse.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook that merges src into dst.
func (dst *LongRunningRecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LongRunningRecognizeResponse.Merge(dst, src)
}

// XXX_Size is an internal hook reporting the encoded size of m.
func (m *LongRunningRecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_LongRunningRecognizeResponse.Size(m)
}

// XXX_DiscardUnknown is an internal hook that drops unrecognized fields.
func (m *LongRunningRecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_LongRunningRecognizeResponse.DiscardUnknown(m)
}

// xxx_messageInfo_LongRunningRecognizeResponse caches marshaling state shared
// by the XXX_* hooks above.
var xxx_messageInfo_LongRunningRecognizeResponse proto.InternalMessageInfo
1330
1331func (m *LongRunningRecognizeResponse) GetResults() []*SpeechRecognitionResult {
1332	if m != nil {
1333		return m.Results
1334	}
1335	return nil
1336}
1337
// Describes the progress of a long-running `LongRunningRecognize` call. It is
// included in the `metadata` field of the `Operation` returned by the
// `GetOperation` call of the `google::longrunning::Operations` service.
type LongRunningRecognizeMetadata struct {
	// Approximate percentage of audio processed thus far. Guaranteed to be 100
	// when the audio is fully processed and the results are available.
	ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
	// Time when the request was received.
	StartTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Time of the most recent processing update.
	LastUpdateTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"`
	// The XXX_* fields below are internal bookkeeping used by the proto
	// runtime; do not access them directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
1353
// Reset restores m to the zero-valued message.
func (m *LongRunningRecognizeMetadata) Reset()         { *m = LongRunningRecognizeMetadata{} }

// String renders the message in the proto compact text format.
func (m *LongRunningRecognizeMetadata) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks *LongRunningRecognizeMetadata as a proto.Message.
func (*LongRunningRecognizeMetadata) ProtoMessage()    {}

// Descriptor returns the raw file-descriptor bytes and this message's index
// path within it (message index 10).
func (*LongRunningRecognizeMetadata) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{10}
}

// XXX_Unmarshal is an internal hook for the proto runtime.
func (m *LongRunningRecognizeMetadata) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LongRunningRecognizeMetadata.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook for the proto runtime.
func (m *LongRunningRecognizeMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LongRunningRecognizeMetadata.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook that merges src into dst.
func (dst *LongRunningRecognizeMetadata) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LongRunningRecognizeMetadata.Merge(dst, src)
}

// XXX_Size is an internal hook reporting the encoded size of m.
func (m *LongRunningRecognizeMetadata) XXX_Size() int {
	return xxx_messageInfo_LongRunningRecognizeMetadata.Size(m)
}

// XXX_DiscardUnknown is an internal hook that drops unrecognized fields.
func (m *LongRunningRecognizeMetadata) XXX_DiscardUnknown() {
	xxx_messageInfo_LongRunningRecognizeMetadata.DiscardUnknown(m)
}

// xxx_messageInfo_LongRunningRecognizeMetadata caches marshaling state shared
// by the XXX_* hooks above.
var xxx_messageInfo_LongRunningRecognizeMetadata proto.InternalMessageInfo
1377
1378func (m *LongRunningRecognizeMetadata) GetProgressPercent() int32 {
1379	if m != nil {
1380		return m.ProgressPercent
1381	}
1382	return 0
1383}
1384
1385func (m *LongRunningRecognizeMetadata) GetStartTime() *timestamp.Timestamp {
1386	if m != nil {
1387		return m.StartTime
1388	}
1389	return nil
1390}
1391
1392func (m *LongRunningRecognizeMetadata) GetLastUpdateTime() *timestamp.Timestamp {
1393	if m != nil {
1394		return m.LastUpdateTime
1395	}
1396	return nil
1397}
1398
1399// `StreamingRecognizeResponse` is the only message returned to the client by
1400// `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
1401// messages are streamed back to the client. If there is no recognizable
1402// audio, and `single_utterance` is set to false, then no messages are streamed
1403// back to the client.
1404//
1405// Here's an example of a series of ten `StreamingRecognizeResponse`s that might
1406// be returned while processing audio:
1407//
1408// 1. results { alternatives { transcript: "tube" } stability: 0.01 }
1409//
1410// 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
1411//
1412// 3. results { alternatives { transcript: "to be" } stability: 0.9 }
1413//    results { alternatives { transcript: " or not to be" } stability: 0.01 }
1414//
1415// 4. results { alternatives { transcript: "to be or not to be"
1416//                             confidence: 0.92 }
1417//              alternatives { transcript: "to bee or not to bee" }
1418//              is_final: true }
1419//
1420// 5. results { alternatives { transcript: " that's" } stability: 0.01 }
1421//
1422// 6. results { alternatives { transcript: " that is" } stability: 0.9 }
1423//    results { alternatives { transcript: " the question" } stability: 0.01 }
1424//
1425// 7. results { alternatives { transcript: " that is the question"
1426//                             confidence: 0.98 }
1427//              alternatives { transcript: " that was the question" }
1428//              is_final: true }
1429//
1430// Notes:
1431//
1432// - Only two of the above responses #4 and #7 contain final results; they are
1433//   indicated by `is_final: true`. Concatenating these together generates the
1434//   full transcript: "to be or not to be that is the question".
1435//
1436// - The others contain interim `results`. #3 and #6 contain two interim
1437//   `results`: the first portion has a high stability and is less likely to
1438//   change; the second portion has a low stability and is very likely to
1439//   change. A UI designer might choose to show only high stability `results`.
1440//
1441// - The specific `stability` and `confidence` values shown above are only for
1442//   illustrative purposes. Actual values may vary.
1443//
1444// - In each response, only one of these fields will be set:
1445//     `error`,
1446//     `speech_event_type`, or
1447//     one or more (repeated) `results`.
type StreamingRecognizeResponse struct {
	// Output only. If set, returns a [google.rpc.Status][google.rpc.Status] message that
	// specifies the error for the operation.
	Error *status.Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
	// Output only. This repeated list contains zero or more results that
	// correspond to consecutive portions of the audio currently being processed.
	// It contains zero or one `is_final=true` result (the newly settled portion),
	// followed by zero or more `is_final=false` results (the interim results).
	Results []*StreamingRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	// Output only. Indicates the type of speech event.
	SpeechEventType      StreamingRecognizeResponse_SpeechEventType `protobuf:"varint,4,opt,name=speech_event_type,json=speechEventType,proto3,enum=google.cloud.speech.v1p1beta1.StreamingRecognizeResponse_SpeechEventType" json:"speech_event_type,omitempty"`
	// XXX_* fields are internal bookkeeping used by the proto runtime; they
	// are not part of the message schema.
	XXX_NoUnkeyedLiteral struct{}                                   `json:"-"`
	XXX_unrecognized     []byte                                     `json:"-"`
	XXX_sizecache        int32                                      `json:"-"`
}

// Standard generated plumbing: lifecycle and (un)marshal hooks delegating to
// the message-info table declared below.
func (m *StreamingRecognizeResponse) Reset()         { *m = StreamingRecognizeResponse{} }
func (m *StreamingRecognizeResponse) String() string { return proto.CompactTextString(m) }
func (*StreamingRecognizeResponse) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor bytes and this message's
// index (11) within that file.
func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{11}
}
func (m *StreamingRecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognizeResponse.Unmarshal(m, b)
}
func (m *StreamingRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognizeResponse.Marshal(b, m, deterministic)
}
func (dst *StreamingRecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognizeResponse.Merge(dst, src)
}
func (m *StreamingRecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognizeResponse.Size(m)
}
func (m *StreamingRecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognizeResponse.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognizeResponse caches reflection data used by
// the XXX_* methods above.
var xxx_messageInfo_StreamingRecognizeResponse proto.InternalMessageInfo
1487
1488func (m *StreamingRecognizeResponse) GetError() *status.Status {
1489	if m != nil {
1490		return m.Error
1491	}
1492	return nil
1493}
1494
1495func (m *StreamingRecognizeResponse) GetResults() []*StreamingRecognitionResult {
1496	if m != nil {
1497		return m.Results
1498	}
1499	return nil
1500}
1501
1502func (m *StreamingRecognizeResponse) GetSpeechEventType() StreamingRecognizeResponse_SpeechEventType {
1503	if m != nil {
1504		return m.SpeechEventType
1505	}
1506	return StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED
1507}
1508
// A streaming speech recognition result corresponding to a portion of the audio
// that is currently being processed.
type StreamingRecognitionResult struct {
	// Output only. May contain one or more recognition hypotheses (up to the
	// maximum specified in `max_alternatives`).
	// These alternatives are ordered in terms of accuracy, with the top (first)
	// alternative being the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	// Output only. If `false`, this `StreamingRecognitionResult` represents an
	// interim result that may change. If `true`, this is the final time the
	// speech service will return this particular `StreamingRecognitionResult`,
	// the recognizer will not return any further hypotheses for this portion of
	// the transcript and corresponding audio.
	IsFinal bool `protobuf:"varint,2,opt,name=is_final,json=isFinal,proto3" json:"is_final,omitempty"`
	// Output only. An estimate of the likelihood that the recognizer will not
	// change its guess about this interim result. Values range from 0.0
	// (completely unstable) to 1.0 (completely stable).
	// This field is only provided for interim results (`is_final=false`).
	// The default of 0.0 is a sentinel value indicating `stability` was not set.
	Stability float32 `protobuf:"fixed32,3,opt,name=stability,proto3" json:"stability,omitempty"`
	// For multi-channel audio, this is the channel number corresponding to the
	// recognized result for the audio from that channel.
	// For audio_channel_count = N, its output values can range from '1' to 'N'.
	ChannelTag int32 `protobuf:"varint,5,opt,name=channel_tag,json=channelTag,proto3" json:"channel_tag,omitempty"`
	// Output only. The
	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
	// language in this result. This language code was detected to have the most
	// likelihood of being spoken in the audio.
	LanguageCode         string   `protobuf:"bytes,6,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
	// XXX_* fields are internal bookkeeping used by the proto runtime; they
	// are not part of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated plumbing: lifecycle and (un)marshal hooks delegating to
// the message-info table declared below.
func (m *StreamingRecognitionResult) Reset()         { *m = StreamingRecognitionResult{} }
func (m *StreamingRecognitionResult) String() string { return proto.CompactTextString(m) }
func (*StreamingRecognitionResult) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor bytes and this message's
// index (12) within that file.
func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{12}
}
func (m *StreamingRecognitionResult) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognitionResult.Unmarshal(m, b)
}
func (m *StreamingRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognitionResult.Marshal(b, m, deterministic)
}
func (dst *StreamingRecognitionResult) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognitionResult.Merge(dst, src)
}
func (m *StreamingRecognitionResult) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognitionResult.Size(m)
}
func (m *StreamingRecognitionResult) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognitionResult.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognitionResult caches reflection data used by
// the XXX_* methods above.
var xxx_messageInfo_StreamingRecognitionResult proto.InternalMessageInfo
1566
1567func (m *StreamingRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative {
1568	if m != nil {
1569		return m.Alternatives
1570	}
1571	return nil
1572}
1573
1574func (m *StreamingRecognitionResult) GetIsFinal() bool {
1575	if m != nil {
1576		return m.IsFinal
1577	}
1578	return false
1579}
1580
1581func (m *StreamingRecognitionResult) GetStability() float32 {
1582	if m != nil {
1583		return m.Stability
1584	}
1585	return 0
1586}
1587
1588func (m *StreamingRecognitionResult) GetChannelTag() int32 {
1589	if m != nil {
1590		return m.ChannelTag
1591	}
1592	return 0
1593}
1594
1595func (m *StreamingRecognitionResult) GetLanguageCode() string {
1596	if m != nil {
1597		return m.LanguageCode
1598	}
1599	return ""
1600}
1601
// A speech recognition result corresponding to a portion of the audio.
type SpeechRecognitionResult struct {
	// Output only. May contain one or more recognition hypotheses (up to the
	// maximum specified in `max_alternatives`).
	// These alternatives are ordered in terms of accuracy, with the top (first)
	// alternative being the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	// For multi-channel audio, this is the channel number corresponding to the
	// recognized result for the audio from that channel.
	// For audio_channel_count = N, its output values can range from '1' to 'N'.
	ChannelTag int32 `protobuf:"varint,2,opt,name=channel_tag,json=channelTag,proto3" json:"channel_tag,omitempty"`
	// Output only. The
	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
	// language in this result. This language code was detected to have the most
	// likelihood of being spoken in the audio.
	LanguageCode         string   `protobuf:"bytes,5,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
	// XXX_* fields are internal bookkeeping used by the proto runtime; they
	// are not part of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated plumbing: lifecycle and (un)marshal hooks delegating to
// the message-info table declared below.
func (m *SpeechRecognitionResult) Reset()         { *m = SpeechRecognitionResult{} }
func (m *SpeechRecognitionResult) String() string { return proto.CompactTextString(m) }
func (*SpeechRecognitionResult) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor bytes and this message's
// index (13) within that file.
func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{13}
}
func (m *SpeechRecognitionResult) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechRecognitionResult.Unmarshal(m, b)
}
func (m *SpeechRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechRecognitionResult.Marshal(b, m, deterministic)
}
func (dst *SpeechRecognitionResult) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechRecognitionResult.Merge(dst, src)
}
func (m *SpeechRecognitionResult) XXX_Size() int {
	return xxx_messageInfo_SpeechRecognitionResult.Size(m)
}
func (m *SpeechRecognitionResult) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechRecognitionResult.DiscardUnknown(m)
}

// xxx_messageInfo_SpeechRecognitionResult caches reflection data used by the
// XXX_* methods above.
var xxx_messageInfo_SpeechRecognitionResult proto.InternalMessageInfo
1646
1647func (m *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative {
1648	if m != nil {
1649		return m.Alternatives
1650	}
1651	return nil
1652}
1653
1654func (m *SpeechRecognitionResult) GetChannelTag() int32 {
1655	if m != nil {
1656		return m.ChannelTag
1657	}
1658	return 0
1659}
1660
1661func (m *SpeechRecognitionResult) GetLanguageCode() string {
1662	if m != nil {
1663		return m.LanguageCode
1664	}
1665	return ""
1666}
1667
// Alternative hypotheses (a.k.a. n-best list).
type SpeechRecognitionAlternative struct {
	// Output only. Transcript text representing the words that the user spoke.
	Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
	// Output only. The confidence estimate between 0.0 and 1.0. A higher number
	// indicates an estimated greater likelihood that the recognized words are
	// correct. This field is set only for the top alternative of a non-streaming
	// result or, of a streaming result where `is_final=true`.
	// This field is not guaranteed to be accurate and users should not rely on it
	// to be always provided.
	// The default of 0.0 is a sentinel value indicating `confidence` was not set.
	Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Output only. A list of word-specific information for each recognized word.
	// Note: When enable_speaker_diarization is true, you will see all the words
	// from the beginning of the audio.
	Words                []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
	// XXX_* fields are internal bookkeeping used by the proto runtime; they
	// are not part of the message schema.
	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
	XXX_unrecognized     []byte      `json:"-"`
	XXX_sizecache        int32       `json:"-"`
}

// Standard generated plumbing: lifecycle and (un)marshal hooks delegating to
// the message-info table declared below.
func (m *SpeechRecognitionAlternative) Reset()         { *m = SpeechRecognitionAlternative{} }
func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) }
func (*SpeechRecognitionAlternative) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor bytes and this message's
// index (14) within that file.
func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{14}
}
func (m *SpeechRecognitionAlternative) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechRecognitionAlternative.Unmarshal(m, b)
}
func (m *SpeechRecognitionAlternative) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechRecognitionAlternative.Marshal(b, m, deterministic)
}
func (dst *SpeechRecognitionAlternative) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechRecognitionAlternative.Merge(dst, src)
}
func (m *SpeechRecognitionAlternative) XXX_Size() int {
	return xxx_messageInfo_SpeechRecognitionAlternative.Size(m)
}
func (m *SpeechRecognitionAlternative) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechRecognitionAlternative.DiscardUnknown(m)
}

// xxx_messageInfo_SpeechRecognitionAlternative caches reflection data used by
// the XXX_* methods above.
var xxx_messageInfo_SpeechRecognitionAlternative proto.InternalMessageInfo
1712
1713func (m *SpeechRecognitionAlternative) GetTranscript() string {
1714	if m != nil {
1715		return m.Transcript
1716	}
1717	return ""
1718}
1719
1720func (m *SpeechRecognitionAlternative) GetConfidence() float32 {
1721	if m != nil {
1722		return m.Confidence
1723	}
1724	return 0
1725}
1726
1727func (m *SpeechRecognitionAlternative) GetWords() []*WordInfo {
1728	if m != nil {
1729		return m.Words
1730	}
1731	return nil
1732}
1733
// Word-specific information for recognized words.
type WordInfo struct {
	// Output only. Time offset relative to the beginning of the audio,
	// and corresponding to the start of the spoken word.
	// This field is only set if `enable_word_time_offsets=true` and only
	// in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset can
	// vary.
	StartTime *duration.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Output only. Time offset relative to the beginning of the audio,
	// and corresponding to the end of the spoken word.
	// This field is only set if `enable_word_time_offsets=true` and only
	// in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset can
	// vary.
	EndTime *duration.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
	// Output only. The word corresponding to this set of information.
	Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
	// Output only. The confidence estimate between 0.0 and 1.0. A higher number
	// indicates an estimated greater likelihood that the recognized words are
	// correct. This field is set only for the top alternative of a non-streaming
	// result or, of a streaming result where `is_final=true`.
	// This field is not guaranteed to be accurate and users should not rely on it
	// to be always provided.
	// The default of 0.0 is a sentinel value indicating `confidence` was not set.
	Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Output only. A distinct integer value is assigned for every speaker within
	// the audio. This field specifies which one of those speakers was detected to
	// have spoken this word. Value ranges from '1' to diarization_speaker_count.
	// speaker_tag is set if enable_speaker_diarization = 'true' and only in the
	// top alternative.
	SpeakerTag           int32    `protobuf:"varint,5,opt,name=speaker_tag,json=speakerTag,proto3" json:"speaker_tag,omitempty"`
	// XXX_* fields are internal bookkeeping used by the proto runtime; they
	// are not part of the message schema.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated plumbing: lifecycle and (un)marshal hooks delegating to
// the message-info table declared below.
func (m *WordInfo) Reset()         { *m = WordInfo{} }
func (m *WordInfo) String() string { return proto.CompactTextString(m) }
func (*WordInfo) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor bytes and this message's
// index (15) within that file.
func (*WordInfo) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_9c9e7aa236afd686, []int{15}
}
func (m *WordInfo) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_WordInfo.Unmarshal(m, b)
}
func (m *WordInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_WordInfo.Marshal(b, m, deterministic)
}
func (dst *WordInfo) XXX_Merge(src proto.Message) {
	xxx_messageInfo_WordInfo.Merge(dst, src)
}
func (m *WordInfo) XXX_Size() int {
	return xxx_messageInfo_WordInfo.Size(m)
}
func (m *WordInfo) XXX_DiscardUnknown() {
	xxx_messageInfo_WordInfo.DiscardUnknown(m)
}

// xxx_messageInfo_WordInfo caches reflection data used by the XXX_* methods
// above.
var xxx_messageInfo_WordInfo proto.InternalMessageInfo
1794
1795func (m *WordInfo) GetStartTime() *duration.Duration {
1796	if m != nil {
1797		return m.StartTime
1798	}
1799	return nil
1800}
1801
1802func (m *WordInfo) GetEndTime() *duration.Duration {
1803	if m != nil {
1804		return m.EndTime
1805	}
1806	return nil
1807}
1808
1809func (m *WordInfo) GetWord() string {
1810	if m != nil {
1811		return m.Word
1812	}
1813	return ""
1814}
1815
1816func (m *WordInfo) GetConfidence() float32 {
1817	if m != nil {
1818		return m.Confidence
1819	}
1820	return 0
1821}
1822
1823func (m *WordInfo) GetSpeakerTag() int32 {
1824	if m != nil {
1825		return m.SpeakerTag
1826	}
1827	return 0
1828}
1829
// init registers every generated message type and enum with the proto
// registry under its fully-qualified proto name, so reflection-based
// (un)marshaling and Any resolution can find them.
func init() {
	proto.RegisterType((*RecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.RecognizeRequest")
	proto.RegisterType((*LongRunningRecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest")
	proto.RegisterType((*StreamingRecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognizeRequest")
	proto.RegisterType((*StreamingRecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognitionConfig")
	proto.RegisterType((*RecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.RecognitionConfig")
	proto.RegisterType((*RecognitionMetadata)(nil), "google.cloud.speech.v1p1beta1.RecognitionMetadata")
	proto.RegisterType((*SpeechContext)(nil), "google.cloud.speech.v1p1beta1.SpeechContext")
	proto.RegisterType((*RecognitionAudio)(nil), "google.cloud.speech.v1p1beta1.RecognitionAudio")
	proto.RegisterType((*RecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.RecognizeResponse")
	proto.RegisterType((*LongRunningRecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse")
	proto.RegisterType((*LongRunningRecognizeMetadata)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata")
	proto.RegisterType((*StreamingRecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognizeResponse")
	proto.RegisterType((*StreamingRecognitionResult)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognitionResult")
	proto.RegisterType((*SpeechRecognitionResult)(nil), "google.cloud.speech.v1p1beta1.SpeechRecognitionResult")
	proto.RegisterType((*SpeechRecognitionAlternative)(nil), "google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative")
	proto.RegisterType((*WordInfo)(nil), "google.cloud.speech.v1p1beta1.WordInfo")
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionConfig_AudioEncoding", RecognitionConfig_AudioEncoding_name, RecognitionConfig_AudioEncoding_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_InteractionType", RecognitionMetadata_InteractionType_name, RecognitionMetadata_InteractionType_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_MicrophoneDistance", RecognitionMetadata_MicrophoneDistance_name, RecognitionMetadata_MicrophoneDistance_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_OriginalMediaType", RecognitionMetadata_OriginalMediaType_name, RecognitionMetadata_OriginalMediaType_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_RecordingDeviceType", RecognitionMetadata_RecordingDeviceType_name, RecognitionMetadata_RecordingDeviceType_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.StreamingRecognizeResponse_SpeechEventType", StreamingRecognizeResponse_SpeechEventType_name, StreamingRecognizeResponse_SpeechEventType_value)
}

// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
1862
// SpeechClient is the client API for Speech service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SpeechClient interface {
	// Performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.
	Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error)
	// Performs asynchronous speech recognition: receive results via the
	// google.longrunning.Operations interface. Returns either an
	// `Operation.error` or an `Operation.response` which contains
	// a `LongRunningRecognizeResponse` message.
	LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Performs bidirectional streaming speech recognition: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error)
}

// speechClient is the concrete SpeechClient backed by a gRPC connection.
type speechClient struct {
	cc *grpc.ClientConn
}

// NewSpeechClient wraps the given connection in a SpeechClient.
func NewSpeechClient(cc *grpc.ClientConn) SpeechClient {
	return &speechClient{cc}
}
1887
1888func (c *speechClient) Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error) {
1889	out := new(RecognizeResponse)
1890	err := c.cc.Invoke(ctx, "/google.cloud.speech.v1p1beta1.Speech/Recognize", in, out, opts...)
1891	if err != nil {
1892		return nil, err
1893	}
1894	return out, nil
1895}
1896
1897func (c *speechClient) LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
1898	out := new(longrunning.Operation)
1899	err := c.cc.Invoke(ctx, "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize", in, out, opts...)
1900	if err != nil {
1901		return nil, err
1902	}
1903	return out, nil
1904}
1905
1906func (c *speechClient) StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error) {
1907	stream, err := c.cc.NewStream(ctx, &_Speech_serviceDesc.Streams[0], "/google.cloud.speech.v1p1beta1.Speech/StreamingRecognize", opts...)
1908	if err != nil {
1909		return nil, err
1910	}
1911	x := &speechStreamingRecognizeClient{stream}
1912	return x, nil
1913}
1914
// Speech_StreamingRecognizeClient is the client-side view of the
// bidirectional StreamingRecognize stream: Send pushes requests, Recv pulls
// responses.
type Speech_StreamingRecognizeClient interface {
	Send(*StreamingRecognizeRequest) error
	Recv() (*StreamingRecognizeResponse, error)
	grpc.ClientStream
}

// speechStreamingRecognizeClient adds typed Send/Recv on top of a raw
// grpc.ClientStream.
type speechStreamingRecognizeClient struct {
	grpc.ClientStream
}
1924
1925func (x *speechStreamingRecognizeClient) Send(m *StreamingRecognizeRequest) error {
1926	return x.ClientStream.SendMsg(m)
1927}
1928
1929func (x *speechStreamingRecognizeClient) Recv() (*StreamingRecognizeResponse, error) {
1930	m := new(StreamingRecognizeResponse)
1931	if err := x.ClientStream.RecvMsg(m); err != nil {
1932		return nil, err
1933	}
1934	return m, nil
1935}
1936
// SpeechServer is the server API for Speech service.
type SpeechServer interface {
	// Performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.
	Recognize(context.Context, *RecognizeRequest) (*RecognizeResponse, error)
	// Performs asynchronous speech recognition: receive results via the
	// google.longrunning.Operations interface. Returns either an
	// `Operation.error` or an `Operation.response` which contains
	// a `LongRunningRecognizeResponse` message.
	LongRunningRecognize(context.Context, *LongRunningRecognizeRequest) (*longrunning.Operation, error)
	// Performs bidirectional streaming speech recognition: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingRecognize(Speech_StreamingRecognizeServer) error
}

// RegisterSpeechServer registers the given implementation with the gRPC
// server under the Speech service descriptor.
func RegisterSpeechServer(s *grpc.Server, srv SpeechServer) {
	s.RegisterService(&_Speech_serviceDesc, srv)
}
1955
1956func _Speech_Recognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
1957	in := new(RecognizeRequest)
1958	if err := dec(in); err != nil {
1959		return nil, err
1960	}
1961	if interceptor == nil {
1962		return srv.(SpeechServer).Recognize(ctx, in)
1963	}
1964	info := &grpc.UnaryServerInfo{
1965		Server:     srv,
1966		FullMethod: "/google.cloud.speech.v1p1beta1.Speech/Recognize",
1967	}
1968	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
1969		return srv.(SpeechServer).Recognize(ctx, req.(*RecognizeRequest))
1970	}
1971	return interceptor(ctx, in, info, handler)
1972}
1973
1974func _Speech_LongRunningRecognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
1975	in := new(LongRunningRecognizeRequest)
1976	if err := dec(in); err != nil {
1977		return nil, err
1978	}
1979	if interceptor == nil {
1980		return srv.(SpeechServer).LongRunningRecognize(ctx, in)
1981	}
1982	info := &grpc.UnaryServerInfo{
1983		Server:     srv,
1984		FullMethod: "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize",
1985	}
1986	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
1987		return srv.(SpeechServer).LongRunningRecognize(ctx, req.(*LongRunningRecognizeRequest))
1988	}
1989	return interceptor(ctx, in, info, handler)
1990}
1991
// _Speech_StreamingRecognize_Handler adapts the raw server stream into the
// typed Speech_StreamingRecognizeServer and hands it to the implementation.
func _Speech_StreamingRecognize_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(SpeechServer).StreamingRecognize(&speechStreamingRecognizeServer{stream})
}

// Speech_StreamingRecognizeServer is the server-side view of the
// bidirectional StreamingRecognize stream: Send pushes responses, Recv pulls
// requests.
type Speech_StreamingRecognizeServer interface {
	Send(*StreamingRecognizeResponse) error
	Recv() (*StreamingRecognizeRequest, error)
	grpc.ServerStream
}

// speechStreamingRecognizeServer adds typed Send/Recv on top of a raw
// grpc.ServerStream.
type speechStreamingRecognizeServer struct {
	grpc.ServerStream
}
2005
2006func (x *speechStreamingRecognizeServer) Send(m *StreamingRecognizeResponse) error {
2007	return x.ServerStream.SendMsg(m)
2008}
2009
2010func (x *speechStreamingRecognizeServer) Recv() (*StreamingRecognizeRequest, error) {
2011	m := new(StreamingRecognizeRequest)
2012	if err := x.ServerStream.RecvMsg(m); err != nil {
2013		return nil, err
2014	}
2015	return m, nil
2016}
2017
// _Speech_serviceDesc describes the Speech service to the gRPC runtime: two
// unary methods and one bidirectional streaming method, plus the proto file
// the service was generated from.
var _Speech_serviceDesc = grpc.ServiceDesc{
	ServiceName: "google.cloud.speech.v1p1beta1.Speech",
	HandlerType: (*SpeechServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Recognize",
			Handler:    _Speech_Recognize_Handler,
		},
		{
			MethodName: "LongRunningRecognize",
			Handler:    _Speech_LongRunningRecognize_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "StreamingRecognize",
			Handler:       _Speech_StreamingRecognize_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "google/cloud/speech/v1p1beta1/cloud_speech.proto",
}

// init registers the gzipped file descriptor under the source proto path.
func init() {
	proto.RegisterFile("google/cloud/speech/v1p1beta1/cloud_speech.proto", fileDescriptor_cloud_speech_9c9e7aa236afd686)
}
2045
2046var fileDescriptor_cloud_speech_9c9e7aa236afd686 = []byte{
2047	// 2147 bytes of a gzipped FileDescriptorProto
2048	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xbf, 0x73, 0xdb, 0xc8,
2049	0xf5, 0x37, 0x48, 0x51, 0x12, 0x9f, 0x7e, 0x41, 0x2b, 0xdf, 0x09, 0x96, 0x75, 0x67, 0x1b, 0x9e,
2050	0x3b, 0xfb, 0xee, 0x7b, 0x43, 0xd9, 0xfa, 0xde, 0x5c, 0xce, 0xbe, 0xe4, 0x26, 0x14, 0x00, 0x99,
2051	0x98, 0x21, 0x09, 0xce, 0x92, 0xb2, 0xe3, 0x6b, 0x76, 0x56, 0xc0, 0x92, 0xc2, 0x84, 0x04, 0x10,
2052	0xfc, 0x70, 0x2c, 0x97, 0x69, 0x33, 0xa9, 0x32, 0x93, 0x2e, 0x55, 0xae, 0xce, 0x1f, 0x90, 0x26,
2053	0x69, 0xd2, 0xa4, 0x49, 0x91, 0x26, 0x65, 0x8a, 0xfc, 0x07, 0x69, 0x32, 0x93, 0x14, 0x99, 0xdd,
2054	0x05, 0x28, 0x88, 0x94, 0x2d, 0x5b, 0x93, 0x9b, 0x49, 0x87, 0xfd, 0xbc, 0x1f, 0xfb, 0xde, 0xdb,
2055	0xb7, 0x6f, 0xdf, 0x03, 0x3c, 0x18, 0x85, 0xe1, 0x68, 0xcc, 0xf6, 0xdc, 0x71, 0x98, 0x79, 0x7b,
2056	0x49, 0xc4, 0x98, 0x7b, 0xb2, 0xf7, 0xe2, 0x61, 0xf4, 0xf0, 0x98, 0xa5, 0xf4, 0xa1, 0x84, 0x89,
2057	0x84, 0x1b, 0x51, 0x1c, 0xa6, 0x21, 0xfa, 0x40, 0x4a, 0x34, 0x04, 0xa9, 0x91, 0x93, 0xa6, 0x12,
2058	0x3b, 0xbb, 0xb9, 0x42, 0x1a, 0xf9, 0x7b, 0x34, 0x08, 0xc2, 0x94, 0xa6, 0x7e, 0x18, 0x24, 0x52,
2059	0x78, 0xe7, 0x6e, 0x4e, 0x1d, 0x87, 0xc1, 0x28, 0xce, 0x82, 0xc0, 0x0f, 0x46, 0x7b, 0x61, 0xc4,
2060	0xe2, 0x73, 0x4c, 0x37, 0x72, 0x26, 0xb1, 0x3a, 0xce, 0x86, 0x7b, 0x34, 0x38, 0xcd, 0x49, 0x1f,
2061	0xce, 0x92, 0xbc, 0x4c, 0xca, 0xe6, 0xf4, 0x5b, 0xb3, 0xf4, 0xd4, 0x9f, 0xb0, 0x24, 0xa5, 0x93,
2062	0x28, 0x67, 0xd8, 0xce, 0x19, 0xe2, 0xc8, 0xdd, 0x4b, 0x52, 0x9a, 0x66, 0xf9, 0xa6, 0xfa, 0x6f,
2063	0x14, 0x50, 0x31, 0x73, 0xc3, 0x51, 0xe0, 0xbf, 0x62, 0x98, 0xfd, 0x24, 0x63, 0x49, 0x8a, 0x5a,
2064	0xb0, 0xe8, 0x86, 0xc1, 0xd0, 0x1f, 0x69, 0xca, 0x6d, 0xe5, 0xfe, 0xca, 0xfe, 0x83, 0xc6, 0x1b,
2065	0x9d, 0x6f, 0xe4, 0x0a, 0xb8, 0x41, 0x86, 0x90, 0xc3, 0xb9, 0x3c, 0xb2, 0xa0, 0x46, 0x33, 0xcf,
2066	0x0f, 0xb5, 0x8a, 0x50, 0xb4, 0xf7, 0xf6, 0x8a, 0x9a, 0x5c, 0x0c, 0x4b, 0x69, 0xfd, 0xb7, 0x0a,
2067	0xdc, 0x6c, 0x87, 0xc1, 0x08, 0xcb, 0xd8, 0xfd, 0xef, 0x1b, 0xfc, 0x7b, 0x05, 0x6e, 0xf4, 0xd3,
2068	0x98, 0xd1, 0xc9, 0x45, 0xe6, 0x0e, 0x41, 0x4d, 0x0a, 0x22, 0x39, 0x67, 0xf8, 0xa3, 0x4b, 0xf6,
2069	0x9b, 0xd5, 0x79, 0xe6, 0x41, 0xeb, 0x1a, 0xde, 0x98, 0x2a, 0x95, 0x10, 0xfa, 0x08, 0xd6, 0x84,
2070	0x39, 0x7c, 0x8f, 0x94, 0x05, 0xa9, 0x70, 0x6a, 0xb5, 0x75, 0x0d, 0xaf, 0x0a, 0xd8, 0x90, 0xe8,
2071	0xc1, 0x16, 0x6c, 0x9e, 0x99, 0x13, 0x4b, 0x1b, 0xf5, 0xdf, 0x29, 0xb0, 0xf3, 0xfa, 0xdd, 0xfe,
2072	0x8b, 0x11, 0xff, 0x04, 0xd4, 0xc4, 0x0f, 0x46, 0x63, 0x46, 0xb2, 0x34, 0x65, 0x31, 0x0d, 0x5c,
2073	0x26, 0xec, 0x5c, 0xc6, 0x1b, 0x12, 0x3f, 0x2a, 0x60, 0x74, 0x0f, 0x36, 0xfc, 0x20, 0x65, 0xb1,
2074	0x3f, 0x21, 0x31, 0x4b, 0xb2, 0x71, 0x9a, 0x68, 0x55, 0xc1, 0xb9, 0x9e, 0xc3, 0x58, 0xa2, 0xfa,
2075	0x3f, 0x96, 0x61, 0x73, 0xde, 0xe6, 0x6f, 0x60, 0x99, 0x05, 0x6e, 0xe8, 0xf9, 0x81, 0xb4, 0x7a,
2076	0x7d, 0xff, 0xeb, 0x77, 0xb5, 0xba, 0x21, 0x4e, 0xd9, 0xca, 0xb5, 0xe0, 0xa9, 0x3e, 0xf4, 0x29,
2077	0x6c, 0x26, 0x74, 0x12, 0x8d, 0x19, 0x89, 0x69, 0xca, 0xc8, 0x09, 0x8b, 0xd3, 0x57, 0xc2, 0x8d,
2078	0x1a, 0xde, 0x90, 0x04, 0x4c, 0x53, 0xd6, 0xe2, 0x30, 0x6a, 0xc0, 0x56, 0x7e, 0x2c, 0x27, 0x34,
2079	0x08, 0xd8, 0x98, 0xb8, 0x61, 0x16, 0xa4, 0xda, 0x92, 0xe0, 0xde, 0x94, 0x47, 0x23, 0x29, 0x06,
2080	0x27, 0xa0, 0x01, 0xdc, 0x63, 0x01, 0x3d, 0x1e, 0x33, 0x92, 0xb0, 0x88, 0x0a, 0xfd, 0xf1, 0x99,
2081	0x61, 0x24, 0x62, 0x71, 0xa1, 0x49, 0x5b, 0x15, 0xe1, 0xb8, 0x2b, 0xd9, 0xfb, 0x39, 0x77, 0xc9,
2082	0x8b, 0x1e, 0x8b, 0x73, 0xd5, 0xe8, 0x2e, 0xac, 0x8d, 0x69, 0x30, 0xca, 0xe8, 0x88, 0x11, 0x37,
2083	0xf4, 0x98, 0x08, 0x65, 0x1d, 0xaf, 0x16, 0xa0, 0x11, 0x7a, 0x0c, 0x7d, 0x1f, 0x76, 0xe8, 0x38,
2084	0x65, 0x71, 0x40, 0x53, 0xff, 0x05, 0x23, 0xe7, 0x04, 0x12, 0x0d, 0xdd, 0xae, 0xde, 0xaf, 0x63,
2085	0xad, 0xc4, 0xd1, 0x2e, 0x09, 0x27, 0xfc, 0x68, 0x27, 0xf4, 0x25, 0x29, 0xd1, 0x13, 0x6d, 0x41,
2086	0xc6, 0x64, 0x42, 0x5f, 0x36, 0x4b, 0x30, 0x67, 0x8d, 0xe2, 0x70, 0x48, 0x03, 0x3f, 0x3d, 0x25,
2087	0x43, 0x9f, 0x93, 0xb4, 0x9a, 0xcc, 0x82, 0x29, 0x7e, 0x28, 0x60, 0x74, 0x04, 0x1b, 0xf2, 0xa0,
2088	0x64, 0x5a, 0xbf, 0x4c, 0x13, 0x6d, 0xf1, 0x76, 0xf5, 0xfe, 0xca, 0xfe, 0x67, 0x97, 0x5d, 0x1e,
2089	0x01, 0x18, 0x52, 0x08, 0xaf, 0x27, 0xe5, 0x65, 0x82, 0xbe, 0x07, 0x5a, 0x1e, 0xe5, 0x9f, 0x86,
2090	0xb1, 0x47, 0x78, 0x05, 0x25, 0xe1, 0x70, 0x98, 0xb0, 0x34, 0xd1, 0x96, 0x85, 0x25, 0xef, 0x49,
2091	0xfa, 0xb3, 0x30, 0xf6, 0x06, 0xfe, 0x84, 0x39, 0x92, 0x88, 0x3e, 0x87, 0xf7, 0xcb, 0x82, 0x22,
2092	0xad, 0x3d, 0xc6, 0xd3, 0x78, 0x43, 0x88, 0x5d, 0x3f, 0x13, 0x33, 0xa6, 0x34, 0xf4, 0x43, 0xd8,
2093	0xcd, 0xa5, 0x68, 0x96, 0x86, 0x13, 0x9a, 0xfa, 0x2e, 0x89, 0xb2, 0xc0, 0x4d, 0x33, 0x51, 0xd8,
2094	0xb5, 0x15, 0x21, 0xbb, 0x23, 0x79, 0x9a, 0x05, 0x4b, 0xef, 0x8c, 0x83, 0x9f, 0x4d, 0x91, 0x16,
2095	0x11, 0xa3, 0x3f, 0x66, 0x31, 0xf1, 0x7c, 0x1a, 0xfb, 0xaf, 0xa4, 0xbc, 0x2a, 0xe4, 0x73, 0x97,
2096	0xfa, 0x92, 0xc1, 0x3c, 0xa3, 0xa3, 0xc7, 0x70, 0xa3, 0xc4, 0x3e, 0x55, 0x21, 0x53, 0x71, 0x53,
2097	0x1c, 0xd2, 0x76, 0x89, 0x21, 0xd7, 0x20, 0x13, 0xb2, 0x0b, 0xcb, 0x13, 0x96, 0x52, 0x8f, 0xa6,
2098	0x54, 0xab, 0x8b, 0xeb, 0xbf, 0xff, 0xf6, 0x17, 0xa9, 0x93, 0x4b, 0xe2, 0xa9, 0x0e, 0x74, 0x1d,
2099	0x6a, 0x93, 0xd0, 0x63, 0x63, 0x6d, 0x4d, 0xa4, 0xa0, 0x5c, 0xa0, 0x3b, 0xb0, 0x9a, 0x25, 0x8c,
2100	0xb0, 0xe0, 0x84, 0x5f, 0x7e, 0x4f, 0x5b, 0x17, 0x1e, 0xad, 0x64, 0x09, 0xb3, 0x72, 0x48, 0xff,
2101	0xb9, 0x02, 0x6b, 0xe7, 0x6e, 0x24, 0xd2, 0xe0, 0xba, 0xd5, 0x35, 0x1c, 0xd3, 0xee, 0x3e, 0x21,
2102	0x47, 0xdd, 0x7e, 0xcf, 0x32, 0xec, 0x43, 0xdb, 0x32, 0xd5, 0x6b, 0x68, 0x15, 0x96, 0xdb, 0x76,
2103	0xd7, 0x6a, 0xe2, 0x87, 0x5f, 0xa8, 0x0a, 0x5a, 0x86, 0x85, 0xc3, 0x76, 0xd3, 0x50, 0x2b, 0xa8,
2104	0x0e, 0xb5, 0xce, 0x51, 0xbb, 0xf9, 0x4c, 0xad, 0xa2, 0x25, 0xa8, 0x36, 0x3b, 0x58, 0x5d, 0x40,
2105	0x00, 0x8b, 0xcd, 0x0e, 0x26, 0xcf, 0x0e, 0xd4, 0x1a, 0x97, 0x73, 0x9e, 0x3c, 0x21, 0x4e, 0xef,
2106	0xa8, 0xaf, 0x2e, 0xa2, 0x1d, 0x78, 0xbf, 0xdf, 0xb3, 0xac, 0x1f, 0x91, 0x67, 0xf6, 0xa0, 0x45,
2107	0x5a, 0x56, 0xd3, 0xb4, 0x30, 0x39, 0x78, 0x3e, 0xb0, 0xd4, 0x25, 0xfd, 0x5f, 0x75, 0xd8, 0xba,
2108	0xc0, 0x51, 0x34, 0x01, 0x55, 0xd4, 0x27, 0xea, 0x8a, 0x50, 0xa7, 0xa7, 0x11, 0xcb, 0xeb, 0xcf,
2109	0xc1, 0xbb, 0x87, 0xad, 0x61, 0x9f, 0xa9, 0x1a, 0x9c, 0x46, 0x0c, 0x6f, 0xf8, 0xe7, 0x01, 0xf4,
2110	0x35, 0xec, 0xfa, 0x81, 0x97, 0x25, 0x69, 0x7c, 0x4a, 0x02, 0xea, 0xbb, 0x89, 0xb8, 0xad, 0x24,
2111	0x1c, 0x12, 0xf9, 0xb2, 0xf1, 0x7b, 0xbe, 0x86, 0xb5, 0x82, 0xa7, 0xcb, 0x59, 0xf8, 0x7d, 0x75,
2112	0x86, 0x22, 0x94, 0xe8, 0x05, 0x6c, 0x4d, 0x7c, 0x37, 0x0e, 0xa3, 0x93, 0x30, 0x60, 0xc4, 0xf3,
2113	0x93, 0x54, 0xd4, 0xe4, 0x05, 0x61, 0xb1, 0x75, 0x05, 0x8b, 0x3b, 0x53, 0x6d, 0x66, 0xae, 0x0c,
2114	0xa3, 0xc9, 0x1c, 0x86, 0x52, 0xd8, 0x0a, 0x63, 0x7f, 0xe4, 0x07, 0x74, 0x4c, 0x26, 0xcc, 0xf3,
2115	0xa9, 0x8c, 0x54, 0x4d, 0xec, 0x6b, 0x5e, 0x61, 0x5f, 0x27, 0xd7, 0xd6, 0xe1, 0xca, 0x44, 0xac,
2116	0x36, 0xc3, 0x59, 0x08, 0xbd, 0x82, 0xf7, 0x78, 0x31, 0x8d, 0x79, 0xf6, 0x10, 0x8f, 0xbd, 0xf0,
2117	0x5d, 0x26, 0xf7, 0x5d, 0x14, 0xfb, 0x1e, 0x5e, 0x61, 0x5f, 0x5c, 0xe8, 0x33, 0x85, 0x3a, 0xb1,
2118	0xf3, 0x56, 0x3c, 0x0f, 0xa2, 0xfd, 0x0b, 0xf6, 0x0e, 0xe8, 0x84, 0x89, 0xa7, 0xa0, 0x3e, 0x27,
2119	0xd3, 0xa5, 0x13, 0x86, 0x3e, 0x03, 0x74, 0x16, 0x25, 0x5e, 0xa3, 0x84, 0xb1, 0xcb, 0x42, 0x40,
2120	0x9d, 0xba, 0xe7, 0x4f, 0xe4, 0x0e, 0x77, 0x61, 0x2d, 0x3c, 0x1e, 0x66, 0x89, 0x4b, 0x53, 0xe6,
2121	0x11, 0xdf, 0x13, 0xd7, 0xb5, 0x8a, 0x57, 0xcf, 0x40, 0xdb, 0x43, 0xb7, 0x60, 0x45, 0xbe, 0x47,
2122	0x69, 0x18, 0xf9, 0xae, 0x06, 0x42, 0x17, 0x08, 0x68, 0xc0, 0x11, 0xfd, 0x8f, 0x0a, 0x6c, 0xcc,
2123	0xa4, 0x1d, 0xba, 0x0d, 0xbb, 0x76, 0x77, 0x60, 0xe1, 0xa6, 0x31, 0xb0, 0x9d, 0x2e, 0x19, 0x3c,
2124	0xef, 0x59, 0x33, 0x17, 0x6e, 0x1d, 0xc0, 0xb4, 0xfb, 0xc6, 0x51, 0xbf, 0x6f, 0x3b, 0x5d, 0x55,
2125	0x41, 0x2a, 0xac, 0xf6, 0xb0, 0xd5, 0xb7, 0xba, 0x83, 0x26, 0x17, 0x51, 0x2b, 0x9c, 0xa3, 0xd7,
2126	0x72, 0xba, 0x16, 0x31, 0x9a, 0xed, 0xb6, 0x5a, 0x45, 0x6b, 0x50, 0x7f, 0xea, 0xd8, 0x86, 0xd5,
2127	0x69, 0xda, 0x6d, 0x75, 0x01, 0xdd, 0x84, 0xed, 0x1e, 0x76, 0x0e, 0x2d, 0xa1, 0xa0, 0xd9, 0x6e,
2128	0x3f, 0x27, 0x3d, 0xec, 0x98, 0x47, 0x86, 0x65, 0xaa, 0x35, 0xae, 0x4d, 0xf0, 0x92, 0xbe, 0xd5,
2129	0xc4, 0x46, 0x4b, 0x5d, 0x44, 0x9b, 0xb0, 0x26, 0x11, 0xc3, 0xe9, 0x74, 0x9a, 0x5d, 0x53, 0x5d,
2130	0xe2, 0x0a, 0x4d, 0xdb, 0xc8, 0xf7, 0x5b, 0xd6, 0x3d, 0x40, 0xf3, 0xb9, 0x88, 0xee, 0xc2, 0xad,
2131	0x8e, 0x6d, 0x60, 0x47, 0x9a, 0x62, 0xda, 0xfd, 0x41, 0xb3, 0x6b, 0xcc, 0x3a, 0xb3, 0x06, 0x75,
2132	0x5e, 0x3b, 0x0e, 0x6d, 0xab, 0x6d, 0xaa, 0x0a, 0x2f, 0x0a, 0x1d, 0xdb, 0x94, 0xab, 0x0a, 0x5f,
2133	0x1d, 0x16, 0xb4, 0xaa, 0xde, 0x85, 0xcd, 0xb9, 0xcc, 0xe3, 0x9b, 0x38, 0xd8, 0x7e, 0x62, 0x77,
2134	0x9b, 0x6d, 0xd2, 0xb1, 0x4c, 0xbb, 0x79, 0x51, 0xc4, 0xea, 0x50, 0x6b, 0x1e, 0x99, 0xb6, 0xa3,
2135	0x2a, 0xfc, 0xf3, 0xa9, 0x6d, 0x5a, 0x8e, 0x5a, 0xd1, 0xbf, 0x55, 0x64, 0x59, 0x99, 0xcd, 0x9e,
2136	0x8f, 0xe0, 0x0e, 0xb6, 0x0c, 0x07, 0x8b, 0x5a, 0x67, 0x5a, 0x4f, 0xb9, 0xeb, 0x17, 0x1f, 0x43,
2137	0xbf, 0xd3, 0xc4, 0x03, 0xe1, 0x9e, 0xaa, 0xa0, 0x45, 0xa8, 0xf4, 0x8c, 0x72, 0xf0, 0x79, 0x55,
2138	0x54, 0xab, 0x68, 0x05, 0x96, 0x9e, 0x5a, 0x2d, 0xdb, 0x68, 0x5b, 0xea, 0x02, 0x2f, 0xa3, 0xce,
2139	0xa0, 0x65, 0x61, 0xe2, 0x1c, 0x0d, 0x4c, 0xc7, 0xc1, 0xb9, 0x7e, 0xb5, 0x86, 0xb6, 0x61, 0x4b,
2140	0x52, 0xec, 0x6e, 0x99, 0xb0, 0xa8, 0x7f, 0x02, 0x6b, 0xe7, 0x1e, 0x58, 0xa4, 0xc1, 0x52, 0x74,
2141	0x12, 0xd3, 0x84, 0x25, 0x9a, 0x22, 0x1a, 0x85, 0x62, 0xa9, 0xe3, 0xe9, 0xcc, 0x31, 0x6d, 0x9c,
2142	0xd1, 0x0e, 0x2c, 0x15, 0x5d, 0xaa, 0x92, 0x77, 0xa9, 0x05, 0x80, 0x10, 0x54, 0xb3, 0xd8, 0x17,
2143	0xed, 0x54, 0xbd, 0x75, 0x0d, 0xf3, 0xc5, 0xc1, 0x3a, 0xc8, 0x26, 0x96, 0x24, 0x61, 0x16, 0xbb,
2144	0x4c, 0x67, 0xd3, 0x8e, 0x8f, 0xf7, 0xd9, 0x49, 0x14, 0x06, 0x09, 0x43, 0x3d, 0x58, 0x2a, 0x1a,
2145	0xc5, 0x8a, 0x68, 0x11, 0xbe, 0x78, 0xab, 0x16, 0xa1, 0x64, 0x9c, 0xec, 0x28, 0x71, 0xa1, 0x46,
2146	0x8f, 0x60, 0xf7, 0xe2, 0x41, 0xe4, 0x3b, 0xdb, 0xf1, 0x4f, 0xca, 0xc5, 0x5b, 0x4e, 0x9f, 0x17,
2147	0xd9, 0x3a, 0x8d, 0x62, 0x96, 0x24, 0xbc, 0x17, 0x74, 0x8b, 0x10, 0xd6, 0x44, 0xeb, 0x24, 0xf0,
2148	0x9e, 0x84, 0xd1, 0x23, 0x80, 0x24, 0xa5, 0x71, 0x2a, 0xba, 0x9b, 0x7c, 0xc4, 0xd9, 0x29, 0x0c,
2149	0x2c, 0x86, 0xc7, 0xc6, 0xa0, 0x18, 0x1e, 0x71, 0x5d, 0x70, 0xf3, 0x35, 0x32, 0x41, 0x1d, 0xd3,
2150	0x24, 0x25, 0x59, 0xe4, 0xf1, 0x06, 0x54, 0x28, 0xa8, 0x5e, 0xaa, 0x60, 0x9d, 0xcb, 0x1c, 0x09,
2151	0x11, 0x0e, 0xea, 0x7f, 0xab, 0xcc, 0x4f, 0x15, 0xa5, 0xe8, 0xdd, 0x87, 0x1a, 0x8b, 0xe3, 0x30,
2152	0xce, 0x87, 0x0a, 0x54, 0x68, 0x8e, 0x23, 0xb7, 0xd1, 0x17, 0x63, 0x2b, 0x96, 0x0c, 0xa8, 0x3f,
2153	0x1b, 0xe7, 0xab, 0x4c, 0x4e, 0x33, 0xa1, 0x46, 0x19, 0x6c, 0xe6, 0x9d, 0x25, 0x7b, 0xc1, 0x82,
2154	0x54, 0x96, 0x56, 0xf9, 0xee, 0xd9, 0xef, 0xa8, 0xfe, 0xcc, 0xa9, 0xfc, 0x84, 0x2d, 0xae, 0x51,
2155	0x3e, 0xd8, 0xc9, 0x79, 0x40, 0x6f, 0xc3, 0xc6, 0x0c, 0x0f, 0xda, 0x05, 0x8d, 0xb7, 0x19, 0x46,
2156	0x8b, 0x58, 0x4f, 0xad, 0xee, 0x60, 0xe6, 0x4a, 0xdf, 0x84, 0x6d, 0xab, 0x6b, 0x12, 0xe7, 0x90,
2157	0xf4, 0xed, 0xee, 0x93, 0xb6, 0x45, 0x8e, 0x06, 0xbc, 0x12, 0x77, 0x0d, 0x4b, 0x55, 0xf4, 0x7f,
2158	0xbf, 0x66, 0x70, 0x93, 0xce, 0x22, 0x02, 0xab, 0xe7, 0xfa, 0x71, 0x45, 0x44, 0xef, 0xab, 0x77,
2159	0xcd, 0xd2, 0x52, 0xf3, 0x8e, 0xcf, 0x29, 0x44, 0x37, 0x60, 0xd9, 0x4f, 0xc8, 0x90, 0x97, 0xbf,
2160	0x7c, 0x8e, 0x5b, 0xf2, 0x93, 0x43, 0xbe, 0x44, 0xbb, 0xc0, 0x13, 0xea, 0xd8, 0x1f, 0xfb, 0xe9,
2161	0xa9, 0x48, 0x9e, 0x0a, 0x3e, 0x03, 0xf8, 0x33, 0x54, 0x0c, 0x44, 0x29, 0x1d, 0x89, 0x77, 0xbf,
2162	0x86, 0x21, 0x87, 0x06, 0x74, 0x34, 0x3f, 0xb1, 0x2c, 0xce, 0x4f, 0x2c, 0xfa, 0x1f, 0x14, 0xd8,
2163	0x7e, 0xcd, 0x9d, 0xfa, 0xee, 0x7d, 0x9f, 0x71, 0xa1, 0x72, 0xb9, 0x0b, 0xb5, 0x0b, 0x5c, 0xf8,
2164	0xb5, 0x02, 0xbb, 0x6f, 0xda, 0x14, 0x7d, 0x08, 0x90, 0xc6, 0x34, 0x48, 0xdc, 0xd8, 0x8f, 0xe4,
2165	0x5d, 0xaf, 0xe3, 0x12, 0xc2, 0xe9, 0xa5, 0x29, 0xa4, 0x22, 0x02, 0x5d, 0x42, 0xd0, 0x0f, 0xa0,
2166	0xc6, 0x47, 0x15, 0x3e, 0x3d, 0xf3, 0x00, 0xdc, 0xbb, 0x24, 0x00, 0x7c, 0x72, 0xb1, 0x83, 0x61,
2167	0x88, 0xa5, 0x94, 0xfe, 0x67, 0x05, 0x96, 0x0b, 0x0c, 0x7d, 0x79, 0xae, 0xa4, 0xc8, 0x7b, 0x7b,
2168	0x63, 0xae, 0x22, 0x98, 0xf9, 0xff, 0xaa, 0x72, 0x45, 0xf9, 0x9c, 0x8f, 0xe3, 0x5e, 0xb9, 0x14,
2169	0xbd, 0x41, 0x6e, 0x89, 0x05, 0x62, 0xe8, 0x42, 0x08, 0x16, 0xb8, 0x15, 0xf9, 0xb4, 0x2a, 0xbe,
2170	0x67, 0xfc, 0x5d, 0x98, 0xf3, 0xf7, 0x16, 0xac, 0x14, 0xf3, 0x4d, 0x29, 0xb3, 0x72, 0x68, 0x40,
2171	0x47, 0xfb, 0x7f, 0xad, 0xc2, 0xa2, 0x8c, 0x38, 0xfa, 0x95, 0x02, 0xf5, 0xe9, 0x1d, 0x46, 0x6f,
2172	0xf9, 0xff, 0x67, 0xfa, 0x6b, 0x67, 0xe7, 0xc1, 0xdb, 0x0b, 0xc8, 0xf2, 0xa0, 0x7f, 0xfc, 0xb3,
2173	0xbf, 0xfc, 0xfd, 0x97, 0x95, 0xdb, 0xfa, 0xcd, 0xd2, 0xff, 0x47, 0x29, 0xf6, 0x38, 0x2e, 0x98,
2174	0x1f, 0x2b, 0x9f, 0xa2, 0x6f, 0x15, 0xb8, 0x7e, 0xd1, 0x3b, 0x80, 0x1e, 0x5f, 0xb2, 0xe5, 0x1b,
2175	0x7e, 0x9c, 0xed, 0x7c, 0x50, 0xc8, 0x96, 0xfe, 0x4c, 0x36, 0x9c, 0xe2, 0xcf, 0xa4, 0xfe, 0x50,
2176	0xd8, 0xf6, 0x7f, 0xfa, 0xc7, 0xf3, 0xb6, 0x95, 0x04, 0xce, 0x99, 0xf9, 0x0b, 0x05, 0xd0, 0x7c,
2177	0x31, 0x44, 0x5f, 0x5e, 0xa1, 0x7e, 0x4a, 0x13, 0x1f, 0x5d, 0xb9, 0xf2, 0xde, 0x57, 0x1e, 0x28,
2178	0x07, 0xaf, 0xe0, 0x8e, 0x1b, 0x4e, 0xde, 0xac, 0xe3, 0x60, 0x45, 0x1e, 0x7e, 0x8f, 0xa7, 0x5d,
2179	0x4f, 0xf9, 0xc6, 0xc8, 0xb9, 0x47, 0x21, 0xbf, 0x97, 0x8d, 0x30, 0x1e, 0xed, 0x8d, 0x58, 0x20,
2180	0x92, 0x72, 0x4f, 0x92, 0x68, 0xe4, 0x27, 0xaf, 0xf9, 0x79, 0xfc, 0x95, 0x04, 0xfe, 0xa9, 0x28,
2181	0xc7, 0x8b, 0x42, 0xe4, 0xff, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x9d, 0xbc, 0xd0, 0x68, 0x6e,
2182	0x16, 0x00, 0x00,
2183}
2184