1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google/cloud/speech/v1p1beta1/cloud_speech.proto
3
4package speech // import "google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1"
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9import _ "github.com/golang/protobuf/ptypes/any"
10import duration "github.com/golang/protobuf/ptypes/duration"
11import _ "github.com/golang/protobuf/ptypes/empty"
12import timestamp "github.com/golang/protobuf/ptypes/timestamp"
13import _ "google.golang.org/genproto/googleapis/api/annotations"
14import longrunning "google.golang.org/genproto/googleapis/longrunning"
15import status "google.golang.org/genproto/googleapis/rpc/status"
16
17import (
18	context "golang.org/x/net/context"
19	grpc "google.golang.org/grpc"
20)
21
// Reference imports to suppress errors if they are not otherwise used.
// They keep the imports above referenced even when no generated code in this
// file happens to use them directly.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
32
// The encoding of the audio data sent in the request.
//
// All encodings support only 1 channel (mono) audio.
//
// For best results, the audio source should be captured and transmitted using
// a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
// recognition can be reduced if lossy codecs are used to capture or transmit
// audio, particularly if background noise is present. Lossy codecs include
// `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, and `SPEEX_WITH_HEADER_BYTE`.
//
// The `FLAC` and `WAV` audio file formats include a header that describes the
// included audio content. You can request recognition for `WAV` files that
// contain either `LINEAR16` or `MULAW` encoded audio.
// If you send `FLAC` or `WAV` audio file format in
// your request, you do not need to specify an `AudioEncoding`; the audio
// encoding format is determined from the file header. If you specify
// an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
// encoding configuration must match the encoding described in the audio
// header; otherwise the request returns an
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error code.
type RecognitionConfig_AudioEncoding int32

const (
	// Not specified.
	RecognitionConfig_ENCODING_UNSPECIFIED RecognitionConfig_AudioEncoding = 0
	// Uncompressed 16-bit signed little-endian samples (Linear PCM).
	RecognitionConfig_LINEAR16 RecognitionConfig_AudioEncoding = 1
	// `FLAC` (Free Lossless Audio
	// Codec) is the recommended encoding because it is
	// lossless--therefore recognition is not compromised--and
	// requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
	// encoding supports 16-bit and 24-bit samples, however, not all fields in
	// `STREAMINFO` are supported.
	RecognitionConfig_FLAC RecognitionConfig_AudioEncoding = 2
	// 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
	RecognitionConfig_MULAW RecognitionConfig_AudioEncoding = 3
	// Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
	RecognitionConfig_AMR RecognitionConfig_AudioEncoding = 4
	// Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
	RecognitionConfig_AMR_WB RecognitionConfig_AudioEncoding = 5
	// Opus encoded audio frames in Ogg container
	// ([OggOpus](https://wiki.xiph.org/OggOpus)).
	// `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
	RecognitionConfig_OGG_OPUS RecognitionConfig_AudioEncoding = 6
	// Although the use of lossy encodings is not recommended, if a very low
	// bitrate encoding is required, `OGG_OPUS` is highly preferred over
	// Speex encoding. The [Speex](https://speex.org/) encoding supported by
	// Cloud Speech API has a header byte in each block, as in MIME type
	// `audio/x-speex-with-header-byte`.
	// It is a variant of the RTP Speex encoding defined in
	// [RFC 5574](https://tools.ietf.org/html/rfc5574).
	// The stream is a sequence of blocks, one block per RTP packet. Each block
	// starts with a byte containing the length of the block, in bytes, followed
	// by one or more frames of Speex data, padded to an integral number of
	// bytes (octets) as specified in RFC 5574. In other words, each RTP header
	// is replaced with a single byte containing the block length. Only Speex
	// wideband is supported. `sample_rate_hertz` must be 16000.
	RecognitionConfig_SPEEX_WITH_HEADER_BYTE RecognitionConfig_AudioEncoding = 7
)

// RecognitionConfig_AudioEncoding_name maps each enum value to its proto name.
var RecognitionConfig_AudioEncoding_name = map[int32]string{
	0: "ENCODING_UNSPECIFIED",
	1: "LINEAR16",
	2: "FLAC",
	3: "MULAW",
	4: "AMR",
	5: "AMR_WB",
	6: "OGG_OPUS",
	7: "SPEEX_WITH_HEADER_BYTE",
}

// RecognitionConfig_AudioEncoding_value maps each proto name back to its
// enum value.
var RecognitionConfig_AudioEncoding_value = map[string]int32{
	"ENCODING_UNSPECIFIED":   0,
	"LINEAR16":               1,
	"FLAC":                   2,
	"MULAW":                  3,
	"AMR":                    4,
	"AMR_WB":                 5,
	"OGG_OPUS":               6,
	"SPEEX_WITH_HEADER_BYTE": 7,
}

// String returns the proto name of the enum value.
func (x RecognitionConfig_AudioEncoding) String() string {
	return proto.EnumName(RecognitionConfig_AudioEncoding_name, int32(x))
}

// EnumDescriptor returns the file descriptor bytes and the index path
// identifying this enum within that descriptor.
func (RecognitionConfig_AudioEncoding) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{4, 0}
}
120
// Use case categories that the audio recognition request can be described
// by.
type RecognitionMetadata_InteractionType int32

const (
	// Use case is either unknown or is something other than one of the other
	// values below.
	RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED RecognitionMetadata_InteractionType = 0
	// Multiple people in a conversation or discussion. For example in a
	// meeting with two or more people actively participating. Typically
	// all the primary people speaking would be in the same room (if not,
	// see PHONE_CALL)
	RecognitionMetadata_DISCUSSION RecognitionMetadata_InteractionType = 1
	// One or more persons lecturing or presenting to others, mostly
	// uninterrupted.
	RecognitionMetadata_PRESENTATION RecognitionMetadata_InteractionType = 2
	// A phone-call or video-conference in which two or more people, who are
	// not in the same room, are actively participating.
	RecognitionMetadata_PHONE_CALL RecognitionMetadata_InteractionType = 3
	// A recorded message intended for another person to listen to.
	RecognitionMetadata_VOICEMAIL RecognitionMetadata_InteractionType = 4
	// Professionally produced audio (e.g. TV Show, Podcast).
	RecognitionMetadata_PROFESSIONALLY_PRODUCED RecognitionMetadata_InteractionType = 5
	// Transcribe spoken questions and queries into text.
	RecognitionMetadata_VOICE_SEARCH RecognitionMetadata_InteractionType = 6
	// Transcribe voice commands, such as for controlling a device.
	RecognitionMetadata_VOICE_COMMAND RecognitionMetadata_InteractionType = 7
	// Transcribe speech to text to create a written document, such as a
	// text-message, email or report.
	RecognitionMetadata_DICTATION RecognitionMetadata_InteractionType = 8
)

// RecognitionMetadata_InteractionType_name maps each enum value to its proto
// name.
var RecognitionMetadata_InteractionType_name = map[int32]string{
	0: "INTERACTION_TYPE_UNSPECIFIED",
	1: "DISCUSSION",
	2: "PRESENTATION",
	3: "PHONE_CALL",
	4: "VOICEMAIL",
	5: "PROFESSIONALLY_PRODUCED",
	6: "VOICE_SEARCH",
	7: "VOICE_COMMAND",
	8: "DICTATION",
}

// RecognitionMetadata_InteractionType_value maps each proto name back to its
// enum value.
var RecognitionMetadata_InteractionType_value = map[string]int32{
	"INTERACTION_TYPE_UNSPECIFIED": 0,
	"DISCUSSION":                   1,
	"PRESENTATION":                 2,
	"PHONE_CALL":                   3,
	"VOICEMAIL":                    4,
	"PROFESSIONALLY_PRODUCED":      5,
	"VOICE_SEARCH":                 6,
	"VOICE_COMMAND":                7,
	"DICTATION":                    8,
}

// String returns the proto name of the enum value.
func (x RecognitionMetadata_InteractionType) String() string {
	return proto.EnumName(RecognitionMetadata_InteractionType_name, int32(x))
}

// EnumDescriptor returns the file descriptor bytes and the index path
// identifying this enum within that descriptor.
func (RecognitionMetadata_InteractionType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{5, 0}
}
182
// Enumerates the types of capture settings describing an audio file.
type RecognitionMetadata_MicrophoneDistance int32

const (
	// Audio type is not known.
	RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED RecognitionMetadata_MicrophoneDistance = 0
	// The audio was captured from a closely placed microphone. E.g. phone,
	// dictaphone, or handheld microphone. Generally if the speaker is within
	// 1 meter of the microphone.
	RecognitionMetadata_NEARFIELD RecognitionMetadata_MicrophoneDistance = 1
	// The speaker is within 3 meters of the microphone.
	RecognitionMetadata_MIDFIELD RecognitionMetadata_MicrophoneDistance = 2
	// The speaker is more than 3 meters away from the microphone.
	RecognitionMetadata_FARFIELD RecognitionMetadata_MicrophoneDistance = 3
)

// RecognitionMetadata_MicrophoneDistance_name maps each enum value to its
// proto name.
var RecognitionMetadata_MicrophoneDistance_name = map[int32]string{
	0: "MICROPHONE_DISTANCE_UNSPECIFIED",
	1: "NEARFIELD",
	2: "MIDFIELD",
	3: "FARFIELD",
}

// RecognitionMetadata_MicrophoneDistance_value maps each proto name back to
// its enum value.
var RecognitionMetadata_MicrophoneDistance_value = map[string]int32{
	"MICROPHONE_DISTANCE_UNSPECIFIED": 0,
	"NEARFIELD":                       1,
	"MIDFIELD":                        2,
	"FARFIELD":                        3,
}

// String returns the proto name of the enum value.
func (x RecognitionMetadata_MicrophoneDistance) String() string {
	return proto.EnumName(RecognitionMetadata_MicrophoneDistance_name, int32(x))
}

// EnumDescriptor returns the file descriptor bytes and the index path
// identifying this enum within that descriptor.
func (RecognitionMetadata_MicrophoneDistance) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{5, 1}
}
218
// The original media the speech was recorded on.
type RecognitionMetadata_OriginalMediaType int32

const (
	// Unknown original media type.
	RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED RecognitionMetadata_OriginalMediaType = 0
	// The speech data is an audio recording.
	RecognitionMetadata_AUDIO RecognitionMetadata_OriginalMediaType = 1
	// The speech data was originally recorded on a video.
	RecognitionMetadata_VIDEO RecognitionMetadata_OriginalMediaType = 2
)

// RecognitionMetadata_OriginalMediaType_name maps each enum value to its
// proto name.
var RecognitionMetadata_OriginalMediaType_name = map[int32]string{
	0: "ORIGINAL_MEDIA_TYPE_UNSPECIFIED",
	1: "AUDIO",
	2: "VIDEO",
}

// RecognitionMetadata_OriginalMediaType_value maps each proto name back to
// its enum value.
var RecognitionMetadata_OriginalMediaType_value = map[string]int32{
	"ORIGINAL_MEDIA_TYPE_UNSPECIFIED": 0,
	"AUDIO":                           1,
	"VIDEO":                           2,
}

// String returns the proto name of the enum value.
func (x RecognitionMetadata_OriginalMediaType) String() string {
	return proto.EnumName(RecognitionMetadata_OriginalMediaType_name, int32(x))
}

// EnumDescriptor returns the file descriptor bytes and the index path
// identifying this enum within that descriptor.
func (RecognitionMetadata_OriginalMediaType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{5, 2}
}
248
// The type of device the speech was recorded with.
type RecognitionMetadata_RecordingDeviceType int32

const (
	// The recording device is unknown.
	RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED RecognitionMetadata_RecordingDeviceType = 0
	// Speech was recorded on a smartphone.
	RecognitionMetadata_SMARTPHONE RecognitionMetadata_RecordingDeviceType = 1
	// Speech was recorded using a personal computer or tablet.
	RecognitionMetadata_PC RecognitionMetadata_RecordingDeviceType = 2
	// Speech was recorded over a phone line.
	RecognitionMetadata_PHONE_LINE RecognitionMetadata_RecordingDeviceType = 3
	// Speech was recorded in a vehicle.
	RecognitionMetadata_VEHICLE RecognitionMetadata_RecordingDeviceType = 4
	// Speech was recorded outdoors.
	RecognitionMetadata_OTHER_OUTDOOR_DEVICE RecognitionMetadata_RecordingDeviceType = 5
	// Speech was recorded indoors.
	RecognitionMetadata_OTHER_INDOOR_DEVICE RecognitionMetadata_RecordingDeviceType = 6
)

// RecognitionMetadata_RecordingDeviceType_name maps each enum value to its
// proto name.
var RecognitionMetadata_RecordingDeviceType_name = map[int32]string{
	0: "RECORDING_DEVICE_TYPE_UNSPECIFIED",
	1: "SMARTPHONE",
	2: "PC",
	3: "PHONE_LINE",
	4: "VEHICLE",
	5: "OTHER_OUTDOOR_DEVICE",
	6: "OTHER_INDOOR_DEVICE",
}

// RecognitionMetadata_RecordingDeviceType_value maps each proto name back to
// its enum value.
var RecognitionMetadata_RecordingDeviceType_value = map[string]int32{
	"RECORDING_DEVICE_TYPE_UNSPECIFIED": 0,
	"SMARTPHONE":                        1,
	"PC":                                2,
	"PHONE_LINE":                        3,
	"VEHICLE":                           4,
	"OTHER_OUTDOOR_DEVICE":              5,
	"OTHER_INDOOR_DEVICE":               6,
}

// String returns the proto name of the enum value.
func (x RecognitionMetadata_RecordingDeviceType) String() string {
	return proto.EnumName(RecognitionMetadata_RecordingDeviceType_name, int32(x))
}

// EnumDescriptor returns the file descriptor bytes and the index path
// identifying this enum within that descriptor.
func (RecognitionMetadata_RecordingDeviceType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{5, 3}
}
294
// Indicates the type of speech event.
type StreamingRecognizeResponse_SpeechEventType int32

const (
	// No speech event specified.
	StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED StreamingRecognizeResponse_SpeechEventType = 0
	// This event indicates that the server has detected the end of the user's
	// speech utterance and expects no additional speech. Therefore, the server
	// will not process additional audio (although it may subsequently return
	// additional results). The client should stop sending additional audio
	// data, half-close the gRPC connection, and wait for any additional results
	// until the server closes the gRPC connection. This event is only sent if
	// `single_utterance` was set to `true`, and is not used otherwise.
	StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE StreamingRecognizeResponse_SpeechEventType = 1
)

// StreamingRecognizeResponse_SpeechEventType_name maps each enum value to its
// proto name.
var StreamingRecognizeResponse_SpeechEventType_name = map[int32]string{
	0: "SPEECH_EVENT_UNSPECIFIED",
	1: "END_OF_SINGLE_UTTERANCE",
}

// StreamingRecognizeResponse_SpeechEventType_value maps each proto name back
// to its enum value.
var StreamingRecognizeResponse_SpeechEventType_value = map[string]int32{
	"SPEECH_EVENT_UNSPECIFIED": 0,
	"END_OF_SINGLE_UTTERANCE":  1,
}

// String returns the proto name of the enum value.
func (x StreamingRecognizeResponse_SpeechEventType) String() string {
	return proto.EnumName(StreamingRecognizeResponse_SpeechEventType_name, int32(x))
}

// EnumDescriptor returns the file descriptor bytes and the index path
// identifying this enum within that descriptor.
func (StreamingRecognizeResponse_SpeechEventType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{11, 0}
}
326
// The top-level message sent by the client for the `Recognize` method.
type RecognizeRequest struct {
	// *Required* Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// *Required* The audio data to be recognized.
	Audio                *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

// Reset restores the message to its zero value.
func (m *RecognizeRequest) Reset()         { *m = RecognizeRequest{} }

// String renders the message using the proto compact text format.
func (m *RecognizeRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks RecognizeRequest as implementing proto.Message.
func (*RecognizeRequest) ProtoMessage()    {}

// Descriptor returns the file descriptor bytes and the index path
// identifying this message within that descriptor.
func (*RecognizeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{0}
}

// XXX_Unmarshal implements proto unmarshaling for RecognizeRequest.
func (m *RecognizeRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognizeRequest.Unmarshal(m, b)
}

// XXX_Marshal implements proto marshaling for RecognizeRequest.
func (m *RecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognizeRequest.Marshal(b, m, deterministic)
}

// XXX_Merge merges the fields of src into dst.
func (dst *RecognizeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognizeRequest.Merge(dst, src)
}

// XXX_Size returns the size of the wire-format encoding of m.
func (m *RecognizeRequest) XXX_Size() int {
	return xxx_messageInfo_RecognizeRequest.Size(m)
}

// XXX_DiscardUnknown discards any unrecognized fields retained by m.
func (m *RecognizeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognizeRequest.DiscardUnknown(m)
}

// xxx_messageInfo_RecognizeRequest caches reflection info used by the
// generated marshaling helpers above.
var xxx_messageInfo_RecognizeRequest proto.InternalMessageInfo
362
363func (m *RecognizeRequest) GetConfig() *RecognitionConfig {
364	if m != nil {
365		return m.Config
366	}
367	return nil
368}
369
370func (m *RecognizeRequest) GetAudio() *RecognitionAudio {
371	if m != nil {
372		return m.Audio
373	}
374	return nil
375}
376
// The top-level message sent by the client for the `LongRunningRecognize`
// method.
type LongRunningRecognizeRequest struct {
	// *Required* Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// *Required* The audio data to be recognized.
	Audio                *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

// Reset restores the message to its zero value.
func (m *LongRunningRecognizeRequest) Reset()         { *m = LongRunningRecognizeRequest{} }

// String renders the message using the proto compact text format.
func (m *LongRunningRecognizeRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks LongRunningRecognizeRequest as implementing proto.Message.
func (*LongRunningRecognizeRequest) ProtoMessage()    {}

// Descriptor returns the file descriptor bytes and the index path
// identifying this message within that descriptor.
func (*LongRunningRecognizeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{1}
}

// XXX_Unmarshal implements proto unmarshaling for LongRunningRecognizeRequest.
func (m *LongRunningRecognizeRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LongRunningRecognizeRequest.Unmarshal(m, b)
}

// XXX_Marshal implements proto marshaling for LongRunningRecognizeRequest.
func (m *LongRunningRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LongRunningRecognizeRequest.Marshal(b, m, deterministic)
}

// XXX_Merge merges the fields of src into dst.
func (dst *LongRunningRecognizeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LongRunningRecognizeRequest.Merge(dst, src)
}

// XXX_Size returns the size of the wire-format encoding of m.
func (m *LongRunningRecognizeRequest) XXX_Size() int {
	return xxx_messageInfo_LongRunningRecognizeRequest.Size(m)
}

// XXX_DiscardUnknown discards any unrecognized fields retained by m.
func (m *LongRunningRecognizeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_LongRunningRecognizeRequest.DiscardUnknown(m)
}

// xxx_messageInfo_LongRunningRecognizeRequest caches reflection info used by
// the generated marshaling helpers above.
var xxx_messageInfo_LongRunningRecognizeRequest proto.InternalMessageInfo
413
414func (m *LongRunningRecognizeRequest) GetConfig() *RecognitionConfig {
415	if m != nil {
416		return m.Config
417	}
418	return nil
419}
420
421func (m *LongRunningRecognizeRequest) GetAudio() *RecognitionAudio {
422	if m != nil {
423		return m.Audio
424	}
425	return nil
426}
427
// The top-level message sent by the client for the `StreamingRecognize` method.
// Multiple `StreamingRecognizeRequest` messages are sent. The first message
// must contain a `streaming_config` message and must not contain `audio` data.
// All subsequent messages must contain `audio` data and must not contain a
// `streaming_config` message.
type StreamingRecognizeRequest struct {
	// The streaming request, which is either a streaming config or audio content.
	//
	// Types that are valid to be assigned to StreamingRequest:
	//	*StreamingRecognizeRequest_StreamingConfig
	//	*StreamingRecognizeRequest_AudioContent
	StreamingRequest     isStreamingRecognizeRequest_StreamingRequest `protobuf_oneof:"streaming_request"`
	XXX_NoUnkeyedLiteral struct{}                                     `json:"-"`
	XXX_unrecognized     []byte                                       `json:"-"`
	XXX_sizecache        int32                                        `json:"-"`
}

// Reset restores the message to its zero value.
func (m *StreamingRecognizeRequest) Reset()         { *m = StreamingRecognizeRequest{} }

// String renders the message using the proto compact text format.
func (m *StreamingRecognizeRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks StreamingRecognizeRequest as implementing proto.Message.
func (*StreamingRecognizeRequest) ProtoMessage()    {}

// Descriptor returns the file descriptor bytes and the index path
// identifying this message within that descriptor.
func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{2}
}

// XXX_Unmarshal implements proto unmarshaling for StreamingRecognizeRequest.
func (m *StreamingRecognizeRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognizeRequest.Unmarshal(m, b)
}

// XXX_Marshal implements proto marshaling for StreamingRecognizeRequest.
func (m *StreamingRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognizeRequest.Marshal(b, m, deterministic)
}

// XXX_Merge merges the fields of src into dst.
func (dst *StreamingRecognizeRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognizeRequest.Merge(dst, src)
}

// XXX_Size returns the size of the wire-format encoding of m.
func (m *StreamingRecognizeRequest) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognizeRequest.Size(m)
}

// XXX_DiscardUnknown discards any unrecognized fields retained by m.
func (m *StreamingRecognizeRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognizeRequest.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognizeRequest caches reflection info used by the
// generated marshaling helpers above.
var xxx_messageInfo_StreamingRecognizeRequest proto.InternalMessageInfo
468
// isStreamingRecognizeRequest_StreamingRequest is the interface implemented
// by the wrapper types that may populate the streaming_request oneof.
type isStreamingRecognizeRequest_StreamingRequest interface {
	isStreamingRecognizeRequest_StreamingRequest()
}

// StreamingRecognizeRequest_StreamingConfig wraps a streaming configuration
// as the streaming_request oneof value (field 1).
type StreamingRecognizeRequest_StreamingConfig struct {
	StreamingConfig *StreamingRecognitionConfig `protobuf:"bytes,1,opt,name=streaming_config,json=streamingConfig,proto3,oneof"`
}

// StreamingRecognizeRequest_AudioContent wraps raw audio bytes as the
// streaming_request oneof value (field 2).
type StreamingRecognizeRequest_AudioContent struct {
	AudioContent []byte `protobuf:"bytes,2,opt,name=audio_content,json=audioContent,proto3,oneof"`
}

func (*StreamingRecognizeRequest_StreamingConfig) isStreamingRecognizeRequest_StreamingRequest() {}

func (*StreamingRecognizeRequest_AudioContent) isStreamingRecognizeRequest_StreamingRequest() {}
484
485func (m *StreamingRecognizeRequest) GetStreamingRequest() isStreamingRecognizeRequest_StreamingRequest {
486	if m != nil {
487		return m.StreamingRequest
488	}
489	return nil
490}
491
492func (m *StreamingRecognizeRequest) GetStreamingConfig() *StreamingRecognitionConfig {
493	if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_StreamingConfig); ok {
494		return x.StreamingConfig
495	}
496	return nil
497}
498
499func (m *StreamingRecognizeRequest) GetAudioContent() []byte {
500	if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_AudioContent); ok {
501		return x.AudioContent
502	}
503	return nil
504}
505
// XXX_OneofFuncs is for the internal use of the proto package.
// It returns the marshaler, unmarshaler, and sizer for the streaming_request
// oneof, plus the set of wrapper types that may populate it.
func (*StreamingRecognizeRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _StreamingRecognizeRequest_OneofMarshaler, _StreamingRecognizeRequest_OneofUnmarshaler, _StreamingRecognizeRequest_OneofSizer, []interface{}{
		(*StreamingRecognizeRequest_StreamingConfig)(nil),
		(*StreamingRecognizeRequest_AudioContent)(nil),
	}
}
513
// _StreamingRecognizeRequest_OneofMarshaler encodes the active
// streaming_request oneof case of msg into the buffer b in proto wire format.
func _StreamingRecognizeRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*StreamingRecognizeRequest)
	// streaming_request
	switch x := m.StreamingRequest.(type) {
	case *StreamingRecognizeRequest_StreamingConfig:
		// Field 1, length-delimited: embedded StreamingRecognitionConfig.
		b.EncodeVarint(1<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.StreamingConfig); err != nil {
			return err
		}
	case *StreamingRecognizeRequest_AudioContent:
		// Field 2, length-delimited: raw audio bytes.
		b.EncodeVarint(2<<3 | proto.WireBytes)
		b.EncodeRawBytes(x.AudioContent)
	case nil:
		// Oneof not set; emit nothing.
	default:
		return fmt.Errorf("StreamingRecognizeRequest.StreamingRequest has unexpected type %T", x)
	}
	return nil
}
532
// _StreamingRecognizeRequest_OneofUnmarshaler decodes one streaming_request
// oneof field from b, identified by tag, into msg. It reports whether the tag
// belonged to this oneof, along with any decode error.
func _StreamingRecognizeRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*StreamingRecognizeRequest)
	switch tag {
	case 1: // streaming_request.streaming_config
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(StreamingRecognitionConfig)
		err := b.DecodeMessage(msg)
		m.StreamingRequest = &StreamingRecognizeRequest_StreamingConfig{msg}
		return true, err
	case 2: // streaming_request.audio_content
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeRawBytes(true)
		m.StreamingRequest = &StreamingRecognizeRequest_AudioContent{x}
		return true, err
	default:
		// Not a streaming_request field; let the caller handle it.
		return false, nil
	}
}
555
// _StreamingRecognizeRequest_OneofSizer returns the encoded size in bytes of
// the active streaming_request oneof case of msg.
func _StreamingRecognizeRequest_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*StreamingRecognizeRequest)
	// streaming_request
	switch x := m.StreamingRequest.(type) {
	case *StreamingRecognizeRequest_StreamingConfig:
		s := proto.Size(x.StreamingConfig)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case *StreamingRecognizeRequest_AudioContent:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.AudioContent)))
		n += len(x.AudioContent)
	case nil:
		// Oneof not set; contributes no bytes.
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
575
// Provides information to the recognizer that specifies how to process the
// request.
type StreamingRecognitionConfig struct {
	// *Required* Provides information to the recognizer that specifies how to
	// process the request.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// *Optional* If `false` or omitted, the recognizer will perform continuous
	// recognition (continuing to wait for and process audio even if the user
	// pauses speaking) until the client closes the input stream (gRPC API) or
	// until the maximum time limit has been reached. May return multiple
	// `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
	//
	// If `true`, the recognizer will detect a single spoken utterance. When it
	// detects that the user has paused or stopped speaking, it will return an
	// `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
	// more than one `StreamingRecognitionResult` with the `is_final` flag set to
	// `true`.
	SingleUtterance bool `protobuf:"varint,2,opt,name=single_utterance,json=singleUtterance,proto3" json:"single_utterance,omitempty"`
	// *Optional* If `true`, interim results (tentative hypotheses) may be
	// returned as they become available (these interim results are indicated with
	// the `is_final=false` flag).
	// If `false` or omitted, only `is_final=true` result(s) are returned.
	InterimResults       bool     `protobuf:"varint,3,opt,name=interim_results,json=interimResults,proto3" json:"interim_results,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *StreamingRecognitionConfig) Reset()         { *m = StreamingRecognitionConfig{} }

// String renders the message using the proto compact text format.
func (m *StreamingRecognitionConfig) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks StreamingRecognitionConfig as implementing proto.Message.
func (*StreamingRecognitionConfig) ProtoMessage()    {}

// Descriptor returns the file descriptor bytes and the index path
// identifying this message within that descriptor.
func (*StreamingRecognitionConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{3}
}

// XXX_Unmarshal implements proto unmarshaling for StreamingRecognitionConfig.
func (m *StreamingRecognitionConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognitionConfig.Unmarshal(m, b)
}

// XXX_Marshal implements proto marshaling for StreamingRecognitionConfig.
func (m *StreamingRecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognitionConfig.Marshal(b, m, deterministic)
}

// XXX_Merge merges the fields of src into dst.
func (dst *StreamingRecognitionConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognitionConfig.Merge(dst, src)
}

// XXX_Size returns the size of the wire-format encoding of m.
func (m *StreamingRecognitionConfig) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognitionConfig.Size(m)
}

// XXX_DiscardUnknown discards any unrecognized fields retained by m.
func (m *StreamingRecognitionConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognitionConfig.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognitionConfig caches reflection info used by
// the generated marshaling helpers above.
var xxx_messageInfo_StreamingRecognitionConfig proto.InternalMessageInfo
627
628func (m *StreamingRecognitionConfig) GetConfig() *RecognitionConfig {
629	if m != nil {
630		return m.Config
631	}
632	return nil
633}
634
635func (m *StreamingRecognitionConfig) GetSingleUtterance() bool {
636	if m != nil {
637		return m.SingleUtterance
638	}
639	return false
640}
641
642func (m *StreamingRecognitionConfig) GetInterimResults() bool {
643	if m != nil {
644		return m.InterimResults
645	}
646	return false
647}
648
649// Provides information to the recognizer that specifies how to process the
650// request.
651type RecognitionConfig struct {
652	// Encoding of audio data sent in all `RecognitionAudio` messages.
653	// This field is optional for `FLAC` and `WAV` audio files and required
654	// for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
655	Encoding RecognitionConfig_AudioEncoding `protobuf:"varint,1,opt,name=encoding,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionConfig_AudioEncoding" json:"encoding,omitempty"`
656	// Sample rate in Hertz of the audio data sent in all
657	// `RecognitionAudio` messages. Valid values are: 8000-48000.
658	// 16000 is optimal. For best results, set the sampling rate of the audio
659	// source to 16000 Hz. If that's not possible, use the native sample rate of
660	// the audio source (instead of re-sampling).
661	// This field is optional for `FLAC` and `WAV` audio files and required
662	// for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
663	SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
664	// *Optional* The number of channels in the input audio data.
665	// ONLY set this for MULTI-CHANNEL recognition.
666	// Valid values for LINEAR16 and FLAC are `1`-`8`.
667	// Valid values for OGG_OPUS are '1'-'254'.
668	// Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
669	// If `0` or omitted, defaults to one channel (mono).
670	// Note: We only recognize the first channel by default.
671	// To perform independent recognition on each channel set
672	// `enable_separate_recognition_per_channel` to 'true'.
673	AudioChannelCount int32 `protobuf:"varint,7,opt,name=audio_channel_count,json=audioChannelCount,proto3" json:"audio_channel_count,omitempty"`
674	// This needs to be set to ‘true’ explicitly and `audio_channel_count` > 1
675	// to get each channel recognized separately. The recognition result will
676	// contain a `channel_tag` field to state which channel that result belongs
677	// to. If this is not true, we will only recognize the first channel. The
678	// request is billed cumulatively for all channels recognized:
679	// `audio_channel_count` multiplied by the length of the audio.
680	EnableSeparateRecognitionPerChannel bool `protobuf:"varint,12,opt,name=enable_separate_recognition_per_channel,json=enableSeparateRecognitionPerChannel,proto3" json:"enable_separate_recognition_per_channel,omitempty"`
681	// *Required* The language of the supplied audio as a
682	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
683	// Example: "en-US".
684	// See [Language Support](/speech-to-text/docs/languages)
685	// for a list of the currently supported language codes.
686	LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
687	// *Optional* A list of up to 3 additional
688	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
689	// listing possible alternative languages of the supplied audio.
690	// See [Language Support](/speech-to-text/docs/languages)
691	// for a list of the currently supported language codes.
692	// If alternative languages are listed, recognition result will contain
693	// recognition in the most likely language detected including the main
694	// language_code. The recognition result will include the language tag
695	// of the language detected in the audio.
696	// Note: This feature is only supported for Voice Command and Voice Search
697	// use cases and performance may vary for other use cases (e.g., phone call
698	// transcription).
699	AlternativeLanguageCodes []string `protobuf:"bytes,18,rep,name=alternative_language_codes,json=alternativeLanguageCodes,proto3" json:"alternative_language_codes,omitempty"`
700	// *Optional* Maximum number of recognition hypotheses to be returned.
701	// Specifically, the maximum number of `SpeechRecognitionAlternative` messages
702	// within each `SpeechRecognitionResult`.
703	// The server may return fewer than `max_alternatives`.
704	// Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
705	// one. If omitted, will return a maximum of one.
706	MaxAlternatives int32 `protobuf:"varint,4,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
707	// *Optional* If set to `true`, the server will attempt to filter out
708	// profanities, replacing all but the initial character in each filtered word
709	// with asterisks, e.g. "f***". If set to `false` or omitted, profanities
710	// won't be filtered out.
711	ProfanityFilter bool `protobuf:"varint,5,opt,name=profanity_filter,json=profanityFilter,proto3" json:"profanity_filter,omitempty"`
712	// *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
713	// A means to provide context to assist the speech recognition. For more
714	// information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
715	SpeechContexts []*SpeechContext `protobuf:"bytes,6,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"`
716	// *Optional* If `true`, the top result includes a list of words and
717	// the start and end time offsets (timestamps) for those words. If
718	// `false`, no word-level time offset information is returned. The default is
719	// `false`.
720	EnableWordTimeOffsets bool `protobuf:"varint,8,opt,name=enable_word_time_offsets,json=enableWordTimeOffsets,proto3" json:"enable_word_time_offsets,omitempty"`
721	// *Optional* If `true`, the top result includes a list of words and the
722	// confidence for those words. If `false`, no word-level confidence
723	// information is returned. The default is `false`.
724	EnableWordConfidence bool `protobuf:"varint,15,opt,name=enable_word_confidence,json=enableWordConfidence,proto3" json:"enable_word_confidence,omitempty"`
725	// *Optional* If 'true', adds punctuation to recognition result hypotheses.
726	// This feature is only available in select languages. Setting this for
727	// requests in other languages has no effect at all.
728	// The default 'false' value does not add punctuation to result hypotheses.
729	// Note: This is currently offered as an experimental service, complimentary
730	// to all users. In the future this may be exclusively available as a
731	// premium feature.
732	EnableAutomaticPunctuation bool `protobuf:"varint,11,opt,name=enable_automatic_punctuation,json=enableAutomaticPunctuation,proto3" json:"enable_automatic_punctuation,omitempty"`
733	// *Optional* If 'true', enables speaker detection for each recognized word in
734	// the top alternative of the recognition result using a speaker_tag provided
735	// in the WordInfo.
736	// Note: When this is true, we send all the words from the beginning of the
	// audio for the top alternative in every consecutive STREAMING response.
738	// This is done in order to improve our speaker tags as our models learn to
739	// identify the speakers in the conversation over time.
740	// For non-streaming requests, the diarization results will be provided only
741	// in the top alternative of the FINAL SpeechRecognitionResult.
742	EnableSpeakerDiarization bool `protobuf:"varint,16,opt,name=enable_speaker_diarization,json=enableSpeakerDiarization,proto3" json:"enable_speaker_diarization,omitempty"`
743	// *Optional*
744	// If set, specifies the estimated number of speakers in the conversation.
745	// If not set, defaults to '2'.
	// Ignored unless enable_speaker_diarization is set to true.
747	DiarizationSpeakerCount int32 `protobuf:"varint,17,opt,name=diarization_speaker_count,json=diarizationSpeakerCount,proto3" json:"diarization_speaker_count,omitempty"`
748	// *Optional* Metadata regarding this request.
749	Metadata *RecognitionMetadata `protobuf:"bytes,9,opt,name=metadata,proto3" json:"metadata,omitempty"`
750	// *Optional* Which model to select for the given request. Select the model
751	// best suited to your domain to get best results. If a model is not
752	// explicitly specified, then we auto-select a model based on the parameters
753	// in the RecognitionConfig.
754	// <table>
755	//   <tr>
756	//     <td><b>Model</b></td>
757	//     <td><b>Description</b></td>
758	//   </tr>
759	//   <tr>
760	//     <td><code>command_and_search</code></td>
761	//     <td>Best for short queries such as voice commands or voice search.</td>
762	//   </tr>
763	//   <tr>
764	//     <td><code>phone_call</code></td>
765	//     <td>Best for audio that originated from a phone call (typically
766	//     recorded at an 8khz sampling rate).</td>
767	//   </tr>
768	//   <tr>
769	//     <td><code>video</code></td>
//     <td>Best for audio that originated from video or includes multiple
771	//         speakers. Ideally the audio is recorded at a 16khz or greater
772	//         sampling rate. This is a premium model that costs more than the
773	//         standard rate.</td>
774	//   </tr>
775	//   <tr>
776	//     <td><code>default</code></td>
777	//     <td>Best for audio that is not one of the specific audio models.
778	//         For example, long-form audio. Ideally the audio is high-fidelity,
779	//         recorded at a 16khz or greater sampling rate.</td>
780	//   </tr>
781	// </table>
782	Model string `protobuf:"bytes,13,opt,name=model,proto3" json:"model,omitempty"`
783	// *Optional* Set to true to use an enhanced model for speech recognition.
784	// If `use_enhanced` is set to true and the `model` field is not set, then
785	// an appropriate enhanced model is chosen if:
786	// 1. project is eligible for requesting enhanced models
787	// 2. an enhanced model exists for the audio
788	//
789	// If `use_enhanced` is true and an enhanced version of the specified model
790	// does not exist, then the speech is recognized using the standard version
791	// of the specified model.
792	//
793	// Enhanced speech models require that you opt-in to data logging using
794	// instructions in the
795	// [documentation](/speech-to-text/docs/enable-data-logging). If you set
796	// `use_enhanced` to true and you have not enabled audio logging, then you
797	// will receive an error.
798	UseEnhanced          bool     `protobuf:"varint,14,opt,name=use_enhanced,json=useEnhanced,proto3" json:"use_enhanced,omitempty"`
799	XXX_NoUnkeyedLiteral struct{} `json:"-"`
800	XXX_unrecognized     []byte   `json:"-"`
801	XXX_sizecache        int32    `json:"-"`
802}
803
// Generated proto.Message plumbing for RecognitionConfig: Reset, String and
// ProtoMessage satisfy the proto.Message interface; the XXX_* methods route
// wire-format work through the table-driven xxx_messageInfo helper below.
func (m *RecognitionConfig) Reset()         { *m = RecognitionConfig{} }
func (m *RecognitionConfig) String() string { return proto.CompactTextString(m) }
func (*RecognitionConfig) ProtoMessage()    {}
// Descriptor returns the compressed file descriptor bytes and this message's
// index path ([]int{4}) within that descriptor.
func (*RecognitionConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{4}
}
func (m *RecognitionConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionConfig.Unmarshal(m, b)
}
func (m *RecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionConfig.Marshal(b, m, deterministic)
}
func (dst *RecognitionConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionConfig.Merge(dst, src)
}
func (m *RecognitionConfig) XXX_Size() int {
	return xxx_messageInfo_RecognitionConfig.Size(m)
}
func (m *RecognitionConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionConfig.DiscardUnknown(m)
}

// Cached marshal/unmarshal metadata for RecognitionConfig.
var xxx_messageInfo_RecognitionConfig proto.InternalMessageInfo
827
828func (m *RecognitionConfig) GetEncoding() RecognitionConfig_AudioEncoding {
829	if m != nil {
830		return m.Encoding
831	}
832	return RecognitionConfig_ENCODING_UNSPECIFIED
833}
834
835func (m *RecognitionConfig) GetSampleRateHertz() int32 {
836	if m != nil {
837		return m.SampleRateHertz
838	}
839	return 0
840}
841
842func (m *RecognitionConfig) GetAudioChannelCount() int32 {
843	if m != nil {
844		return m.AudioChannelCount
845	}
846	return 0
847}
848
849func (m *RecognitionConfig) GetEnableSeparateRecognitionPerChannel() bool {
850	if m != nil {
851		return m.EnableSeparateRecognitionPerChannel
852	}
853	return false
854}
855
856func (m *RecognitionConfig) GetLanguageCode() string {
857	if m != nil {
858		return m.LanguageCode
859	}
860	return ""
861}
862
863func (m *RecognitionConfig) GetAlternativeLanguageCodes() []string {
864	if m != nil {
865		return m.AlternativeLanguageCodes
866	}
867	return nil
868}
869
870func (m *RecognitionConfig) GetMaxAlternatives() int32 {
871	if m != nil {
872		return m.MaxAlternatives
873	}
874	return 0
875}
876
877func (m *RecognitionConfig) GetProfanityFilter() bool {
878	if m != nil {
879		return m.ProfanityFilter
880	}
881	return false
882}
883
884func (m *RecognitionConfig) GetSpeechContexts() []*SpeechContext {
885	if m != nil {
886		return m.SpeechContexts
887	}
888	return nil
889}
890
891func (m *RecognitionConfig) GetEnableWordTimeOffsets() bool {
892	if m != nil {
893		return m.EnableWordTimeOffsets
894	}
895	return false
896}
897
898func (m *RecognitionConfig) GetEnableWordConfidence() bool {
899	if m != nil {
900		return m.EnableWordConfidence
901	}
902	return false
903}
904
905func (m *RecognitionConfig) GetEnableAutomaticPunctuation() bool {
906	if m != nil {
907		return m.EnableAutomaticPunctuation
908	}
909	return false
910}
911
912func (m *RecognitionConfig) GetEnableSpeakerDiarization() bool {
913	if m != nil {
914		return m.EnableSpeakerDiarization
915	}
916	return false
917}
918
919func (m *RecognitionConfig) GetDiarizationSpeakerCount() int32 {
920	if m != nil {
921		return m.DiarizationSpeakerCount
922	}
923	return 0
924}
925
926func (m *RecognitionConfig) GetMetadata() *RecognitionMetadata {
927	if m != nil {
928		return m.Metadata
929	}
930	return nil
931}
932
933func (m *RecognitionConfig) GetModel() string {
934	if m != nil {
935		return m.Model
936	}
937	return ""
938}
939
940func (m *RecognitionConfig) GetUseEnhanced() bool {
941	if m != nil {
942		return m.UseEnhanced
943	}
944	return false
945}
946
// Description of audio data to be recognized.
//
// Generated from the RecognitionMetadata proto message; the struct tags
// carry the wire field numbers and must not be edited by hand. The XXX_*
// fields are internal proto bookkeeping (unknown fields, size cache).
type RecognitionMetadata struct {
	// The use case most closely describing the audio content to be recognized.
	InteractionType RecognitionMetadata_InteractionType `protobuf:"varint,1,opt,name=interaction_type,json=interactionType,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_InteractionType" json:"interaction_type,omitempty"`
	// The industry vertical to which this speech recognition request most
	// closely applies. This is most indicative of the topics contained
	// in the audio.  Use the 6-digit NAICS code to identify the industry
	// vertical - see https://www.naics.com/search/.
	IndustryNaicsCodeOfAudio uint32 `protobuf:"varint,3,opt,name=industry_naics_code_of_audio,json=industryNaicsCodeOfAudio,proto3" json:"industry_naics_code_of_audio,omitempty"`
	// The audio type that most closely describes the audio being recognized.
	MicrophoneDistance RecognitionMetadata_MicrophoneDistance `protobuf:"varint,4,opt,name=microphone_distance,json=microphoneDistance,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_MicrophoneDistance" json:"microphone_distance,omitempty"`
	// The original media the speech was recorded on.
	OriginalMediaType RecognitionMetadata_OriginalMediaType `protobuf:"varint,5,opt,name=original_media_type,json=originalMediaType,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_OriginalMediaType" json:"original_media_type,omitempty"`
	// The type of device the speech was recorded with.
	RecordingDeviceType RecognitionMetadata_RecordingDeviceType `protobuf:"varint,6,opt,name=recording_device_type,json=recordingDeviceType,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_RecordingDeviceType" json:"recording_device_type,omitempty"`
	// The device used to make the recording.  Examples 'Nexus 5X' or
	// 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
	// 'Cardioid Microphone'.
	RecordingDeviceName string `protobuf:"bytes,7,opt,name=recording_device_name,json=recordingDeviceName,proto3" json:"recording_device_name,omitempty"`
	// Mime type of the original audio file.  For example `audio/m4a`,
	// `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
	// A list of possible audio mime types is maintained at
	// http://www.iana.org/assignments/media-types/media-types.xhtml#audio
	OriginalMimeType string `protobuf:"bytes,8,opt,name=original_mime_type,json=originalMimeType,proto3" json:"original_mime_type,omitempty"`
	// Obfuscated (privacy-protected) ID of the user, to identify number of
	// unique users using the service.
	ObfuscatedId int64 `protobuf:"varint,9,opt,name=obfuscated_id,json=obfuscatedId,proto3" json:"obfuscated_id,omitempty"`
	// Description of the content. Eg. "Recordings of federal supreme court
	// hearings from 2012".
	AudioTopic           string   `protobuf:"bytes,10,opt,name=audio_topic,json=audioTopic,proto3" json:"audio_topic,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
981
// Generated proto.Message plumbing for RecognitionMetadata: Reset, String and
// ProtoMessage satisfy the proto.Message interface; Descriptor returns the
// raw file descriptor plus this message's index ([]int{5}); the XXX_* methods
// delegate to the table-driven xxx_messageInfo helper below.
func (m *RecognitionMetadata) Reset()         { *m = RecognitionMetadata{} }
func (m *RecognitionMetadata) String() string { return proto.CompactTextString(m) }
func (*RecognitionMetadata) ProtoMessage()    {}
func (*RecognitionMetadata) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{5}
}
func (m *RecognitionMetadata) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionMetadata.Unmarshal(m, b)
}
func (m *RecognitionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionMetadata.Marshal(b, m, deterministic)
}
func (dst *RecognitionMetadata) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionMetadata.Merge(dst, src)
}
func (m *RecognitionMetadata) XXX_Size() int {
	return xxx_messageInfo_RecognitionMetadata.Size(m)
}
func (m *RecognitionMetadata) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionMetadata.DiscardUnknown(m)
}

// Cached marshal/unmarshal metadata for RecognitionMetadata.
var xxx_messageInfo_RecognitionMetadata proto.InternalMessageInfo
1005
1006func (m *RecognitionMetadata) GetInteractionType() RecognitionMetadata_InteractionType {
1007	if m != nil {
1008		return m.InteractionType
1009	}
1010	return RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED
1011}
1012
1013func (m *RecognitionMetadata) GetIndustryNaicsCodeOfAudio() uint32 {
1014	if m != nil {
1015		return m.IndustryNaicsCodeOfAudio
1016	}
1017	return 0
1018}
1019
1020func (m *RecognitionMetadata) GetMicrophoneDistance() RecognitionMetadata_MicrophoneDistance {
1021	if m != nil {
1022		return m.MicrophoneDistance
1023	}
1024	return RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED
1025}
1026
1027func (m *RecognitionMetadata) GetOriginalMediaType() RecognitionMetadata_OriginalMediaType {
1028	if m != nil {
1029		return m.OriginalMediaType
1030	}
1031	return RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED
1032}
1033
1034func (m *RecognitionMetadata) GetRecordingDeviceType() RecognitionMetadata_RecordingDeviceType {
1035	if m != nil {
1036		return m.RecordingDeviceType
1037	}
1038	return RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED
1039}
1040
1041func (m *RecognitionMetadata) GetRecordingDeviceName() string {
1042	if m != nil {
1043		return m.RecordingDeviceName
1044	}
1045	return ""
1046}
1047
1048func (m *RecognitionMetadata) GetOriginalMimeType() string {
1049	if m != nil {
1050		return m.OriginalMimeType
1051	}
1052	return ""
1053}
1054
1055func (m *RecognitionMetadata) GetObfuscatedId() int64 {
1056	if m != nil {
1057		return m.ObfuscatedId
1058	}
1059	return 0
1060}
1061
1062func (m *RecognitionMetadata) GetAudioTopic() string {
1063	if m != nil {
1064		return m.AudioTopic
1065	}
1066	return ""
1067}
1068
// Provides "hints" to the speech recognizer to favor specific words and phrases
// in the results.
//
// Generated from the SpeechContext proto message; the XXX_* fields are
// internal proto bookkeeping and must stay as generated.
type SpeechContext struct {
	// *Optional* A list of strings containing words and phrases "hints" so that
	// the speech recognition is more likely to recognize them. This can be used
	// to improve the accuracy for specific words and phrases, for example, if
	// specific commands are typically spoken by the user. This can also be used
	// to add additional words to the vocabulary of the recognizer. See
	// [usage limits](/speech-to-text/quotas#content).
	Phrases              []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
1083
// Generated proto.Message plumbing for SpeechContext: Reset, String and
// ProtoMessage satisfy the proto.Message interface; Descriptor returns the
// raw file descriptor plus this message's index ([]int{6}); the XXX_* methods
// delegate to the table-driven xxx_messageInfo helper below.
func (m *SpeechContext) Reset()         { *m = SpeechContext{} }
func (m *SpeechContext) String() string { return proto.CompactTextString(m) }
func (*SpeechContext) ProtoMessage()    {}
func (*SpeechContext) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{6}
}
func (m *SpeechContext) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechContext.Unmarshal(m, b)
}
func (m *SpeechContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechContext.Marshal(b, m, deterministic)
}
func (dst *SpeechContext) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechContext.Merge(dst, src)
}
func (m *SpeechContext) XXX_Size() int {
	return xxx_messageInfo_SpeechContext.Size(m)
}
func (m *SpeechContext) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechContext.DiscardUnknown(m)
}

// Cached marshal/unmarshal metadata for SpeechContext.
var xxx_messageInfo_SpeechContext proto.InternalMessageInfo
1107
1108func (m *SpeechContext) GetPhrases() []string {
1109	if m != nil {
1110		return m.Phrases
1111	}
1112	return nil
1113}
1114
// Contains audio data in the encoding specified in the `RecognitionConfig`.
// Either `content` or `uri` must be supplied. Supplying both or neither
// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]. See
// [content limits](/speech-to-text/quotas#content).
//
// Generated from the RecognitionAudio proto message. AudioSource is a
// oneof: exactly one wrapper type (Content or Uri) is held at a time.
type RecognitionAudio struct {
	// The audio source, which is either inline content or a Google Cloud
	// Storage uri.
	//
	// Types that are valid to be assigned to AudioSource:
	//	*RecognitionAudio_Content
	//	*RecognitionAudio_Uri
	AudioSource          isRecognitionAudio_AudioSource `protobuf_oneof:"audio_source"`
	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
	XXX_unrecognized     []byte                         `json:"-"`
	XXX_sizecache        int32                          `json:"-"`
}
1131
// Generated proto.Message plumbing for RecognitionAudio: Reset, String and
// ProtoMessage satisfy the proto.Message interface; Descriptor returns the
// raw file descriptor plus this message's index ([]int{7}); the XXX_* methods
// delegate to the table-driven xxx_messageInfo helper below.
func (m *RecognitionAudio) Reset()         { *m = RecognitionAudio{} }
func (m *RecognitionAudio) String() string { return proto.CompactTextString(m) }
func (*RecognitionAudio) ProtoMessage()    {}
func (*RecognitionAudio) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{7}
}
func (m *RecognitionAudio) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionAudio.Unmarshal(m, b)
}
func (m *RecognitionAudio) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionAudio.Marshal(b, m, deterministic)
}
func (dst *RecognitionAudio) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionAudio.Merge(dst, src)
}
func (m *RecognitionAudio) XXX_Size() int {
	return xxx_messageInfo_RecognitionAudio.Size(m)
}
func (m *RecognitionAudio) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionAudio.DiscardUnknown(m)
}

// Cached marshal/unmarshal metadata for RecognitionAudio.
var xxx_messageInfo_RecognitionAudio proto.InternalMessageInfo
1155
// isRecognitionAudio_AudioSource is the sealed interface for the
// audio_source oneof; only the wrapper types below implement it.
type isRecognitionAudio_AudioSource interface {
	isRecognitionAudio_AudioSource()
}

// RecognitionAudio_Content wraps inline audio bytes (field 1 of the oneof).
type RecognitionAudio_Content struct {
	Content []byte `protobuf:"bytes,1,opt,name=content,proto3,oneof"`
}

// RecognitionAudio_Uri wraps a Cloud Storage URI (field 2 of the oneof).
type RecognitionAudio_Uri struct {
	Uri string `protobuf:"bytes,2,opt,name=uri,proto3,oneof"`
}

// Marker methods sealing the oneof interface to these two wrappers.
func (*RecognitionAudio_Content) isRecognitionAudio_AudioSource() {}

func (*RecognitionAudio_Uri) isRecognitionAudio_AudioSource() {}
1171
1172func (m *RecognitionAudio) GetAudioSource() isRecognitionAudio_AudioSource {
1173	if m != nil {
1174		return m.AudioSource
1175	}
1176	return nil
1177}
1178
1179func (m *RecognitionAudio) GetContent() []byte {
1180	if x, ok := m.GetAudioSource().(*RecognitionAudio_Content); ok {
1181		return x.Content
1182	}
1183	return nil
1184}
1185
1186func (m *RecognitionAudio) GetUri() string {
1187	if x, ok := m.GetAudioSource().(*RecognitionAudio_Uri); ok {
1188		return x.Uri
1189	}
1190	return ""
1191}
1192
// XXX_OneofFuncs is for the internal use of the proto package.
// It registers the oneof marshal/unmarshal/size helpers and the set of
// wrapper types that may occupy the audio_source oneof.
func (*RecognitionAudio) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _RecognitionAudio_OneofMarshaler, _RecognitionAudio_OneofUnmarshaler, _RecognitionAudio_OneofSizer, []interface{}{
		(*RecognitionAudio_Content)(nil),
		(*RecognitionAudio_Uri)(nil),
	}
}
1200
// _RecognitionAudio_OneofMarshaler encodes whichever wrapper currently
// occupies the audio_source oneof. Each branch first writes the field key
// as a varint (field number shifted left 3 bits, OR'd with the
// length-delimited wire type), then the payload. A nil oneof writes
// nothing; an unknown concrete type is an error.
func _RecognitionAudio_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*RecognitionAudio)
	// audio_source
	switch x := m.AudioSource.(type) {
	case *RecognitionAudio_Content:
		b.EncodeVarint(1<<3 | proto.WireBytes)
		b.EncodeRawBytes(x.Content)
	case *RecognitionAudio_Uri:
		b.EncodeVarint(2<<3 | proto.WireBytes)
		b.EncodeStringBytes(x.Uri)
	case nil:
	default:
		return fmt.Errorf("RecognitionAudio.AudioSource has unexpected type %T", x)
	}
	return nil
}
1217
// _RecognitionAudio_OneofUnmarshaler decodes a field belonging to the
// audio_source oneof. It returns (true, err) when the tag is one of the
// oneof's fields (replacing any previously-set variant), and (false, nil)
// for tags it does not own, so the caller can treat them as unknown.
// A matching tag with the wrong wire type is reported via
// proto.ErrInternalBadWireType.
func _RecognitionAudio_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*RecognitionAudio)
	switch tag {
	case 1: // audio_source.content
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeRawBytes(true)
		m.AudioSource = &RecognitionAudio_Content{x}
		return true, err
	case 2: // audio_source.uri
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeStringBytes()
		m.AudioSource = &RecognitionAudio_Uri{x}
		return true, err
	default:
		return false, nil
	}
}
1239
// _RecognitionAudio_OneofSizer returns the encoded size in bytes of the
// populated audio_source variant: one byte for the field key (field
// numbers 1 and 2 fit a single-byte varint key), plus the varint length
// prefix, plus the payload itself. A nil oneof contributes zero.
func _RecognitionAudio_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*RecognitionAudio)
	// audio_source
	switch x := m.AudioSource.(type) {
	case *RecognitionAudio_Content:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.Content)))
		n += len(x.Content)
	case *RecognitionAudio_Uri:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.Uri)))
		n += len(x.Uri)
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
1258
// The only message returned to the client by the `Recognize` method. It
// contains the result as zero or more sequential `SpeechRecognitionResult`
// messages.
//
// Generated from the RecognizeResponse proto message; the XXX_* fields
// are internal proto bookkeeping.
type RecognizeResponse struct {
	// Output only. Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results              []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
	XXX_unrecognized     []byte                     `json:"-"`
	XXX_sizecache        int32                      `json:"-"`
}
1270
// Generated proto.Message plumbing for RecognizeResponse: Reset, String and
// ProtoMessage satisfy the proto.Message interface; Descriptor returns the
// raw file descriptor plus this message's index ([]int{8}); the XXX_* methods
// delegate to the table-driven xxx_messageInfo helper below.
func (m *RecognizeResponse) Reset()         { *m = RecognizeResponse{} }
func (m *RecognizeResponse) String() string { return proto.CompactTextString(m) }
func (*RecognizeResponse) ProtoMessage()    {}
func (*RecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{8}
}
func (m *RecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognizeResponse.Unmarshal(m, b)
}
func (m *RecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognizeResponse.Marshal(b, m, deterministic)
}
func (dst *RecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognizeResponse.Merge(dst, src)
}
func (m *RecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_RecognizeResponse.Size(m)
}
func (m *RecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognizeResponse.DiscardUnknown(m)
}

// Cached marshal/unmarshal metadata for RecognizeResponse.
var xxx_messageInfo_RecognizeResponse proto.InternalMessageInfo
1294
1295func (m *RecognizeResponse) GetResults() []*SpeechRecognitionResult {
1296	if m != nil {
1297		return m.Results
1298	}
1299	return nil
1300}
1301
// The only message returned to the client by the `LongRunningRecognize` method.
// It contains the result as zero or more sequential `SpeechRecognitionResult`
// messages. It is included in the `result.response` field of the `Operation`
// returned by the `GetOperation` call of the `google::longrunning::Operations`
// service.
//
// Generated from the LongRunningRecognizeResponse proto message; the
// XXX_* fields are internal proto bookkeeping.
type LongRunningRecognizeResponse struct {
	// Output only. Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results              []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
	XXX_unrecognized     []byte                     `json:"-"`
	XXX_sizecache        int32                      `json:"-"`
}
1315
// Generated proto.Message plumbing for LongRunningRecognizeResponse: Reset,
// String and ProtoMessage satisfy the proto.Message interface; Descriptor
// returns the raw file descriptor plus this message's index ([]int{9}); the
// XXX_* methods delegate to the table-driven xxx_messageInfo helper below.
func (m *LongRunningRecognizeResponse) Reset()         { *m = LongRunningRecognizeResponse{} }
func (m *LongRunningRecognizeResponse) String() string { return proto.CompactTextString(m) }
func (*LongRunningRecognizeResponse) ProtoMessage()    {}
func (*LongRunningRecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{9}
}
func (m *LongRunningRecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LongRunningRecognizeResponse.Unmarshal(m, b)
}
func (m *LongRunningRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LongRunningRecognizeResponse.Marshal(b, m, deterministic)
}
func (dst *LongRunningRecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LongRunningRecognizeResponse.Merge(dst, src)
}
func (m *LongRunningRecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_LongRunningRecognizeResponse.Size(m)
}
func (m *LongRunningRecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_LongRunningRecognizeResponse.DiscardUnknown(m)
}

// Cached marshal/unmarshal metadata for LongRunningRecognizeResponse.
var xxx_messageInfo_LongRunningRecognizeResponse proto.InternalMessageInfo
1339
1340func (m *LongRunningRecognizeResponse) GetResults() []*SpeechRecognitionResult {
1341	if m != nil {
1342		return m.Results
1343	}
1344	return nil
1345}
1346
// Describes the progress of a long-running `LongRunningRecognize` call. It is
// included in the `metadata` field of the `Operation` returned by the
// `GetOperation` call of the `google::longrunning::Operations` service.
//
// Generated from the LongRunningRecognizeMetadata proto message; the
// XXX_* fields are internal proto bookkeeping.
type LongRunningRecognizeMetadata struct {
	// Approximate percentage of audio processed thus far. Guaranteed to be 100
	// when the audio is fully processed and the results are available.
	ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
	// Time when the request was received.
	StartTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Time of the most recent processing update.
	LastUpdateTime       *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"`
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}
1362
// Generated proto.Message plumbing for LongRunningRecognizeMetadata: Reset,
// String and ProtoMessage satisfy the proto.Message interface; Descriptor
// returns the raw file descriptor plus this message's index ([]int{10}); the
// XXX_* methods delegate to the table-driven xxx_messageInfo helper below.
func (m *LongRunningRecognizeMetadata) Reset()         { *m = LongRunningRecognizeMetadata{} }
func (m *LongRunningRecognizeMetadata) String() string { return proto.CompactTextString(m) }
func (*LongRunningRecognizeMetadata) ProtoMessage()    {}
func (*LongRunningRecognizeMetadata) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{10}
}
func (m *LongRunningRecognizeMetadata) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LongRunningRecognizeMetadata.Unmarshal(m, b)
}
func (m *LongRunningRecognizeMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LongRunningRecognizeMetadata.Marshal(b, m, deterministic)
}
func (dst *LongRunningRecognizeMetadata) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LongRunningRecognizeMetadata.Merge(dst, src)
}
func (m *LongRunningRecognizeMetadata) XXX_Size() int {
	return xxx_messageInfo_LongRunningRecognizeMetadata.Size(m)
}
func (m *LongRunningRecognizeMetadata) XXX_DiscardUnknown() {
	xxx_messageInfo_LongRunningRecognizeMetadata.DiscardUnknown(m)
}

// Cached marshal/unmarshal metadata for LongRunningRecognizeMetadata.
var xxx_messageInfo_LongRunningRecognizeMetadata proto.InternalMessageInfo
1386
1387func (m *LongRunningRecognizeMetadata) GetProgressPercent() int32 {
1388	if m != nil {
1389		return m.ProgressPercent
1390	}
1391	return 0
1392}
1393
1394func (m *LongRunningRecognizeMetadata) GetStartTime() *timestamp.Timestamp {
1395	if m != nil {
1396		return m.StartTime
1397	}
1398	return nil
1399}
1400
1401func (m *LongRunningRecognizeMetadata) GetLastUpdateTime() *timestamp.Timestamp {
1402	if m != nil {
1403		return m.LastUpdateTime
1404	}
1405	return nil
1406}
1407
1408// `StreamingRecognizeResponse` is the only message returned to the client by
1409// `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
1410// messages are streamed back to the client. If there is no recognizable
1411// audio, and `single_utterance` is set to false, then no messages are streamed
1412// back to the client.
1413//
1414// Here's an example of a series of ten `StreamingRecognizeResponse`s that might
1415// be returned while processing audio:
1416//
1417// 1. results { alternatives { transcript: "tube" } stability: 0.01 }
1418//
1419// 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
1420//
1421// 3. results { alternatives { transcript: "to be" } stability: 0.9 }
1422//    results { alternatives { transcript: " or not to be" } stability: 0.01 }
1423//
1424// 4. results { alternatives { transcript: "to be or not to be"
1425//                             confidence: 0.92 }
1426//              alternatives { transcript: "to bee or not to bee" }
1427//              is_final: true }
1428//
1429// 5. results { alternatives { transcript: " that's" } stability: 0.01 }
1430//
1431// 6. results { alternatives { transcript: " that is" } stability: 0.9 }
1432//    results { alternatives { transcript: " the question" } stability: 0.01 }
1433//
1434// 7. results { alternatives { transcript: " that is the question"
1435//                             confidence: 0.98 }
1436//              alternatives { transcript: " that was the question" }
1437//              is_final: true }
1438//
1439// Notes:
1440//
1441// - Only two of the above responses #4 and #7 contain final results; they are
1442//   indicated by `is_final: true`. Concatenating these together generates the
1443//   full transcript: "to be or not to be that is the question".
1444//
1445// - The others contain interim `results`. #3 and #6 contain two interim
1446//   `results`: the first portion has a high stability and is less likely to
1447//   change; the second portion has a low stability and is very likely to
1448//   change. A UI designer might choose to show only high stability `results`.
1449//
1450// - The specific `stability` and `confidence` values shown above are only for
1451//   illustrative purposes. Actual values may vary.
1452//
1453// - In each response, only one of these fields will be set:
1454//     `error`,
1455//     `speech_event_type`, or
1456//     one or more (repeated) `results`.
type StreamingRecognizeResponse struct {
	// Output only. If set, returns a [google.rpc.Status][google.rpc.Status] message that
	// specifies the error for the operation.
	Error *status.Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
	// Output only. This repeated list contains zero or more results that
	// correspond to consecutive portions of the audio currently being processed.
	// It contains zero or one `is_final=true` result (the newly settled portion),
	// followed by zero or more `is_final=false` results (the interim results).
	Results []*StreamingRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	// Output only. Indicates the type of speech event.
	SpeechEventType      StreamingRecognizeResponse_SpeechEventType `protobuf:"varint,4,opt,name=speech_event_type,json=speechEventType,proto3,enum=google.cloud.speech.v1p1beta1.StreamingRecognizeResponse_SpeechEventType" json:"speech_event_type,omitempty"`
	// XXX_* fields are internal bookkeeping managed by the protobuf
	// runtime; do not use them directly.
	XXX_NoUnkeyedLiteral struct{}                                   `json:"-"`
	XXX_unrecognized     []byte                                     `json:"-"`
	XXX_sizecache        int32                                      `json:"-"`
}
1472
// Standard generated proto methods for StreamingRecognizeResponse.
func (m *StreamingRecognizeResponse) Reset()         { *m = StreamingRecognizeResponse{} }
func (m *StreamingRecognizeResponse) String() string { return proto.CompactTextString(m) }
func (*StreamingRecognizeResponse) ProtoMessage()    {}
// Descriptor returns the gzipped file descriptor bytes and this
// message's index path within the descriptor.
func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{11}
}
func (m *StreamingRecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognizeResponse.Unmarshal(m, b)
}
func (m *StreamingRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognizeResponse.Marshal(b, m, deterministic)
}
func (dst *StreamingRecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognizeResponse.Merge(dst, src)
}
func (m *StreamingRecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognizeResponse.Size(m)
}
func (m *StreamingRecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognizeResponse.DiscardUnknown(m)
}

var xxx_messageInfo_StreamingRecognizeResponse proto.InternalMessageInfo

// Nil-safe getters: each returns the field value, or the type's zero
// value when the receiver itself is nil.
func (m *StreamingRecognizeResponse) GetError() *status.Status {
	if m != nil {
		return m.Error
	}
	return nil
}

func (m *StreamingRecognizeResponse) GetResults() []*StreamingRecognitionResult {
	if m != nil {
		return m.Results
	}
	return nil
}

func (m *StreamingRecognizeResponse) GetSpeechEventType() StreamingRecognizeResponse_SpeechEventType {
	if m != nil {
		return m.SpeechEventType
	}
	return StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED
}
1517
1518// A streaming speech recognition result corresponding to a portion of the audio
1519// that is currently being processed.
type StreamingRecognitionResult struct {
	// Output only. May contain one or more recognition hypotheses (up to the
	// maximum specified in `max_alternatives`).
	// These alternatives are ordered in terms of accuracy, with the top (first)
	// alternative being the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	// Output only. If `false`, this `StreamingRecognitionResult` represents an
	// interim result that may change. If `true`, this is the final time the
	// speech service will return this particular `StreamingRecognitionResult`,
	// the recognizer will not return any further hypotheses for this portion of
	// the transcript and corresponding audio.
	IsFinal bool `protobuf:"varint,2,opt,name=is_final,json=isFinal,proto3" json:"is_final,omitempty"`
	// Output only. An estimate of the likelihood that the recognizer will not
	// change its guess about this interim result. Values range from 0.0
	// (completely unstable) to 1.0 (completely stable).
	// This field is only provided for interim results (`is_final=false`).
	// The default of 0.0 is a sentinel value indicating `stability` was not set.
	Stability float32 `protobuf:"fixed32,3,opt,name=stability,proto3" json:"stability,omitempty"`
	// Output only. Time offset of the end of this result relative to the
	// beginning of the audio.
	ResultEndTime *duration.Duration `protobuf:"bytes,4,opt,name=result_end_time,json=resultEndTime,proto3" json:"result_end_time,omitempty"`
	// For multi-channel audio, this is the channel number corresponding to the
	// recognized result for the audio from that channel.
	// For audio_channel_count = N, its output values can range from '1' to 'N'.
	ChannelTag int32 `protobuf:"varint,5,opt,name=channel_tag,json=channelTag,proto3" json:"channel_tag,omitempty"`
	// Output only. The
	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
	// language in this result. This language code was detected to have the most
	// likelihood of being spoken in the audio.
	LanguageCode         string   `protobuf:"bytes,6,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
	// XXX_* fields are internal bookkeeping managed by the protobuf
	// runtime; do not use them directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
1554
// Standard generated proto methods for StreamingRecognitionResult.
func (m *StreamingRecognitionResult) Reset()         { *m = StreamingRecognitionResult{} }
func (m *StreamingRecognitionResult) String() string { return proto.CompactTextString(m) }
func (*StreamingRecognitionResult) ProtoMessage()    {}
// Descriptor returns the gzipped file descriptor bytes and this
// message's index path within the descriptor.
func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{12}
}
func (m *StreamingRecognitionResult) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognitionResult.Unmarshal(m, b)
}
func (m *StreamingRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognitionResult.Marshal(b, m, deterministic)
}
func (dst *StreamingRecognitionResult) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognitionResult.Merge(dst, src)
}
func (m *StreamingRecognitionResult) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognitionResult.Size(m)
}
func (m *StreamingRecognitionResult) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognitionResult.DiscardUnknown(m)
}

var xxx_messageInfo_StreamingRecognitionResult proto.InternalMessageInfo

// Nil-safe getters: each returns the field value, or the type's zero
// value when the receiver itself is nil.
func (m *StreamingRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative {
	if m != nil {
		return m.Alternatives
	}
	return nil
}

func (m *StreamingRecognitionResult) GetIsFinal() bool {
	if m != nil {
		return m.IsFinal
	}
	return false
}

func (m *StreamingRecognitionResult) GetStability() float32 {
	if m != nil {
		return m.Stability
	}
	return 0
}

func (m *StreamingRecognitionResult) GetResultEndTime() *duration.Duration {
	if m != nil {
		return m.ResultEndTime
	}
	return nil
}

func (m *StreamingRecognitionResult) GetChannelTag() int32 {
	if m != nil {
		return m.ChannelTag
	}
	return 0
}

func (m *StreamingRecognitionResult) GetLanguageCode() string {
	if m != nil {
		return m.LanguageCode
	}
	return ""
}
1620
1621// A speech recognition result corresponding to a portion of the audio.
type SpeechRecognitionResult struct {
	// Output only. May contain one or more recognition hypotheses (up to the
	// maximum specified in `max_alternatives`).
	// These alternatives are ordered in terms of accuracy, with the top (first)
	// alternative being the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	// For multi-channel audio, this is the channel number corresponding to the
	// recognized result for the audio from that channel.
	// For audio_channel_count = N, its output values can range from '1' to 'N'.
	ChannelTag int32 `protobuf:"varint,2,opt,name=channel_tag,json=channelTag,proto3" json:"channel_tag,omitempty"`
	// Output only. The
	// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
	// language in this result. This language code was detected to have the most
	// likelihood of being spoken in the audio.
	LanguageCode         string   `protobuf:"bytes,5,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
	// XXX_* fields are internal bookkeeping managed by the protobuf
	// runtime; do not use them directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
1641
// Standard generated proto methods for SpeechRecognitionResult.
func (m *SpeechRecognitionResult) Reset()         { *m = SpeechRecognitionResult{} }
func (m *SpeechRecognitionResult) String() string { return proto.CompactTextString(m) }
func (*SpeechRecognitionResult) ProtoMessage()    {}
// Descriptor returns the gzipped file descriptor bytes and this
// message's index path within the descriptor.
func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{13}
}
func (m *SpeechRecognitionResult) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechRecognitionResult.Unmarshal(m, b)
}
func (m *SpeechRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechRecognitionResult.Marshal(b, m, deterministic)
}
func (dst *SpeechRecognitionResult) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechRecognitionResult.Merge(dst, src)
}
func (m *SpeechRecognitionResult) XXX_Size() int {
	return xxx_messageInfo_SpeechRecognitionResult.Size(m)
}
func (m *SpeechRecognitionResult) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechRecognitionResult.DiscardUnknown(m)
}

var xxx_messageInfo_SpeechRecognitionResult proto.InternalMessageInfo

// Nil-safe getters: each returns the field value, or the type's zero
// value when the receiver itself is nil.
func (m *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative {
	if m != nil {
		return m.Alternatives
	}
	return nil
}

func (m *SpeechRecognitionResult) GetChannelTag() int32 {
	if m != nil {
		return m.ChannelTag
	}
	return 0
}

func (m *SpeechRecognitionResult) GetLanguageCode() string {
	if m != nil {
		return m.LanguageCode
	}
	return ""
}
1686
1687// Alternative hypotheses (a.k.a. n-best list).
type SpeechRecognitionAlternative struct {
	// Output only. Transcript text representing the words that the user spoke.
	Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
	// Output only. The confidence estimate between 0.0 and 1.0. A higher number
	// indicates an estimated greater likelihood that the recognized words are
	// correct. This field is set only for the top alternative of a non-streaming
	// result or, of a streaming result where `is_final=true`.
	// This field is not guaranteed to be accurate and users should not rely on it
	// to be always provided.
	// The default of 0.0 is a sentinel value indicating `confidence` was not set.
	Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Output only. A list of word-specific information for each recognized word.
	// Note: When `enable_speaker_diarization` is true, you will see all the words
	// from the beginning of the audio.
	Words                []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
	// XXX_* fields are internal bookkeeping managed by the protobuf
	// runtime; do not use them directly.
	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
	XXX_unrecognized     []byte      `json:"-"`
	XXX_sizecache        int32       `json:"-"`
}
1707
// Standard generated proto methods for SpeechRecognitionAlternative.
func (m *SpeechRecognitionAlternative) Reset()         { *m = SpeechRecognitionAlternative{} }
func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) }
func (*SpeechRecognitionAlternative) ProtoMessage()    {}
// Descriptor returns the gzipped file descriptor bytes and this
// message's index path within the descriptor.
func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{14}
}
func (m *SpeechRecognitionAlternative) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechRecognitionAlternative.Unmarshal(m, b)
}
func (m *SpeechRecognitionAlternative) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechRecognitionAlternative.Marshal(b, m, deterministic)
}
func (dst *SpeechRecognitionAlternative) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechRecognitionAlternative.Merge(dst, src)
}
func (m *SpeechRecognitionAlternative) XXX_Size() int {
	return xxx_messageInfo_SpeechRecognitionAlternative.Size(m)
}
func (m *SpeechRecognitionAlternative) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechRecognitionAlternative.DiscardUnknown(m)
}

var xxx_messageInfo_SpeechRecognitionAlternative proto.InternalMessageInfo

// Nil-safe getters: each returns the field value, or the type's zero
// value when the receiver itself is nil.
func (m *SpeechRecognitionAlternative) GetTranscript() string {
	if m != nil {
		return m.Transcript
	}
	return ""
}

func (m *SpeechRecognitionAlternative) GetConfidence() float32 {
	if m != nil {
		return m.Confidence
	}
	return 0
}

func (m *SpeechRecognitionAlternative) GetWords() []*WordInfo {
	if m != nil {
		return m.Words
	}
	return nil
}
1752
1753// Word-specific information for recognized words.
type WordInfo struct {
	// Output only. Time offset relative to the beginning of the audio,
	// and corresponding to the start of the spoken word.
	// This field is only set if `enable_word_time_offsets=true` and only
	// in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset can
	// vary.
	StartTime *duration.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Output only. Time offset relative to the beginning of the audio,
	// and corresponding to the end of the spoken word.
	// This field is only set if `enable_word_time_offsets=true` and only
	// in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset can
	// vary.
	EndTime *duration.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
	// Output only. The word corresponding to this set of information.
	Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
	// Output only. The confidence estimate between 0.0 and 1.0. A higher number
	// indicates an estimated greater likelihood that the recognized words are
	// correct. This field is set only for the top alternative of a non-streaming
	// result or, of a streaming result where `is_final=true`.
	// This field is not guaranteed to be accurate and users should not rely on it
	// to be always provided.
	// The default of 0.0 is a sentinel value indicating `confidence` was not set.
	Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Output only. A distinct integer value is assigned for every speaker within
	// the audio. This field specifies which one of those speakers was detected to
	// have spoken this word. Value ranges from '1' to diarization_speaker_count.
	// speaker_tag is set if enable_speaker_diarization = 'true' and only in the
	// top alternative.
	SpeakerTag           int32    `protobuf:"varint,5,opt,name=speaker_tag,json=speakerTag,proto3" json:"speaker_tag,omitempty"`
	// XXX_* fields are internal bookkeeping managed by the protobuf
	// runtime; do not use them directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
1789
// Standard generated proto methods for WordInfo.
func (m *WordInfo) Reset()         { *m = WordInfo{} }
func (m *WordInfo) String() string { return proto.CompactTextString(m) }
func (*WordInfo) ProtoMessage()    {}
// Descriptor returns the gzipped file descriptor bytes and this
// message's index path within the descriptor.
func (*WordInfo) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_721d8a193d06ee23, []int{15}
}
func (m *WordInfo) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_WordInfo.Unmarshal(m, b)
}
func (m *WordInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_WordInfo.Marshal(b, m, deterministic)
}
func (dst *WordInfo) XXX_Merge(src proto.Message) {
	xxx_messageInfo_WordInfo.Merge(dst, src)
}
func (m *WordInfo) XXX_Size() int {
	return xxx_messageInfo_WordInfo.Size(m)
}
func (m *WordInfo) XXX_DiscardUnknown() {
	xxx_messageInfo_WordInfo.DiscardUnknown(m)
}

var xxx_messageInfo_WordInfo proto.InternalMessageInfo

// Nil-safe getters: each returns the field value, or the type's zero
// value when the receiver itself is nil.
func (m *WordInfo) GetStartTime() *duration.Duration {
	if m != nil {
		return m.StartTime
	}
	return nil
}

func (m *WordInfo) GetEndTime() *duration.Duration {
	if m != nil {
		return m.EndTime
	}
	return nil
}

func (m *WordInfo) GetWord() string {
	if m != nil {
		return m.Word
	}
	return ""
}

func (m *WordInfo) GetConfidence() float32 {
	if m != nil {
		return m.Confidence
	}
	return 0
}

func (m *WordInfo) GetSpeakerTag() int32 {
	if m != nil {
		return m.SpeakerTag
	}
	return 0
}
1847}
1848
// init registers every generated message type and enum with the proto
// runtime under its fully-qualified proto name, so they can be looked
// up by name (e.g. for Any unpacking and reflection).
func init() {
	proto.RegisterType((*RecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.RecognizeRequest")
	proto.RegisterType((*LongRunningRecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest")
	proto.RegisterType((*StreamingRecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognizeRequest")
	proto.RegisterType((*StreamingRecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognitionConfig")
	proto.RegisterType((*RecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.RecognitionConfig")
	proto.RegisterType((*RecognitionMetadata)(nil), "google.cloud.speech.v1p1beta1.RecognitionMetadata")
	proto.RegisterType((*SpeechContext)(nil), "google.cloud.speech.v1p1beta1.SpeechContext")
	proto.RegisterType((*RecognitionAudio)(nil), "google.cloud.speech.v1p1beta1.RecognitionAudio")
	proto.RegisterType((*RecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.RecognizeResponse")
	proto.RegisterType((*LongRunningRecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse")
	proto.RegisterType((*LongRunningRecognizeMetadata)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata")
	proto.RegisterType((*StreamingRecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognizeResponse")
	proto.RegisterType((*StreamingRecognitionResult)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognitionResult")
	proto.RegisterType((*SpeechRecognitionResult)(nil), "google.cloud.speech.v1p1beta1.SpeechRecognitionResult")
	proto.RegisterType((*SpeechRecognitionAlternative)(nil), "google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative")
	proto.RegisterType((*WordInfo)(nil), "google.cloud.speech.v1p1beta1.WordInfo")
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionConfig_AudioEncoding", RecognitionConfig_AudioEncoding_name, RecognitionConfig_AudioEncoding_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_InteractionType", RecognitionMetadata_InteractionType_name, RecognitionMetadata_InteractionType_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_MicrophoneDistance", RecognitionMetadata_MicrophoneDistance_name, RecognitionMetadata_MicrophoneDistance_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_OriginalMediaType", RecognitionMetadata_OriginalMediaType_name, RecognitionMetadata_OriginalMediaType_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_RecordingDeviceType", RecognitionMetadata_RecordingDeviceType_name, RecognitionMetadata_RecordingDeviceType_value)
	proto.RegisterEnum("google.cloud.speech.v1p1beta1.StreamingRecognizeResponse_SpeechEventType", StreamingRecognizeResponse_SpeechEventType_name, StreamingRecognizeResponse_SpeechEventType_value)
}
1873
1874// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context // keep the context import referenced
var _ grpc.ClientConn // keep the grpc import referenced

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
1881
1882// SpeechClient is the client API for Speech service.
1883//
1884// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SpeechClient interface {
	// Performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.
	Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error)
	// Performs asynchronous speech recognition: receive results via the
	// google.longrunning.Operations interface. Returns either an
	// `Operation.error` or an `Operation.response` which contains
	// a `LongRunningRecognizeResponse` message.
	LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Performs bidirectional streaming speech recognition: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error)
}

// speechClient is the generated SpeechClient implementation, bound to a
// single client connection.
type speechClient struct {
	cc *grpc.ClientConn
}

// NewSpeechClient returns a SpeechClient that issues RPCs over cc.
func NewSpeechClient(cc *grpc.ClientConn) SpeechClient {
	return &speechClient{cc}
}

// Recognize invokes the unary Recognize RPC and decodes the response.
func (c *speechClient) Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error) {
	out := new(RecognizeResponse)
	err := c.cc.Invoke(ctx, "/google.cloud.speech.v1p1beta1.Speech/Recognize", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// LongRunningRecognize invokes the unary LongRunningRecognize RPC; the
// returned Operation is polled via the Operations service.
func (c *speechClient) LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
	out := new(longrunning.Operation)
	err := c.cc.Invoke(ctx, "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// StreamingRecognize opens the bidirectional stream; the caller sends
// requests and receives responses through the returned handle.
func (c *speechClient) StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error) {
	stream, err := c.cc.NewStream(ctx, &_Speech_serviceDesc.Streams[0], "/google.cloud.speech.v1p1beta1.Speech/StreamingRecognize", opts...)
	if err != nil {
		return nil, err
	}
	x := &speechStreamingRecognizeClient{stream}
	return x, nil
}
1933
// Speech_StreamingRecognizeClient is the client-side handle for the
// bidirectional StreamingRecognize stream.
type Speech_StreamingRecognizeClient interface {
	Send(*StreamingRecognizeRequest) error
	Recv() (*StreamingRecognizeResponse, error)
	grpc.ClientStream
}

// speechStreamingRecognizeClient adapts a raw grpc.ClientStream to the
// typed Send/Recv interface above.
type speechStreamingRecognizeClient struct {
	grpc.ClientStream
}

// Send marshals and sends one request message on the stream.
func (x *speechStreamingRecognizeClient) Send(m *StreamingRecognizeRequest) error {
	return x.ClientStream.SendMsg(m)
}

// Recv blocks until the next response message is received, returning an
// error (e.g. io.EOF) when the stream ends.
func (x *speechStreamingRecognizeClient) Recv() (*StreamingRecognizeResponse, error) {
	m := new(StreamingRecognizeResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
1955
1956// SpeechServer is the server API for Speech service.
type SpeechServer interface {
	// Performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.
	Recognize(context.Context, *RecognizeRequest) (*RecognizeResponse, error)
	// Performs asynchronous speech recognition: receive results via the
	// google.longrunning.Operations interface. Returns either an
	// `Operation.error` or an `Operation.response` which contains
	// a `LongRunningRecognizeResponse` message.
	LongRunningRecognize(context.Context, *LongRunningRecognizeRequest) (*longrunning.Operation, error)
	// Performs bidirectional streaming speech recognition: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingRecognize(Speech_StreamingRecognizeServer) error
}

// RegisterSpeechServer registers srv's method handlers with the gRPC
// server s via the generated service descriptor.
func RegisterSpeechServer(s *grpc.Server, srv SpeechServer) {
	s.RegisterService(&_Speech_serviceDesc, srv)
}
1974
// _Speech_Recognize_Handler is the server-side dispatch shim for the
// unary Recognize RPC: it decodes the request and calls the service,
// routing through interceptor when one is configured.
func _Speech_Recognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(RecognizeRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(SpeechServer).Recognize(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.cloud.speech.v1p1beta1.Speech/Recognize",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SpeechServer).Recognize(ctx, req.(*RecognizeRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _Speech_LongRunningRecognize_Handler is the server-side dispatch shim
// for the unary LongRunningRecognize RPC (same pattern as above).
func _Speech_LongRunningRecognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(LongRunningRecognizeRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(SpeechServer).LongRunningRecognize(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SpeechServer).LongRunningRecognize(ctx, req.(*LongRunningRecognizeRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _Speech_StreamingRecognize_Handler hands the raw server stream to the
// service implementation, wrapped in the typed adapter.
func _Speech_StreamingRecognize_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(SpeechServer).StreamingRecognize(&speechStreamingRecognizeServer{stream})
}
2014
// Speech_StreamingRecognizeServer is the server-side handle for the
// bidirectional StreamingRecognize stream.
type Speech_StreamingRecognizeServer interface {
	Send(*StreamingRecognizeResponse) error
	Recv() (*StreamingRecognizeRequest, error)
	grpc.ServerStream
}

// speechStreamingRecognizeServer adapts a raw grpc.ServerStream to the
// typed Send/Recv interface above.
type speechStreamingRecognizeServer struct {
	grpc.ServerStream
}

// Send marshals and sends one response message on the stream.
func (x *speechStreamingRecognizeServer) Send(m *StreamingRecognizeResponse) error {
	return x.ServerStream.SendMsg(m)
}

// Recv blocks until the next request message is received, returning an
// error (e.g. io.EOF) when the client finishes sending.
func (x *speechStreamingRecognizeServer) Recv() (*StreamingRecognizeRequest, error) {
	m := new(StreamingRecognizeRequest)
	if err := x.ServerStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
2036
// _Speech_serviceDesc maps the Speech service's method and stream names
// to their generated handlers; consumed by RegisterSpeechServer.
var _Speech_serviceDesc = grpc.ServiceDesc{
	ServiceName: "google.cloud.speech.v1p1beta1.Speech",
	HandlerType: (*SpeechServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Recognize",
			Handler:    _Speech_Recognize_Handler,
		},
		{
			MethodName: "LongRunningRecognize",
			Handler:    _Speech_LongRunningRecognize_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "StreamingRecognize",
			Handler:       _Speech_StreamingRecognize_Handler,
			ServerStreams: true, // both true: bidirectional stream
			ClientStreams: true,
		},
	},
	Metadata: "google/cloud/speech/v1p1beta1/cloud_speech.proto",
}
2060
// init registers the gzipped file descriptor (declared below) with the
// proto runtime under the source .proto path.
func init() {
	proto.RegisterFile("google/cloud/speech/v1p1beta1/cloud_speech.proto", fileDescriptor_cloud_speech_721d8a193d06ee23)
}
2064
2065var fileDescriptor_cloud_speech_721d8a193d06ee23 = []byte{
2066	// 2178 bytes of a gzipped FileDescriptorProto
2067	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xbf, 0x73, 0xdb, 0xc8,
2068	0xf5, 0x37, 0x48, 0x51, 0x12, 0x9f, 0x7e, 0x41, 0x2b, 0xdf, 0x89, 0x96, 0x75, 0x67, 0x1b, 0x9e,
2069	0x3b, 0xfb, 0xee, 0x7b, 0x43, 0xd9, 0xfa, 0xde, 0x5c, 0xce, 0xbe, 0xe4, 0x26, 0x14, 0x00, 0x99,
2070	0x98, 0x21, 0x09, 0xce, 0x92, 0xb2, 0xe3, 0x6b, 0x76, 0x56, 0xc4, 0x92, 0xc2, 0x84, 0x04, 0x10,
2071	0x60, 0xe1, 0x58, 0x2e, 0xd3, 0xa6, 0x48, 0x91, 0x99, 0x74, 0xa9, 0x72, 0x75, 0xfe, 0x80, 0x34,
2072	0x97, 0x26, 0x4d, 0x9a, 0x14, 0xe9, 0x52, 0xa5, 0xc8, 0x7f, 0x90, 0x26, 0x33, 0x69, 0x32, 0xbb,
2073	0x0b, 0x50, 0x10, 0x29, 0x5b, 0xb6, 0x26, 0x37, 0x93, 0x0e, 0xfb, 0x79, 0x3f, 0xf6, 0xbd, 0xb7,
2074	0x6f, 0xdf, 0xbe, 0x07, 0x78, 0x30, 0x0a, 0xc3, 0xd1, 0x98, 0xed, 0x0d, 0xc6, 0x61, 0xea, 0xed,
2075	0x25, 0x11, 0x63, 0x83, 0x93, 0xbd, 0x17, 0x0f, 0xa3, 0x87, 0xc7, 0x8c, 0xd3, 0x87, 0x0a, 0x26,
2076	0x0a, 0xae, 0x47, 0x71, 0xc8, 0x43, 0xf4, 0x81, 0x92, 0xa8, 0x4b, 0x52, 0x3d, 0x23, 0x4d, 0x25,
2077	0x76, 0x76, 0x33, 0x85, 0x34, 0xf2, 0xf7, 0x68, 0x10, 0x84, 0x9c, 0x72, 0x3f, 0x0c, 0x12, 0x25,
2078	0xbc, 0x73, 0x37, 0xa3, 0x8e, 0xc3, 0x60, 0x14, 0xa7, 0x41, 0xe0, 0x07, 0xa3, 0xbd, 0x30, 0x62,
2079	0xf1, 0x39, 0xa6, 0x1b, 0x19, 0x93, 0x5c, 0x1d, 0xa7, 0xc3, 0x3d, 0x1a, 0x9c, 0x66, 0xa4, 0x0f,
2080	0x67, 0x49, 0x5e, 0xaa, 0x64, 0x33, 0xfa, 0xcd, 0x59, 0x3a, 0x9b, 0x44, 0x3c, 0x17, 0xbe, 0x35,
2081	0x4b, 0xe4, 0xfe, 0x84, 0x25, 0x9c, 0x4e, 0xa2, 0x8c, 0x61, 0x3b, 0x63, 0x88, 0xa3, 0xc1, 0x5e,
2082	0xc2, 0x29, 0x4f, 0x33, 0x8b, 0x8c, 0xdf, 0x69, 0xa0, 0x63, 0x36, 0x08, 0x47, 0x81, 0xff, 0x8a,
2083	0x61, 0xf6, 0xb3, 0x94, 0x25, 0x1c, 0x35, 0x61, 0x71, 0x10, 0x06, 0x43, 0x7f, 0x54, 0xd3, 0x6e,
2084	0x6b, 0xf7, 0x57, 0xf6, 0x1f, 0xd4, 0xdf, 0x18, 0x99, 0x7a, 0xa6, 0x40, 0x58, 0x6b, 0x4a, 0x39,
2085	0x9c, 0xc9, 0x23, 0x1b, 0x2a, 0x34, 0xf5, 0xfc, 0xb0, 0x56, 0x92, 0x8a, 0xf6, 0xde, 0x5e, 0x51,
2086	0x43, 0x88, 0x61, 0x25, 0x6d, 0xfc, 0x5e, 0x83, 0x9b, 0xad, 0x30, 0x18, 0x61, 0x15, 0xd8, 0xff,
2087	0x7d, 0x83, 0xbf, 0xd3, 0xe0, 0x46, 0x8f, 0xc7, 0x8c, 0x4e, 0x2e, 0x32, 0x77, 0x08, 0x7a, 0x92,
2088	0x13, 0xc9, 0x39, 0xc3, 0x1f, 0x5d, 0xb2, 0xdf, 0xac, 0xce, 0x33, 0x0f, 0x9a, 0xd7, 0xf0, 0xc6,
2089	0x54, 0xa9, 0x82, 0xd0, 0x47, 0xb0, 0x26, 0xcd, 0x11, 0x7b, 0x70, 0x16, 0x70, 0xe9, 0xd4, 0x6a,
2090	0xf3, 0x1a, 0x5e, 0x95, 0xb0, 0xa9, 0xd0, 0x83, 0x2d, 0xd8, 0x3c, 0x33, 0x27, 0x56, 0x36, 0x1a,
2091	0x7f, 0xd0, 0x60, 0xe7, 0xf5, 0xbb, 0xfd, 0x17, 0x23, 0xfe, 0x09, 0xe8, 0x89, 0x1f, 0x8c, 0xc6,
2092	0x8c, 0xa4, 0x9c, 0xb3, 0x98, 0x06, 0x03, 0x26, 0xed, 0x5c, 0xc6, 0x1b, 0x0a, 0x3f, 0xca, 0x61,
2093	0x74, 0x0f, 0x36, 0xfc, 0x80, 0xb3, 0xd8, 0x9f, 0x90, 0x98, 0x25, 0xe9, 0x98, 0x27, 0xb5, 0xb2,
2094	0xe4, 0x5c, 0xcf, 0x60, 0xac, 0x50, 0xe3, 0x9f, 0xcb, 0xb0, 0x39, 0x6f, 0xf3, 0x37, 0xb0, 0xcc,
2095	0x82, 0x41, 0xe8, 0xf9, 0x81, 0xb2, 0x7a, 0x7d, 0xff, 0xeb, 0x77, 0xb5, 0xba, 0x2e, 0x4f, 0xd9,
2096	0xce, 0xb4, 0xe0, 0xa9, 0x3e, 0xf4, 0x29, 0x6c, 0x26, 0x74, 0x12, 0x8d, 0x19, 0x89, 0x29, 0x67,
2097	0xe4, 0x84, 0xc5, 0xfc, 0x95, 0x74, 0xa3, 0x82, 0x37, 0x14, 0x01, 0x53, 0xce, 0x9a, 0x02, 0x46,
2098	0x75, 0xd8, 0xca, 0x8e, 0xe5, 0x84, 0x06, 0x01, 0x1b, 0x93, 0x41, 0x98, 0x06, 0xbc, 0xb6, 0x24,
2099	0xb9, 0x37, 0xd5, 0xd1, 0x28, 0x8a, 0x29, 0x08, 0xa8, 0x0f, 0xf7, 0x58, 0x40, 0x8f, 0xc7, 0x8c,
2100	0x24, 0x2c, 0xa2, 0x52, 0x7f, 0x7c, 0x66, 0x18, 0x89, 0x58, 0x9c, 0x6b, 0xaa, 0xad, 0xca, 0x70,
2101	0xdc, 0x55, 0xec, 0xbd, 0x8c, 0xbb, 0xe0, 0x45, 0x97, 0xc5, 0x99, 0x6a, 0x74, 0x17, 0xd6, 0xc6,
2102	0x34, 0x18, 0xa5, 0x74, 0xc4, 0xc8, 0x20, 0xf4, 0x98, 0x0c, 0x65, 0x15, 0xaf, 0xe6, 0xa0, 0x19,
2103	0x7a, 0x0c, 0xfd, 0x10, 0x76, 0xe8, 0x98, 0xb3, 0x38, 0xa0, 0xdc, 0x7f, 0xc1, 0xc8, 0x39, 0x81,
2104	0xa4, 0x86, 0x6e, 0x97, 0xef, 0x57, 0x71, 0xad, 0xc0, 0xd1, 0x2a, 0x08, 0x27, 0xe2, 0x68, 0x27,
2105	0xf4, 0x25, 0x29, 0xd0, 0x93, 0xda, 0x82, 0x8a, 0xc9, 0x84, 0xbe, 0x6c, 0x14, 0x60, 0xc1, 0x1a,
2106	0xc5, 0xe1, 0x90, 0x06, 0x3e, 0x3f, 0x25, 0x43, 0x5f, 0x90, 0x6a, 0x15, 0x95, 0x05, 0x53, 0xfc,
2107	0x50, 0xc2, 0xe8, 0x08, 0x36, 0xd4, 0x41, 0xa9, 0xb4, 0x7e, 0xc9, 0x93, 0xda, 0xe2, 0xed, 0xf2,
2108	0xfd, 0x95, 0xfd, 0xcf, 0x2e, 0xbb, 0x3c, 0x12, 0x30, 0x95, 0x10, 0x5e, 0x4f, 0x8a, 0xcb, 0x04,
2109	0xfd, 0x00, 0x6a, 0x59, 0x94, 0x7f, 0x1e, 0xc6, 0x1e, 0x11, 0x15, 0x94, 0x84, 0xc3, 0x61, 0xc2,
2110	0x78, 0x52, 0x5b, 0x96, 0x96, 0xbc, 0xa7, 0xe8, 0xcf, 0xc2, 0xd8, 0xeb, 0xfb, 0x13, 0xe6, 0x2a,
2111	0x22, 0xfa, 0x1c, 0xde, 0x2f, 0x0a, 0xca, 0xb4, 0xf6, 0x98, 0x48, 0xe3, 0x0d, 0x29, 0x76, 0xfd,
2112	0x4c, 0xcc, 0x9c, 0xd2, 0xd0, 0x8f, 0x61, 0x37, 0x93, 0xa2, 0x29, 0x0f, 0x27, 0x94, 0xfb, 0x03,
2113	0x12, 0xa5, 0xc1, 0x80, 0xa7, 0xb2, 0xea, 0xd7, 0x56, 0xa4, 0xec, 0x8e, 0xe2, 0x69, 0xe4, 0x2c,
2114	0xdd, 0x33, 0x0e, 0x71, 0x36, 0x79, 0x5a, 0x44, 0x8c, 0xfe, 0x94, 0xc5, 0xc4, 0xf3, 0x69, 0xec,
2115	0xbf, 0x52, 0xf2, 0xba, 0x94, 0xcf, 0x5c, 0xea, 0x29, 0x06, 0xeb, 0x8c, 0x8e, 0x1e, 0xc3, 0x8d,
2116	0x02, 0xfb, 0x54, 0x85, 0x4a, 0xc5, 0x4d, 0x79, 0x48, 0xdb, 0x05, 0x86, 0x4c, 0x83, 0x4a, 0xc8,
2117	0x0e, 0x2c, 0x4f, 0x18, 0xa7, 0x1e, 0xe5, 0xb4, 0x56, 0x95, 0xd7, 0x7f, 0xff, 0xed, 0x2f, 0x52,
2118	0x3b, 0x93, 0xc4, 0x53, 0x1d, 0xe8, 0x3a, 0x54, 0x26, 0xa1, 0xc7, 0xc6, 0xb5, 0x35, 0x99, 0x82,
2119	0x6a, 0x81, 0xee, 0xc0, 0x6a, 0x9a, 0x30, 0xc2, 0x82, 0x13, 0x71, 0xf9, 0xbd, 0xda, 0xba, 0xf4,
2120	0x68, 0x25, 0x4d, 0x98, 0x9d, 0x41, 0xc6, 0x2f, 0x35, 0x58, 0x3b, 0x77, 0x23, 0x51, 0x0d, 0xae,
2121	0xdb, 0x1d, 0xd3, 0xb5, 0x9c, 0xce, 0x13, 0x72, 0xd4, 0xe9, 0x75, 0x6d, 0xd3, 0x39, 0x74, 0x6c,
2122	0x4b, 0xbf, 0x86, 0x56, 0x61, 0xb9, 0xe5, 0x74, 0xec, 0x06, 0x7e, 0xf8, 0x85, 0xae, 0xa1, 0x65,
2123	0x58, 0x38, 0x6c, 0x35, 0x4c, 0xbd, 0x84, 0xaa, 0x50, 0x69, 0x1f, 0xb5, 0x1a, 0xcf, 0xf4, 0x32,
2124	0x5a, 0x82, 0x72, 0xa3, 0x8d, 0xf5, 0x05, 0x04, 0xb0, 0xd8, 0x68, 0x63, 0xf2, 0xec, 0x40, 0xaf,
2125	0x08, 0x39, 0xf7, 0xc9, 0x13, 0xe2, 0x76, 0x8f, 0x7a, 0xfa, 0x22, 0xda, 0x81, 0xf7, 0x7b, 0x5d,
2126	0xdb, 0xfe, 0x09, 0x79, 0xe6, 0xf4, 0x9b, 0xa4, 0x69, 0x37, 0x2c, 0x1b, 0x93, 0x83, 0xe7, 0x7d,
2127	0x5b, 0x5f, 0x32, 0xfe, 0x5d, 0x85, 0xad, 0x0b, 0x1c, 0x45, 0x13, 0xd0, 0x65, 0x7d, 0xa2, 0x03,
2128	0x19, 0x6a, 0x7e, 0x1a, 0xb1, 0xac, 0xfe, 0x1c, 0xbc, 0x7b, 0xd8, 0xea, 0xce, 0x99, 0xaa, 0xfe,
2129	0x69, 0xc4, 0xf0, 0x86, 0x7f, 0x1e, 0x40, 0x5f, 0xc3, 0xae, 0x1f, 0x78, 0x69, 0xc2, 0xe3, 0x53,
2130	0x12, 0x50, 0x7f, 0x90, 0xc8, 0xdb, 0x4a, 0xc2, 0x21, 0x51, 0x2f, 0x9b, 0xb8, 0xe7, 0x6b, 0xb8,
2131	0x96, 0xf3, 0x74, 0x04, 0x8b, 0xb8, 0xaf, 0xee, 0x50, 0x86, 0x12, 0xbd, 0x80, 0xad, 0x89, 0x3f,
2132	0x88, 0xc3, 0xe8, 0x24, 0x0c, 0x18, 0xf1, 0xfc, 0x84, 0xcb, 0x9a, 0xbc, 0x20, 0x2d, 0xb6, 0xaf,
2133	0x60, 0x71, 0x7b, 0xaa, 0xcd, 0xca, 0x94, 0x61, 0x34, 0x99, 0xc3, 0x10, 0x87, 0xad, 0x30, 0xf6,
2134	0x47, 0x7e, 0x40, 0xc7, 0x64, 0xc2, 0x3c, 0x9f, 0xaa, 0x48, 0x55, 0xe4, 0xbe, 0xd6, 0x15, 0xf6,
2135	0x75, 0x33, 0x6d, 0x6d, 0xa1, 0x4c, 0xc6, 0x6a, 0x33, 0x9c, 0x85, 0xd0, 0x2b, 0x78, 0x4f, 0x14,
2136	0xd3, 0x58, 0x64, 0x0f, 0xf1, 0xd8, 0x0b, 0x7f, 0xc0, 0xd4, 0xbe, 0x8b, 0x72, 0xdf, 0xc3, 0x2b,
2137	0xec, 0x8b, 0x73, 0x7d, 0x96, 0x54, 0x27, 0x77, 0xde, 0x8a, 0xe7, 0x41, 0xb4, 0x7f, 0xc1, 0xde,
2138	0x01, 0x9d, 0x30, 0xf9, 0x14, 0x54, 0xe7, 0x64, 0x3a, 0x74, 0xc2, 0xd0, 0x67, 0x80, 0xce, 0xa2,
2139	0x24, 0x6a, 0x94, 0x34, 0x76, 0x59, 0x0a, 0xe8, 0x53, 0xf7, 0xfc, 0x89, 0xda, 0xe1, 0x2e, 0xac,
2140	0x85, 0xc7, 0xc3, 0x34, 0x19, 0x50, 0xce, 0x3c, 0xe2, 0x7b, 0xf2, 0xba, 0x96, 0xf1, 0xea, 0x19,
2141	0xe8, 0x78, 0xe8, 0x16, 0xac, 0xa8, 0xf7, 0x88, 0x87, 0x91, 0x3f, 0xa8, 0x81, 0xd4, 0x05, 0x12,
2142	0xea, 0x0b, 0xc4, 0xf8, 0x93, 0x06, 0x1b, 0x33, 0x69, 0x87, 0x6e, 0xc3, 0xae, 0xd3, 0xe9, 0xdb,
2143	0xb8, 0x61, 0xf6, 0x1d, 0xb7, 0x43, 0xfa, 0xcf, 0xbb, 0xf6, 0xcc, 0x85, 0x5b, 0x07, 0xb0, 0x9c,
2144	0x9e, 0x79, 0xd4, 0xeb, 0x39, 0x6e, 0x47, 0xd7, 0x90, 0x0e, 0xab, 0x5d, 0x6c, 0xf7, 0xec, 0x4e,
2145	0xbf, 0x21, 0x44, 0xf4, 0x92, 0xe0, 0xe8, 0x36, 0xdd, 0x8e, 0x4d, 0xcc, 0x46, 0xab, 0xa5, 0x97,
2146	0xd1, 0x1a, 0x54, 0x9f, 0xba, 0x8e, 0x69, 0xb7, 0x1b, 0x4e, 0x4b, 0x5f, 0x40, 0x37, 0x61, 0xbb,
2147	0x8b, 0xdd, 0x43, 0x5b, 0x2a, 0x68, 0xb4, 0x5a, 0xcf, 0x49, 0x17, 0xbb, 0xd6, 0x91, 0x69, 0x5b,
2148	0x7a, 0x45, 0x68, 0x93, 0xbc, 0xa4, 0x67, 0x37, 0xb0, 0xd9, 0xd4, 0x17, 0xd1, 0x26, 0xac, 0x29,
2149	0xc4, 0x74, 0xdb, 0xed, 0x46, 0xc7, 0xd2, 0x97, 0x84, 0x42, 0xcb, 0x31, 0xb3, 0xfd, 0x96, 0x0d,
2150	0x0f, 0xd0, 0x7c, 0x2e, 0xa2, 0xbb, 0x70, 0xab, 0xed, 0x98, 0xd8, 0x55, 0xa6, 0x58, 0x4e, 0xaf,
2151	0xdf, 0xe8, 0x98, 0xb3, 0xce, 0xac, 0x41, 0x55, 0xd4, 0x8e, 0x43, 0xc7, 0x6e, 0x59, 0xba, 0x26,
2152	0x8a, 0x42, 0xdb, 0xb1, 0xd4, 0xaa, 0x24, 0x56, 0x87, 0x39, 0xad, 0x6c, 0x74, 0x60, 0x73, 0x2e,
2153	0xf3, 0xc4, 0x26, 0x2e, 0x76, 0x9e, 0x38, 0x9d, 0x46, 0x8b, 0xb4, 0x6d, 0xcb, 0x69, 0x5c, 0x14,
2154	0xb1, 0x2a, 0x54, 0x1a, 0x47, 0x96, 0xe3, 0xea, 0x9a, 0xf8, 0x7c, 0xea, 0x58, 0xb6, 0xab, 0x97,
2155	0x8c, 0x6f, 0x35, 0x55, 0x56, 0x66, 0xb3, 0xe7, 0x23, 0xb8, 0x83, 0x6d, 0xd3, 0xc5, 0xb2, 0xd6,
2156	0x59, 0xf6, 0x53, 0xe1, 0xfa, 0xc5, 0xc7, 0xd0, 0x6b, 0x37, 0x70, 0x5f, 0xba, 0xa7, 0x6b, 0x68,
2157	0x11, 0x4a, 0x5d, 0xb3, 0x18, 0x7c, 0x51, 0x15, 0xf5, 0x32, 0x5a, 0x81, 0xa5, 0xa7, 0x76, 0xd3,
2158	0x31, 0x5b, 0xb6, 0xbe, 0x20, 0xca, 0xa8, 0xdb, 0x6f, 0xda, 0x98, 0xb8, 0x47, 0x7d, 0xcb, 0x75,
2159	0x71, 0xa6, 0x5f, 0xaf, 0xa0, 0x6d, 0xd8, 0x52, 0x14, 0xa7, 0x53, 0x24, 0x2c, 0x1a, 0x9f, 0xc0,
2160	0xda, 0xb9, 0x07, 0x16, 0xd5, 0x60, 0x29, 0x3a, 0x89, 0x69, 0xc2, 0x92, 0x9a, 0x26, 0x1b, 0x85,
2161	0x7c, 0x69, 0xe0, 0xe9, 0xcc, 0x31, 0x6d, 0x9c, 0xd1, 0x0e, 0x2c, 0xe5, 0x5d, 0xaa, 0x96, 0x75,
2162	0xa9, 0x39, 0x80, 0x10, 0x94, 0xd3, 0xd8, 0x97, 0xed, 0x54, 0xb5, 0x79, 0x0d, 0x8b, 0xc5, 0xc1,
2163	0x3a, 0xa8, 0x26, 0x96, 0x24, 0x61, 0x1a, 0x0f, 0x98, 0xc1, 0xa6, 0x1d, 0x9f, 0xe8, 0xb3, 0x93,
2164	0x28, 0x0c, 0x12, 0x86, 0xba, 0xb0, 0x94, 0x37, 0x8a, 0x25, 0xd9, 0x22, 0x7c, 0xf1, 0x56, 0x2d,
2165	0x42, 0xc1, 0x38, 0xd5, 0x51, 0xe2, 0x5c, 0x8d, 0x11, 0xc1, 0xee, 0xc5, 0x83, 0xc8, 0xf7, 0xb6,
2166	0xe3, 0x9f, 0xb5, 0x8b, 0xb7, 0x9c, 0x3e, 0x2f, 0xaa, 0x75, 0x1a, 0xc5, 0x2c, 0x49, 0x44, 0x2f,
2167	0x38, 0xc8, 0x43, 0x58, 0x91, 0xad, 0x93, 0xc4, 0xbb, 0x0a, 0x46, 0x8f, 0x00, 0x12, 0x4e, 0x63,
2168	0x2e, 0xbb, 0x9b, 0x6c, 0xc4, 0xd9, 0xc9, 0x0d, 0xcc, 0x87, 0xc7, 0x7a, 0x3f, 0x1f, 0x1e, 0x71,
2169	0x55, 0x72, 0x8b, 0x35, 0xb2, 0x40, 0x1f, 0xd3, 0x84, 0x93, 0x34, 0xf2, 0x44, 0x03, 0x2a, 0x15,
2170	0x94, 0x2f, 0x55, 0xb0, 0x2e, 0x64, 0x8e, 0xa4, 0x88, 0x00, 0x8d, 0xbf, 0x97, 0xe6, 0xa7, 0x8a,
2171	0x42, 0xf4, 0xee, 0x43, 0x85, 0xc5, 0x71, 0x18, 0x67, 0x43, 0x05, 0xca, 0x35, 0xc7, 0xd1, 0xa0,
2172	0xde, 0x93, 0x63, 0x2b, 0x56, 0x0c, 0xa8, 0x37, 0x1b, 0xe7, 0xab, 0x4c, 0x4e, 0x33, 0xa1, 0x46,
2173	0x29, 0x6c, 0x66, 0x9d, 0x25, 0x7b, 0xc1, 0x02, 0xae, 0x4a, 0xab, 0x7a, 0xf7, 0x9c, 0x77, 0x54,
2174	0x7f, 0xe6, 0x54, 0x76, 0xc2, 0xb6, 0xd0, 0xa8, 0x1e, 0xec, 0xe4, 0x3c, 0x60, 0xb4, 0x60, 0x63,
2175	0x86, 0x07, 0xed, 0x42, 0x4d, 0xb4, 0x19, 0x66, 0x93, 0xd8, 0x4f, 0xed, 0x4e, 0x7f, 0xe6, 0x4a,
2176	0xdf, 0x84, 0x6d, 0xbb, 0x63, 0x11, 0xf7, 0x90, 0xf4, 0x9c, 0xce, 0x93, 0x96, 0x4d, 0x8e, 0xfa,
2177	0xa2, 0x12, 0x77, 0x4c, 0x5b, 0xd7, 0x8c, 0xef, 0x4a, 0x17, 0x0f, 0x6e, 0xca, 0x59, 0x44, 0x60,
2178	0xf5, 0x5c, 0x3f, 0xae, 0xc9, 0xe8, 0x7d, 0xf5, 0xae, 0x59, 0x5a, 0x68, 0xde, 0xf1, 0x39, 0x85,
2179	0xe8, 0x06, 0x2c, 0xfb, 0x09, 0x19, 0x8a, 0xf2, 0x97, 0xcd, 0x71, 0x4b, 0x7e, 0x72, 0x28, 0x96,
2180	0x68, 0x17, 0x44, 0x42, 0x1d, 0xfb, 0x63, 0x9f, 0x9f, 0xca, 0xe4, 0x29, 0xe1, 0x33, 0x00, 0x35,
2181	0x60, 0x43, 0x1d, 0x04, 0x61, 0x81, 0xea, 0xbf, 0x65, 0xec, 0x57, 0xf6, 0x6f, 0xcc, 0x25, 0x98,
2182	0x95, 0xfd, 0x1b, 0xc1, 0x6b, 0x4a, 0xc2, 0x0e, 0x64, 0x47, 0x2e, 0x5e, 0xb2, 0x7c, 0xa6, 0xe2,
2183	0x74, 0x24, 0x5b, 0x87, 0x0a, 0x86, 0x0c, 0xea, 0xd3, 0xd1, 0xfc, 0xd0, 0xb3, 0x38, 0x3f, 0xf4,
2184	0x18, 0x7f, 0xd4, 0x60, 0xfb, 0x35, 0xd7, 0xf2, 0xfb, 0x0f, 0xdf, 0x8c, 0x0b, 0xa5, 0xcb, 0x5d,
2185	0xa8, 0x5c, 0xe0, 0xc2, 0x6f, 0x35, 0xd8, 0x7d, 0xd3, 0xa6, 0xe8, 0x43, 0x00, 0x1e, 0xd3, 0x20,
2186	0x19, 0xc4, 0x7e, 0xa4, 0xca, 0x45, 0x15, 0x17, 0x10, 0x41, 0x2f, 0x0c, 0x32, 0x25, 0x79, 0x56,
2187	0x05, 0x04, 0xfd, 0x08, 0x2a, 0x62, 0xda, 0x11, 0x03, 0xb8, 0x08, 0xc0, 0xbd, 0x4b, 0x02, 0x20,
2188	0x86, 0x1f, 0x27, 0x18, 0x86, 0x58, 0x49, 0x19, 0x7f, 0xd1, 0x60, 0x39, 0xc7, 0xd0, 0x97, 0xe7,
2189	0xaa, 0x92, 0x76, 0xd9, 0x99, 0x17, 0x8a, 0xd2, 0xe7, 0x62, 0xa2, 0xf7, 0x8a, 0xd5, 0xec, 0x0d,
2190	0x72, 0x4b, 0x2c, 0xcb, 0x12, 0x04, 0x0b, 0xc2, 0x8a, 0x6c, 0xe0, 0x95, 0xdf, 0x33, 0xfe, 0x2e,
2191	0xcc, 0xf9, 0x7b, 0x0b, 0x56, 0xf2, 0x11, 0xa9, 0x90, 0x59, 0x19, 0xd4, 0xa7, 0xa3, 0xfd, 0xbf,
2192	0x95, 0x61, 0x51, 0x45, 0x1c, 0xfd, 0x46, 0x83, 0xea, 0xb4, 0x0c, 0xa0, 0xb7, 0xfc, 0x85, 0x34,
2193	0xfd, 0x3b, 0xb4, 0xf3, 0xe0, 0xed, 0x05, 0x54, 0x85, 0x31, 0x3e, 0xfe, 0xc5, 0x5f, 0xff, 0xf1,
2194	0xeb, 0xd2, 0x6d, 0xe3, 0x66, 0xe1, 0xff, 0xa6, 0x12, 0x7b, 0x1c, 0xe7, 0xcc, 0x8f, 0xb5, 0x4f,
2195	0xd1, 0xb7, 0x1a, 0x5c, 0xbf, 0xe8, 0x29, 0x41, 0x8f, 0x2f, 0xd9, 0xf2, 0x0d, 0xff, 0xde, 0x76,
2196	0x3e, 0xc8, 0x65, 0x0b, 0x7f, 0x3e, 0xeb, 0x6e, 0xfe, 0xe7, 0xd3, 0x78, 0x28, 0x6d, 0xfb, 0x3f,
2197	0xe3, 0xe3, 0x79, 0xdb, 0x0a, 0x02, 0xe7, 0xcc, 0xfc, 0x95, 0x06, 0x68, 0xbe, 0x9e, 0xa2, 0x2f,
2198	0xaf, 0x50, 0x82, 0x95, 0x89, 0x8f, 0xae, 0x5c, 0xbc, 0x8d, 0x6b, 0xf7, 0xb5, 0x07, 0xda, 0xc1,
2199	0x2b, 0xb8, 0x33, 0x08, 0x27, 0x6f, 0xd6, 0x72, 0xb0, 0xa2, 0x8e, 0xbf, 0x2b, 0x12, 0xaf, 0xab,
2200	0x7d, 0x63, 0x66, 0xdc, 0xa3, 0x50, 0xdc, 0xcc, 0x7a, 0x18, 0x8f, 0xf6, 0x46, 0x2c, 0x90, 0x69,
2201	0xb9, 0xa7, 0x48, 0x34, 0xf2, 0x93, 0xd7, 0xfc, 0x9e, 0xfe, 0x4a, 0x01, 0xff, 0xd2, 0xb4, 0xe3,
2202	0x45, 0x29, 0xf2, 0xff, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x29, 0xdf, 0xd7, 0xd0, 0x16,
2203	0x00, 0x00,
2204}
2205