// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/cloud/automl/v1/io.proto

package automl

import (
	fmt "fmt"
	math "math"

	proto "github.com/golang/protobuf/proto"
	_ "google.golang.org/genproto/googleapis/api/annotations"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

// Input configuration for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData] action.
//
// The format of the input depends on the dataset_metadata of the Dataset
// into which the import is happening. As input source the
// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source]
// is expected, unless specified otherwise. Additionally, any input .CSV file
// by itself must be 100MB or smaller, unless specified otherwise.
// If an "example" file (that is, image, video etc.) with identical content
// (even if it has a different `GCS_FILE_PATH`) is mentioned multiple times,
// then its label, bounding boxes etc. are appended. The same file should
// always be provided with the same `ML_USE` and `GCS_FILE_PATH`; if it is not,
// then these values are nondeterministically selected from the given ones.
//
// The formats are represented in EBNF with commas being literal and with
// non-terminal symbols defined near the end of this comment. The formats are:
//
// <h4>AutoML Vision</h4>
//
//
// <div class="ds-selector-tabs"><section><h5>Classification</h5>
//
// See [Preparing your training
// data](https://cloud.google.com/vision/automl/docs/prepare) for more
// information.
//
// CSV file(s) with each line in format:
//
//     ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
//
// *   `ML_USE` - Identifies the data set that the current row (file) applies
// to.
//     This value can be one of the following:
//     * `TRAIN` - Rows in this file are used to train the model.
//     * `TEST` - Rows in this file are used to test the model during training.
//     * `UNASSIGNED` - Rows in this file are not categorized. They are
//        automatically divided into train and test data: 80% for training and
//        20% for testing.
//
// *   `GCS_FILE_PATH` - The Google Cloud Storage location of an image of up to
//      30MB in size. Supported extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP,
//      .TIFF, .ICO.
//
// *   `LABEL` - A label that identifies the object in the image.
//
// For the `MULTICLASS` classification type, at most one `LABEL` is allowed
// per image. If an image has not yet been labeled, then it should be
// mentioned just once with no `LABEL`.
//
// Some sample rows:
//
//     TRAIN,gs://folder/image1.jpg,daisy
//     TEST,gs://folder/image2.jpg,dandelion,tulip,rose
//     UNASSIGNED,gs://folder/image3.jpg,daisy
//     UNASSIGNED,gs://folder/image4.jpg
//
//
// </section><section><h5>Object Detection</h5>
// See [Preparing your training
// data](https://cloud.google.com/vision/automl/object-detection/docs/prepare)
// for more information.
//
// CSV file(s) with each line in format:
//
//     ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,)
//
// *   `ML_USE` - Identifies the data set that the current row (file) applies
// to.
//     This value can be one of the following:
//     * `TRAIN` - Rows in this file are used to train the model.
//     * `TEST` - Rows in this file are used to test the model during training.
//     * `UNASSIGNED` - Rows in this file are not categorized. They are
//        automatically divided into train and test data: 80% for training and
//        20% for testing.
//
// *  `GCS_FILE_PATH` - The Google Cloud Storage location of an image of up to
//     30MB in size. Supported extensions: .JPEG, .GIF, .PNG. Each image
//     is assumed to be exhaustively labeled.
//
// *  `LABEL` - A label that identifies the object in the image specified by the
//    `BOUNDING_BOX`.
//
// *  `BOUNDING_BOX` - The vertices of an object in the example image.
//    The minimum allowed `BOUNDING_BOX` edge length is 0.01, and no more than
//    500 `BOUNDING_BOX` instances per image are allowed (one `BOUNDING_BOX`
//    per line). If an image has none of the looked-for objects then it should
//    be mentioned just once with no LABEL and ",,,,,,," in place of the
//    `BOUNDING_BOX`.
//
// **Four sample rows:**
//
//     TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
//     TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
//     UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
//     TEST,gs://folder/im3.png,,,,,,,,,
//   </section>
// </div>
//
//
// <h4>AutoML Video Intelligence</h4>
//
//
// <div class="ds-selector-tabs"><section><h5>Classification</h5>
//
// See [Preparing your training
// data](https://cloud.google.com/video-intelligence/automl/docs/prepare) for
// more information.
//
// CSV file(s) with each line in format:
//
//     ML_USE,GCS_FILE_PATH
//
// For `ML_USE`, do not use `VALIDATE`.
//
// `GCS_FILE_PATH` is the path to another .csv file that describes training
// examples for a given `ML_USE`, using the following row format:
//
//     GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,)
//
// Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up
// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
//
// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
// length of the video, and the end time must be after the start time. Any
// segment of a video which has one or more labels on it is considered a
// hard negative for all other labels. Any segment with no labels on
// it is considered to be unknown. If a whole video is unknown, then
// it should be mentioned just once with ",," in place of `LABEL,
// TIME_SEGMENT_START,TIME_SEGMENT_END`.
//
// Sample top level CSV file:
//
//     TRAIN,gs://folder/train_videos.csv
//     TEST,gs://folder/test_videos.csv
//     UNASSIGNED,gs://folder/other_videos.csv
//
// Sample rows of a CSV file for a particular ML_USE:
//
//     gs://folder/video1.avi,car,120,180.000021
//     gs://folder/video1.avi,bike,150,180.000021
//     gs://folder/vid2.avi,car,0,60.5
//     gs://folder/vid3.avi,,,
//
//
//
// </section><section><h5>Object Tracking</h5>
//
// See [Preparing your training
// data](/video-intelligence/automl/object-tracking/docs/prepare) for more
// information.
//
// CSV file(s) with each line in format:
//
//     ML_USE,GCS_FILE_PATH
//
// For `ML_USE`, do not use `VALIDATE`.
//
// `GCS_FILE_PATH` is the path to another .csv file that describes training
// examples for a given `ML_USE`, using the following row format:
//
//     GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX
//
// or
//
//     GCS_FILE_PATH,,,,,,,,,,
//
// Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up
// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
// Providing `INSTANCE_ID`s can help to obtain a better model. When
// a specific labeled entity leaves the video frame and shows up again
// later, it is not required, albeit preferable, that the same
// `INSTANCE_ID` be given to it.
//
// `TIMESTAMP` must be within the length of the video, and the
// `BOUNDING_BOX` is assumed to be drawn on the video frame closest
// to the `TIMESTAMP`. Any frame mentioned by a `TIMESTAMP` is expected
// to be exhaustively labeled, and no more than 500 `BOUNDING_BOX`
// instances per frame are allowed. If a whole video is unknown, then
// it should be mentioned just once with ",,,,,,,,,," in place of `LABEL,
// [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX`.
//
// Sample top level CSV file:
//
//      TRAIN,gs://folder/train_videos.csv
//      TEST,gs://folder/test_videos.csv
//      UNASSIGNED,gs://folder/other_videos.csv
//
// Seven sample rows of a CSV file for a particular ML_USE:
//
//      gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9
//      gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9
//      gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3
//      gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,,
//      gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,,
//      gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,,
//      gs://folder/video2.avi,,,,,,,,,,,
//   </section>
// </div>
//
//
// <h4>AutoML Natural Language</h4>
//
//
// <div class="ds-selector-tabs"><section><h5>Entity Extraction</h5>
//
// See [Preparing your training
// data](/natural-language/automl/entity-analysis/docs/prepare) for more
// information.
//
// One or more CSV file(s) with each line in the following format:
//
//     ML_USE,GCS_FILE_PATH
//
// *   `ML_USE` - Identifies the data set that the current row (file) applies
// to.
//     This value can be one of the following:
//     * `TRAIN` - Rows in this file are used to train the model.
//     * `TEST` - Rows in this file are used to test the model during training.
//     * `UNASSIGNED` - Rows in this file are not categorized. They are
//        automatically divided into train and test data: 80% for training and
//        20% for testing.
//
// *   `GCS_FILE_PATH` - Identifies a JSON Lines (.JSONL) file stored in
//      Google Cloud Storage that contains in-line text as documents
//      for model training.
//
// After the training data set has been determined from the `TRAIN` and
// `UNASSIGNED` CSV files, the training data is divided into train and
// validation data sets. 70% for training and 30% for validation.
//
// For example:
//
//     TRAIN,gs://folder/file1.jsonl
//     VALIDATE,gs://folder/file2.jsonl
//     TEST,gs://folder/file3.jsonl
//
// **In-line JSONL files**
//
// In-line .JSONL files contain, per line, a JSON document that wraps a
// [`text_snippet`][google.cloud.automl.v1.TextSnippet] field followed by
// one or more [`annotations`][google.cloud.automl.v1.AnnotationPayload]
// fields, which have `display_name` and `text_extraction` fields to describe
// the entity from the text snippet. Multiple JSON documents can be separated
// using line breaks (\n).
//
// The supplied text must be annotated exhaustively. For example, if you
// include the text "horse", but do not label it as "animal",
// then "horse" is assumed to not be an "animal".
//
// Any given text snippet content must have 30,000 characters or
// less, and also be UTF-8 NFC encoded. ASCII is accepted as it is
// UTF-8 NFC encoded.
//
// For example:
//
//     {
//       "text_snippet": {
//         "content": "dog car cat"
//       },
//       "annotations": [
//          {
//            "display_name": "animal",
//            "text_extraction": {
//              "text_segment": {"start_offset": 0, "end_offset": 2}
//            }
//          },
//          {
//            "display_name": "vehicle",
//            "text_extraction": {
//              "text_segment": {"start_offset": 4, "end_offset": 6}
//            }
//          },
//          {
//            "display_name": "animal",
//            "text_extraction": {
//              "text_segment": {"start_offset": 8, "end_offset": 10}
//            }
//          }
//      ]
//     }\n
//     {
//        "text_snippet": {
//          "content": "This dog is good."
//        },
//        "annotations": [
//           {
//             "display_name": "animal",
//             "text_extraction": {
//               "text_segment": {"start_offset": 5, "end_offset": 7}
//             }
//           }
//        ]
//     }
//
// **JSONL files that reference documents**
//
// .JSONL files contain, per line, a JSON document that wraps an
// `input_config` that contains the path to a source document.
// Multiple JSON documents can be separated using line breaks (\n).
//
// Supported document extensions: .PDF, .TIF, .TIFF
//
// For example:
//
//     {
//       "document": {
//         "input_config": {
//           "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
//           }
//         }
//       }
//     }\n
//     {
//       "document": {
//         "input_config": {
//           "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ]
//           }
//         }
//       }
//     }
//
// **In-line JSONL files with document layout information**
//
// **Note:** You can only annotate documents using the UI. The format described
// below applies to annotated documents exported using the UI or `exportData`.
//
// In-line .JSONL files for documents contain, per line, a JSON document
// that wraps a `document` field that provides the textual content of the
// document and the layout information.
//
// For example:
//
//     {
//       "document": {
//               "document_text": {
//                 "content": "dog car cat"
//               },
//               "layout": [
//                 {
//                   "text_segment": {
//                     "start_offset": 0,
//                     "end_offset": 11,
//                    },
//                    "page_number": 1,
//                    "bounding_poly": {
//                       "normalized_vertices": [
//                         {"x": 0.1, "y": 0.1},
//                         {"x": 0.1, "y": 0.3},
//                         {"x": 0.3, "y": 0.3},
//                         {"x": 0.3, "y": 0.1},
//                       ],
//                     },
//                     "text_segment_type": TOKEN,
//                 }
//               ],
//               "document_dimensions": {
//                 "width": 8.27,
//                 "height": 11.69,
//                 "unit": INCH,
//               },
//               "page_count": 3,
//             },
//             "annotations": [
//               {
//                 "display_name": "animal",
//                 "text_extraction": {
//                   "text_segment": {"start_offset": 0, "end_offset": 3}
//                 }
//               },
//               {
//                 "display_name": "vehicle",
//                 "text_extraction": {
//                   "text_segment": {"start_offset": 4, "end_offset": 7}
//                 }
//               },
//               {
//                 "display_name": "animal",
//                 "text_extraction": {
//                   "text_segment": {"start_offset": 8, "end_offset": 11}
//                 }
//               }
//             ]
//     }
//
//
//
//
// </section><section><h5>Classification</h5>
//
// See [Preparing your training
// data](https://cloud.google.com/natural-language/automl/docs/prepare) for more
// information.
//
// One or more CSV file(s) with each line in the following format:
//
//     ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,...
//
// *   `ML_USE` - Identifies the data set that the current row (file) applies
// to.
//     This value can be one of the following:
//     * `TRAIN` - Rows in this file are used to train the model.
//     * `TEST` - Rows in this file are used to test the model during training.
//     * `UNASSIGNED` - Rows in this file are not categorized. They are
//        automatically divided into train and test data: 80% for training and
//        20% for testing.
//
// *   `TEXT_SNIPPET` and `GCS_FILE_PATH` are distinguished by a pattern. If
//     the column content is a valid Google Cloud Storage file path, that is,
//     prefixed by "gs://", it is treated as a `GCS_FILE_PATH`. Otherwise, if
//     the content is enclosed in double quotes (""), it is treated as a
//     `TEXT_SNIPPET`. For `GCS_FILE_PATH`, the path must lead to a
//     file with a supported extension and UTF-8 encoding, for example,
//     "gs://folder/content.txt". AutoML imports the file content
//     as a text snippet. For `TEXT_SNIPPET`, AutoML imports the column content
//     excluding quotes. In both cases, the content must be 10MB or
//     smaller. For zip files, each file inside the zip must be
//     10MB or smaller.
//
//     For the `MULTICLASS` classification type, at most one `LABEL` is allowed.
//
//     The `ML_USE` and `LABEL` columns are optional.
//     Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
//
// A maximum of 100 unique labels are allowed per CSV row.
//
// Sample rows:
//
//     TRAIN,"They have bad food and very rude",RudeService,BadFood
//     gs://folder/content.txt,SlowService
//     TEST,gs://folder/document.pdf
//     VALIDATE,gs://folder/text_files.zip,BadFood
//
//
//
// </section><section><h5>Sentiment Analysis</h5>
//
// See [Preparing your training
// data](https://cloud.google.com/natural-language/automl/docs/prepare) for more
// information.
//
// CSV file(s) with each line in format:
//
//     ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT
//
// *   `ML_USE` - Identifies the data set that the current row (file) applies
// to.
//     This value can be one of the following:
//     * `TRAIN` - Rows in this file are used to train the model.
//     * `TEST` - Rows in this file are used to test the model during training.
//     * `UNASSIGNED` - Rows in this file are not categorized. They are
//        automatically divided into train and test data: 80% for training and
//        20% for testing.
//
// *   `TEXT_SNIPPET` and `GCS_FILE_PATH` are distinguished by a pattern. If
//     the column content is a valid Google Cloud Storage file path, that is,
//     prefixed by "gs://", it is treated as a `GCS_FILE_PATH`. Otherwise, if
//     the content is enclosed in double quotes (""), it is treated as a
//     `TEXT_SNIPPET`. For `GCS_FILE_PATH`, the path must lead to a
//     file with a supported extension and UTF-8 encoding, for example,
//     "gs://folder/content.txt". AutoML imports the file content
//     as a text snippet. For `TEXT_SNIPPET`, AutoML imports the column content
//     excluding quotes. In both cases, the content must be 128kB or
//     smaller. For zip files, each file inside the zip must be
//     128kB or smaller.
//
//     The `ML_USE` and `SENTIMENT` columns are optional.
//     Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
//
// *  `SENTIMENT` - An integer between 0 and
//     Dataset.text_sentiment_dataset_metadata.sentiment_max
//     (inclusive). Describes the ordinal of the sentiment - a higher
//     value means a more positive sentiment. All the values are
//     completely relative, i.e. neither 0 needs to mean a negative or
//     neutral sentiment nor sentiment_max needs to mean a positive one -
//     it is just required that 0 is the least positive sentiment
//     in the data, and sentiment_max is the most positive one.
//     The SENTIMENT shouldn't be confused with "score" or "magnitude"
//     from the previous Natural Language Sentiment Analysis API.
//     All SENTIMENT values between 0 and sentiment_max must be
//     represented in the imported data. On prediction the same 0 to
//     sentiment_max range will be used. The difference between
//     neighboring sentiment values need not be uniform, e.g. 1 and
//     2 may be similar whereas the difference between 2 and 3 may be
//     large.
//
// Sample rows:
//
//     TRAIN,"@freewrytin this is way too good for your product",2
//     gs://folder/content.txt,3
//     TEST,gs://folder/document.pdf
//     VALIDATE,gs://folder/text_files.zip,2
//   </section>
// </div>
//
//
//
// <h4>AutoML Tables</h4><div class="ui-datasection-main"><section
// class="selected">
//
// See [Preparing your training
// data](https://cloud.google.com/automl-tables/docs/prepare) for more
// information.
//
// You can use either
// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or
// [bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source].
// All input is concatenated into a
// single
//
// [primary_table_spec_id][google.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id]
//
// **For gcs_source:**
//
// CSV file(s), where the first row of the first file is the header,
// containing unique column names. If the first row of a subsequent
// file is the same as the header, then it is also treated as a
// header. All other rows contain values for the corresponding
// columns.
//
// Each .CSV file by itself must be 10GB or smaller, and the total size
// of all files must be 100GB or smaller.
//
// First three sample rows of a CSV file:
// <pre>
// "Id","First Name","Last Name","Dob","Addresses"
//
// "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
//
551// "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
// </pre>
// **For bigquery_source:**
//
// The URI of a BigQuery table. The user data size of the BigQuery
// table must be 100GB or smaller.
//
// An imported table must have between 2 and 1,000 columns, inclusive,
// and between 1,000 and 100,000,000 rows, inclusive. At most 5
// import data operations can run in parallel.
//
//   </section>
// </div>
//
//
// **Input field definitions:**
//
// `ML_USE`
// : ("TRAIN" | "VALIDATE" | "TEST" | "UNASSIGNED")
//   Describes how the given example (file) should be used for model
//   training. "UNASSIGNED" can be used when the user has no preference.
//
// `GCS_FILE_PATH`
// : The path to a file on Google Cloud Storage. For example,
//   "gs://folder/image1.png".
//
// `LABEL`
// : A display name of an object on an image, video etc., e.g. "dog".
//   Must be up to 32 characters long and can consist only of ASCII
//   Latin letters A-Z and a-z, underscores (_), and ASCII digits 0-9.
//   For each label an AnnotationSpec is created whose display_name
//   becomes the label; AnnotationSpecs are given back in predictions.
//
// `INSTANCE_ID`
// : A positive integer that identifies a specific instance of a
//   labeled entity on an example. Used e.g. to track two cars on
//   a video while being able to tell apart which one is which.
//
// `BOUNDING_BOX`
// : (`VERTEX,VERTEX,VERTEX,VERTEX` | `VERTEX,,,VERTEX,,`)
//   A rectangle parallel to the frame of the example (image,
//   video). If 4 vertices are given they are connected by edges
//   in the order provided, if 2 are given they are recognized
//   as diagonally opposite vertices of the rectangle.
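//   For example (drawing on the object detection sample rows above),
//   `0.1,0.1,,,0.3,0.3,,` uses the two-vertex form: diagonally opposite
//   corners at (0.1,0.1) and (0.3,0.3).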
//
// `VERTEX`
// : (`COORDINATE,COORDINATE`)
//   First coordinate is horizontal (x), the second is vertical (y).
//
// `COORDINATE`
// : A float in 0 to 1 range, relative to total length of
//   image or video in given dimension. For fractions the
//   leading non-decimal 0 can be omitted (i.e. 0.3 = .3).
//   Point 0,0 is in top left.
//
// `TIME_SEGMENT_START`
// : (`TIME_OFFSET`)
//   Expresses a beginning, inclusive, of a time segment
//   within an example that has a time dimension
//   (e.g. video).
//
// `TIME_SEGMENT_END`
// : (`TIME_OFFSET`)
//   Expresses an end, exclusive, of a time segment within
//   an example that has a time dimension (e.g. video).
//
// `TIME_OFFSET`
// : A number of seconds as measured from the start of an
//   example (e.g. video). Fractions are allowed, up to a
//   microsecond precision. "inf" is allowed, and it means the end
//   of the example.
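//   For example, `120` and `180.000021` in the video sample rows above
//   are offsets in seconds; `inf` would denote a segment running to the
//   end of the video.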
//
// `TEXT_SNIPPET`
// : The content of a text snippet, UTF-8 encoded, enclosed within
//   double quotes ("").
//
// `DOCUMENT`
// : A field that provides the textual content of a document along with
//   the layout information.
//
//
//  **Errors:**
//
//  If any of the provided CSV files can't be parsed or if more than a certain
//  percent of CSV rows cannot be processed then the operation fails and
//  nothing is imported. Regardless of overall success or failure the per-row
//  failures, up to a certain count cap, are listed in
//  Operation.metadata.partial_failures.
//
type InputConfig struct {
	// The source of the input.
	//
	// Types that are valid to be assigned to Source:
	//	*InputConfig_GcsSource
	Source isInputConfig_Source `protobuf_oneof:"source"`
	// Additional domain-specific parameters describing the semantics of the
	// imported data; any string must be up to 25000
	// characters long.
	//
	// <h4>AutoML Tables</h4>
	//
	// `schema_inference_version`
	// : (integer) This value must be supplied.
	//   The version of the
	//   algorithm to use for the initial inference of the
	//   column data types of the imported table. Allowed values: "1".
	Params               map[string]string `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

func (m *InputConfig) Reset()         { *m = InputConfig{} }
func (m *InputConfig) String() string { return proto.CompactTextString(m) }
func (*InputConfig) ProtoMessage()    {}
func (*InputConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_929966d18309cd53, []int{0}
}

func (m *InputConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_InputConfig.Unmarshal(m, b)
}
func (m *InputConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_InputConfig.Marshal(b, m, deterministic)
}
func (m *InputConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_InputConfig.Merge(m, src)
}
func (m *InputConfig) XXX_Size() int {
	return xxx_messageInfo_InputConfig.Size(m)
}
func (m *InputConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_InputConfig.DiscardUnknown(m)
}

var xxx_messageInfo_InputConfig proto.InternalMessageInfo

type isInputConfig_Source interface {
	isInputConfig_Source()
}

type InputConfig_GcsSource struct {
	GcsSource *GcsSource `protobuf:"bytes,1,opt,name=gcs_source,json=gcsSource,proto3,oneof"`
}

func (*InputConfig_GcsSource) isInputConfig_Source() {}

func (m *InputConfig) GetSource() isInputConfig_Source {
	if m != nil {
		return m.Source
	}
	return nil
}

func (m *InputConfig) GetGcsSource() *GcsSource {
	if x, ok := m.GetSource().(*InputConfig_GcsSource); ok {
		return x.GcsSource
	}
	return nil
}

func (m *InputConfig) GetParams() map[string]string {
	if m != nil {
		return m.Params
	}
	return nil
}

// XXX_OneofWrappers is for the internal use of the proto package.
func (*InputConfig) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*InputConfig_GcsSource)(nil),
	}
}
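
// The following helper is an editorial sketch, not generated code: it shows
// one way to assemble an InputConfig whose gcs_source oneof points at CSV
// files, per the format documentation above. The URI and the Tables-only
// "schema_inference_version" param value are illustrative assumptions.
func exampleImportDataInputConfig() *InputConfig {
	return &InputConfig{
		// The oneof is set by wrapping GcsSource in InputConfig_GcsSource.
		Source: &InputConfig_GcsSource{
			GcsSource: &GcsSource{
				InputUris: []string{"gs://folder/train.csv"}, // assumed URI
			},
		},
		// Only meaningful for AutoML Tables; see the Params documentation.
		Params: map[string]string{"schema_inference_version": "1"},
	}
}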

// Input configuration for BatchPredict Action.
//
// The format of input depends on the ML problem of the model used for
// prediction. As input source the
// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source]
// is expected, unless specified otherwise.
//
// The formats are represented in EBNF with commas being literal and with
// non-terminal symbols defined near the end of this comment. The formats
// are:
//
// <h4>AutoML Vision</h4>
// <div class="ds-selector-tabs"><section><h5>Classification</h5>
//
// One or more CSV files where each line is a single column:
//
//     GCS_FILE_PATH
//
// The Google Cloud Storage location of an image of up to
// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG.
// This path is treated as the ID in the batch predict output.
//
// Sample rows:
//
//     gs://folder/image1.jpeg
//     gs://folder/image2.gif
//     gs://folder/image3.png
//
// </section><section><h5>Object Detection</h5>
//
// One or more CSV files where each line is a single column:
//
//     GCS_FILE_PATH
//
// The Google Cloud Storage location of an image of up to
// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG.
// This path is treated as the ID in the batch predict output.
//
// Sample rows:
//
//     gs://folder/image1.jpeg
//     gs://folder/image2.gif
//     gs://folder/image3.png
//   </section>
// </div>
//
// <h4>AutoML Video Intelligence</h4>
// <div class="ds-selector-tabs"><section><h5>Classification</h5>
//
// One or more CSV files where each line is a single column:
//
//     GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
//
// `GCS_FILE_PATH` is the Google Cloud Storage location of a video of up to
// 50GB in size and up to 3h in duration.
// Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
//
// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
// length of the video, and the end time must be after the start time.
//
// Sample rows:
//
//     gs://folder/video1.mp4,10,40
//     gs://folder/video1.mp4,20,60
//     gs://folder/vid2.mov,0,inf
//
// </section><section><h5>Object Tracking</h5>
//
// One or more CSV files where each line is a single column:
//
//     GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
//
// `GCS_FILE_PATH` is the Google Cloud Storage location of a video of up to
// 50GB in size and up to 3h in duration.
// Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
//
// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the
// length of the video, and the end time must be after the start time.
//
// Sample rows:
//
//     gs://folder/video1.mp4,10,40
//     gs://folder/video1.mp4,20,60
//     gs://folder/vid2.mov,0,inf
//   </section>
// </div>
//
// <h4>AutoML Natural Language</h4>
// <div class="ds-selector-tabs"><section><h5>Classification</h5>
//
// One or more CSV files where each line is a single column:
//
//     GCS_FILE_PATH
//
// `GCS_FILE_PATH` is the Google Cloud Storage location of a text file.
// Supported file extensions: .TXT, .PDF, .TIF, .TIFF
//
// Text files can be no larger than 10MB in size.
//
// Sample rows:
//
//     gs://folder/text1.txt
//     gs://folder/text2.pdf
//     gs://folder/text3.tif
//
// </section><section><h5>Sentiment Analysis</h5>
// One or more CSV files where each line is a single column:
//
//     GCS_FILE_PATH
//
// `GCS_FILE_PATH` is the Google Cloud Storage location of a text file.
// Supported file extensions: .TXT, .PDF, .TIF, .TIFF
//
// Text files can be no larger than 128kB in size.
//
// Sample rows:
//
//     gs://folder/text1.txt
//     gs://folder/text2.pdf
//     gs://folder/text3.tif
//
// </section><section><h5>Entity Extraction</h5>
//
// One or more JSONL (JSON Lines) files that either provide inline text or
// documents. You can only use one format, either inline text or documents,
// for a single call to [AutoMl.BatchPredict].
//
// Each JSONL file contains, per line, a proto that
// wraps a temporary user-assigned TextSnippet ID (string up to 2000
// characters long) called "id", a TextSnippet proto (in
// JSON representation) and zero or more TextFeature protos. Any given
// text snippet content must have 30,000 characters or less, and also
// be UTF-8 NFC encoded (ASCII already is). The IDs provided should be
// unique.
//
// Each document JSONL file contains, per line, a proto that wraps a Document
// proto with `input_config` set. Each document cannot exceed 2MB in size.
//
// Supported document extensions: .PDF, .TIF, .TIFF
//
// Each JSONL file must not exceed 100MB in size, and no more than 20
// JSONL files may be passed.
//
// Sample inline JSONL file (Shown with artificial line
// breaks. Actual line breaks are denoted by "\n".):
//
//     {
//        "id": "my_first_id",
//        "text_snippet": { "content": "dog car cat"},
//        "text_features": [
//          {
//            "text_segment": {"start_offset": 4, "end_offset": 6},
//            "structural_type": PARAGRAPH,
//            "bounding_poly": {
//              "normalized_vertices": [
//                {"x": 0.1, "y": 0.1},
//                {"x": 0.1, "y": 0.3},
//                {"x": 0.3, "y": 0.3},
//                {"x": 0.3, "y": 0.1},
//              ]
//            },
//          }
//        ],
//      }\n
//      {
//        "id": "2",
//        "text_snippet": {
//          "content": "Extended sample content",
//          "mime_type": "text/plain"
//        }
//      }
//
// Sample document JSONL file (Shown with artificial line
// breaks. Actual line breaks are denoted by "\n".):
//
//      {
//        "document": {
//          "input_config": {
//            "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
//            }
//          }
//        }
//      }\n
//      {
//        "document": {
//          "input_config": {
//            "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ]
//            }
//          }
//        }
//      }
//   </section>
// </div>
//
// <h4>AutoML Tables</h4><div class="ui-datasection-main"><section
// class="selected">
//
// See [Preparing your training
// data](https://cloud.google.com/automl-tables/docs/predict-batch) for more
// information.
//
// You can use either
// [gcs_source][google.cloud.automl.v1.BatchPredictInputConfig.gcs_source]
// or
// [bigquery_source][BatchPredictInputConfig.bigquery_source].
//
// **For gcs_source:**
//
// CSV file(s), each by itself 10GB or smaller and with a total size of
// 100GB or smaller, where the first file must have a header containing
// column names. If the first row of a subsequent file is the same as
// the header, then it is also treated as a header. All other rows
// contain values for the corresponding columns.
//
// The column names must contain the model's
//
// [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
// [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name]
// (order doesn't matter). The columns corresponding to the model's
// input feature column specs must contain values compatible with the
// column spec's data types. Prediction on all the rows, i.e. the CSV
// lines, will be attempted.
//
//
// Sample rows from a CSV file:
// <pre>
// "First Name","Last Name","Dob","Addresses"
//
// "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
//
956// "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
// </pre>
// **For bigquery_source:**
//
// The URI of a BigQuery table. The user data size of the BigQuery
// table must be 100GB or smaller.
//
// The column names must contain the model's
//
// [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
// [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name]
// (order doesn't matter). The columns corresponding to the model's
// input feature column specs must contain values compatible with the
// column spec's data types. Prediction on all the rows of the table
// will be attempted.
//   </section>
// </div>
//
// **Input field definitions:**
//
// `GCS_FILE_PATH`
// : The path to a file on Google Cloud Storage. For example,
//   "gs://folder/video.avi".
//
// `TIME_SEGMENT_START`
// : (`TIME_OFFSET`)
//   Expresses a beginning, inclusive, of a time segment
//   within an example that has a time dimension
//   (e.g. video).
//
// `TIME_SEGMENT_END`
// : (`TIME_OFFSET`)
//   Expresses an end, exclusive, of a time segment within
//   an example that has a time dimension (e.g. video).
//
// `TIME_OFFSET`
// : A number of seconds as measured from the start of an
//   example (e.g. video). Fractions are allowed, up to a
//   microsecond precision. "inf" is allowed, and it means the end
//   of the example.
//
//  **Errors:**
//
//  If any of the provided CSV files can't be parsed or if more than a certain
//  percent of CSV rows cannot be processed then the operation fails and
//  prediction does not happen. Regardless of overall success or failure the
//  per-row failures, up to a certain count cap, will be listed in
//  Operation.metadata.partial_failures.
type BatchPredictInputConfig struct {
	// The source of the input.
	//
	// Types that are valid to be assigned to Source:
	//	*BatchPredictInputConfig_GcsSource
	Source               isBatchPredictInputConfig_Source `protobuf_oneof:"source"`
	XXX_NoUnkeyedLiteral struct{}                         `json:"-"`
	XXX_unrecognized     []byte                           `json:"-"`
	XXX_sizecache        int32                            `json:"-"`
}

func (m *BatchPredictInputConfig) Reset()         { *m = BatchPredictInputConfig{} }
func (m *BatchPredictInputConfig) String() string { return proto.CompactTextString(m) }
func (*BatchPredictInputConfig) ProtoMessage()    {}
func (*BatchPredictInputConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_929966d18309cd53, []int{1}
}

func (m *BatchPredictInputConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BatchPredictInputConfig.Unmarshal(m, b)
}
func (m *BatchPredictInputConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BatchPredictInputConfig.Marshal(b, m, deterministic)
}
func (m *BatchPredictInputConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BatchPredictInputConfig.Merge(m, src)
}
func (m *BatchPredictInputConfig) XXX_Size() int {
	return xxx_messageInfo_BatchPredictInputConfig.Size(m)
}
func (m *BatchPredictInputConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_BatchPredictInputConfig.DiscardUnknown(m)
}

var xxx_messageInfo_BatchPredictInputConfig proto.InternalMessageInfo

type isBatchPredictInputConfig_Source interface {
	isBatchPredictInputConfig_Source()
}

type BatchPredictInputConfig_GcsSource struct {
	GcsSource *GcsSource `protobuf:"bytes,1,opt,name=gcs_source,json=gcsSource,proto3,oneof"`
}

func (*BatchPredictInputConfig_GcsSource) isBatchPredictInputConfig_Source() {}

func (m *BatchPredictInputConfig) GetSource() isBatchPredictInputConfig_Source {
	if m != nil {
		return m.Source
	}
	return nil
}

func (m *BatchPredictInputConfig) GetGcsSource() *GcsSource {
	if x, ok := m.GetSource().(*BatchPredictInputConfig_GcsSource); ok {
		return x.GcsSource
	}
	return nil
}

// XXX_OneofWrappers is for the internal use of the proto package.
func (*BatchPredictInputConfig) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*BatchPredictInputConfig_GcsSource)(nil),
	}
}
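
// Editorial sketch, not generated code: a BatchPredictInputConfig that points
// batch prediction at CSV files of GCS_FILE_PATH rows, as documented above.
// The URI is an illustrative assumption.
func exampleBatchPredictInputConfig() *BatchPredictInputConfig {
	return &BatchPredictInputConfig{
		Source: &BatchPredictInputConfig_GcsSource{
			GcsSource: &GcsSource{
				InputUris: []string{"gs://folder/batch_predict.csv"}, // assumed URI
			},
		},
	}
}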

// Input configuration of a [Document][google.cloud.automl.v1.Document].
type DocumentInputConfig struct {
	// The Google Cloud Storage location of the document file. Only a single path
	// should be given.
	//
	// Max supported size: 512MB.
	//
	// Supported extensions: .PDF.
	GcsSource            *GcsSource `protobuf:"bytes,1,opt,name=gcs_source,json=gcsSource,proto3" json:"gcs_source,omitempty"`
	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
	XXX_unrecognized     []byte     `json:"-"`
	XXX_sizecache        int32      `json:"-"`
}

func (m *DocumentInputConfig) Reset()         { *m = DocumentInputConfig{} }
func (m *DocumentInputConfig) String() string { return proto.CompactTextString(m) }
func (*DocumentInputConfig) ProtoMessage()    {}
func (*DocumentInputConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_929966d18309cd53, []int{2}
}

func (m *DocumentInputConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DocumentInputConfig.Unmarshal(m, b)
}
func (m *DocumentInputConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DocumentInputConfig.Marshal(b, m, deterministic)
}
func (m *DocumentInputConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DocumentInputConfig.Merge(m, src)
}
func (m *DocumentInputConfig) XXX_Size() int {
	return xxx_messageInfo_DocumentInputConfig.Size(m)
}
func (m *DocumentInputConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_DocumentInputConfig.DiscardUnknown(m)
}

var xxx_messageInfo_DocumentInputConfig proto.InternalMessageInfo

func (m *DocumentInputConfig) GetGcsSource() *GcsSource {
	if m != nil {
		return m.GcsSource
	}
	return nil
}
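
// Editorial sketch, not generated code: a DocumentInputConfig referencing a
// single .PDF, matching the single-path, .PDF-only contract documented on the
// GcsSource field above. The URI is an illustrative assumption.
func exampleDocumentInputConfig() *DocumentInputConfig {
	return &DocumentInputConfig{
		GcsSource: &GcsSource{
			InputUris: []string{"gs://folder/document.pdf"}, // assumed URI
		},
	}
}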

// Output configuration for ExportData action.
//
// *  For Translation:
//         CSV file `translation.csv`, with each line in format:
//         ML_USE,GCS_FILE_PATH
//         GCS_FILE_PATH leads to a .TSV file which describes examples that have
//         given ML_USE, using the following row format per line:
//         TEXT_SNIPPET (in source language) \t TEXT_SNIPPET (in target
//         language)
//
//   *  For Tables:
//         Output depends on whether the dataset was imported from Google Cloud
//         Storage or BigQuery.
//         Google Cloud Storage case:
//
// [gcs_destination][google.cloud.automl.v1p1beta.OutputConfig.gcs_destination]
//           must be set. Exported are CSV file(s) `tables_1.csv`,
//           `tables_2.csv`,...,`tables_N.csv` with each having as header line
//           the table's column names, and all other lines contain values for
//           the header columns.
//         BigQuery case:
//
// [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
//           pointing to a BigQuery project must be set. In the given project a
//           new dataset will be created with name
//
// `export_data_<automl-dataset-display-name>_<timestamp-of-export-call>`
//           where <automl-dataset-display-name> will be made
//           BigQuery-dataset-name compatible (e.g. most special characters will
//           become underscores), and timestamp will be in
//           YYYY_MM_DDThh_mm_ss_sssZ format (based on ISO-8601). In that
//           dataset a new table called `primary_table` will be created, and
//           filled with precisely the same data as was obtained on import.
type OutputConfig struct {
	// The destination of the output.
	//
	// Types that are valid to be assigned to Destination:
	//	*OutputConfig_GcsDestination
	Destination          isOutputConfig_Destination `protobuf_oneof:"destination"`
	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
	XXX_unrecognized     []byte                     `json:"-"`
	XXX_sizecache        int32                      `json:"-"`
}

func (m *OutputConfig) Reset()         { *m = OutputConfig{} }
func (m *OutputConfig) String() string { return proto.CompactTextString(m) }
func (*OutputConfig) ProtoMessage()    {}
func (*OutputConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_929966d18309cd53, []int{3}
}

func (m *OutputConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_OutputConfig.Unmarshal(m, b)
}
func (m *OutputConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_OutputConfig.Marshal(b, m, deterministic)
}
func (m *OutputConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_OutputConfig.Merge(m, src)
}
func (m *OutputConfig) XXX_Size() int {
	return xxx_messageInfo_OutputConfig.Size(m)
}
func (m *OutputConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_OutputConfig.DiscardUnknown(m)
}

var xxx_messageInfo_OutputConfig proto.InternalMessageInfo

type isOutputConfig_Destination interface {
	isOutputConfig_Destination()
}

type OutputConfig_GcsDestination struct {
	GcsDestination *GcsDestination `protobuf:"bytes,1,opt,name=gcs_destination,json=gcsDestination,proto3,oneof"`
}

func (*OutputConfig_GcsDestination) isOutputConfig_Destination() {}

func (m *OutputConfig) GetDestination() isOutputConfig_Destination {
	if m != nil {
		return m.Destination
	}
	return nil
}

func (m *OutputConfig) GetGcsDestination() *GcsDestination {
	if x, ok := m.GetDestination().(*OutputConfig_GcsDestination); ok {
		return x.GcsDestination
	}
	return nil
}

// XXX_OneofWrappers is for the internal use of the proto package.
func (*OutputConfig) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*OutputConfig_GcsDestination)(nil),
	}
}
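
// Editorial sketch, not generated code: an OutputConfig that exports through
// the gcs_destination oneof. GcsDestination and its OutputUriPrefix field are
// defined elsewhere in this package; the URI is an illustrative assumption.
func exampleExportOutputConfig() *OutputConfig {
	return &OutputConfig{
		Destination: &OutputConfig_GcsDestination{
			GcsDestination: &GcsDestination{
				OutputUriPrefix: "gs://folder/export/", // assumed URI
			},
		},
	}
}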

// Output configuration for BatchPredict Action.
//
// As destination the
//
// [gcs_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs_destination]
// must be set unless specified otherwise for a domain. If gcs_destination is
// set then in the given directory a new directory is created. Its name
// will be
// "prediction-<model-display-name>-<timestamp-of-prediction-call>",
// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Its
// contents depend on the ML problem the predictions are made for.
//
//  *  For Image Classification:
//         In the created directory files `image_classification_1.jsonl`,
//         `image_classification_2.jsonl`,...,`image_classification_N.jsonl`
//         will be created, where N may be 1, and depends on the
//         total number of the successfully predicted images and annotations.
//         A single image will be listed only once with all its annotations,
//         and its annotations will never be split across files.
//         Each .JSONL file will contain, per line, a JSON representation of a
//         proto that wraps image's "ID" : "<id_value>" followed by a list of
//         zero or more AnnotationPayload protos (called annotations), which
//         have classification detail populated.
//         If prediction for any image failed (partially or completely), then
//         additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
//         files will be created (N depends on total number of failed
//         predictions). These files will have a JSON representation of a proto
//         that wraps the same "ID" : "<id_value>" but here followed by
//         exactly one
//
// [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
//         containing only `code` and `message` fields.
//
//  *  For Image Object Detection:
//         In the created directory files `image_object_detection_1.jsonl`,
//         `image_object_detection_2.jsonl`,...,`image_object_detection_N.jsonl`
//         will be created, where N may be 1, and depends on the
//         total number of the successfully predicted images and annotations.
//         Each .JSONL file will contain, per line, a JSON representation of a
//         proto that wraps image's "ID" : "<id_value>" followed by a list of
//         zero or more AnnotationPayload protos (called annotations), which
//         have image_object_detection detail populated. A single image will
//         be listed only once with all its annotations, and its annotations
//         will never be split across files.
//         If prediction for any image failed (partially or completely), then
//         additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl`
//         files will be created (N depends on total number of failed
//         predictions). These files will have a JSON representation of a proto
//         that wraps the same "ID" : "<id_value>" but here followed by
//         exactly one
//
// [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
//         containing only `code` and `message` fields.
//  *  For Video Classification:
//         In the created directory a video_classification.csv file, and a .JSON
//         file per each video classification requested in the input (i.e. each
//         line in given CSV(s)), will be created.
//
//         The format of video_classification.csv is:
//
// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
//         where:
//         GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
//             the prediction input lines (i.e. video_classification.csv has
//             precisely the same number of lines as the prediction input had.)
//         JSON_FILE_NAME = Name of .JSON file in the output directory, which
//             contains prediction responses for the video time segment.
//         STATUS = "OK" if prediction completed successfully, or an error code
//             with message otherwise. If STATUS is not "OK" then the .JSON file
//             for that line may not exist or be empty.
//
//         Each .JSON file, assuming STATUS is "OK", will contain a list of
//         AnnotationPayload protos in JSON format, which are the predictions
//         for the video time segment the file is assigned to in the
//         video_classification.csv. All AnnotationPayload protos will have
//         video_classification field set, and will be sorted by
//         video_classification.type field (note that the returned types are
//         governed by `classifaction_types` parameter in
//         [PredictService.BatchPredictRequest.params][]).
//
//  *  For Video Object Tracking:
//         In the created directory a video_object_tracking.csv file will be
//         created, and multiple files video_object_tracking_1.json,
//         video_object_tracking_2.json,..., video_object_tracking_N.json,
//         where N is the number of requests in the input (i.e. the number of
//         lines in given CSV(s)).
//
//         The format of video_object_tracking.csv is:
//
// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
//         where:
//         GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
//             the prediction input lines (i.e. video_object_tracking.csv has
//             precisely the same number of lines as the prediction input had.)
//         JSON_FILE_NAME = Name of .JSON file in the output directory, which
//             contains prediction responses for the video time segment.
//         STATUS = "OK" if prediction completed successfully, or an error
//             code with message otherwise. If STATUS is not "OK" then the .JSON
//             file for that line may not exist or be empty.
//
//         Each .JSON file, assuming STATUS is "OK", will contain a list of
//         AnnotationPayload protos in JSON format, which are the predictions
//         for each frame of the video time segment the file is assigned to in
//         video_object_tracking.csv. All AnnotationPayload protos will have
//         video_object_tracking field set.
//  *  For Text Classification:
//         In the created directory files `text_classification_1.jsonl`,
//         `text_classification_2.jsonl`,...,`text_classification_N.jsonl`
//         will be created, where N may be 1, and depends on the
//         total number of inputs and annotations found.
//
//         Each .JSONL file will contain, per line, a JSON representation of a
//         proto that wraps input text file (or document) in
//         the text snippet (or document) proto and a list of
//         zero or more AnnotationPayload protos (called annotations), which
//         have classification detail populated. A single text file (or
//         document) will be listed only once with all its annotations, and its
//         annotations will never be split across files.
//
//         If prediction for any input file (or document) failed (partially or
//         completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
//         `errors_N.jsonl` files will be created (N depends on total number of
//         failed predictions). These files will have a JSON representation of a
//         proto that wraps input file followed by exactly one
//
// [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
//         containing only `code` and `message`.
//
//  *  For Text Sentiment:
//         In the created directory files `text_sentiment_1.jsonl`,
//         `text_sentiment_2.jsonl`,...,`text_sentiment_N.jsonl`
//         will be created, where N may be 1, and depends on the
//         total number of inputs and annotations found.
//
//         Each .JSONL file will contain, per line, a JSON representation of a
//         proto that wraps input text file (or document) in
//         the text snippet (or document) proto and a list of
//         zero or more AnnotationPayload protos (called annotations), which
//         have text_sentiment detail populated. A single text file (or
//         document) will be listed only once with all its annotations, and its
//         annotations will never be split across files.
//
//         If prediction for any input file (or document) failed (partially or
//         completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
//         `errors_N.jsonl` files will be created (N depends on total number of
//         failed predictions). These files will have a JSON representation of a
//         proto that wraps input file followed by exactly one
//
1366// [`google.rpc.Status`](https:
1367// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
1368//         containing only `code` and `message`.
1369//
//  *  For Text Extraction:
//         In the created directory files `text_extraction_1.jsonl`,
//         `text_extraction_2.jsonl`,...,`text_extraction_N.jsonl`
//         will be created, where N may be 1, and depends on the
//         total number of inputs and annotations found.
//         The contents of these .JSONL file(s) depend on whether the input
//         used inline text, or documents.
//         If input was inline, then each .JSONL file will contain, per line,
//           a JSON representation of a proto that wraps the text snippet's
//           "id" given in the request (if specified), followed by the input
//           text snippet, and a list of zero or more
//           AnnotationPayload protos (called annotations), which have the
//           text_extraction detail populated. A single text snippet will be
//           listed only once with all its annotations, and its annotations will
//           never be split across files.
//         If input used documents, then each .JSONL file will contain, per
//           line, a JSON representation of a proto that wraps the document
//           proto given in the request, followed by its OCR-ed representation
//           in the form of a text snippet, finally followed by a list of zero
//           or more AnnotationPayload protos (called annotations), which have
//           the text_extraction detail populated and refer, via their indices,
//           to the OCR-ed text snippet. A single document (and its text
//           snippet) will be listed only once with all its annotations, and its
//           annotations will never be split across files.
//         If prediction for any text snippet failed (partially or completely),
//         then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
//         `errors_N.jsonl` files will be created (N depends on the total number
//         of failed predictions). These files will have a JSON representation
//         of a proto that wraps either the "id" : "<id_value>" (in case of
//         inline) or the document proto (in case of document), followed by
//         exactly one
//         [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
//         containing only `code` and `message`.
1405//
//  *  For Tables:
//         Output depends on whether
//         [gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination]
//         or
//         [bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination]
//         is set (either is allowed).
1414//         Google Cloud Storage case:
1415//           In the created directory files `tables_1.csv`, `tables_2.csv`,...,
1416//           `tables_N.csv` will be created, where N may be 1, and depends on
1417//           the total number of the successfully predicted rows.
//           For all CLASSIFICATION
//           [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
//             Each .csv file will contain a header, listing all columns'
//             [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
//             given on input followed by M target column names in the format of
//             "<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
//             [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>_<target
//             value>_score" where M is the number of distinct target values,
//             i.e. number of distinct values in the target column of the table
//             used to train the model. Subsequent lines will contain the
//             respective values of successfully predicted rows, with the last,
//             i.e. the target, columns having the corresponding prediction
//             [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score].
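//             For example, assuming hypothetical input columns `petal_length`
//             and `petal_width` and a target column displayed as "species"
//             with the two distinct values "daisy" and "tulip", a header could
//             look like:
//
//               petal_length,petal_width,species_daisy_score,species_tulip_score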
//           For REGRESSION and FORECASTING
//           [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
//             Each .csv file will contain a header, listing all columns'
//             [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
//             given on input followed by the predicted target column with name
//             in the format of
//             "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
//             [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>".
//             Subsequent lines will contain the respective values of
//             successfully predicted rows, with the last, i.e. the target,
//             column having the predicted target value.
//             If prediction for any rows failed, then additional
//             `errors_1.csv`, `errors_2.csv`,..., `errors_N.csv` files will be
//             created (N depends on the total number of failed rows). These
//             files will have an analogous format to `tables_*.csv`, but always
//             with a single target column having
//             [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
//             represented as a JSON string, and containing only `code` and
//             `message`.
//         BigQuery case:
//           [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
//           pointing to a BigQuery project must be set. In the given project a
//           new dataset will be created with name
//           `prediction_<model-display-name>_<timestamp-of-prediction-call>`,
//           where <model-display-name> will be made
//           BigQuery-dataset-name compatible (e.g. most special characters will
//           become underscores), and the timestamp will be in
//           YYYY_MM_DDThh_mm_ss_sssZ format (based on ISO-8601). In the dataset
//           two tables will be created, `predictions` and `errors`.
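//           For example, a prediction call made at 2020-03-05 11:30:00.123 UTC
//           for a model displayed as "My Model" could (hypothetically) create
//           the dataset `prediction_My_Model_2020_03_05T11_30_00_123Z`.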
//           The `predictions` table's column names will be the input columns'
//           [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
//           followed by the target column with name in the format of
//           "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
//           [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>".
//           The input feature columns will contain the respective values of
//           successfully predicted rows, with the target column having an
//           ARRAY of
//           [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload],
//           represented as STRUCT-s, containing
//           [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation].
//           The `errors` table contains rows for which the prediction has
//           failed; it has analogous input columns, while the target column
//           name is in the format of
//           "errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
//           [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>",
//           and as a value has
//           [`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
//           represented as a STRUCT, and containing only `code` and `message`.
1497type BatchPredictOutputConfig struct {
1498	// The destination of the output.
1499	//
1500	// Types that are valid to be assigned to Destination:
1501	//	*BatchPredictOutputConfig_GcsDestination
1502	Destination          isBatchPredictOutputConfig_Destination `protobuf_oneof:"destination"`
1503	XXX_NoUnkeyedLiteral struct{}                               `json:"-"`
1504	XXX_unrecognized     []byte                                 `json:"-"`
1505	XXX_sizecache        int32                                  `json:"-"`
1506}
1507
1508func (m *BatchPredictOutputConfig) Reset()         { *m = BatchPredictOutputConfig{} }
1509func (m *BatchPredictOutputConfig) String() string { return proto.CompactTextString(m) }
1510func (*BatchPredictOutputConfig) ProtoMessage()    {}
1511func (*BatchPredictOutputConfig) Descriptor() ([]byte, []int) {
1512	return fileDescriptor_929966d18309cd53, []int{4}
1513}
1514
1515func (m *BatchPredictOutputConfig) XXX_Unmarshal(b []byte) error {
1516	return xxx_messageInfo_BatchPredictOutputConfig.Unmarshal(m, b)
1517}
1518func (m *BatchPredictOutputConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1519	return xxx_messageInfo_BatchPredictOutputConfig.Marshal(b, m, deterministic)
1520}
1521func (m *BatchPredictOutputConfig) XXX_Merge(src proto.Message) {
1522	xxx_messageInfo_BatchPredictOutputConfig.Merge(m, src)
1523}
1524func (m *BatchPredictOutputConfig) XXX_Size() int {
1525	return xxx_messageInfo_BatchPredictOutputConfig.Size(m)
1526}
1527func (m *BatchPredictOutputConfig) XXX_DiscardUnknown() {
1528	xxx_messageInfo_BatchPredictOutputConfig.DiscardUnknown(m)
1529}
1530
1531var xxx_messageInfo_BatchPredictOutputConfig proto.InternalMessageInfo
1532
1533type isBatchPredictOutputConfig_Destination interface {
1534	isBatchPredictOutputConfig_Destination()
1535}
1536
1537type BatchPredictOutputConfig_GcsDestination struct {
1538	GcsDestination *GcsDestination `protobuf:"bytes,1,opt,name=gcs_destination,json=gcsDestination,proto3,oneof"`
1539}
1540
1541func (*BatchPredictOutputConfig_GcsDestination) isBatchPredictOutputConfig_Destination() {}
1542
1543func (m *BatchPredictOutputConfig) GetDestination() isBatchPredictOutputConfig_Destination {
1544	if m != nil {
1545		return m.Destination
1546	}
1547	return nil
1548}
1549
1550func (m *BatchPredictOutputConfig) GetGcsDestination() *GcsDestination {
1551	if x, ok := m.GetDestination().(*BatchPredictOutputConfig_GcsDestination); ok {
1552		return x.GcsDestination
1553	}
1554	return nil
1555}
1556
1557// XXX_OneofWrappers is for the internal use of the proto package.
1558func (*BatchPredictOutputConfig) XXX_OneofWrappers() []interface{} {
1559	return []interface{}{
1560		(*BatchPredictOutputConfig_GcsDestination)(nil),
1561	}
1562}
1563
1564// Output configuration for ModelExport Action.
1565type ModelExportOutputConfig struct {
1566	// The destination of the output.
1567	//
1568	// Types that are valid to be assigned to Destination:
1569	//	*ModelExportOutputConfig_GcsDestination
1570	Destination isModelExportOutputConfig_Destination `protobuf_oneof:"destination"`
	// The format in which the model must be exported. The available, and
	// default, formats depend on the problem and model type (if a given
	// problem and type combination doesn't have a format listed, its models
	// are not exportable):
1575	//
1576	// *  For Image Classification mobile-low-latency-1, mobile-versatile-1,
1577	//        mobile-high-accuracy-1:
1578	//      "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js",
1579	//      "docker".
1580	//
1581	// *  For Image Classification mobile-core-ml-low-latency-1,
1582	//        mobile-core-ml-versatile-1, mobile-core-ml-high-accuracy-1:
1583	//      "core_ml" (default).
1584	//
1585	// *  For Image Object Detection mobile-low-latency-1, mobile-versatile-1,
1586	//        mobile-high-accuracy-1:
1587	//      "tflite", "tf_saved_model", "tf_js".
1588	// Formats description:
1589	//
1590	// * tflite - Used for Android mobile devices.
1591	// * edgetpu_tflite - Used for [Edge TPU](https://cloud.google.com/edge-tpu/)
1592	//                    devices.
	// * tf_saved_model - A TensorFlow model in SavedModel format.
1594	// * tf_js - A [TensorFlow.js](https://www.tensorflow.org/js) model that can
1595	//           be used in the browser and in Node.js using JavaScript.
	// * docker - Used for Docker containers. Use the params field to customize
	//            the container. The container is verified to work correctly on
	//            the Ubuntu 16.04 operating system. See more at the
	//            [containers quickstart](https://cloud.google.com/vision/automl/docs/containers-gcs-quickstart).
	// * core_ml - Used for iOS mobile devices.
1604	ModelFormat string `protobuf:"bytes,4,opt,name=model_format,json=modelFormat,proto3" json:"model_format,omitempty"`
	// Additional model-type- and format-specific parameters describing the
	// requirements for the model files to be exported; any string must be at
	// most 25000 characters long.
1608	//
1609	//  * For `docker` format:
1610	//     `cpu_architecture` - (string) "x86_64" (default).
1611	//     `gpu_architecture` - (string) "none" (default), "nvidia".
1612	Params               map[string]string `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
1613	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
1614	XXX_unrecognized     []byte            `json:"-"`
1615	XXX_sizecache        int32             `json:"-"`
1616}
1617
1618func (m *ModelExportOutputConfig) Reset()         { *m = ModelExportOutputConfig{} }
1619func (m *ModelExportOutputConfig) String() string { return proto.CompactTextString(m) }
1620func (*ModelExportOutputConfig) ProtoMessage()    {}
1621func (*ModelExportOutputConfig) Descriptor() ([]byte, []int) {
1622	return fileDescriptor_929966d18309cd53, []int{5}
1623}
1624
1625func (m *ModelExportOutputConfig) XXX_Unmarshal(b []byte) error {
1626	return xxx_messageInfo_ModelExportOutputConfig.Unmarshal(m, b)
1627}
1628func (m *ModelExportOutputConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1629	return xxx_messageInfo_ModelExportOutputConfig.Marshal(b, m, deterministic)
1630}
1631func (m *ModelExportOutputConfig) XXX_Merge(src proto.Message) {
1632	xxx_messageInfo_ModelExportOutputConfig.Merge(m, src)
1633}
1634func (m *ModelExportOutputConfig) XXX_Size() int {
1635	return xxx_messageInfo_ModelExportOutputConfig.Size(m)
1636}
1637func (m *ModelExportOutputConfig) XXX_DiscardUnknown() {
1638	xxx_messageInfo_ModelExportOutputConfig.DiscardUnknown(m)
1639}
1640
1641var xxx_messageInfo_ModelExportOutputConfig proto.InternalMessageInfo
1642
1643type isModelExportOutputConfig_Destination interface {
1644	isModelExportOutputConfig_Destination()
1645}
1646
1647type ModelExportOutputConfig_GcsDestination struct {
1648	GcsDestination *GcsDestination `protobuf:"bytes,1,opt,name=gcs_destination,json=gcsDestination,proto3,oneof"`
1649}
1650
1651func (*ModelExportOutputConfig_GcsDestination) isModelExportOutputConfig_Destination() {}
1652
1653func (m *ModelExportOutputConfig) GetDestination() isModelExportOutputConfig_Destination {
1654	if m != nil {
1655		return m.Destination
1656	}
1657	return nil
1658}
1659
1660func (m *ModelExportOutputConfig) GetGcsDestination() *GcsDestination {
1661	if x, ok := m.GetDestination().(*ModelExportOutputConfig_GcsDestination); ok {
1662		return x.GcsDestination
1663	}
1664	return nil
1665}
1666
1667func (m *ModelExportOutputConfig) GetModelFormat() string {
1668	if m != nil {
1669		return m.ModelFormat
1670	}
1671	return ""
1672}
1673
1674func (m *ModelExportOutputConfig) GetParams() map[string]string {
1675	if m != nil {
1676		return m.Params
1677	}
1678	return nil
1679}
1680
1681// XXX_OneofWrappers is for the internal use of the proto package.
1682func (*ModelExportOutputConfig) XXX_OneofWrappers() []interface{} {
1683	return []interface{}{
1684		(*ModelExportOutputConfig_GcsDestination)(nil),
1685	}
1686}
1687
1688// The Google Cloud Storage location for the input content.
1689type GcsSource struct {
1690	// Required. Google Cloud Storage URIs to input files, up to 2000
1691	// characters long. Accepted forms:
1692	// * Full object path, e.g. gs://bucket/directory/object.csv
1693	InputUris            []string `protobuf:"bytes,1,rep,name=input_uris,json=inputUris,proto3" json:"input_uris,omitempty"`
1694	XXX_NoUnkeyedLiteral struct{} `json:"-"`
1695	XXX_unrecognized     []byte   `json:"-"`
1696	XXX_sizecache        int32    `json:"-"`
1697}
1698
1699func (m *GcsSource) Reset()         { *m = GcsSource{} }
1700func (m *GcsSource) String() string { return proto.CompactTextString(m) }
1701func (*GcsSource) ProtoMessage()    {}
1702func (*GcsSource) Descriptor() ([]byte, []int) {
1703	return fileDescriptor_929966d18309cd53, []int{6}
1704}
1705
1706func (m *GcsSource) XXX_Unmarshal(b []byte) error {
1707	return xxx_messageInfo_GcsSource.Unmarshal(m, b)
1708}
1709func (m *GcsSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1710	return xxx_messageInfo_GcsSource.Marshal(b, m, deterministic)
1711}
1712func (m *GcsSource) XXX_Merge(src proto.Message) {
1713	xxx_messageInfo_GcsSource.Merge(m, src)
1714}
1715func (m *GcsSource) XXX_Size() int {
1716	return xxx_messageInfo_GcsSource.Size(m)
1717}
1718func (m *GcsSource) XXX_DiscardUnknown() {
1719	xxx_messageInfo_GcsSource.DiscardUnknown(m)
1720}
1721
1722var xxx_messageInfo_GcsSource proto.InternalMessageInfo
1723
1724func (m *GcsSource) GetInputUris() []string {
1725	if m != nil {
1726		return m.InputUris
1727	}
1728	return nil
1729}
1730
1731// The Google Cloud Storage location where the output is to be written to.
1732type GcsDestination struct {
1733	// Required. Google Cloud Storage URI to output directory, up to 2000
1734	// characters long.
1735	// Accepted forms:
1736	// * Prefix path: gs://bucket/directory
1737	// The requesting user must have write permission to the bucket.
1738	// The directory is created if it doesn't exist.
1739	OutputUriPrefix      string   `protobuf:"bytes,1,opt,name=output_uri_prefix,json=outputUriPrefix,proto3" json:"output_uri_prefix,omitempty"`
1740	XXX_NoUnkeyedLiteral struct{} `json:"-"`
1741	XXX_unrecognized     []byte   `json:"-"`
1742	XXX_sizecache        int32    `json:"-"`
1743}
1744
1745func (m *GcsDestination) Reset()         { *m = GcsDestination{} }
1746func (m *GcsDestination) String() string { return proto.CompactTextString(m) }
1747func (*GcsDestination) ProtoMessage()    {}
1748func (*GcsDestination) Descriptor() ([]byte, []int) {
1749	return fileDescriptor_929966d18309cd53, []int{7}
1750}
1751
1752func (m *GcsDestination) XXX_Unmarshal(b []byte) error {
1753	return xxx_messageInfo_GcsDestination.Unmarshal(m, b)
1754}
1755func (m *GcsDestination) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1756	return xxx_messageInfo_GcsDestination.Marshal(b, m, deterministic)
1757}
1758func (m *GcsDestination) XXX_Merge(src proto.Message) {
1759	xxx_messageInfo_GcsDestination.Merge(m, src)
1760}
1761func (m *GcsDestination) XXX_Size() int {
1762	return xxx_messageInfo_GcsDestination.Size(m)
1763}
1764func (m *GcsDestination) XXX_DiscardUnknown() {
1765	xxx_messageInfo_GcsDestination.DiscardUnknown(m)
1766}
1767
1768var xxx_messageInfo_GcsDestination proto.InternalMessageInfo
1769
1770func (m *GcsDestination) GetOutputUriPrefix() string {
1771	if m != nil {
1772		return m.OutputUriPrefix
1773	}
1774	return ""
1775}
1776
1777func init() {
1778	proto.RegisterType((*InputConfig)(nil), "google.cloud.automl.v1.InputConfig")
1779	proto.RegisterMapType((map[string]string)(nil), "google.cloud.automl.v1.InputConfig.ParamsEntry")
1780	proto.RegisterType((*BatchPredictInputConfig)(nil), "google.cloud.automl.v1.BatchPredictInputConfig")
1781	proto.RegisterType((*DocumentInputConfig)(nil), "google.cloud.automl.v1.DocumentInputConfig")
1782	proto.RegisterType((*OutputConfig)(nil), "google.cloud.automl.v1.OutputConfig")
1783	proto.RegisterType((*BatchPredictOutputConfig)(nil), "google.cloud.automl.v1.BatchPredictOutputConfig")
1784	proto.RegisterType((*ModelExportOutputConfig)(nil), "google.cloud.automl.v1.ModelExportOutputConfig")
1785	proto.RegisterMapType((map[string]string)(nil), "google.cloud.automl.v1.ModelExportOutputConfig.ParamsEntry")
1786	proto.RegisterType((*GcsSource)(nil), "google.cloud.automl.v1.GcsSource")
1787	proto.RegisterType((*GcsDestination)(nil), "google.cloud.automl.v1.GcsDestination")
1788}
1789
1790func init() {
1791	proto.RegisterFile("google/cloud/automl/v1/io.proto", fileDescriptor_929966d18309cd53)
1792}
1793
1794var fileDescriptor_929966d18309cd53 = []byte{
1795	// 542 bytes of a gzipped FileDescriptorProto
1796	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x54, 0xc1, 0x6e, 0xd3, 0x4c,
1797	0x10, 0xfe, 0xed, 0xfc, 0x44, 0x64, 0x5c, 0x5a, 0x30, 0xa8, 0x35, 0x11, 0x12, 0xa9, 0x0f, 0x28,
1798	0x27, 0x5b, 0x29, 0x17, 0x70, 0x39, 0x90, 0xb4, 0x25, 0x20, 0x51, 0x11, 0xa5, 0x6a, 0x90, 0x50,
1799	0xa4, 0x68, 0xeb, 0x6c, 0xdc, 0x15, 0xf6, 0x8e, 0xb5, 0xde, 0x8d, 0xda, 0x23, 0xaf, 0xd3, 0x2b,
1800	0x6f, 0xc1, 0x4b, 0xc0, 0x99, 0xa7, 0x40, 0xd9, 0x4d, 0xa8, 0x13, 0x11, 0x0e, 0x15, 0x82, 0xdb,
1801	0xee, 0x7c, 0xf3, 0xcd, 0x37, 0xdf, 0xec, 0x68, 0xe1, 0x71, 0x82, 0x98, 0xa4, 0x34, 0x8c, 0x53,
1802	0x54, 0xe3, 0x90, 0x28, 0x89, 0x59, 0x1a, 0x4e, 0x5b, 0x21, 0xc3, 0x20, 0x17, 0x28, 0xd1, 0xdd,
1803	0x36, 0x09, 0x81, 0x4e, 0x08, 0x4c, 0x42, 0x30, 0x6d, 0xd5, 0x17, 0x44, 0x92, 0xb3, 0x70, 0xc2,
1804	0x68, 0x3a, 0x1e, 0x9d, 0xd1, 0x73, 0x32, 0x65, 0x28, 0x0c, 0xb1, 0xfe, 0xa8, 0x94, 0x40, 0x38,
1805	0x47, 0x49, 0x24, 0x43, 0x5e, 0x18, 0xd4, 0xff, 0x6a, 0x81, 0xf3, 0x86, 0xe7, 0x4a, 0x1e, 0x20,
1806	0x9f, 0xb0, 0xc4, 0xed, 0x00, 0x24, 0x71, 0x31, 0x2a, 0x50, 0x89, 0x98, 0x7a, 0x56, 0xc3, 0x6a,
1807	0x3a, 0x7b, 0xbb, 0xc1, 0xaf, 0xb5, 0x83, 0x6e, 0x5c, 0x9c, 0xe8, 0xc4, 0xd7, 0xff, 0xf5, 0x6b,
1808	0xc9, 0xe2, 0xe2, 0x76, 0xa1, 0x9a, 0x13, 0x41, 0xb2, 0xc2, 0xb3, 0x1b, 0x95, 0xa6, 0xb3, 0x17,
1809	0xae, 0xe3, 0x97, 0x84, 0x83, 0x9e, 0x66, 0x1c, 0x71, 0x29, 0x2e, 0xfb, 0x73, 0x7a, 0xfd, 0x39,
1810	0x38, 0xa5, 0xb0, 0x7b, 0x17, 0x2a, 0x1f, 0xe9, 0xa5, 0x6e, 0xaa, 0xd6, 0x9f, 0x1d, 0xdd, 0x07,
1811	0x70, 0x6b, 0x4a, 0x52, 0x45, 0x3d, 0x5b, 0xc7, 0xcc, 0x25, 0xb2, 0x9f, 0x59, 0x9d, 0xdb, 0x50,
1812	0x35, 0x1e, 0xfc, 0x14, 0x76, 0x3a, 0x44, 0xc6, 0xe7, 0x3d, 0x41, 0xc7, 0x2c, 0x96, 0x65, 0xb3,
1813	0xdd, 0x1b, 0x99, 0xed, 0x54, 0xbe, 0xb5, 0xed, 0x25, 0xc7, 0x25, 0xb5, 0xf7, 0x70, 0xff, 0x10,
1814	0x63, 0x95, 0x51, 0xbe, 0xa4, 0xf4, 0xf2, 0x46, 0x4a, 0x25, 0x09, 0x5f, 0xc1, 0xc6, 0x3b, 0x25,
1815	0xaf, 0x2b, 0x0e, 0x60, 0x6b, 0x56, 0x71, 0x4c, 0x0b, 0xc9, 0xb8, 0x7e, 0xd2, 0x79, 0xd9, 0x27,
1816	0xbf, 0x29, 0x7b, 0x78, 0x9d, 0xbd, 0x70, 0xb1, 0x99, 0x2c, 0x87, 0xef, 0x80, 0x53, 0xaa, 0xe9,
1817	0x7f, 0xb2, 0xc0, 0x2b, 0x8f, 0xef, 0x5f, 0xf4, 0xf0, 0xd9, 0x86, 0x9d, 0x63, 0x1c, 0xd3, 0xf4,
1818	0xe8, 0x22, 0x47, 0xf1, 0x57, 0x5a, 0x70, 0x77, 0x61, 0x23, 0x9b, 0x49, 0x8e, 0x26, 0x28, 0x32,
1819	0x22, 0xbd, 0xff, 0xf5, 0x82, 0x39, 0x3a, 0xf6, 0x4a, 0x87, 0xdc, 0x93, 0x95, 0x35, 0xdf, 0x5f,
1820	0xa7, 0xb8, 0xa6, 0xf7, 0x3f, 0xbd, 0xf2, 0x2b, 0x53, 0x0b, 0xa1, 0xf6, 0x73, 0x91, 0x5c, 0x1f,
1821	0x80, 0xcd, 0xd6, 0x71, 0xa4, 0x04, 0x2b, 0x3c, 0xab, 0x51, 0x69, 0xd6, 0xb4, 0xf3, 0x7e, 0x4d,
1822	0x87, 0x4f, 0x05, 0x2b, 0xfc, 0x36, 0x6c, 0x2e, 0xcf, 0xc6, 0x0d, 0xe1, 0x1e, 0xea, 0x86, 0x67,
1823	0xb4, 0x51, 0x2e, 0xe8, 0x84, 0x5d, 0x98, 0x5e, 0x0c, 0x79, 0xcb, 0xa0, 0xa7, 0x82, 0xf5, 0x34,
1824	0xd6, 0xb9, 0xb2, 0xa0, 0x1e, 0x63, 0xb6, 0x66, 0x10, 0x3d, 0xeb, 0xc3, 0x8b, 0x39, 0x92, 0x60,
1825	0x4a, 0x78, 0x12, 0xa0, 0x48, 0xc2, 0x84, 0x72, 0xfd, 0x15, 0x85, 0x06, 0x22, 0x39, 0x2b, 0x56,
1826	0x7f, 0xc1, 0x7d, 0x73, 0xba, 0xb2, 0xb7, 0xbb, 0x86, 0x7e, 0xa0, 0x0b, 0xb7, 0x95, 0xc4, 0xe3,
1827	0xb7, 0xc1, 0xa0, 0xf5, 0x65, 0x01, 0x0c, 0x35, 0x30, 0xd4, 0x40, 0x3a, 0x1c, 0xb4, 0xbe, 0xdb,
1828	0x0f, 0x0d, 0x10, 0x45, 0x1a, 0x89, 0x22, 0xc3, 0x89, 0xa2, 0x41, 0xeb, 0xac, 0xaa, 0x65, 0x9f,
1829	0xfe, 0x08, 0x00, 0x00, 0xff, 0xff, 0x5a, 0x11, 0xbc, 0x85, 0x7b, 0x05, 0x00, 0x00,
1830}
1831