/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This is the auto-generated operation definition file for TensorFlow.
//
// PLEASE DO NOT MANUALLY EDIT THIS FILE!
//
// If you absolutely need to modify the generated fields of an op, move the op
// definition to `tf_ops.td` and perform the modification there.
//
// This file contains TensorFlow ops whose definitions are programmatically
// generated from the api-def-files in the following folder:
// tensorflow/core/api_def/base_api
// The generated fields for an op include name, summary, description, traits,
// arguments, results, derived attributes. Therefore, modifications to these
// fields will NOT be respected upon subsequent refreshes. However, additional
// fields after those fields will be retained.
//
// Ops in this file are sorted alphabetically.

include "tf_op_base.td"

def TF_AbsOp : TF_Op<"Abs", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Computes the absolute value of a tensor.";

  let description = [{
Given a tensor `x`, this operation returns a tensor containing the absolute
value of each element in `x`. For example, if x is an input element and y is
an output element, this operation computes \\(y = |x|\\).
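
For example, via the public `tf.math.abs` wrapper (a minimal sketch; for
real-valued inputs `tf.math.abs` corresponds to this op):

```python
import tensorflow as tf

x = tf.constant([-2.25, 3.25])
tf.math.abs(x)  # [2.25, 3.25]
```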
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AcosOp : TF_Op<"Acos", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes acos of x element-wise.";

  let description = [{
Provided an input tensor, the `tf.math.acos` operation returns the inverse
cosine of each element of the tensor. If `y = tf.math.cos(x)`, then
`x = tf.math.acos(y)`.

  Input range is `[-1, 1]` and the output has a range of `[0, pi]`.
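
For example (a minimal sketch using the public `tf.math.acos` wrapper):

```python
import tensorflow as tf

x = tf.constant([1.0, -1.0, 0.0])
tf.math.acos(x)  # [0., 3.1415927, 1.5707964], i.e. [0, pi, pi/2]
```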
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AcoshOp : TF_Op<"Acosh", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes inverse hyperbolic cosine of x element-wise.";

  let description = [{
Given an input tensor, the function computes inverse hyperbolic cosine of every element.
Input range is `[1, inf]`. It returns `nan` if the input lies outside the range.

```python
x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")])
tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf]
```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AddNOp : TF_Op<"AddN", [Commutative, NoSideEffect]> {
  let summary = "Add all input tensors element-wise.";

  let description = [{
Inputs must be of the same size and shape.

  ```python
  x = [9, 7, 10]
  tf.math.add_n(x) ==> 26
  ```
  }];

  let arguments = (ins
    Variadic<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8, TF_Variant]>>:$inputs
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8, TF_Variant]>:$sum
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}

def TF_AdjustContrastv2Op : TF_Op<"AdjustContrastv2", [NoSideEffect]> {
  let summary = "Adjust the contrast of one or more images.";

  let description = [{
`images` is a tensor of at least 3 dimensions.  The last 3 dimensions are
interpreted as `[height, width, channels]`.  The other dimensions only
represent a collection of images, such as `[batch, height, width, channels]`.

Contrast is adjusted independently for each channel of each image.

For each channel, the Op first computes the mean of the image pixels in the
channel and then adjusts each component of each pixel to
`(x - mean) * contrast_factor + mean`.
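
For example, via the public `tf.image.adjust_contrast` wrapper (a minimal
sketch; the channel mean here is 2.5, so the values follow the formula above):

```python
import tensorflow as tf

# One 2x2 single-channel image.
images = tf.constant([[[[1.0], [2.0]], [[3.0], [4.0]]]])
tf.image.adjust_contrast(images, contrast_factor=2.0)
# [[[[-0.5], [1.5]], [[3.5], [5.5]]]]
```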
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{Images to adjust.  At least 3-D.}]>:$images,
    Arg<TF_Float32Tensor, [{A float multiplier for adjusting contrast.}]>:$contrast_factor
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32]>, [{The contrast-adjusted image or images.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AdjustHueOp : TF_Op<"AdjustHue", [NoSideEffect]> {
  let summary = "Adjust the hue of one or more images.";

  let description = [{
`images` is a tensor of at least 3 dimensions.  The last dimension is
interpreted as channels, and must be three.

The input image is considered in the RGB colorspace. Conceptually, the RGB
colors are first mapped into HSV. A delta is then applied to all the hue
values, and the result is mapped back to the RGB colorspace.
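
For example, via the public `tf.image.adjust_hue` wrapper (a minimal sketch):

```python
import tensorflow as tf

# A single pure-red RGB pixel; shifting the hue by one third of the hue
# circle turns it green.
image = tf.constant([[[1.0, 0.0, 0.0]]])
tf.image.adjust_hue(image, delta=1.0 / 3.0)  # [[[0., 1., 0.]]]
```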
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{Images to adjust.  At least 3-D.}]>:$images,
    Arg<TF_Float32Tensor, [{A float delta to add to the hue.}]>:$delta
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32]>, [{The hue-adjusted image or images.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AdjustSaturationOp : TF_Op<"AdjustSaturation", [NoSideEffect]> {
  let summary = "Adjust the saturation of one or more images.";

  let description = [{
`images` is a tensor of at least 3 dimensions.  The last dimension is
interpreted as channels, and must be three.

The input image is considered in the RGB colorspace. Conceptually, the RGB
colors are first mapped into HSV. A scale is then applied to all the
saturation values, and the result is mapped back to the RGB colorspace.
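
For example, via the public `tf.image.adjust_saturation` wrapper (a minimal
sketch):

```python
import tensorflow as tf

# A single half-saturated red pixel; doubling the saturation fully
# saturates it while keeping hue and value fixed.
image = tf.constant([[[0.5, 0.25, 0.25]]])
tf.image.adjust_saturation(image, 2.0)  # [[[0.5, 0., 0.]]]
```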
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{Images to adjust.  At least 3-D.}]>:$images,
    Arg<TF_Float32Tensor, [{A float scale by which to multiply the saturation.}]>:$scale
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32]>, [{The saturation-adjusted image or images.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AllOp : TF_Op<"All", [NoSideEffect]> {
  let summary = [{
Computes the "logical and" of elements across dimensions of a tensor.
  }];

  let description = [{
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
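
For example, via the public `tf.reduce_all` wrapper (a minimal sketch):

```python
import tensorflow as tf

x = tf.constant([[True, True], [False, True]])
tf.reduce_all(x)          # False
tf.reduce_all(x, axis=0)  # [False, True]
tf.reduce_all(x, axis=1)  # [True, False]
```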
  }];

  let arguments = (ins
    Arg<TF_BoolTensor, [{The tensor to reduce.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.}]>:$reduction_indices,

    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
  );

  let results = (outs
    Res<TF_BoolTensor, [{The reduced tensor.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{ return Verify(*this); }];
}

def TF_AllToAllOp : TF_Op<"AllToAll", [NoSideEffect, TF_NoConstantFold]> {
  let summary = "An Op to exchange data across TPU replicas.";

  let description = [{
On each replica, the input is split into `split_count` blocks along
`split_dimension` and sent to the other replicas given `group_assignment`. After
receiving `split_count - 1` blocks from the other replicas, we concatenate the
blocks along `concat_dimension` as the output.

For example, suppose there are 2 TPU replicas:
replica 0 receives input: `[[A, B]]`
replica 1 receives input: `[[C, D]]`

group_assignment=`[[0, 1]]`
concat_dimension=0
split_dimension=1
split_count=2

replica 0's output: `[[A], [C]]`
replica 1's output: `[[B], [D]]`
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The local input to the sum.}]>:$input,
    Arg<TF_Int32Tensor, [{An int32 tensor with shape
[num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
replica ids in the ith subgroup.}]>:$group_assignment,

    I64Attr:$concat_dimension,
    I64Attr:$split_dimension,
    I64Attr:$split_count
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The exchanged result.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AngleOp : TF_Op<"Angle", [NoSideEffect, SameOperandsAndResultShape]> {
  let summary = "Returns the argument of a complex number.";

  let description = [{
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float` that is the argument of each element in `input`. All elements in
`input` must be complex numbers of the form \\(a + bj\\), where *a*
is the real part and *b* is the imaginary part.

The argument returned by this operation is of the form \\(atan2(b, a)\\).

For example:

```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.angle(input) ==> [2.0132, 1.056]
```

@compatibility(numpy)
Equivalent to np.angle.
@end_compatibility
  }];

  let arguments = (ins
    TensorOf<[TF_Complex128, TF_Complex64]>:$input
  );

  let results = (outs
    TF_F32OrF64Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
}

def TF_AnonymousIteratorOp : TF_Op<"AnonymousIterator", []> {
  let summary = "A container for an iterator resource.";

  let arguments = (ins
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to a "MakeIterator" or
"IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
resource sharing by name, and does not keep a reference to the resource
container.}], [TF_DatasetIteratorAlloc]>:$handle
  );
}

def TF_AnonymousIteratorV2Op : TF_Op<"AnonymousIteratorV2", []> {
  let summary = "A container for an iterator resource.";

  let arguments = (ins
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to a "MakeIterator" or
"IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
resource sharing by name, and does not keep a reference to the resource
container.}], [TF_DatasetIteratorAlloc]>:$handle,
    Res<TF_VariantTensor, [{A variant deleter that should be passed into the op that deletes the iterator.}]>:$deleter
  );
}

def TF_AnonymousMemoryCacheOp : TF_Op<"AnonymousMemoryCache", []> {
  let summary = "";

  let arguments = (ins);

  let results = (outs
    Res<TF_ResourceTensor, "", [TF_DatasetMemoryCacheAlloc]>:$handle,
    TF_VariantTensor:$deleter
  );
}

def TF_AnonymousMultiDeviceIteratorOp : TF_Op<"AnonymousMultiDeviceIterator", []> {
  let summary = "A container for a multi device iterator resource.";

  let arguments = (ins
    Confined<StrArrayAttr, [ArrayMinCount<1>]>:$devices,
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    Res<TF_ResourceTensor, [{A handle to a multi device iterator that can be passed to a
"MultiDeviceIteratorGetNextFromShard" op. In contrast to MultiDeviceIterator,
AnonymousMultiDeviceIterator prevents resource sharing by name, and does not
keep a reference to the resource container.}], [TF_DatasetIteratorAlloc]>:$handle,
    Res<TF_VariantTensor, [{A variant deleter that should be passed into the op that deletes the iterator.}]>:$deleter
  );
}

def TF_AnonymousRandomSeedGeneratorOp : TF_Op<"AnonymousRandomSeedGenerator", []> {
  let summary = "";

  let arguments = (ins
    TF_Int64Tensor:$seed,
    TF_Int64Tensor:$seed2
  );

  let results = (outs
    Res<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorAlloc]>:$handle,
    TF_VariantTensor:$deleter
  );
}

def TF_AnonymousSeedGeneratorOp : TF_Op<"AnonymousSeedGenerator", []> {
  let summary = "";

  let arguments = (ins
    TF_Int64Tensor:$seed,
    TF_Int64Tensor:$seed2,
    TF_BoolTensor:$reshuffle
  );

  let results = (outs
    Res<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorAlloc]>:$handle,
    TF_VariantTensor:$deleter
  );
}

def TF_AnyOp : TF_Op<"Any", [NoSideEffect]> {
  let summary = [{
Computes the "logical or" of elements across dimensions of a tensor.
  }];

  let description = [{
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
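
For example, via the public `tf.reduce_any` wrapper (a minimal sketch):

```python
import tensorflow as tf

x = tf.constant([[False, False], [False, True]])
tf.reduce_any(x)          # True
tf.reduce_any(x, axis=0)  # [False, True]
tf.reduce_any(x, axis=1)  # [False, True]
```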
  }];

  let arguments = (ins
    Arg<TF_BoolTensor, [{The tensor to reduce.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.}]>:$reduction_indices,

    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
  );

  let results = (outs
    Res<TF_BoolTensor, [{The reduced tensor.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{ return Verify(*this); }];
}

def TF_ApproximateEqualOp : TF_Op<"ApproximateEqual", [Commutative, NoSideEffect]> {
  let summary = "Returns the truth value of abs(x-y) < tolerance element-wise.";

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y,

    DefaultValuedAttr<F32Attr, "1e-05f">:$tolerance
  );

  let results = (outs
    TF_BoolTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ArgMaxOp : TF_Op<"ArgMax", [NoSideEffect]> {
  let summary = [{
Returns the index with the largest value across dimensions of a tensor.
  }];

  let description = [{
Note that in case of ties the identity of the return value is not guaranteed.

Usage:
  ```python
  import tensorflow as tf
  a = [1, 10, 26.9, 2.8, 166.32, 62.3]
  b = tf.math.argmax(input = a)
  c = tf.keras.backend.eval(b)
  # c = 4
  # here a[4] = 166.32 which is the largest element of a across axis 0
  ```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input,
    Arg<TF_I32OrI64Tensor, [{int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which dimension of the input Tensor to reduce across. For vectors,
use dimension = 0.}]>:$dimension
  );

  let results = (outs
    TF_I32OrI64Tensor:$output
  );

  TF_DerivedResultTypeAttr output_type = TF_DerivedResultTypeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
}

def TF_ArgMinOp : TF_Op<"ArgMin", [NoSideEffect]> {
  let summary = [{
Returns the index with the smallest value across dimensions of a tensor.
  }];

  let description = [{
Note that in case of ties the identity of the return value is not guaranteed.

Usage:
  ```python
  import tensorflow as tf
  a = [1, 10, 26.9, 2.8, 166.32, 62.3]
  b = tf.math.argmin(input = a)
  c = tf.keras.backend.eval(b)
  # c = 0
  # here a[0] = 1 which is the smallest element of a across axis 0
  ```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input,
    Arg<TF_I32OrI64Tensor, [{int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which dimension of the input Tensor to reduce across. For vectors,
use dimension = 0.}]>:$dimension
  );

  let results = (outs
    TF_I32OrI64Tensor:$output
  );

  TF_DerivedResultTypeAttr output_type = TF_DerivedResultTypeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
}

def TF_AsStringOp : TF_Op<"AsString", [NoSideEffect, SameOperandsAndResultShape]> {
  let summary = "Converts each entry in the given tensor to strings.";

  let description = [{
Supports many numeric types and boolean.

For Unicode, see the
[Working with Unicode text](https://www.tensorflow.org/tutorials/representation/unicode)
tutorial.

Examples:

>>> tf.strings.as_string([3, 2])
<tf.Tensor: shape=(2,), dtype=string, numpy=array([b'3', b'2'], dtype=object)>
>>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy()
array([b'3.14', b'2.72'], dtype=object)
  }];

  let arguments = (ins
    TensorOf<[TF_Bool, TF_Complex128, TF_Complex64, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Variant]>:$input,

    DefaultValuedAttr<I64Attr, "-1">:$precision,
    DefaultValuedAttr<BoolAttr, "false">:$scientific,
    DefaultValuedAttr<BoolAttr, "false">:$shortest,
    DefaultValuedAttr<I64Attr, "-1">:$width,
    StrAttr:$fill
  );

  let results = (outs
    TF_StrTensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AsinOp : TF_Op<"Asin", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes the trigonometric inverse sine of x element-wise.";

  let description = [{
The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that
if `y = tf.math.sin(x)`, then `x = tf.math.asin(y)`.

**Note**: The output of `tf.math.asin` will lie within the invertible range
of sine, i.e., [-pi/2, pi/2].

For example:

```python
# Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
x = tf.constant([1.047, 0.785])
y = tf.math.sin(x) # [0.8659266, 0.7068252]

tf.math.asin(y) # [1.047, 0.785] = x
```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AsinhOp : TF_Op<"Asinh", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes inverse hyperbolic sine of x element-wise.";

  let description = [{
Given an input tensor, this function computes inverse hyperbolic sine
  for every element in the tensor. Both input and output have a range of
  `[-inf, inf]`.

  ```python
  x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")])
  tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf]
  ```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AssertOp : TF_Op<"Assert", []> {
  let summary = "Asserts that the given condition is true.";

  let description = [{
If `condition` evaluates to false, print the list of tensors in `data`.
`summarize` determines how many entries of the tensors to print.
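
For example, via the public `tf.debugging.Assert` wrapper (a minimal sketch;
in graph mode the returned op must be made a control dependency of downstream
ops for the check to run):

```python
import tensorflow as tf

x = tf.constant([2.0, 3.0])
# Passes silently; with a failing condition it raises InvalidArgumentError
# and prints up to `summarize` entries of each tensor in `data`.
tf.debugging.Assert(tf.reduce_all(x > 0), [x], summarize=3)
```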
  }];

  let arguments = (ins
    Arg<TF_BoolTensor, [{The condition to evaluate.}]>:$condition,
    Arg<Variadic<TF_Tensor>, [{The tensors to print out when condition is false.}]>:$data,

    DefaultValuedAttr<I64Attr, "3">:$summarize
  );

  let results = (outs);

  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<1>;
}

def TF_AssignAddVariableOp : TF_Op<"AssignAddVariableOp", []> {
  let summary = "Adds a value to the current value of a variable.";

  let description = [{
Any ReadVariableOp with a control dependency on this op is guaranteed to
see the incremented value or a subsequent newer one.
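
For example, via `tf.Variable.assign_add`, which is backed by this op for
resource variables (a minimal sketch):

```python
import tensorflow as tf

v = tf.Variable(10.0)
v.assign_add(5.0)   # AssignAddVariableOp on the variable's resource handle
v.read_value()      # 15.0
```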
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{handle to the resource in which to store the variable.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
    Arg<TF_Tensor, [{the value by which the variable will be incremented.}]>:$value
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<1>;
}

def TF_AssignSubVariableOp : TF_Op<"AssignSubVariableOp", []> {
  let summary = "Subtracts a value from the current value of a variable.";

  let description = [{
Any ReadVariableOp with a control dependency on this op is guaranteed to
see the decremented value or a subsequent newer one.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{handle to the resource in which to store the variable.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
    Arg<TF_Tensor, [{the value by which the variable will be decremented.}]>:$value
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<1>;
}

def TF_AssignVariableOp : TF_Op<"AssignVariableOp", []> {
  let summary = "Assigns a new value to a variable.";

  let description = [{
Any ReadVariableOp with a control dependency on this op is guaranteed to return
this value or a subsequent newer value of the variable.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{handle to the resource in which to store the variable.}], [TF_VariableWrite]>:$resource,
    Arg<TF_Tensor, [{the value to assign to the variable.}]>:$value
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<1>;
}

def TF_AtanOp : TF_Op<"Atan", [NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Computes the trigonometric inverse tangent of x element-wise.";

  let description = [{
The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that
if `y = tf.math.tan(x)`, then `x = tf.math.atan(y)`.

**Note**: The output of `tf.math.atan` will lie within the invertible range
of tan, i.e., (-pi/2, pi/2).

For example:

```python
# Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
x = tf.constant([1.047, 0.785])
y = tf.math.tan(x) # [1.731261, 0.99920404]

tf.math.atan(y) # [1.047, 0.785] = x
```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_Atan2Op : TF_Op<"Atan2", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
                 WithBroadcastableBinOpBuilder {
  let summary = [{
Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
  }];

  let description = [{
This is the angle \\(\theta \in [-\pi, \pi]\\) such that
\\(x = r \cos(\theta)\\)
and
\\(y = r \sin(\theta)\\)
where \\(r = \sqrt{x^2 + y^2}\\).
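
For example (a minimal sketch using the public `tf.math.atan2` wrapper):

```python
import tensorflow as tf

y = tf.constant([1.0, 1.0, -1.0])
x = tf.constant([1.0, -1.0, -1.0])
tf.math.atan2(y, x)  # [0.7853982, 2.3561945, -2.3561945], i.e. [pi/4, 3pi/4, -3pi/4]
```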
  }];

  let arguments = (ins
    TF_FloatTensor:$y,
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AtanhOp : TF_Op<"Atanh", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes inverse hyperbolic tangent of x element-wise.";

  let description = [{
Given an input tensor, this function computes inverse hyperbolic tangent
  for every element in the tensor. Input range is `[-1,1]` and output range is
  `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the
  input is `1`, output will be `inf`. Values outside the range will have
  `nan` as output.

  ```python
  x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")])
  tf.math.atanh(x) ==> [nan -inf -0.54930615 inf  0. 0.54930615 nan nan]
  ```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AvgPoolOp : TF_Op<"AvgPool", [NoSideEffect]> {
  let summary = "Performs average pooling on the input.";

  let description = [{
Each entry in `output` is the mean of the corresponding size `ksize`
window in `value`.
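
For example, via the public `tf.nn.avg_pool2d` wrapper (a minimal sketch):

```python
import tensorflow as tf

# One 2x2 single-channel image, reduced to the mean of its window.
x = tf.constant([[[[1.0], [2.0]], [[3.0], [4.0]]]])
tf.nn.avg_pool2d(x, ksize=2, strides=2, padding="VALID")  # [[[[2.5]]]]
```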
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{4-D with shape `[batch, height, width, channels]`.}]>:$value,

    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
  );

  let results = (outs
    Res<TF_FloatTensor, [{The average pooled output tensor.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AvgPool3DOp : TF_Op<"AvgPool3D", [NoSideEffect]> {
  let summary = "Performs 3D average pooling on the input.";

  let description = [{
Each entry in `output` is the mean of the corresponding size `ksize` window in
`value`.
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{Shape `[batch, depth, rows, cols, channels]` tensor to pool over.}]>:$input,

    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format
  );

  let results = (outs
    Res<TF_FloatTensor, [{The average pooled output tensor.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AvgPool3DGradOp : TF_Op<"AvgPool3DGrad", [NoSideEffect]> {
  let summary = "Computes gradients of average pooling function.";

  let arguments = (ins
    Arg<TF_Int32Tensor, [{The original input dimensions.}]>:$orig_input_shape,
    Arg<TF_FloatTensor, [{Output backprop of shape `[batch, depth, rows, cols, channels]`.}]>:$grad,

    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format
  );

  let results = (outs
    Res<TF_FloatTensor, [{The backprop for input.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
}

def TF_AvgPoolGradOp : TF_Op<"AvgPoolGrad", [NoSideEffect]> {
  let summary = "Computes gradients of the average pooling function.";

  let arguments = (ins
    Arg<TF_Int32Tensor, [{1-D.  Shape of the original input to `avg_pool`.}]>:$orig_input_shape,
    Arg<TF_FloatTensor, [{4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.
the output of `avg_pool`.}]>:$grad,

    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
  );

  let results = (outs
    Res<TF_FloatTensor, [{4-D.  Gradients w.r.t. the input of `avg_pool`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
}

def TF_BatchMatMulOp : TF_Op<"BatchMatMul", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
  let summary = "Multiplies slices of two tensors in batches.";

  let description = [{
Multiplies all slices of `Tensor` `x` and `y` (each slice can be
viewed as an element of a batch), and arranges the individual results
in a single output tensor of the same batch size. Each of the
individual slices can optionally be adjointed (to adjoint a matrix
means to transpose and conjugate it) before multiplication by setting
the `adj_x` or `adj_y` flag to `True`, which are by default `False`.

The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
and `[..., r_y, c_y]`.

The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:

    r_o = c_x if adj_x else r_x
    c_o = r_y if adj_y else c_y

It is computed as:

    output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
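
For example, a plain batched product via the public `tf.matmul` wrapper (a
minimal sketch; for this op the batch dimensions of `x` and `y` must match
exactly):

```python
import tensorflow as tf

x = tf.random.normal([2, 3, 4])  # a batch of two 3x4 matrices
y = tf.random.normal([2, 4, 5])  # a batch of two 4x5 matrices
tf.matmul(x, y).shape            # [2, 3, 5]
```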
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_x, c_x]`.}]>:$x,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_y, c_y]`.}]>:$y,

    DefaultValuedAttr<BoolAttr, "false">:$adj_x,
    DefaultValuedAttr<BoolAttr, "false">:$adj_y
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_o, c_o]`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_BatchMatMulV2Op : TF_Op<"BatchMatMulV2", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
  let summary = "Multiplies slices of two tensors in batches.";

  let description = [{
Multiplies all slices of `Tensor` `x` and `y` (each slice can be
viewed as an element of a batch), and arranges the individual results
in a single output tensor of the same batch size. Each of the
individual slices can optionally be adjointed (to adjoint a matrix
means to transpose and conjugate it) before multiplication by setting
the `adj_x` or `adj_y` flag to `True`, which are by default `False`.

The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
and `[..., r_y, c_y]`.

The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:

    r_o = c_x if adj_x else r_x
    c_o = r_y if adj_y else c_y

It is computed as:

    output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])

*NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. More
about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
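
For example, broadcasting in the batch dimensions via the public `tf.matmul`
wrapper (a minimal sketch):

```python
import tensorflow as tf

x = tf.random.normal([5, 1, 3, 4])  # batch dims [5, 1]
y = tf.random.normal([2, 4, 6])     # batch dims [2], broadcast to [5, 2]
tf.matmul(x, y).shape               # [5, 2, 3, 6]
```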
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_x, c_x]`.}]>:$x,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_y, c_y]`.}]>:$y,

    DefaultValuedAttr<BoolAttr, "false">:$adj_x,
    DefaultValuedAttr<BoolAttr, "false">:$adj_y
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_o, c_o]`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_BatchNormWithGlobalNormalizationOp : TF_Op<"BatchNormWithGlobalNormalization", [NoSideEffect]> {
  let summary = "Batch normalization.";

  let description = [{
This op is deprecated. Prefer `tf.nn.batch_normalization`.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 4D input Tensor.}]>:$t,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D mean Tensor with size matching the last dimension of t.
This is the first output from tf.nn.moments,
or a saved moving average thereof.}]>:$m,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D variance Tensor with size matching the last dimension of t.
This is the second output from tf.nn.moments,
or a saved moving average thereof.}]>:$v,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D beta Tensor with size matching the last dimension of t.
An offset to be added to the normalized tensor.}]>:$beta,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D gamma Tensor with size matching the last dimension of t.
If "scale_after_normalization" is true, this tensor will be multiplied
with the normalized tensor.}]>:$gamma,

    F32Attr:$variance_epsilon,
    BoolAttr:$scale_after_normalization
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$result
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_BatchToSpaceOp : TF_Op<"BatchToSpace", [NoSideEffect]> {
  let summary = "BatchToSpace for 4-D tensors of type T.";

  let description = [{
This is a legacy version of the more general BatchToSpaceND.

Rearranges (permutes) data from batch into blocks of spatial data, followed by
cropping. This is the reverse transformation of SpaceToBatch. More specifically,
this op outputs a copy of the input tensor where values from the `batch`
dimension are moved in spatial blocks to the `height` and `width` dimensions,
followed by cropping along the `height` and `width` dimensions.
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{4-D tensor with shape
`[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
  depth]`. Note that the batch size of the input tensor must be divisible by
`block_size * block_size`.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
how many elements to crop from the intermediate result across the spatial
dimensions as follows:

    crops = [[crop_top, crop_bottom], [crop_left, crop_right]]}]>:$crops,

    Confined<I64Attr, [IntMinValue<2>]>:$block_size
  );

  let results = (outs
    Res<TF_Tensor, [{4-D with shape `[batch, height, width, depth]`, where:

      height = height_pad - crop_top - crop_bottom
      width = width_pad - crop_left - crop_right

The attr `block_size` must be greater than one. It indicates the block size.

Some examples:

(1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:

```
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```

The output tensor has shape `[1, 2, 2, 1]` and value:

```
x = [[[[1], [2]], [[3], [4]]]]
```

(2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:

```
[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
```

The output tensor has shape `[1, 2, 2, 3]` and value:

```
x = [[[[1, 2, 3], [4, 5, 6]],
      [[7, 8, 9], [10, 11, 12]]]]
```

(3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:

```
x = [[[[1], [3]], [[9], [11]]],
     [[[2], [4]], [[10], [12]]],
     [[[5], [7]], [[13], [15]]],
     [[[6], [8]], [[14], [16]]]]
```

The output tensor has shape `[1, 4, 4, 1]` and value:

```
x = [[[[1],   [2],  [3],  [4]],
     [[5],   [6],  [7],  [8]],
     [[9],  [10], [11],  [12]],
     [[13], [14], [15],  [16]]]]
```

(4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:

```
x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
```

The output tensor has shape `[2, 2, 4, 1]` and value:

```
x = [[[[1],   [2],  [3],  [4]],
      [[5],   [6],  [7],  [8]]],
     [[[9],  [10], [11],  [12]],
      [[13], [14], [15],  [16]]]]
```}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_BatchToSpaceNDOp : TF_Op<"BatchToSpaceND", [NoSideEffect]> {
  let summary = "BatchToSpace for N-D tensors of type T.";

  let description = [{
This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
`block_shape + [batch]`, interleaves these blocks back into the grid defined by
the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
the input.  The spatial dimensions of this intermediate result are then
optionally cropped according to `crops` to produce the output.  This is the
reverse of SpaceToBatch.  See below for a precise description.
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
where spatial_shape has M dimensions.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{1-D with shape `[M]`, all values must be >= 1.}]>:$block_shape,
    Arg<TF_I32OrI64Tensor, [{2-D with shape `[M, 2]`, all values must be >= 0.
  `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
  dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
  required that
  `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.

This operation is equivalent to the following steps:

1. Reshape `input` to `reshaped` of shape:
     [block_shape[0], ..., block_shape[M-1],
      batch / prod(block_shape),
      input_shape[1], ..., input_shape[N-1]]

2. Permute dimensions of `reshaped` to produce `permuted` of shape
     [batch / prod(block_shape),

      input_shape[1], block_shape[0],
      ...,
      input_shape[M], block_shape[M-1],

      input_shape[M+1], ..., input_shape[N-1]]

3. Reshape `permuted` to produce `reshaped_permuted` of shape
     [batch / prod(block_shape),

      input_shape[1] * block_shape[0],
      ...,
      input_shape[M] * block_shape[M-1],

      input_shape[M+1],
      ...,
      input_shape[N-1]]

4. Crop the start and end of dimensions `[1, ..., M]` of
   `reshaped_permuted` according to `crops` to produce the output of shape:
     [batch / prod(block_shape),

      input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
      ...,
      input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],

      input_shape[M+1], ..., input_shape[N-1]]

Some examples:

(1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
    `crops = [[0, 0], [0, 0]]`:

```
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```

The output tensor has shape `[1, 2, 2, 1]` and value:

```
x = [[[[1], [2]], [[3], [4]]]]
```

(2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
    `crops = [[0, 0], [0, 0]]`:

```
[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
```

The output tensor has shape `[1, 2, 2, 3]` and value:

```
x = [[[[1, 2, 3], [4, 5, 6]],
      [[7, 8, 9], [10, 11, 12]]]]
```

(3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
    `crops = [[0, 0], [0, 0]]`:

```
x = [[[[1], [3]], [[9], [11]]],
     [[[2], [4]], [[10], [12]]],
     [[[5], [7]], [[13], [15]]],
     [[[6], [8]], [[14], [16]]]]
```

The output tensor has shape `[1, 4, 4, 1]` and value:

```
x = [[[[1],   [2],  [3],  [4]],
     [[5],   [6],  [7],  [8]],
     [[9],  [10], [11],  [12]],
     [[13], [14], [15],  [16]]]]
```

(4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
    `crops = [[0, 0], [2, 0]]`:

```
x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
     [[[0], [2], [4]]], [[[0], [10], [12]]],
     [[[0], [5], [7]]], [[[0], [13], [15]]],
     [[[0], [6], [8]]], [[[0], [14], [16]]]]
```

The output tensor has shape `[2, 2, 4, 1]` and value:

```
x = [[[[1],   [2],  [3],  [4]],
      [[5],   [6],  [7],  [8]]],
     [[[9],  [10], [11],  [12]],
      [[13], [14], [15],  [16]]]]
```}]>:$crops
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tcrops = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandTypeAttr Tblock_shape = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_BetaincOp : TF_Op<"Betainc", [NoSideEffect]> {
  let summary = [{
Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
  }];

  let description = [{
The regularized incomplete beta integral is defined as:


\\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)

where


\\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)


is the incomplete beta function and \\(B(a, b)\\) is the *complete*
beta function.
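
For example (a minimal sketch using the public `tf.math.betainc` wrapper;
with a = b = 1 the integral reduces to \\(I_x(1, 1) = x\\)):

```python
import tensorflow as tf

a = tf.constant([1.0, 1.0, 1.0])
b = tf.constant([1.0, 1.0, 1.0])
x = tf.constant([0.25, 0.5, 0.75])
tf.math.betainc(a, b, x)  # [0.25, 0.5, 0.75]
```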
  }];

  let arguments = (ins
    TF_F32OrF64Tensor:$a,
    TF_F32OrF64Tensor:$b,
    TF_F32OrF64Tensor:$x
  );

  let results = (outs
    TF_F32OrF64Tensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_BiasAddOp : TF_Op<"BiasAdd", [NoSideEffect]> {
  let summary = "Adds `bias` to `value`.";

  let description = [{
This is a special case of `tf.add` where `bias` is restricted to be 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
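
For example, via the public `tf.nn.bias_add` wrapper (a minimal sketch):

```python
import tensorflow as tf

value = tf.constant([[1.0, 2.0], [3.0, 4.0]])
bias = tf.constant([10.0, 20.0])
tf.nn.bias_add(value, bias)  # [[11., 22.], [13., 24.]]
```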
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Any number of dimensions.}]>:$value,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{1-D with size the last dimension of `value`.}]>:$bias,

    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Broadcasted sum of `value` and `bias`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_BiasAddGradOp : TF_Op<"BiasAddGrad", [NoSideEffect]> {
  let summary = [{
The backward operation for "BiasAdd" on the "bias" tensor.
  }];

  let description = [{
It accumulates all the values from out_backprop into the feature dimension.
For NHWC data format, the feature dimension is the last. For NCHW data format,
the feature dimension is the third-to-last.
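
For NHWC data this is equivalent to summing over every dimension except the
last (a minimal sketch of the semantics, not of the kernel):

```python
import tensorflow as tf

out_backprop = tf.ones([2, 3, 4, 5])  # NHWC, feature dimension of size 5
# Accumulate into the feature dimension: shape [5], each entry 2*3*4 = 24.
bias_grad = tf.reduce_sum(out_backprop, axis=[0, 1, 2])
```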
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Any number of dimensions.}]>:$out_backprop,

    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{1-D with size the feature dimension of `out_backprop`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_BiasAddV1Op : TF_Op<"BiasAddV1", [NoSideEffect]> {
  let summary = "Adds `bias` to `value`.";

  let description = [{
This is a deprecated version of BiasAdd and will soon be removed.

This is a special case of `tf.add` where `bias` is restricted to be 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Any number of dimensions.}]>:$value,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{1-D with size the last dimension of `value`.}]>:$bias
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Broadcasted sum of `value` and `bias`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_BincountOp : TF_Op<"Bincount", [NoSideEffect]> {
  let summary = [{
Counts the number of occurrences of each value in an integer array.
  }];

  let description = [{
Outputs a vector with length `size` and the same dtype as `weights`. If
`weights` are empty, then index `i` stores the number of times the value `i` is
counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
the value in `weights` at each index where the corresponding value in `arr` is
`i`.

Values in `arr` outside of the range [0, size) are ignored.
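
For example, via the public `tf.math.bincount` wrapper (a minimal sketch):

```python
import tensorflow as tf

arr = tf.constant([1, 1, 2, 3, 3, 3])
tf.math.bincount(arr)  # [0, 2, 1, 3]: value 0 absent, two 1s, one 2, three 3s

weights = tf.constant([1.0, 1.0, 2.0, 0.5, 0.5, 0.5])
tf.math.bincount(arr, weights=weights)  # [0., 2., 2., 1.5]
```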
  }];

  let arguments = (ins
    Arg<TF_Int32Tensor, [{int32 `Tensor`.}]>:$arr,
    Arg<TF_Int32Tensor, [{non-negative int32 scalar `Tensor`.}]>:$size,
    Arg<TensorOf<[TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{is an int32, int64, float32, or float64 `Tensor` with the same
shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
equal to 1.}]>:$weights
  );

  let results = (outs
    Res<TensorOf<[TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{1D `Tensor` with length equal to `size`. The counts or summed weights for
each value in the range [0, size).}]>:$bins
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
}

def TF_BitcastOp : TF_Op<"Bitcast", [NoSideEffect]> {
  let summary = [{
Bitcasts a tensor from one type to another without copying data.
  }];

  let description = [{
Given a tensor `input`, this operation returns a tensor that has the same buffer
data as `input` with datatype `type`.

If the input datatype `T` is larger than the output datatype `type` then the
shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].

If `T` is smaller than `type`, the operator requires that the rightmost
dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
[..., sizeof(`type`)/sizeof(`T`)] to [...].

tf.bitcast() and tf.cast() work differently when a real dtype is cast to a
complex dtype (e.g. tf.complex64 or tf.complex128): tf.cast() makes the
imaginary part 0, while tf.bitcast() raises an error.
For example,

Example 1:

>>> a = [1., 2., 3.]
>>> equality_bitcast = tf.bitcast(a, tf.complex128)
Traceback (most recent call last):
...
InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]
>>> equality_cast = tf.cast(a, tf.complex128)
>>> print(equality_cast)
tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)

Example 2:

>>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)
<tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)>

Example 3:

>>> x = [1., 2., 3.]
>>> y = [0., 2., 3.]
>>> equality = tf.equal(x, y)
>>> equality_cast = tf.cast(equality, tf.float32)
>>> equality_bitcast = tf.bitcast(equality_cast, tf.uint8)
>>> print(equality)
tf.Tensor([False True True], shape=(3,), dtype=bool)
>>> print(equality_cast)
tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)
>>> print(equality_bitcast)
tf.Tensor(
    [[  0   0   0   0]
     [  0   0 128  63]
     [  0   0 128  63]], shape=(3, 4), dtype=uint8)

*NOTE*: Bitcast is implemented as a low-level cast, so machines with different
endian orderings will give different results.
  }];

  let arguments = (ins
    TF_NumberTensor:$input
  );

  let results = (outs
    TF_NumberTensor:$output
  );

  TF_DerivedResultTypeAttr type = TF_DerivedResultTypeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_BitwiseAndOp : TF_Op<"BitwiseAnd", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
                      WithBroadcastableBinOpBuilder {
  let summary = "Elementwise computes the bitwise AND of `x` and `y`.";

  let description = [{
The result will have those bits set that are set in both `x` and `y`. The
computation is performed on the underlying representations of `x` and `y`.

For example:

```python
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
              tf.uint8, tf.uint16, tf.uint32, tf.uint64]

for dtype in dtype_list:
  lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
  exp = tf.constant([0, 0, 3, 10], dtype=tf.float32)

  res = bitwise_ops.bitwise_and(lhs, rhs)
  tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE
```
  }];

  let arguments = (ins
    TF_IntTensor:$x,
    TF_IntTensor:$y
  );

  let results = (outs
    TF_IntTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_BitwiseOrOp : TF_Op<"BitwiseOr", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
                     WithBroadcastableBinOpBuilder {
  let summary = "Elementwise computes the bitwise OR of `x` and `y`.";

  let description = [{
The result will have those bits set that are set in `x`, `y` or both. The
computation is performed on the underlying representations of `x` and `y`.

For example:

```python
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
              tf.uint8, tf.uint16, tf.uint32, tf.uint64]

for dtype in dtype_list:
  lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
  exp = tf.constant([5, 5, 7, 15], dtype=tf.float32)

  res = bitwise_ops.bitwise_or(lhs, rhs)
  tf.assert_equal(tf.cast(res, tf.float32), exp)  # TRUE
```
  }];

  let arguments = (ins
    TF_IntTensor:$x,
    TF_IntTensor:$y
  );

  let results = (outs
    TF_IntTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_BitwiseXorOp : TF_Op<"BitwiseXor", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
                      WithBroadcastableBinOpBuilder {
  let summary = "Elementwise computes the bitwise XOR of `x` and `y`.";

  let description = [{
The result will have those bits set that are different in `x` and `y`. The
1509computation is performed on the underlying representations of `x` and `y`.
1510
1511For example:
1512
1513```python
1514import tensorflow as tf
1515from tensorflow.python.ops import bitwise_ops
1516dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
1517              tf.uint8, tf.uint16, tf.uint32, tf.uint64]
1518
1519for dtype in dtype_list:
1520  lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
1521  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
1522  exp = tf.constant([5, 5, 4, 5],  dtype=tf.float32)
1523
1524  res = bitwise_ops.bitwise_xor(lhs, rhs)
1525  tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE
1526```
1527  }];
1528
1529  let arguments = (ins
1530    TF_IntTensor:$x,
1531    TF_IntTensor:$y
1532  );
1533
1534  let results = (outs
1535    TF_IntTensor:$z
1536  );
1537
1538  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1539}
1540
1541def TF_BoostedTreesBucketizeOp : TF_Op<"BoostedTreesBucketize", [NoSideEffect, SameVariadicOperandSize]> {
1542  let summary = "Bucketize each feature based on bucket boundaries.";
1543
1544  let description = [{
An op that returns a list of int32 tensors, where each tensor represents the
1546bucketized values for a single feature.
1547  }];
1548
1549  let arguments = (ins
    Arg<Variadic<TF_Float32Tensor>, [{float; List of Rank 1 Tensors, each containing float values for a single feature.}]>:$float_values,
1551    Arg<Variadic<TF_Float32Tensor>, [{float; List of Rank 1 Tensors each containing the bucket boundaries for a single
1552feature.}]>:$bucket_boundaries
1553  );
1554
1555  let results = (outs
1556    Res<Variadic<TF_Int32Tensor>, [{int; List of Rank 1 Tensors each containing the bucketized values for a single feature.}]>:$buckets
1557  );
1558
1559  TF_DerivedOperandSizeAttr num_features = TF_DerivedOperandSizeAttr<0>;
1560}
1561
1562def TF_BroadcastArgsOp : TF_Op<"BroadcastArgs", [NoSideEffect]> {
1563  let summary = "Return the shape of s0 op s1 with broadcast.";
1564
1565  let description = [{
1566Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
1567broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
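
For example, a minimal sketch via the public `tf.broadcast_dynamic_shape`
wrapper (assumed to lower to this op):

```python
s0 = tf.constant([8, 1, 6, 1])
s1 = tf.constant([7, 1, 5])
tf.broadcast_dynamic_shape(s0, s1)  # => [8, 7, 6, 5]
```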
1568  }];
1569
1570  let arguments = (ins
1571    TF_I32OrI64Tensor:$s0,
1572    TF_I32OrI64Tensor:$s1
1573  );
1574
1575  let results = (outs
1576    TF_I32OrI64Tensor:$r0
1577  );
1578
1579  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1580}
1581
1582def TF_BroadcastGradientArgsOp : TF_Op<"BroadcastGradientArgs", [NoSideEffect, SameOperandsAndResultElementType, TF_OperandHasRank<0, 1>, TF_OperandHasRank<1, 1>, TF_ResultHasRank<0, 1>, TF_ResultHasRank<1, 1>]> {
1583  let summary = [{
1584Return the reduction indices for computing gradients of s0 op s1 with broadcast.
1585  }];
1586
1587  let description = [{
1588This is typically used by gradient computations for a broadcasting operation.
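
A minimal sketch via `tf.raw_ops.BroadcastGradientArgs` (hypothetical shapes):

```python
r0, r1 = tf.raw_ops.BroadcastGradientArgs(s0=[2, 3, 5], s1=[5])
# r0 => []      no axes to reduce for the [2, 3, 5] operand
# r1 => [0, 1]  the gradient w.r.t. the [5] operand is summed over axes 0 and 1
```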
1589  }];
1590
1591  let arguments = (ins
1592    TF_I32OrI64Tensor:$s0,
1593    TF_I32OrI64Tensor:$s1
1594  );
1595
1596  let results = (outs
1597    TF_I32OrI64Tensor:$r0,
1598    TF_I32OrI64Tensor:$r1
1599  );
1600
1601  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1602
1603  let verifier = [{
1604    return Verify(*this);
1605  }];
1606}
1607
1608def TF_BroadcastToOp : TF_Op<"BroadcastTo", [NoSideEffect]> {
1609  let summary = "Broadcast an array for a compatible shape.";
1610
1611  let description = [{
Broadcasting is the process of making arrays have compatible shapes
1613for arithmetic operations. Two shapes are compatible if for each
1614dimension pair they are either equal or one of them is one. When trying
1615to broadcast a Tensor to a shape, it starts with the trailing dimensions,
1616and works its way forward.
1617
1618For example,
1619
1620>>> x = tf.constant([1, 2, 3])
1621>>> y = tf.broadcast_to(x, [3, 3])
1622>>> print(y)
1623tf.Tensor(
1624    [[1 2 3]
1625     [1 2 3]
1626     [1 2 3]], shape=(3, 3), dtype=int32)
1627
In the above example, the input Tensor with shape `[3]`
is broadcast to an output Tensor with shape `[3, 3]`.
1630
1631When doing broadcasted operations such as multiplying a tensor
1632by a scalar, broadcasting (usually) confers some time or space
1633benefit, as the broadcasted tensor is never materialized.
1634
1635However, `broadcast_to` does not carry with it any such benefits.
1636The newly-created tensor takes the full memory of the broadcasted
shape. (In a graph context, `broadcast_to` might be fused into the
subsequent operation and then be optimized away, however.)
1639  }];
1640
1641  let arguments = (ins
1642    Arg<TF_Tensor, [{A Tensor to broadcast.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{A 1-D `int` Tensor. The shape of the desired output.}]>:$shape
1644  );
1645
1646  let results = (outs
1647    Res<TF_Tensor, [{A Tensor.}]>:$output
1648  );
1649
1650  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1651  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
1652
1653  let verifier = [{
1654    return Verify(*this);
1655  }];
1656}
1657
1658def TF_BucketizeOp : TF_Op<"Bucketize", [NoSideEffect, SameOperandsAndResultShape]> {
1659  let summary = "Bucketizes 'input' based on 'boundaries'.";
1660
1661  let description = [{
1662For example, if the inputs are
1663    boundaries = [0, 10, 100]
1664    input = [[-5, 10000]
1665             [150,   10]
1666             [5,    100]]
1667
1668then the output will be
1669    output = [[0, 3]
1670              [3, 2]
1671              [1, 3]]
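
A runnable sketch of the same example via `tf.raw_ops.Bucketize`:

```python
x = tf.constant([[-5., 10000.], [150., 10.], [5., 100.]])
tf.raw_ops.Bucketize(input=x, boundaries=[0., 10., 100.])
# => [[0, 3], [3, 2], [1, 3]]
```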
1672  }];
1673
1674  let arguments = (ins
    Arg<TensorOf<[TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{A Tensor of any shape, containing int or float values.}]>:$input,
1676
1677    F32ArrayAttr:$boundaries
1678  );
1679
1680  let results = (outs
    Res<TF_Int32Tensor, [{Same shape as 'input', with each value of input replaced by its bucket index.
1682
1683@compatibility(numpy)
1684Equivalent to np.digitize.
1685@end_compatibility}]>:$output
1686  );
1687
1688  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1689}
1690
1691def TF_CastOp : TF_Op<"Cast", [NoSideEffect, SameOperandsAndResultShape]> {
1692  let summary = "Cast x of type SrcT to y of DstT.";
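
  let description = [{
A minimal usage sketch via the public `tf.cast` wrapper (hypothetical values):

```python
x = tf.constant([1.8, 2.2], dtype=tf.float32)
tf.cast(x, tf.int32)  # => [1, 2], rounding toward zero
```
  }];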
1693
1694  let arguments = (ins
1695    TF_Tensor:$x,
1696
1697    DefaultValuedAttr<BoolAttr, "false">:$Truncate
1698  );
1699
1700  let results = (outs
1701    TF_Tensor:$y
1702  );
1703
1704  TF_DerivedOperandTypeAttr SrcT = TF_DerivedOperandTypeAttr<0>;
1705  TF_DerivedResultTypeAttr DstT = TF_DerivedResultTypeAttr<0>;
1706}
1707
1708def TF_CeilOp : TF_Op<"Ceil", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
1709  let summary = "Returns element-wise smallest integer not less than x.";
1710
1711  let arguments = (ins
1712    TF_FloatTensor:$x
1713  );
1714
1715  let results = (outs
1716    TF_FloatTensor:$y
1717  );
1718
1719  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1720}
1721
1722def TF_CheckNumericsOp : TF_Op<"CheckNumerics", [TF_SameOperandsAndResultTypeResolveRef]> {
1723  let summary = "Checks a tensor for NaN and Inf values.";
1724
1725  let description = [{
1726When run, reports an `InvalidArgument` error if `tensor` has any values
1727that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
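
A minimal sketch via the public `tf.debugging.check_numerics` wrapper:

```python
x = tf.constant([1.0, float("nan")])
tf.debugging.check_numerics(x, message="x contains bad values")
# raises InvalidArgumentError because of the NaN element
```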
1728  }];
1729
1730  let arguments = (ins
1731    TF_FloatTensor:$tensor,
1732
1733    StrAttr:$message
1734  );
1735
1736  let results = (outs
1737    TF_FloatTensor:$output
1738  );
1739
1740  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1741}
1742
1743def TF_CholeskyOp : TF_Op<"Cholesky", [NoSideEffect]> {
1744  let summary = [{
1745Computes the Cholesky decomposition of one or more square matrices.
1746  }];
1747
1748  let description = [{
1749The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
1750form square matrices.
1751
1752The input has to be symmetric and positive definite. Only the lower-triangular
1753part of the input will be used for this operation. The upper-triangular part
1754will not be read.
1755
1756The output is a tensor of the same shape as the input
1757containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
1758
1759**Note**: The gradient computation on GPU is faster for large matrices but
1760not for large batch dimensions when the submatrices are small. In this
1761case it might be faster to use the CPU.
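
A minimal sketch via the public `tf.linalg.cholesky` wrapper:

```python
a = tf.constant([[4., 2.], [2., 3.]])  # symmetric positive definite
l = tf.linalg.cholesky(a)              # lower triangular
# tf.matmul(l, l, adjoint_b=True) recovers `a` up to rounding
```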
1762  }];
1763
1764  let arguments = (ins
1765    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$input
1766  );
1767
1768  let results = (outs
1769    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$output
1770  );
1771
1772  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1773}
1774
1775def TF_ClipByValueOp : TF_Op<"ClipByValue", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
1776  let summary = "Clips tensor values to a specified min and max.";
1777
1778  let description = [{
1779Given a tensor `t`, this operation returns a tensor of the same type and
1780shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
1781Any values less than `clip_value_min` are set to `clip_value_min`. Any values
1782greater than `clip_value_max` are set to `clip_value_max`.
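
A minimal sketch via the public `tf.clip_by_value` wrapper:

```python
t = tf.constant([[-10., 0., 10.], [2., 3., 4.]])
tf.clip_by_value(t, clip_value_min=0., clip_value_max=3.)
# => [[0., 0., 3.], [2., 3., 3.]]
```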
1783  }];
1784
1785  let arguments = (ins
1786    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A `Tensor`.}]>:$t,
1787    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
1788as `t`. The minimum value to clip by.}]>:$clip_value_min,
1789    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
1790as `t`. The maximum value to clip by.}]>:$clip_value_max
1791  );
1792
1793  let results = (outs
1794    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A clipped `Tensor` with the same shape as input 't'.}]>:$output
1795  );
1796
1797  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1798}
1799
1800def TF_CollectiveBcastRecvOp : TF_Op<"CollectiveBcastRecv", []> {
1801  let summary = "Receives a tensor value broadcast from another device.";
1802
1803  let arguments = (ins
1804    I64Attr:$group_size,
1805    I64Attr:$group_key,
1806    I64Attr:$instance_key,
1807    TF_ShapeAttr:$shape,
1808    DefaultValuedAttr<StrAttr, "auto">:$communication_hint,
1809    DefaultValuedAttr<F32Attr, "0.0f">:$timeout_seconds
1810  );
1811
1812  let results = (outs
1813    TensorOf<[TF_Bool, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$data
1814  );
1815
1816  TF_DerivedResultTypeAttr T = TF_DerivedResultTypeAttr<0>;
1817}
1818
1819def TF_CollectiveBcastSendOp : TF_Op<"CollectiveBcastSend", []> {
1820  let summary = "Broadcasts a tensor value to one or more other devices.";
1821
1822  let arguments = (ins
1823    TensorOf<[TF_Bool, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$input,
1824
1825    I64Attr:$group_size,
1826    I64Attr:$group_key,
1827    I64Attr:$instance_key,
1828    TF_ShapeAttr:$shape,
1829    DefaultValuedAttr<StrAttr, "auto">:$communication_hint,
1830    DefaultValuedAttr<F32Attr, "0.0f">:$timeout_seconds
1831  );
1832
1833  let results = (outs
1834    TensorOf<[TF_Bool, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$data
1835  );
1836
1837  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1838}
1839
1840def TF_CollectiveGatherOp : TF_Op<"CollectiveGather", []> {
1841  let summary = [{
1842Mutually accumulates multiple tensors of identical type and shape.
1843  }];
1844
1845  let arguments = (ins
1846    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$input,
1847
1848    I64Attr:$group_size,
1849    I64Attr:$group_key,
1850    I64Attr:$instance_key,
1851    TF_ShapeAttr:$shape,
1852    DefaultValuedAttr<StrAttr, "auto">:$communication_hint,
1853    DefaultValuedAttr<F32Attr, "0.0f">:$timeout_seconds
1854  );
1855
1856  let results = (outs
1857    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$data
1858  );
1859
1860  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1861}
1862
1863def TF_CollectiveReduceOp : TF_Op<"CollectiveReduce", [TF_SameOperandsAndResultTypeResolveRef]> {
1864  let summary = [{
1865Mutually reduces multiple tensors of identical type and shape.
1866  }];
1867
1868  let arguments = (ins
1869    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$input,
1870
1871    I64Attr:$group_size,
1872    I64Attr:$group_key,
1873    I64Attr:$instance_key,
1874    TF_AnyStrAttrOf<["Min", "Max", "Mul", "Add"]>:$merge_op,
1875    TF_AnyStrAttrOf<["Id", "Div"]>:$final_op,
1876    I64ArrayAttr:$subdiv_offsets,
1877    DefaultValuedAttr<I64ArrayAttr, "{}">:$wait_for,
1878    DefaultValuedAttr<StrAttr, "auto">:$communication_hint,
1879    DefaultValuedAttr<F32Attr, "0.0f">:$timeout_seconds
1880  );
1881
1882  let results = (outs
1883    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$data
1884  );
1885
1886  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1887}
1888
1889def TF_CollectiveReduceV2Op : TF_Op<"CollectiveReduceV2", []> {
1890  let summary = [{
1891Mutually reduces multiple tensors of identical type and shape.
1892  }];
1893
1894  let arguments = (ins
1895    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$input,
1896    TF_Int32Tensor:$group_size,
1897    TF_Int32Tensor:$group_key,
1898    TF_Int32Tensor:$instance_key,
1899    Variadic<TF_ResourceTensor>:$ordering_token,
1900
1901    TF_AnyStrAttrOf<["Min", "Max", "Mul", "Add"]>:$merge_op,
1902    TF_AnyStrAttrOf<["Id", "Div"]>:$final_op,
1903    DefaultValuedAttr<StrAttr, "auto">:$communication_hint,
1904    DefaultValuedAttr<F32Attr, "0.0f">:$timeout_seconds
1905  );
1906
1907  let results = (outs
1908    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$data
1909  );
1910
1911  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1912  TF_DerivedOperandSizeAttr Nordering_token = TF_DerivedOperandSizeAttr<4>;
1913}
1914
1915def TF_ComplexOp : TF_Op<"Complex", [NoSideEffect, ResultsBroadcastableShape]> {
1916  let summary = "Converts two real numbers to a complex number.";
1917
1918  let description = [{
1919Given a tensor `real` representing the real part of a complex number, and a
1920tensor `imag` representing the imaginary part of a complex number, this
1921operation returns complex numbers elementwise of the form \\(a + bj\\), where
1922*a* represents the `real` part and *b* represents the `imag` part.
1923
1924The input tensors `real` and `imag` must have the same shape.
1925
1926For example:
1927
1928```
1929# tensor 'real' is [2.25, 3.25]
1930# tensor `imag` is [4.75, 5.75]
1931tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
1932```
1933  }];
1934
1935  let arguments = (ins
1936    TF_F32OrF64Tensor:$real,
1937    TF_F32OrF64Tensor:$imag
1938  );
1939
1940  let results = (outs
1941    TensorOf<[TF_Complex128, TF_Complex64]>:$out
1942  );
1943
1944  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1945  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
1946}
1947
1948def TF_ComplexAbsOp : TF_Op<"ComplexAbs", [NoSideEffect, SameOperandsAndResultShape]> {
1949  let summary = "Computes the complex absolute value of a tensor.";
1950
1951  let description = [{
1952Given a tensor `x` of complex numbers, this operation returns a tensor of type
1953`float` or `double` that is the absolute value of each element in `x`. All
1954elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
1955value is computed as \\( \sqrt{a^2 + b^2}\\).
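
For example, via the public `tf.abs` wrapper, which handles complex inputs:

```python
x = tf.constant([3 + 4j, 0 + 1j], dtype=tf.complex64)
tf.abs(x)  # => [5., 1.]
```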
1956  }];
1957
1958  let arguments = (ins
1959    TensorOf<[TF_Complex128, TF_Complex64]>:$x
1960  );
1961
1962  let results = (outs
1963    TF_F32OrF64Tensor:$y
1964  );
1965
1966  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1967  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
1968}
1969
1970def TF_ConcatOp : TF_Op<"Concat", [NoSideEffect]> {
1971  let summary = "Concatenates tensors along one dimension.";
1972
1973  let arguments = (ins
1974    Arg<TF_Int32Tensor, [{0-D.  The dimension along which to concatenate.  Must be in the
1975range [0, rank(values)).}]>:$concat_dim,
1976    Arg<Variadic<TF_Tensor>, [{The `N` Tensors to concatenate. Their ranks and types must match,
1977and their sizes must match in all dimensions except `concat_dim`.}]>:$values
1978  );
1979
1980  let results = (outs
1981    Res<TF_Tensor, [{A `Tensor` with the concatenation of values stacked along the
1982`concat_dim` dimension.  This tensor's shape matches that of `values` except
1983in `concat_dim` where it has the sum of the sizes.}]>:$output
1984  );
1985
1986  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
1987  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<1>;
1988
1989  let verifier = [{
1990    return Verify(*this);
1991  }];
1992}
1993
1994def TF_ConcatOffsetOp : TF_Op<"ConcatOffset", [NoSideEffect]> {
1995  let summary = "Computes offsets of concat inputs within its output.";
1996
1997  let description = [{
1998For example:
1999
2000```
2001# 'x' is [2, 2, 7]
2002# 'y' is [2, 3, 7]
2003# 'z' is [2, 5, 7]
2004concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
2005```
2006
2007This is typically used by gradient computations for a concat operation.
2008  }];
2009
2010  let arguments = (ins
2011    Arg<TF_Int32Tensor, [{The dimension along which to concatenate.}]>:$concat_dim,
2012    Arg<Variadic<TF_Int32Tensor>, [{The `N` int32 vectors representing shape of tensors being concatenated.}]>:$shape
2013  );
2014
2015  let results = (outs
2016    Res<Variadic<TF_Int32Tensor>, [{The `N` int32 vectors representing the starting offset
2017of input tensors within the concatenated output.}]>:$offset
2018  );
2019
2020  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<1>;
2021
2022  let verifier = [{
2023    return Verify(*this);
2024  }];
2025}
2026
2027def TF_ConcatV2Op : TF_Op<"ConcatV2", [NoSideEffect]> {
2028  let summary = "Concatenates tensors along one dimension.";
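
  let description = [{
A minimal usage sketch via the public `tf.concat` wrapper (hypothetical values):

```python
t1 = tf.constant([[1, 2], [3, 4]])
t2 = tf.constant([[5, 6], [7, 8]])
tf.concat([t1, t2], axis=0)  # => shape [4, 2]
tf.concat([t1, t2], axis=1)  # => shape [2, 4]
```
  }];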
2029
2030  let arguments = (ins
2031    Arg<Variadic<TF_Tensor>, [{List of `N` Tensors to concatenate. Their ranks and types must match,
2032and their sizes must match in all dimensions except `concat_dim`.}]>:$values,
2033    Arg<TF_I32OrI64Tensor, [{0-D.  The dimension along which to concatenate.  Must be in the
2034range [-rank(values), rank(values)).}]>:$axis
2035  );
2036
2037  let results = (outs
2038    Res<TF_Tensor, [{A `Tensor` with the concatenation of values stacked along the
2039`concat_dim` dimension.  This tensor's shape matches that of `values` except
2040in `concat_dim` where it has the sum of the sizes.}]>:$output
2041  );
2042
2043  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2044  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
2045  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
2046
2047  let verifier = [{
2048    return Verify(*this);
2049  }];
2050}
2051
2052def TF_ConfigureDistributedTPUOp : TF_Op<"ConfigureDistributedTPU", []> {
2053  let summary = [{
2054Sets up the centralized structures for a distributed TPU system.
2055  }];
2056
2057  let arguments = (ins
2058    StrAttr:$embedding_config,
2059    StrAttr:$tpu_embedding_config,
2060    DefaultValuedAttr<BoolAttr, "false">:$is_global_init,
2061    DefaultValuedAttr<BoolAttr, "false">:$enable_whole_mesh_compilations,
2062    DefaultValuedAttr<BoolAttr, "true">:$compilation_failure_closes_chips
2063  );
2064
2065  let results = (outs
2066    Res<TF_StrTensor, [{A serialized tensorflow.tpu.TopologyProto that describes the TPU
2067topology.}]>:$topology
2068  );
2069}
2070
2071def TF_ConfigureTPUEmbeddingOp : TF_Op<"ConfigureTPUEmbedding", []> {
2072  let summary = "Sets up TPUEmbedding in a distributed TPU system.";
2073
2074  let arguments = (ins
2075    StrAttr:$config
2076  );
2077
2078  let results = (outs);
2079}
2080
2081def TF_ConjOp : TF_Op<"Conj", [Involution, NoSideEffect, SameOperandsAndResultType]> {
2082  let summary = "Returns the complex conjugate of a complex number.";
2083
2084  let description = [{
2085Given a tensor `input` of complex numbers, this operation returns a tensor of
2086complex numbers that are the complex conjugate of each element in `input`. The
2087complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
2088real part and *b* is the imaginary part.
2089
2090The complex conjugate returned by this operation is of the form \\(a - bj\\).
2091
2092For example:
2093
2094```
2095# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
2096tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
2097```
2098  }];
2099
2100  let arguments = (ins
2101    TensorOf<[TF_Complex128, TF_Complex64, TF_Variant]>:$input
2102  );
2103
2104  let results = (outs
2105    TensorOf<[TF_Complex128, TF_Complex64, TF_Variant]>:$output
2106  );
2107
2108  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2109}
2110
2111def TF_ConjugateTransposeOp : TF_Op<"ConjugateTranspose", [NoSideEffect]> {
2112  let summary = [{
2113Shuffle dimensions of x according to a permutation and conjugate the result.
2114  }];
2115
2116  let description = [{
2117The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
2118  `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
2119  `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
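
A minimal sketch via `tf.transpose` with `conjugate=True` (assumed to lower to
this op):

```python
x = tf.constant([[1 + 1j, 2 + 2j], [3 + 3j, 4 + 4j]])
tf.transpose(x, perm=[1, 0], conjugate=True)
# => [[1 - 1j, 3 - 3j], [2 - 2j, 4 - 4j]]
```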
2120  }];
2121
2122  let arguments = (ins
2123    TF_Tensor:$x,
2124    TF_I32OrI64Tensor:$perm
2125  );
2126
2127  let results = (outs
2128    TF_Tensor:$y
2129  );
2130
2131  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2132  TF_DerivedOperandTypeAttr Tperm = TF_DerivedOperandTypeAttr<1>;
2133}
2134
2135def TF_Conv2DOp : TF_Op<"Conv2D", [NoSideEffect]> {
2136  let summary = [{
2137Computes a 2-D convolution given 4-D `input` and `filter` tensors.
2138  }];
2139
2140  let description = [{
2141Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
2142and a filter / kernel tensor of shape
2143`[filter_height, filter_width, in_channels, out_channels]`, this op
2144performs the following:
2145
21461. Flattens the filter to a 2-D matrix with shape
2147   `[filter_height * filter_width * in_channels, output_channels]`.
21482. Extracts image patches from the input tensor to form a *virtual*
2149   tensor of shape `[batch, out_height, out_width,
2150   filter_height * filter_width * in_channels]`.
21513. For each patch, right-multiplies the filter matrix and the image patch
2152   vector.
2153
2154In detail, with the default NHWC format,
2155
2156    output[b, i, j, k] =
2157        sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
2158                        filter[di, dj, q, k]
2159
2160Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
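
A minimal usage sketch via the public `tf.nn.conv2d` wrapper (hypothetical
shapes):

```python
images = tf.random.normal([1, 28, 28, 3])  # [batch, height, width, channels]
kernel = tf.random.normal([3, 3, 3, 8])    # [fh, fw, in_channels, out_channels]
y = tf.nn.conv2d(images, kernel, strides=[1, 1, 1, 1], padding="SAME")
# y.shape => [1, 28, 28, 8]
```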
2162  }];
2163
2164  let arguments = (ins
2165    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{A 4-D tensor. The dimension order is interpreted according to the value
2166of `data_format`, see below for details.}]>:$input,
2167    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{A 4-D tensor of shape
2168`[filter_height, filter_width, in_channels, out_channels]`}]>:$filter,
2169
2170    I64ArrayAttr:$strides,
2171    DefaultValuedAttr<BoolAttr, "true">:$use_cudnn_on_gpu,
2172    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
2173    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
2174    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
2175    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
2176  );
2177
2178  let results = (outs
2179    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{A 4-D tensor. The dimension order is determined by the value of
2180`data_format`, see below for details.}]>:$output
2181  );
2182
2183  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2184
2185  let verifier = [{
2186    return Verify(*this);
2187  }];
2188}
2189
2190def TF_Conv2DBackpropFilterOp : TF_Op<"Conv2DBackpropFilter", [NoSideEffect]> {
2191  let summary = [{
2192Computes the gradients of convolution with respect to the filter.
2193  }];
2194
2195  let arguments = (ins
2196    Arg<TF_FloatTensor, [{4-D with shape `[batch, in_height, in_width, in_channels]`.}]>:$input,
2197    Arg<TF_Int32Tensor, [{An integer vector representing the tensor shape of `filter`,
2198where `filter` is a 4-D
2199`[filter_height, filter_width, in_channels, out_channels]` tensor.}]>:$filter_sizes,
2200    Arg<TF_FloatTensor, [{4-D with shape `[batch, out_height, out_width, out_channels]`.
2201Gradients w.r.t. the output of the convolution.}]>:$out_backprop,
2202
2203    I64ArrayAttr:$strides,
2204    DefaultValuedAttr<BoolAttr, "true">:$use_cudnn_on_gpu,
2205    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
2206    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
2207    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
2208    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
2209  );
2210
2211  let results = (outs
2212    Res<TF_FloatTensor, [{4-D with shape
2213`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
2214the `filter` input of the convolution.}]>:$output
2215  );
2216
2217  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2218}
2219
2220def TF_Conv2DBackpropInputOp : TF_Op<"Conv2DBackpropInput", [NoSideEffect]> {
2221  let summary = [{
2222Computes the gradients of convolution with respect to the input.
2223  }];
2224
2225  let arguments = (ins
2226    Arg<TF_Int32Tensor, [{An integer vector representing the shape of `input`,
2227where `input` is a 4-D `[batch, height, width, channels]` tensor.}]>:$input_sizes,
2228    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{4-D with shape
2229`[filter_height, filter_width, in_channels, out_channels]`.}]>:$filter,
2230    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{4-D with shape `[batch, out_height, out_width, out_channels]`.
2231Gradients w.r.t. the output of the convolution.}]>:$out_backprop,
2232
2233    I64ArrayAttr:$strides,
2234    DefaultValuedAttr<BoolAttr, "true">:$use_cudnn_on_gpu,
2235    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
2236    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
2237    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
2238    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
2239  );
2240
2241  let results = (outs
2242    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient
2243w.r.t. the input of the convolution.}]>:$output
2244  );
2245
2246  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
2247
2248  let verifier = [{
2249    return Verify(*this);
2250  }];
2251}
2252
2253def TF_Conv3DOp : TF_Op<"Conv3D", [NoSideEffect]> {
2254  let summary = [{
2255Computes a 3-D convolution given 5-D `input` and `filter` tensors.
2256  }];
2257
2258  let description = [{
2259In signal processing, cross-correlation is a measure of similarity of
2260two waveforms as a function of a time-lag applied to one of them. This
2261is also known as a sliding dot product or sliding inner-product.
2262
2263Our Conv3D implements a form of cross-correlation.
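
A minimal usage sketch via the public `tf.nn.conv3d` wrapper (hypothetical
shapes):

```python
video = tf.random.normal([1, 8, 28, 28, 3])  # [batch, depth, height, width, channels]
kernel = tf.random.normal([3, 3, 3, 3, 8])
y = tf.nn.conv3d(video, kernel, strides=[1, 1, 1, 1, 1], padding="SAME")
# y.shape => [1, 8, 28, 28, 8]
```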
2264  }];
2265
2266  let arguments = (ins
2267    Arg<TF_FloatTensor, [{Shape `[batch, in_depth, in_height, in_width, in_channels]`.}]>:$input,
2268    Arg<TF_FloatTensor, [{Shape `[filter_depth, filter_height, filter_width, in_channels,
2269out_channels]`. `in_channels` must match between `input` and `filter`.}]>:$filter,
2270
2271    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
2272    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
2273    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format,
2274    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1, 1}">:$dilations
2275  );
2276
2277  let results = (outs
2278    TF_FloatTensor:$output
2279  );
2280
2281  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2282
2283  let verifier = [{
2284    return Verify(*this);
2285  }];
2286}
2287
2288def TF_Conv3DBackpropFilterV2Op : TF_Op<"Conv3DBackpropFilterV2", [NoSideEffect]> {
2289  let summary = [{
2290Computes the gradients of 3-D convolution with respect to the filter.
2291  }];
2292
2293  let arguments = (ins
2294    Arg<TF_FloatTensor, [{Shape `[batch, depth, rows, cols, in_channels]`.}]>:$input,
2295    Arg<TF_Int32Tensor, [{An integer vector representing the tensor shape of `filter`,
2296where `filter` is a 5-D
2297`[filter_depth, filter_height, filter_width, in_channels, out_channels]`
2298tensor.}]>:$filter_sizes,
2299    Arg<TF_FloatTensor, [{Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
2300out_channels]`.}]>:$out_backprop,
2301
2302    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
2303    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
2304    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format,
2305    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1, 1}">:$dilations
2306  );
2307
2308  let results = (outs
2309    TF_FloatTensor:$output
2310  );
2311
2312  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2313}
2314
2315def TF_Conv3DBackpropInputV2Op : TF_Op<"Conv3DBackpropInputV2", [NoSideEffect]> {
2316  let summary = [{
2317Computes the gradients of 3-D convolution with respect to the input.
2318  }];
2319
2320  let arguments = (ins
2321    Arg<TF_I32OrI64Tensor, [{An integer vector representing the tensor shape of `input`,
2322where `input` is a 5-D
2323`[batch, depth, rows, cols, in_channels]` tensor.}]>:$input_sizes,
2324    Arg<TF_FloatTensor, [{Shape `[depth, rows, cols, in_channels, out_channels]`.
2325`in_channels` must match between `input` and `filter`.}]>:$filter,
2326    Arg<TF_FloatTensor, [{Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
2327out_channels]`.}]>:$out_backprop,
2328
2329    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
2330    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
2331    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format,
2332    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1, 1}">:$dilations
2333  );
2334
2335  let results = (outs
2336    TF_FloatTensor:$output
2337  );
2338
2339  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
2340  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
2341}
2342
2343def TF_CosOp : TF_Op<"Cos", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
2344  let summary = "Computes cos of x element-wise.";
2345
2346  let description = [{
Given an input tensor, this function computes the cosine of every
  element in the tensor. The input may be any real number; the output
  lies in `[-1, 1]`, and non-finite inputs (such as `inf`) yield `nan`.
2351
2352  ```python
2353  x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
2354  tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan]
2355  ```
2356  }];
2357
2358  let arguments = (ins
2359    TF_FpOrComplexTensor:$x
2360  );
2361
2362  let results = (outs
2363    TF_FpOrComplexTensor:$y
2364  );
2365
2366  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2367}
2368
2369def TF_CoshOp : TF_Op<"Cosh", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
2370  let summary = "Computes hyperbolic cosine of x element-wise.";
2371
2372  let description = [{
Given an input tensor, this function computes the hyperbolic cosine of every
2374  element in the tensor. Input range is `[-inf, inf]` and output range
2375  is `[1, inf]`.
2376
2377  ```python
2378  x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
2379  tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf]
2380  ```
2381  }];
2382
2383  let arguments = (ins
2384    TF_FpOrComplexTensor:$x
2385  );
2386
2387  let results = (outs
2388    TF_FpOrComplexTensor:$y
2389  );
2390
2391  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2392}
2393
2394def TF_CrossOp : TF_Op<"Cross", [NoSideEffect, SameOperandsAndResultType]> {
2395  let summary = "Compute the pairwise cross product.";
2396
2397  let description = [{
2398`a` and `b` must be the same shape; they can either be simple 3-element vectors,
2399or any shape where the innermost dimension is 3. In the latter case, each pair
2400of corresponding 3-element vectors is cross-multiplied independently.
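
A minimal sketch via the public `tf.linalg.cross` wrapper:

```python
a = tf.constant([1., 0., 0.])
b = tf.constant([0., 1., 0.])
tf.linalg.cross(a, b)  # => [0., 0., 1.]
```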
2401  }];
2402
2403  let arguments = (ins
2404    Arg<TF_IntOrFpTensor, [{A tensor containing 3-element vectors.}]>:$a,
2405    Arg<TF_IntOrFpTensor, [{Another tensor, of same type and shape as `a`.}]>:$b
2406  );
2407
2408  let results = (outs
2409    Res<TF_IntOrFpTensor, [{Pairwise cross product of the vectors in `a` and `b`.}]>:$product
2410  );
2411
2412  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2413}
2414
2415def TF_CrossReplicaSumOp : TF_Op<"CrossReplicaSum", [NoSideEffect, TF_AllTypesMatch<["input", "output"]>, TF_NoConstantFold]> {
2416  let summary = "An Op to sum inputs across replicated TPU instances.";
2417
2418  let description = [{
2419Each instance supplies its own input.
2420
2421For example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`.
2422Passing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0,
2423and `B, D, F, H` as group 1. Thus we get the outputs:
2424`[A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]`.
2425  }];
2426
2427  let arguments = (ins
2428    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Int32, TF_Uint32]>, [{The local input to the sum.}]>:$input,
2429    Arg<TF_Int32Tensor, [{An int32 tensor with shape
2430[num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
2431replica ids in the ith subgroup.}]>:$group_assignment
2432  );
2433
2434  let results = (outs
2435    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Int32, TF_Uint32]>, [{The sum of all the distributed inputs.}]>:$output
2436  );
2437
2438  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2439}
2440
2441def TF_CumprodOp : TF_Op<"Cumprod", [NoSideEffect, TF_AllTypesMatch<["x", "out"]>]> {
2442  let summary = [{
2443Compute the cumulative product of the tensor `x` along `axis`.
2444  }];
2445
2446  let description = [{
2447By default, this op performs an inclusive cumprod, which means that the first
2448element of the input is identical to the first element of the output:
2449
2450```python
2451tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
2452```
2453
2454By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
2455performed instead:
2456
2457```python
2458tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
2459```
2460
2461By setting the `reverse` kwarg to `True`, the cumprod is performed in the
2462opposite direction:
2463
2464```python
2465tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
2466```
2467
2468This is more efficient than using separate `tf.reverse` ops.
2469
2470The `reverse` and `exclusive` kwargs can also be combined:
2471
2472```python
2473tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
2474```
2475  }];
2476
2477  let arguments = (ins
2478    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A `Tensor`. Must be one of the following types: `float32`, `float64`,
2479`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
2480`complex128`, `qint8`, `quint8`, `qint32`, `half`.}]>:$x,
2481    Arg<TF_I32OrI64Tensor, [{A `Tensor` of type `int32` (default: 0). Must be in the range
2482`[-rank(x), rank(x))`.}]>:$axis,
2483
2484    DefaultValuedAttr<BoolAttr, "false">:$exclusive,
2485    DefaultValuedAttr<BoolAttr, "false">:$reverse
2486  );
2487
2488  let results = (outs
2489    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$out
2490  );
2491
2492  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2493  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
2494
2495  let verifier = [{
2496    return Verify(*this);
2497  }];
2498}
2499
2500def TF_CumsumOp : TF_Op<"Cumsum", [NoSideEffect, TF_AllTypesMatch<["x", "out"]>]> {
2501  let summary = "Compute the cumulative sum of the tensor `x` along `axis`.";
2502
2503  let description = [{
2504By default, this op performs an inclusive cumsum, which means that the first
2505element of the input is identical to the first element of the output:
2506
2507```python
2508tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
2509```
2510
2511By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
2512performed instead:
2513
2514```python
2515tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
2516```
2517
2518By setting the `reverse` kwarg to `True`, the cumsum is performed in the
2519opposite direction:
2520
2521```python
2522tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
2523```
2524
2525This is more efficient than using separate `tf.reverse` ops.
2526
2527The `reverse` and `exclusive` kwargs can also be combined:
2528
2529```python
2530tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
2531```
2532  }];
2533
2534  let arguments = (ins
2535    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A `Tensor`. Must be one of the following types: `float32`, `float64`,
2536`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
2537`complex128`, `qint8`, `quint8`, `qint32`, `half`.}]>:$x,
2538    Arg<TF_I32OrI64Tensor, [{A `Tensor` of type `int32` (default: 0). Must be in the range
2539`[-rank(x), rank(x))`.}]>:$axis,
2540
2541    DefaultValuedAttr<BoolAttr, "false">:$exclusive,
2542    DefaultValuedAttr<BoolAttr, "false">:$reverse
2543  );
2544
2545  let results = (outs
2546    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$out
2547  );
2548
2549  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2550  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
2551
2552  let verifier = [{
2553    return Verify(*this);
2554  }];
2555}
2556
2557def TF_DataFormatDimMapOp : TF_Op<"DataFormatDimMap", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Returns the dimension index in the destination data format given the one in
the source data format.
  }];

  let description = [{
Maps each element of `x`, a dimension index in `src_format`, to the
corresponding dimension index in `dst_format`. For example, the H axis is
index 1 in NHWC and index 2 in NCHW.
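
A minimal sketch via `tf.raw_ops.DataFormatDimMap`:

```python
tf.raw_ops.DataFormatDimMap(x=tf.constant(1), src_format="NHWC", dst_format="NCHW")
# => 2  (the H axis is index 1 in NHWC and index 2 in NCHW)
```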
2564  }];
2565
2566  let arguments = (ins
2567    Arg<TF_I32OrI64Tensor, [{A Tensor with each element as a dimension index in source data format.
2568Must be in the range [-4, 4).}]>:$x,
2569
2570    DefaultValuedAttr<StrAttr, "NHWC">:$src_format,
2571    DefaultValuedAttr<StrAttr, "NCHW">:$dst_format
2572  );
2573
2574  let results = (outs
2575    Res<TF_I32OrI64Tensor, [{A Tensor with each element as a dimension index in destination data format.}]>:$y
2576  );
2577
2578  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2579}
2580
2581def TF_DataFormatVecPermuteOp : TF_Op<"DataFormatVecPermute", [NoSideEffect, SameOperandsAndResultType]> {
2582  let summary = "Permute input tensor from `src_format` to `dst_format`.";
2583
2584  let description = [{
2585Input tensor must be a vector of size 4, or a 4x2 tensor.
2586
2587For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and inputs:
2588```
2589[1, 2, 3, 4]
2590```
2591and
2592```
2593[[1, 2, 3, 4],
2594 [5, 6, 7, 8]]
2595```
the outputs will be (respectively):
2597```
2598[1, 4, 2, 3]
2599```
2600and
2601```
2602[[1, 4, 2, 3],
2603 [5, 8, 6, 7]]
2604```
2605  }];
2606
2607  let arguments = (ins
2608    Arg<TF_I32OrI64Tensor, [{Vector of size 4 or Tensor of shape (4, 2) in source data format.}]>:$x,
2609
2610    DefaultValuedAttr<StrAttr, "NHWC">:$src_format,
2611    DefaultValuedAttr<StrAttr, "NCHW">:$dst_format
2612  );
2613
2614  let results = (outs
2615    Res<TF_I32OrI64Tensor, [{Vector of size 4 or Tensor of shape (4, 2) in destination data format.}]>:$y
2616  );
2617
2618  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2619
2620  let verifier = [{ return Verify(*this); }];
2621}
2622
2623def TF_DebugIdentityV2Op : TF_Op<"DebugIdentityV2", []> {
2624  let summary = "Debug Identity V2 Op.";
2625
2626  let description = [{
2627Provides an identity mapping from input to output, while writing the content of
2628the input tensor by calling DebugEventsWriter.
2629
The semantics of the input tensor depend on tensor_debug_mode. In typical
usage, the input tensor comes directly from the user computation only when
tensor_debug_mode is FULL_TENSOR (see protobuf/debug_event.proto for a
list of all the possible values of tensor_debug_mode). For the other debug modes,
2634the input tensor should be produced by an additional op or subgraph that
2635computes summary information about one or more tensors.
2636  }];
2637
2638  let arguments = (ins
2639    Arg<TF_Tensor, [{Input tensor, non-Reference type}]>:$input,
2640
2641    StrAttr:$tfdbg_context_id,
2642    StrAttr:$op_name,
2643    DefaultValuedAttr<I64Attr, "-1">:$output_slot,
2644    DefaultValuedAttr<I64Attr, "-1">:$tensor_debug_mode,
2645    DefaultValuedAttr<StrArrayAttr, "{}">:$debug_urls,
2646    DefaultValuedAttr<I64Attr, "1000">:$circular_buffer_size,
2647    StrAttr:$tfdbg_run_id
2648  );
2649
2650  let results = (outs
2651    TF_Tensor:$output
2652  );
2653
2654  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2655}
2656
2657def TF_DecodeAndCropJpegOp : TF_Op<"DecodeAndCropJpeg", [NoSideEffect]> {
2658  let summary = "Decode and Crop a JPEG-encoded image to a uint8 tensor.";
2659
2660  let description = [{
2661The attr `channels` indicates the desired number of color channels for the
2662decoded image.
2663
2664Accepted values are:
2665
*   0: Use the number of channels in the JPEG-encoded image.
*   1: Output a grayscale image.
*   3: Output an RGB image.
2669
2670If needed, the JPEG-encoded image is transformed to match the requested number
2671of color channels.
2672
2673The attr `ratio` allows downscaling the image by an integer factor during
2674decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
downscaling the image later.

It is equivalent to a combination of decode and crop, but is much faster
because only part of the JPEG image is decoded.
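
A minimal sketch via the public `tf.io.decode_and_crop_jpeg` wrapper
(hypothetical file path):

```python
contents = tf.io.read_file("image.jpg")
crop_window = [10, 20, 100, 200]  # [crop_y, crop_x, crop_height, crop_width]
image = tf.io.decode_and_crop_jpeg(contents, crop_window, channels=3)
# image: uint8 tensor of shape [100, 200, 3]
```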
2680  }];
2681
2682  let arguments = (ins
2683    Arg<TF_StrTensor, [{0-D.  The JPEG-encoded image.}]>:$contents,
2684    Arg<TF_Int32Tensor, [{1-D.  The crop window: [crop_y, crop_x, crop_height, crop_width].}]>:$crop_window,
2685
2686    DefaultValuedAttr<I64Attr, "0">:$channels,
2687    DefaultValuedAttr<I64Attr, "1">:$ratio,
2688    DefaultValuedAttr<BoolAttr, "true">:$fancy_upscaling,
2689    DefaultValuedAttr<BoolAttr, "false">:$try_recover_truncated,
2690    DefaultValuedAttr<F32Attr, "1.0f">:$acceptable_fraction,
2691    StrAttr:$dct_method
2692  );
2693
2694  let results = (outs
    Res<TF_Uint8Tensor, [{3-D with shape `[height, width, channels]`.}]>:$image
2696  );
2697}
2698
2699def TF_DecodeGifOp : TF_Op<"DecodeGif", [NoSideEffect]> {
2700  let summary = "Decode the frame(s) of a GIF-encoded image to a uint8 tensor.";
2701
2702  let description = [{
2703GIF images with frame or transparency compression are not supported.
On Linux and macOS systems, convert animated GIFs from compressed to
2705uncompressed by running:
2706
2707    convert $src.gif -coalesce $dst.gif
2708
2709This op also supports decoding JPEGs and PNGs, though it is cleaner to use
2710`tf.io.decode_image`.
2711  }];
2712
2713  let arguments = (ins
2714    Arg<TF_StrTensor, [{0-D.  The GIF-encoded image.}]>:$contents
2715  );
2716
2717  let results = (outs
2718    Res<TF_Uint8Tensor, [{4-D with shape `[num_frames, height, width, 3]`. RGB channel order.}]>:$image
2719  );
2720}
2721
2722def TF_DecodeJpegOp : TF_Op<"DecodeJpeg", [NoSideEffect]> {
2723  let summary = "Decode a JPEG-encoded image to a uint8 tensor.";
2724
2725  let description = [{
2726The attr `channels` indicates the desired number of color channels for the
2727decoded image.
2728
2729Accepted values are:
2730
*   0: Use the number of channels in the JPEG-encoded image.
*   1: Output a grayscale image.
*   3: Output an RGB image.
2734
2735If needed, the JPEG-encoded image is transformed to match the requested number
2736of color channels.
2737
2738The attr `ratio` allows downscaling the image by an integer factor during
2739decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
downscaling the image later.

This op also supports decoding PNGs and non-animated GIFs since the interface is
2744the same, though it is cleaner to use `tf.io.decode_image`.
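
A minimal sketch via the public `tf.io.decode_jpeg` wrapper (hypothetical file
path):

```python
contents = tf.io.read_file("photo.jpg")
image = tf.io.decode_jpeg(contents, channels=3)  # uint8, shape [height, width, 3]
```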
2745  }];
2746
2747  let arguments = (ins
2748    Arg<TF_StrTensor, [{0-D.  The JPEG-encoded image.}]>:$contents,
2749
2750    DefaultValuedAttr<I64Attr, "0">:$channels,
2751    DefaultValuedAttr<I64Attr, "1">:$ratio,
2752    DefaultValuedAttr<BoolAttr, "true">:$fancy_upscaling,
2753    DefaultValuedAttr<BoolAttr, "false">:$try_recover_truncated,
2754    DefaultValuedAttr<F32Attr, "1.0f">:$acceptable_fraction,
2755    StrAttr:$dct_method
2756  );
2757
2758  let results = (outs
    Res<TF_Uint8Tensor, [{3-D with shape `[height, width, channels]`.}]>:$image
2760  );
2761}
2762
2763def TF_DecodePngOp : TF_Op<"DecodePng", [NoSideEffect]> {
2764  let summary = "Decode a PNG-encoded image to a uint8 or uint16 tensor.";
2765
2766  let description = [{
2767The attr `channels` indicates the desired number of color channels for the
2768decoded image.
2769
2770Accepted values are:
2771
*   0: Use the number of channels in the PNG-encoded image.
*   1: Output a grayscale image.
*   3: Output an RGB image.
*   4: Output an RGBA image.
2776
2777If needed, the PNG-encoded image is transformed to match the requested number
2778of color channels.
2779
2780This op also supports decoding JPEGs and non-animated GIFs since the interface
2781is the same, though it is cleaner to use `tf.io.decode_image`.
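
A minimal sketch via the public `tf.io.decode_png` wrapper (hypothetical file
path):

```python
contents = tf.io.read_file("icon.png")
image = tf.io.decode_png(contents, channels=4)  # uint8, shape [height, width, 4]
```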
2782  }];
2783
2784  let arguments = (ins
2785    Arg<TF_StrTensor, [{0-D.  The PNG-encoded image.}]>:$contents,
2786
2787    DefaultValuedAttr<I64Attr, "0">:$channels
2788  );
2789
2790  let results = (outs
2791    Res<TensorOf<[TF_Uint16, TF_Uint8]>, [{3-D with shape `[height, width, channels]`.}]>:$image
2792  );
2793
2794  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
2795}
2796
2797def TF_DeleteIteratorOp : TF_Op<"DeleteIterator", []> {
2798  let summary = "A container for an iterator resource.";
2799
2800  let arguments = (ins
2801    Arg<TF_ResourceTensor, [{A handle to the iterator to delete.}], [TF_DatasetIteratorFree]>:$handle,
2802    Arg<TF_VariantTensor, [{A variant deleter.}]>:$deleter
2803  );
2804
2805  let results = (outs);
2806}
2807
2808def TF_DeleteMemoryCacheOp : TF_Op<"DeleteMemoryCache", []> {
2809  let summary = "";
2810
2811  let arguments = (ins
2812    Arg<TF_ResourceTensor, "", [TF_DatasetMemoryCacheFree]>:$handle,
2813    TF_VariantTensor:$deleter
2814  );
2815
2816  let results = (outs);
2817}
2818
2819def TF_DeleteMultiDeviceIteratorOp : TF_Op<"DeleteMultiDeviceIterator", []> {
2820  let summary = "A container for an iterator resource.";
2821
2822  let arguments = (ins
2823    Arg<TF_ResourceTensor, [{A handle to the multi device iterator to delete.}], [TF_DatasetIteratorFree]>:$multi_device_iterator,
2824    Arg<Variadic<TF_ResourceTensor>, [{A list of iterator handles (unused). This is added so that automatic control dependencies get added during function tracing that ensure this op runs after all the dependent iterators are deleted.}], [TF_DatasetIteratorRead]>:$iterators,
2825    Arg<TF_VariantTensor, [{A variant deleter.}]>:$deleter
2826  );
2827
2828  let results = (outs);
2829
2830  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<1>;
2831}
2832
2833def TF_DeleteRandomSeedGeneratorOp : TF_Op<"DeleteRandomSeedGenerator", []> {
2834  let summary = "";
2835
2836  let arguments = (ins
2837    Arg<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorFree]>:$handle,
2838    TF_VariantTensor:$deleter
2839  );
2840
2841  let results = (outs);
2842}
2843
2844def TF_DeleteSeedGeneratorOp : TF_Op<"DeleteSeedGenerator", []> {
2845  let summary = "";
2846
2847  let arguments = (ins
2848    Arg<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorFree]>:$handle,
2849    TF_VariantTensor:$deleter
2850  );
2851
2852  let results = (outs);
2853}
2854
2855def TF_DepthToSpaceOp : TF_Op<"DepthToSpace", [NoSideEffect]> {
2856  let summary = "DepthToSpace for tensors of type T.";
2857
2858  let description = [{
2859Rearranges data from depth into blocks of spatial data.
2860This is the reverse transformation of SpaceToDepth. More specifically,
2861this op outputs a copy of the input tensor where values from the `depth`
2862dimension are moved in spatial blocks to the `height` and `width` dimensions.
2863The attr `block_size` indicates the input block size and how the data is moved.
2864
2865  * Chunks of data of size `block_size * block_size` from depth are rearranged
2866    into non-overlapping blocks of size `block_size x block_size`
  * The width of the output tensor is `input_width * block_size`, whereas the
    height is `input_height * block_size`.
2869  * The Y, X coordinates within each block of the output image are determined
2870    by the high order component of the input channel index.
2871  * The depth of the input tensor must be divisible by
2872    `block_size * block_size`.
2873
2874The `data_format` attr specifies the layout of the input and output tensors
2875with the following options:
2876  "NHWC": `[ batch, height, width, channels ]`
2877  "NCHW": `[ batch, channels, height, width ]`
2878  "NCHW_VECT_C":
2879      `qint8 [ batch, channels / 4, height, width, 4 ]`
2880
2881It is useful to consider the operation as transforming a 6-D Tensor.
2882e.g. for data_format = NHWC,
2883     Each element in the input tensor can be specified via 6 coordinates,
2884     ordered by decreasing memory layout significance as:
2885     n,iY,iX,bY,bX,oC  (where n=batch index, iX, iY means X or Y coordinates
2886                        within the input image, bX, bY means coordinates
2887                        within the output block, oC means output channels).
2888     The output would be the input transposed to the following layout:
2889     n,iY,bY,iX,bX,oC
2890
2891This operation is useful for resizing the activations between convolutions
2892(but keeping all data), e.g. instead of pooling. It is also useful for training
2893purely convolutional models.
2894
2895For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
2896block_size = 2:
2897
2898```
2899x = [[[[1, 2, 3, 4]]]]
2900
2901```
2902
2903This operation will output a tensor of shape `[1, 2, 2, 1]`:
2904
2905```
2906   [[[[1], [2]],
2907     [[3], [4]]]]
2908```
2909
2910Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
2911the corresponding output will have 2x2 elements and will have a depth of
29121 channel (1 = `4 / (block_size * block_size)`).
2913The output element shape is `[2, 2, 1]`.
2914
2915For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
2916
2917```
2918x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
2919```
2920
2921This operation, for block size of 2, will return the following tensor of shape
2922`[1, 2, 2, 3]`
2923
2924```
2925   [[[[1, 2, 3], [4, 5, 6]],
2926     [[7, 8, 9], [10, 11, 12]]]]
2927
2928```
2929
Similarly, for the following input of shape `[1, 2, 2, 4]`, and a block size of 2:
2931
2932```
2933x =  [[[[1, 2, 3, 4],
2934       [5, 6, 7, 8]],
2935      [[9, 10, 11, 12],
2936       [13, 14, 15, 16]]]]
2937```
2938
the operator will return the following tensor of shape `[1, 4, 4, 1]`:
2940
2941```
2942x = [[[ [1],   [2],  [5],  [6]],
2943      [ [3],   [4],  [7],  [8]],
2944      [ [9],  [10], [13],  [14]],
2945      [ [11], [12], [15],  [16]]]]
2946
2947```
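
A runnable sketch of the first example via the public `tf.nn.depth_to_space`
wrapper:

```python
x = tf.constant([[[[1, 2, 3, 4]]]])    # shape [1, 1, 1, 4]
tf.nn.depth_to_space(x, block_size=2)  # => shape [1, 2, 2, 1]
```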
2948  }];
2949
2950  let arguments = (ins
2951    TF_Tensor:$input,
2952
2953    Confined<I64Attr, [IntMinValue<2>]>:$block_size,
2954    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NCHW_VECT_C"]>, "NHWC">:$data_format
2955  );
2956
2957  let results = (outs
2958    TF_Tensor:$output
2959  );
2960
2961  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2962}
2963
2964def TF_DepthwiseConv2dNativeOp : TF_Op<"DepthwiseConv2dNative", [NoSideEffect]> {
2965  let summary = [{
2966Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
2967  }];
2968
2969  let description = [{
2970Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
2971and a filter / kernel tensor of shape
2972`[filter_height, filter_width, in_channels, channel_multiplier]`, containing
2973`in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
2974a different filter to each input channel (expanding from 1 channel to
2975`channel_multiplier` channels for each), then concatenates the results
2976together. Thus, the output has `in_channels * channel_multiplier` channels.
2977
2978```
2979for k in 0..in_channels-1
2980  for q in 0..channel_multiplier-1
2981    output[b, i, j, k * channel_multiplier + q] =
2982      sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
2983                        filter[di, dj, k, q]
2984```
2985
2986Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
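
A minimal usage sketch via the public `tf.nn.depthwise_conv2d` wrapper
(hypothetical shapes):

```python
images = tf.random.normal([1, 28, 28, 3])
kernel = tf.random.normal([3, 3, 3, 2])  # channel_multiplier = 2
y = tf.nn.depthwise_conv2d(images, kernel, strides=[1, 1, 1, 1], padding="SAME")
# y.shape => [1, 28, 28, 6]  (in_channels * channel_multiplier)
```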
2988  }];
2989
2990  let arguments = (ins
2991    TF_FloatTensor:$input,
2992    TF_FloatTensor:$filter,
2993
2994    I64ArrayAttr:$strides,
2995    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
2996    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
2997    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
2998    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
2999  );
3000
3001  let results = (outs
3002    TF_FloatTensor:$output
3003  );
3004
3005  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3006}
3007
3008def TF_DepthwiseConv2dNativeBackpropFilterOp : TF_Op<"DepthwiseConv2dNativeBackpropFilter", [NoSideEffect]> {
3009  let summary = [{
3010Computes the gradients of depthwise convolution with respect to the filter.
3011  }];
3012
3013  let arguments = (ins
3014    Arg<TF_FloatTensor, [{4-D with shape based on `data_format`.  For example, if
3015`data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
3016in_width, in_channels]` tensor.}]>:$input,
3017    Arg<TF_Int32Tensor, [{An integer vector representing the tensor shape of `filter`,
3018where `filter` is a 4-D
3019`[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.}]>:$filter_sizes,
3020    Arg<TF_FloatTensor, [{4-D with shape  based on `data_format`.
3021For example, if `data_format` is 'NHWC' then
3022out_backprop shape is `[batch, out_height, out_width, out_channels]`.
3023Gradients w.r.t. the output of the convolution.}]>:$out_backprop,
3024
3025    I64ArrayAttr:$strides,
3026    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
3027    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
3028    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
3029    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
3030  );
3031
3032  let results = (outs
3033    Res<TF_FloatTensor, [{4-D with shape
3034`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
3035the `filter` input of the convolution.}]>:$output
3036  );
3037
3038  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3039}
3040
3041def TF_DepthwiseConv2dNativeBackpropInputOp : TF_Op<"DepthwiseConv2dNativeBackpropInput", [NoSideEffect]> {
3042  let summary = [{
3043Computes the gradients of depthwise convolution with respect to the input.
3044  }];
3045
3046  let arguments = (ins
3047    Arg<TF_Int32Tensor, [{An integer vector representing the shape of `input`, based
3048on `data_format`.  For example, if `data_format` is 'NHWC' then
3049 `input` is a 4-D `[batch, height, width, channels]` tensor.}]>:$input_sizes,
3050    Arg<TF_FloatTensor, [{4-D with shape
3051`[filter_height, filter_width, in_channels, depthwise_multiplier]`.}]>:$filter,
    Arg<TF_FloatTensor, [{4-D with shape based on `data_format`.
3053For example, if `data_format` is 'NHWC' then
3054out_backprop shape is `[batch, out_height, out_width, out_channels]`.
3055Gradients w.r.t. the output of the convolution.}]>:$out_backprop,
3056
3057    I64ArrayAttr:$strides,
3058    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
3059    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
3060    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
3061    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
3062  );
3063
3064  let results = (outs
3065    Res<TF_FloatTensor, [{4-D with shape according to `data_format`.  For example, if
3066`data_format` is 'NHWC', output shape is `[batch, in_height,
3067in_width, in_channels]`.  Gradient w.r.t. the input of the
3068convolution.}]>:$output
3069  );
3070
3071  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
3072}
3073
3074def TF_DequantizeOp : TF_Op<"Dequantize", [NoSideEffect]> {
3075  let summary = [{
3076Dequantize the 'input' tensor into a float or bfloat16 Tensor.
3077  }];
3078
3079  let description = [{
3080[min_range, max_range] are scalar floats that specify the range for
3081the output. The 'mode' attribute controls exactly which calculations are
3082used to convert the float values to their quantized equivalents.
3083
3084In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
3085
3086```
if T == qint8: in[i] += (range(T) + 1) / 2.0
out[i] = min_range + (in[i] * (max_range - min_range) / range(T))
3089```
where `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
3091
3092*MIN_COMBINED Mode Example*
3093
3094If the input comes from a QuantizedRelu6, the output type is
3095quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
30960-6.  The min_range and max_range values are therefore 0.0 and 6.0.
3097Dequantize on quint8 will take each value, cast to float, and multiply
3098by 6 / 255.
Note that if the quantized type is qint8, the operation will additionally add
128 to each value before casting.
3101
3102If the mode is 'MIN_FIRST', then this approach is used:
3103
3104```c++
3105num_discrete_values = 1 << (# of bits in T)
3106range_adjust = num_discrete_values / (num_discrete_values - 1)
3107range = (range_max - range_min) * range_adjust
3108range_scale = range / num_discrete_values
result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
3111```
3112
If the mode is `SCALED`, dequantization is performed by multiplying each
input value by a `scaling_factor`, so an input of 0 always maps to 0.0.
3115
The `scaling_factor` is determined from `min_range`, `max_range`, and
3117`narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3}`
3118and `QuantizeV2`, using the following algorithm:
3119
3120```c++
3121
3122  const int min_expected_T = std::numeric_limits<T>::min() +
3123    (narrow_range ? 1 : 0);
3124  const int max_expected_T = std::numeric_limits<T>::max();
3126
3127  const float scale_factor =
3128    (std::numeric_limits<T>::min() == 0) ? (max_range / max_expected_T)
3129                                         : std::max(min_range / min_expected_T,
3130                                                    max_range / max_expected_T);
3131```
3132  }];
3133
3134  let arguments = (ins
3135    TensorOf<[TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8]>:$input,
3136    Arg<TF_Float32Tensor, [{The minimum scalar value possibly produced for the input.}]>:$min_range,
3137    Arg<TF_Float32Tensor, [{The maximum scalar value possibly produced for the input.}]>:$max_range,
3138
3139    DefaultValuedAttr<TF_AnyStrAttrOf<["MIN_COMBINED", "MIN_FIRST", "SCALED"]>, "MIN_COMBINED">:$mode,
3140    DefaultValuedAttr<BoolAttr, "false">:$narrow_range,
3141    DefaultValuedAttr<I64Attr, "-1">:$axis
3142  );
3143
3144  let results = (outs
3145    TensorOf<[TF_Bfloat16, TF_Float32]>:$output
3146  );
3147
3148  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3149  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
3150}
3151
3152def TF_DeserializeIteratorOp : TF_Op<"DeserializeIterator", []> {
3153  let summary = [{
3154Converts the given variant tensor to an iterator and stores it in the given resource.
3155  }];
3156
3157  let arguments = (ins
3158    Arg<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorWrite]>:$resource_handle,
3159    Arg<TF_VariantTensor, [{A variant tensor storing the state of the iterator contained in the
3160resource.}]>:$serialized
3161  );
3162
3163  let results = (outs);
3164}
3165
3166def TF_DeserializeSparseOp : TF_Op<"DeserializeSparse", [NoSideEffect]> {
3167  let summary = "Deserialize `SparseTensor` objects.";
3168
3169  let description = [{
3170The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
3171the last dimension stores serialized `SparseTensor` objects and the other N
3172dimensions (N >= 0) correspond to a batch. The ranks of the original
3173`SparseTensor` objects must all match. When the final `SparseTensor` is
3174created, its rank is the rank of the incoming `SparseTensor` objects plus N;
3175the sparse tensors have been concatenated along new dimensions, one for each
3176batch.
3177
3178The output `SparseTensor` object's shape values for the original dimensions
3179are the max across the input `SparseTensor` objects' shape values for the
3180corresponding dimensions. The new dimensions match the size of the batch.
3181
3182The input `SparseTensor` objects' indices are assumed ordered in
3183standard lexicographic order.  If this is not the case, after this
3184step run `SparseReorder` to restore index ordering.
3185
3186For example, if the serialized input is a `[2 x 3]` matrix representing two
3187original `SparseTensor` objects:
3188
3189    index = [ 0]
3190            [10]
3191            [20]
3192    values = [1, 2, 3]
3193    shape = [50]
3194
3195and
3196
3197    index = [ 2]
3198            [10]
3199    values = [4, 5]
3200    shape = [30]
3201
3202then the final deserialized `SparseTensor` will be:
3203
3204    index = [0  0]
3205            [0 10]
3206            [0 20]
3207            [1  2]
3208            [1 10]
3209    values = [1, 2, 3, 4, 5]
3210    shape = [2 50]
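
A minimal sketch of the example above via the public serialization helpers
(assuming TF2 eager execution; `tf.raw_ops.DeserializeSparse` is the direct
binding for this op):

```python
import tensorflow as tf

a = tf.sparse.SparseTensor(indices=[[0], [10], [20]],
                           values=[1, 2, 3], dense_shape=[50])
b = tf.sparse.SparseTensor(indices=[[2], [10]],
                           values=[4, 5], dense_shape=[30])
# Stack the two serialized 3-vectors into a [2, 3] string matrix.
serialized = tf.stack([tf.io.serialize_sparse(a),
                       tf.io.serialize_sparse(b)])
indices, values, shape = tf.raw_ops.DeserializeSparse(
    serialized_sparse=serialized, dtype=tf.int32)
# shape ==> [2, 50]
```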
3211  }];
3212
3213  let arguments = (ins
3214    Arg<TensorOf<[TF_Str, TF_Variant]>, [{The serialized `SparseTensor` objects. The last dimension
3215must have 3 columns.}]>:$serialized_sparse
3216  );
3217
3218  let results = (outs
3219    TF_Int64Tensor:$sparse_indices,
3220    TF_Tensor:$sparse_values,
3221    TF_Int64Tensor:$sparse_shape
3222  );
3223
3224  TF_DerivedOperandTypeAttr Tserialized = TF_DerivedOperandTypeAttr<0>;
3225  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<1>;
3226}
3227
3228def TF_DestroyResourceOp : TF_Op<"DestroyResourceOp", []> {
3229  let summary = "Deletes the resource specified by the handle.";
3230
3231  let description = [{
3232All subsequent operations using the resource will result in a NotFound
3233error status.
3234  }];
3235
3236  let arguments = (ins
3237    Arg<TF_ResourceTensor, [{handle to the resource to delete.}]>:$resource,
3238
3239    DefaultValuedAttr<BoolAttr, "true">:$ignore_lookup_error
3240  );
3241
3242  let results = (outs);
3243}
3244
3245def TF_DeviceIndexOp : TF_Op<"DeviceIndex", [NoSideEffect]> {
  let summary = "Return the index of the device on which the op runs.";
3247
3248  let description = [{
Given a list of device names, this operation returns the index of the device
on which this op runs. The length of the list is returned in either of two
cases:
(1) the device does not appear in the given device list, or
(2) the op is being compiled for XLA.
3253  }];
3254
3255  let arguments = (ins
3256    StrArrayAttr:$device_names
3257  );
3258
3259  let results = (outs
3260    TF_Int32Tensor:$index
3261  );
3262}
3263
3264def TF_DiagOp : TF_Op<"Diag", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
  let summary = "Returns a diagonal tensor with given diagonal values.";
3266
3267  let description = [{
3268Given a `diagonal`, this operation returns a tensor with the `diagonal` and
3269everything else padded with zeros. The diagonal is computed as follows:
3270
3271Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
3272rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
3273
3274`output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
3275
3276For example:
3277
3278```
3279# 'diagonal' is [1, 2, 3, 4]
3280tf.diag(diagonal) ==> [[1, 0, 0, 0]
3281                       [0, 2, 0, 0]
3282                       [0, 0, 3, 0]
3283                       [0, 0, 0, 4]]
3284```
3285  }];
3286
3287  let arguments = (ins
3288    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Rank k tensor where k is at most 1.}]>:$diagonal
3289  );
3290
3291  let results = (outs
3292    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$output
3293  );
3294
3295  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3296}
3297
3298def TF_DiagPartOp : TF_Op<"DiagPart", [NoSideEffect]> {
3299  let summary = "Returns the diagonal part of the tensor.";
3300
3301  let description = [{
3302This operation returns a tensor with the `diagonal` part
3303of the `input`. The `diagonal` part is computed as follows:
3304
3305Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
3306tensor of rank `k` with dimensions `[D1,..., Dk]` where:
3307
3308`diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
3309
3310For example:
3311
3312```
3313# 'input' is [[1, 0, 0, 0]
3314              [0, 2, 0, 0]
3315              [0, 0, 3, 0]
3316              [0, 0, 0, 4]]
3317
3318tf.diag_part(input) ==> [1, 2, 3, 4]
3319```
3320  }];
3321
3322  let arguments = (ins
3323    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Rank k tensor where k is even and not zero.}]>:$input
3324  );
3325
3326  let results = (outs
3327    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The extracted diagonal.}]>:$diagonal
3328  );
3329
3330  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3331}
3332
3333def TF_DigammaOp : TF_Op<"Digamma", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Computes Psi, the derivative of Lgamma (the log of the absolute value of
`Gamma(x)`), element-wise.
  }];
3337
  let description = [{
Psi is the derivative of `Lgamma(x)`, the log of the absolute value of
`Gamma(x)`.
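
An illustrative sketch via the public `tf.math.digamma` wrapper (values are
approximate):

```python
import tensorflow as tf

x = tf.constant([1.0, 2.0])
tf.math.digamma(x)  # ==> approximately [-0.5772157, 0.4227843]
```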
3340  }];
3341
3342  let arguments = (ins
3343    TF_FloatTensor:$x
3344  );
3345
3346  let results = (outs
3347    TF_FloatTensor:$y
3348  );
3349
3350  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3351}
3352
3353def TF_DivOp : TF_Op<"Div", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
3354               WithBroadcastableBinOpBuilder {
3355  let summary = "Returns x / y element-wise.";
3356
3357  let description = [{
3358*NOTE*: `Div` supports broadcasting. More about broadcasting
3359[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
3360  }];
3361
3362  let arguments = (ins
3363    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
3364    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
3365  );
3366
3367  let results = (outs
3368    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
3369  );
3370
3371  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3372}
3373
3374def TF_DummyMemoryCacheOp : TF_Op<"DummyMemoryCache", []> {
3375  let summary = "";
3376
3377  let arguments = (ins);
3378
3379  let results = (outs
3380    Res<TF_ResourceTensor, "", [TF_DatasetMemoryCacheAlloc]>:$handle
3381  );
3382}
3383
3384def TF_DummySeedGeneratorOp : TF_Op<"DummySeedGenerator", []> {
3385  let summary = "";
3386
3387  let arguments = (ins);
3388
3389  let results = (outs
3390    Res<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorAlloc]>:$handle
3391  );
3392}
3393
3394def TF_DynamicStitchOp : TF_Op<"DynamicStitch", [NoSideEffect, SameVariadicOperandSize]> {
3395  let summary = [{
3396Interleave the values from the `data` tensors into a single tensor.
3397  }];
3398
3399  let description = [{
3400Builds a merged tensor such that
3401
3402```python
3403    merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
3404```
3405
3406For example, if each `indices[m]` is scalar or vector, we have
3407
3408```python
3409    # Scalar indices:
3410    merged[indices[m], ...] = data[m][...]
3411
3412    # Vector indices:
3413    merged[indices[m][i], ...] = data[m][i, ...]
3414```
3415
3416Each `data[i].shape` must start with the corresponding `indices[i].shape`,
3417and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
3418must have `data[i].shape = indices[i].shape + constant`.  In terms of this
3419`constant`, the output shape is
3420
    merged.shape = [max(indices) + 1] + constant
3422
3423Values are merged in order, so if an index appears in both `indices[m][i]` and
3424`indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
3425merged result. If you do not need this guarantee, ParallelDynamicStitch might
3426perform better on some devices.
3427
3428For example:
3429
3430```python
3431    indices[0] = 6
3432    indices[1] = [4, 1]
3433    indices[2] = [[5, 2], [0, 3]]
3434    data[0] = [61, 62]
3435    data[1] = [[41, 42], [11, 12]]
3436    data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
3437    merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
3438              [51, 52], [61, 62]]
3439```
3440
This method can be used to merge partitions created by `dynamic_partition`,
as illustrated in the following example:
3443
3444```python
    # Apply a function (increment x_i) to elements that satisfy a condition
    # (x_i != -1 in this example).
    x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
    condition_mask = tf.not_equal(x, tf.constant(-1.))
    partitioned_data = tf.dynamic_partition(
        x, tf.cast(condition_mask, tf.int32), 2)
    partitioned_data[1] = partitioned_data[1] + 1.0
    condition_indices = tf.dynamic_partition(
        tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
    x = tf.dynamic_stitch(condition_indices, partitioned_data)
    # Here x = [1.1, -1., 6.2, 5.3, -1., 8.4]; the -1. values remain
    # unchanged.
3457```
3458
3459<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
3460<img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
3461</div>
3462  }];
3463
3464  let arguments = (ins
3465    Variadic<TF_Int32Tensor>:$indices,
3466    Variadic<TF_Tensor>:$data
3467  );
3468
3469  let results = (outs
3470    TF_Tensor:$merged
3471  );
3472
3473  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
3474  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
3475
3476  let verifier = [{
3477    return Verify(*this);
3478  }];
3479}
3480
3481def TF_EinsumOp : TF_Op<"Einsum", [NoSideEffect]> {
3482  let summary = [{
3483Tensor contraction according to Einstein summation convention.
3484  }];
3485
3486  let description = [{
3487Implements generalized Tensor contraction and reduction. Each input Tensor must
3488have a corresponding input subscript appearing in the comma-separated left-hand
3489side of the equation. The right-hand side of the equation consists of the
3490output subscript. The input subscripts and the output subscript should consist
3491of zero or more named axis labels and at most one ellipsis (`...`).
3492
3493The named axis labels may be any single character other than those having
3494special meaning, namely `,.->`. The behavior of this Op is undefined if it
3495receives an ill-formatted equation; since the validation is done at
3496graph-building time, we omit format validation checks at runtime.
3497
3498Note: This Op is *not* intended to be called by the user; instead users should
3499call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`.
3500
3501Operations are applied to the input(s) according to the following rules:
3502
3503 (a) Generalized Diagonals: For input dimensions corresponding to axis labels
3504     appearing more than once in the same input subscript, we take the
3505     generalized (`k`-dimensional) diagonal.
3506     For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the
3507     generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`,
3508     `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`.
3509
3510 (b) Reduction: Axes corresponding to labels appearing only in one input
3511     subscript but not in the output subscript are summed over prior to Tensor
3512     contraction.
3513     For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are
3514     the reduction axis labels.
3515
3516 (c) Batch Dimensions: Axes corresponding to labels appearing in each of the
3517     input subscripts and also in the output subscript make up the batch
3518     dimensions in Tensor contraction. Unnamed axis labels corresponding to
3519     ellipsis (`...`) also correspond to batch dimensions.
3520     For example, for the equation denoting batch matrix multiplication,
3521     `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension.
3522
3523 (d) Contraction: In case of binary einsum, axes corresponding to labels
3524     appearing in two different inputs (and not in the output) are contracted
3525     against each other.
3526     Considering the batch matrix multiplication equation again
3527     (`bij,bjk->bik`), the contracted axis label is `j`.
3528
3529 (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis
     labels, the opposite operation of (a) is applied. For example, given the
     equation `i->iii` and input shape `[3]`, the output of shape `[3, 3, 3]`
     is all zeros, except for the (generalized) diagonal, which is populated
     with values from the input.
3534     Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is
3535     provided to enable computing the symbolic gradient of `tf.einsum`.
3536
3537The output subscripts must contain only labels appearing in at least one of the
3538input subscripts. Furthermore, all dimensions mapping to the same axis label
3539must be equal.
3540
3541Any of the input and output subscripts may contain at most a single ellipsis
(`...`). These ellipses are mapped against dimensions not corresponding to any
3543named axis label. If two inputs contain ellipsis, then they are broadcasted
3544according to standard NumPy broadcasting
3545[rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
3546
3547The broadcasted dimensions are placed in the corresponding location of the
3548ellipsis in the output subscript. If the broadcasted dimensions are non-empty
3549and the output subscripts do not contain ellipsis, then an InvalidArgument error
3550is raised.
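
For instance, the batch matrix multiplication case above via the public
`tf.einsum` API, which lowers to this op (a sketch assuming TF2 eager
execution):

```python
import tensorflow as tf

a = tf.random.normal([2, 3, 4])
b = tf.random.normal([2, 4, 5])
c = tf.einsum("bij,bjk->bik", a, b)  # batch matrix multiplication
print(c.shape)  # (2, 3, 5)
```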
3551
3552@compatibility(numpy)
3553Similar to [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html).
3554
3555Comparison with `numpy.einsum`:
3556
3557 * This Op only supports unary and binary forms of `numpy.einsum`.
 * This Op does not support the implicit form (i.e., equations without `->`).
3559 * This Op also supports repeated indices in the output subscript, which is not
3560   supported by `numpy.einsum`.
3561@end_compatibility
3562  }];
3563
3564  let arguments = (ins
3565    Arg<Variadic<TF_Tensor>, [{List of 1 or 2 Tensors.}]>:$inputs,
3566
3567    StrAttr:$equation
3568  );
3569
3570  let results = (outs
3571    Res<TF_Tensor, [{Output Tensor with shape depending upon `equation`.}]>:$output
3572  );
3573
3574  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3575  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
3576
3577  let verifier = [{
3578    return Verify(*this);
3579  }];
3580}
3581
3582def TF_EluOp : TF_Op<"Elu", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
3583  let summary = [{
Computes exponential linear: `exp(features) - 1` if `features < 0`, `features` otherwise.
3585  }];
3586
3587  let description = [{
3588See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
3589](http://arxiv.org/abs/1511.07289)
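
A small illustrative sketch via the public `tf.nn.elu` wrapper (values are
approximate):

```python
import tensorflow as tf

tf.nn.elu(tf.constant([-1.0, 0.0, 2.0]))
# ==> approximately [-0.6321, 0.0, 2.0]
```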
3590  }];
3591
3592  let arguments = (ins
3593    TF_FloatTensor:$features
3594  );
3595
3596  let results = (outs
3597    TF_FloatTensor:$activations
3598  );
3599
3600  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3601}
3602
3603def TF_EluGradOp : TF_Op<"EluGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
3604  let summary = [{
3605Computes gradients for the exponential linear (Elu) operation.
3606  }];
3607
3608  let arguments = (ins
3609    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding Elu operation.}]>:$gradients,
3610    Arg<TF_FloatTensor, [{The outputs of the corresponding Elu operation.}]>:$outputs
3611  );
3612
3613  let results = (outs
3614    Res<TF_FloatTensor, [{The gradients: `gradients * (outputs + 1)` if outputs < 0,
3615`gradients` otherwise.}]>:$backprops
3616  );
3617
3618  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3619}
3620
3621def TF_EmptyOp : TF_Op<"Empty", []> {
3622  let summary = [{
Creates a tensor with the given shape.
  }];

  let description = [{
This operation creates a tensor of `shape` and `dtype`.
  }];
3627
3628  let arguments = (ins
3629    Arg<TF_Int32Tensor, [{1-D. Represents the shape of the output tensor.}]>:$shape,
3630
3631    DefaultValuedAttr<BoolAttr, "false">:$init
3632  );
3633
3634  let results = (outs
3635    Res<TF_Tensor, [{A `Tensor` of type `T`.}]>:$output
3636  );
3637
3638  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
3639}
3640
3641def TF_EnqueueTPUEmbeddingIntegerBatchOp : TF_Op<"EnqueueTPUEmbeddingIntegerBatch", [TF_TPUEmbeddingSideEffect]> {
3642  let summary = [{
3643An op that enqueues a list of input batch tensors to TPUEmbedding.
3644  }];
3645
3646  let arguments = (ins
3647    Arg<Variadic<TF_Int32Tensor>, [{A list of 1D tensors, one for each embedding table, containing the
3648indices into the tables.}]>:$batch,
3649    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
3650TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
3651'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
3652in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,
3653
3654    DefaultValuedAttr<I64Attr, "-1">:$device_ordinal
3655  );
3656
3657  let results = (outs);
3658
3659  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
3660}
3661
3662def TF_EnqueueTPUEmbeddingRaggedTensorBatchOp : TF_Op<"EnqueueTPUEmbeddingRaggedTensorBatch", [SameVariadicOperandSize, TF_TPUEmbeddingSideEffect]> {
3663  let summary = "Eases the porting of code that uses tf.nn.embedding_lookup().";
3664
3665  let description = [{
sample_splits[i], embedding_indices[i] and aggregation_weights[i] correspond
to the ith feature. table_ids[i] indicates which embedding table to use for
the ith feature.
3669
3670The tensors at corresponding positions in two of the input lists,
3671embedding_indices and aggregation_weights, must have the same shape, i.e. rank 1
3672with dim_size() equal to the total number of lookups into the table described by
3673the corresponding feature.
3674  }];
3675
3676  let arguments = (ins
3677    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors specifying the break points for splitting
3678embedding_indices and aggregation_weights into rows.
3679It corresponds to ids.row_splits in embedding_lookup(), when ids is a
3680RaggedTensor.}]>:$sample_splits,
3681    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors, indices into the embedding tables.
3682It corresponds to ids.values in embedding_lookup(), when ids is a RaggedTensor.}]>:$embedding_indices,
3683    Arg<Variadic<TF_F32OrF64Tensor>, [{A list of rank 1 Tensors containing per training example
3684aggregation weights. It corresponds to the values field of a RaggedTensor
3685with the same row_splits as ids in embedding_lookup(), when ids is a
3686RaggedTensor.}]>:$aggregation_weights,
3687    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
3688TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
3689'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
3690in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,
3691
3692    DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
3693    DefaultValuedAttr<StrArrayAttr, "{}">:$combiners,
3694    I64ArrayAttr:$table_ids,
3695    DefaultValuedAttr<I64ArrayAttr, "{}">:$max_sequence_lengths,
3696    DefaultValuedAttr<I64ArrayAttr, "{}">:$num_features
3697  );
3698
3699  let results = (outs);
3700
3701  TF_DerivedOperandTypeAttr T1 = TF_DerivedOperandTypeAttr<0>;
3702  TF_DerivedOperandTypeAttr T2 = TF_DerivedOperandTypeAttr<1>;
3703  TF_DerivedOperandTypeAttr T3 = TF_DerivedOperandTypeAttr<2>;
3704  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
3705}
3706
3707def TF_EnqueueTPUEmbeddingSparseBatchOp : TF_Op<"EnqueueTPUEmbeddingSparseBatch", [SameVariadicOperandSize, TF_TPUEmbeddingSideEffect]> {
3708  let summary = [{
3709An op that enqueues TPUEmbedding input indices from a SparseTensor.
3710  }];
3711
3712  let description = [{
3713This Op eases the porting of code that uses embedding_lookup_sparse(),
3714although some Python preprocessing of the SparseTensor arguments to
3715embedding_lookup_sparse() is required to produce the arguments to this Op,
3716since only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training
3717step.
3718
3719The tensors at corresponding positions in the three input lists
3720must have the same shape, i.e. rank 1 with dim_size() equal to the total
3721number of lookups into the table described by the corresponding table_id.
3722  }];
3723
3724  let arguments = (ins
3725    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors specifying the training example and
3726feature to which the corresponding embedding_indices and aggregation_weights
3727values belong. sample_indices[i] must equal b * nf + f, where nf is the
3728number of features from the corresponding table, f is in [0, nf), and
3729b is in [0, batch size).}]>:$sample_indices,
3730    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors, indices into the embedding tables.}]>:$embedding_indices,
3731    Arg<Variadic<TF_F32OrF64Tensor>, [{A list of rank 1 Tensors containing per sample -- i.e. per
3732(training example, feature) -- aggregation weights.}]>:$aggregation_weights,
3733    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
3734TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
3735'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
3736in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,
3737
3738    DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
3739    DefaultValuedAttr<StrArrayAttr, "{}">:$combiners
3740  );
3741
3742  let results = (outs);
3743
3744  TF_DerivedOperandTypeAttr T1 = TF_DerivedOperandTypeAttr<0>;
3745  TF_DerivedOperandTypeAttr T2 = TF_DerivedOperandTypeAttr<1>;
3746  TF_DerivedOperandTypeAttr T3 = TF_DerivedOperandTypeAttr<2>;
3747  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
3748}
3749
3750def TF_EnqueueTPUEmbeddingSparseTensorBatchOp : TF_Op<"EnqueueTPUEmbeddingSparseTensorBatch", [SameVariadicOperandSize, TF_TPUEmbeddingSideEffect]> {
3751  let summary = [{
3752Eases the porting of code that uses tf.nn.embedding_lookup_sparse().
3753  }];
3754
3755  let description = [{
sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond
to the ith feature. table_ids[i] indicates which embedding table to use for
the ith feature.
3759
3760The tensors at corresponding positions in the three input lists (sample_indices,
3761embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1
3762with dim_size() equal to the total number of lookups into the table described by
3763the corresponding feature.
3764  }];
3765
3766  let arguments = (ins
3767    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors specifying the training example to
3768which the corresponding embedding_indices and aggregation_weights values
3769belong. It corresponds to sp_ids.indices[:,0] in  embedding_lookup_sparse().}]>:$sample_indices,
3770    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors, indices into the embedding tables.
3771It corresponds to sp_ids.values in embedding_lookup_sparse().}]>:$embedding_indices,
3772    Arg<Variadic<TF_F32OrF64Tensor>, [{A list of rank 1 Tensors containing per training example
3773aggregation weights. It corresponds to sp_weights.values in
3774embedding_lookup_sparse().}]>:$aggregation_weights,
3775    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
3776TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
3777'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
3778in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,
3779
3780    DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
3781    DefaultValuedAttr<StrArrayAttr, "{}">:$combiners,
3782    I64ArrayAttr:$table_ids,
3783    DefaultValuedAttr<I64ArrayAttr, "{}">:$max_sequence_lengths,
3784    DefaultValuedAttr<I64ArrayAttr, "{}">:$num_features
3785  );
3786
3787  let results = (outs);
3788
3789  TF_DerivedOperandTypeAttr T1 = TF_DerivedOperandTypeAttr<0>;
3790  TF_DerivedOperandTypeAttr T2 = TF_DerivedOperandTypeAttr<1>;
3791  TF_DerivedOperandTypeAttr T3 = TF_DerivedOperandTypeAttr<2>;
3792  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
3793}
3794
3795def TF_EnsureShapeOp : TF_Op<"EnsureShape", [NoSideEffect]> {
3796  let summary = "Ensures that the tensor's shape matches the expected shape.";
3797
3798  let description = [{
3799Raises an error if the input tensor's shape does not match the specified shape.
3800Returns the input tensor otherwise.
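
For example, a sketch via the public `tf.ensure_shape` wrapper (assuming TF2
eager execution):

```python
import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
y = tf.ensure_shape(x, (2, 2))   # passes and returns x unchanged
# tf.ensure_shape(x, (3, None)) would raise an error instead.
```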
3801  }];
3802
3803  let arguments = (ins
    Arg<TF_Tensor, [{A tensor whose shape is to be validated.}]>:$input,
3805
3806    TF_ShapeAttr:$shape
3807  );
3808
3809  let results = (outs
3810    Res<TF_Tensor, [{A tensor with the same shape and contents as the input tensor or value.}]>:$output
3811  );
3812
3813  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3814}
3815
3816def TF_EqualOp : TF_Op<"Equal", [Commutative, NoSideEffect]> {
3817  let summary = "Returns the truth value of (x == y) element-wise.";
3818
3819  let description = [{
3820*NOTE*: `Equal` supports broadcasting. More about broadcasting
3821[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
3822
3823```python
3824x = tf.constant([2, 4])
3825y = tf.constant(2)
3826tf.math.equal(x, y) ==> array([True, False])
3827
3828x = tf.constant([2, 4])
3829y = tf.constant([2, 4])
3830tf.math.equal(x, y) ==> array([True,  True])
3831```
3832  }];
3833
3834  let arguments = (ins
3835    TF_Tensor:$x,
3836    TF_Tensor:$y,
3837
3838    DefaultValuedAttr<BoolAttr, "true">:$incompatible_shape_error
3839  );
3840
3841  let results = (outs
3842    TF_BoolTensor:$z
3843  );
3844
3845  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3846
3847  let builders = [
3848    OpBuilder<(ins "Value":$x, "Value":$y,
3849      "BoolAttr":$incompatible_shape_error)>
3850  ];
3851
3852  let verifier = [{
3853    return Verify(*this);
3854  }];
3855}
3856
3857def TF_ErfOp : TF_Op<"Erf", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
3858  let summary = "Computes the Gauss error function of `x` element-wise.";
3859
3860  let arguments = (ins
3861    TF_FloatTensor:$x
3862  );
3863
3864  let results = (outs
3865    TF_FloatTensor:$y
3866  );
3867
3868  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3869}
3870
3871def TF_ErfcOp : TF_Op<"Erfc", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
3872  let summary = [{
3873Computes the complementary error function of `x` element-wise.
3874  }];
3875
3876  let arguments = (ins
3877    TF_FloatTensor:$x
3878  );
3879
3880  let results = (outs
3881    TF_FloatTensor:$y
3882  );
3883
3884  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3885}
3886
3887def TF_ErfinvOp : TF_Op<"Erfinv", [NoSideEffect]> {
3888  let summary = "";
3889
3890  let arguments = (ins
3891    TF_FloatTensor:$x
3892  );
3893
3894  let results = (outs
3895    TF_FloatTensor:$y
3896  );
3897
3898  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3899}
3900
3901def TF_ExpOp : TF_Op<"Exp", [NoSideEffect, SameOperandsAndResultType]> {
3902  let summary = [{
3903Computes exponential of x element-wise.  \\(y = e^x\\).
3904  }];
3905
3906  let description = [{
3907This function computes the exponential of every element in the input tensor.
3908  i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor.
3909  `e` denotes Euler's number and is approximately equal to 2.718281.
3910  Output is positive for any real input.
3911
3912  ```python
3913  x = tf.constant(2.0)
3914  tf.math.exp(x) ==> 7.389056
3915
3916  x = tf.constant([2.0, 8.0])
3917  tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32)
3918  ```
3919
3920  For complex numbers, the exponential value is calculated as follows:
3921
3922  ```
3923  e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y)
3924  ```
3925
  For example, consider the complex number 1+1j:
  e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586 + 0.8414709848j)
3928
3929  ```python
3930  x = tf.constant(1 + 1j)
3931  tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j
3932  ```
3933  }];
3934
3935  let arguments = (ins
3936    TF_FpOrComplexTensor:$x
3937  );
3938
3939  let results = (outs
3940    TF_FpOrComplexTensor:$y
3941  );
3942
3943  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3944}
3945
3946def TF_ExpandDimsOp : TF_Op<"ExpandDims", [NoSideEffect]> {
3947  let summary = "Inserts a dimension of 1 into a tensor's shape.";
3948
3949  let description = [{
3950Given a tensor `input`, this operation inserts a dimension of 1 at the
3951dimension index `axis` of `input`'s shape. The dimension index `axis` starts at
3952zero; if you specify a negative number for `axis` it is counted backward from
3953the end.
3954
3955This operation is useful if you want to add a batch dimension to a single
3956element. For example, if you have a single image of shape `[height, width,
3957channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
3958which will make the shape `[1, height, width, channels]`.
3959
3960Other examples:
3961
3962```
3963# 't' is a tensor of shape [2]
3964shape(expand_dims(t, 0)) ==> [1, 2]
3965shape(expand_dims(t, 1)) ==> [2, 1]
3966shape(expand_dims(t, -1)) ==> [2, 1]
3967
3968# 't2' is a tensor of shape [2, 3, 5]
3969shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
3970shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
3971shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
3972```
3973
3974This operation requires that:
3975
3976`-1-input.dims() <= dim <= input.dims()`
3977
3978This operation is related to `squeeze()`, which removes dimensions of
3979size 1.
3980  }];
3981
3982  let arguments = (ins
3983    TF_Tensor:$input,
3984    Arg<TF_I32OrI64Tensor, [{0-D (scalar). Specifies the dimension index at which to
3985expand the shape of `input`. Must be in the range
3986`[-rank(input) - 1, rank(input)]`.}]>:$dim
3987  );
3988
3989  let results = (outs
3990    Res<TF_Tensor, [{Contains the same data as `input`, but its shape has an additional
3991dimension of size 1 added.}]>:$output
3992  );
3993
3994  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3995  TF_DerivedOperandTypeAttr Tdim = TF_DerivedOperandTypeAttr<1>;
3996
3997  let builders = [
3998    OpBuilder<(ins "Value":$condition, "Value":$dim)>
3999  ];
4000}
4001
4002def TF_Expm1Op : TF_Op<"Expm1", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
4003  let summary = "Computes `exp(x) - 1` element-wise.";
4004
4005  let description = [{
4006i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor.
4007  `e` denotes Euler's number and is approximately equal to 2.718281.
4008
4009  ```python
4010  x = tf.constant(2.0)
4011  tf.math.expm1(x) ==> 6.389056
4012
4013  x = tf.constant([2.0, 8.0])
4014  tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32)
4015
4016  x = tf.constant(1 + 1j)
4017  tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j)
4018  ```
4019  }];
4020
4021  let arguments = (ins
4022    TF_FpOrComplexTensor:$x
4023  );
4024
4025  let results = (outs
4026    TF_FpOrComplexTensor:$y
4027  );
4028
4029  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4030}
4031
4032def TF_ExtractImagePatchesOp : TF_Op<"ExtractImagePatches", [NoSideEffect]> {
4033  let summary = [{
4034Extract `patches` from `images` and put them in the "depth" output dimension.
4035  }];
4036
4037  let arguments = (ins
4038    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.}]>:$images,
4039
4040    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksizes,
4041    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
4042    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$rates,
4043    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding
4044  );
4045
4046  let results = (outs
4047    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
4048ksize_cols * depth]` containing image patches with size
4049`ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
4050`out_rows` and `out_cols` are the dimensions of the output patches.}]>:$patches
4051  );
4052
4053  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4054}
4055
4056def TF_FFTOp : TF_Op<"FFT", [NoSideEffect]> {
4057  let summary = "Fast Fourier transform.";
4058
4059  let description = [{
4060Computes the 1-dimensional discrete Fourier transform over the inner-most
4061dimension of `input`.
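
An illustrative sketch via `tf.signal.fft` (the FFT of a unit impulse is all
ones):

```python
import tensorflow as tf

x = tf.constant([1.0 + 0j, 0j, 0j, 0j])
tf.signal.fft(x)  # ==> [1+0j, 1+0j, 1+0j, 1+0j]
```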
4062  }];
4063
4064  let arguments = (ins
4065    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
4066  );
4067
4068  let results = (outs
4069    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most
4070  dimension of `input` is replaced with its 1D Fourier transform.
4071
4072@compatibility(numpy)
4073Equivalent to np.fft.fft
4074@end_compatibility}]>:$output
4075  );
4076
4077  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
4078}
4079
4080def TF_FFT2DOp : TF_Op<"FFT2D", [NoSideEffect]> {
4081  let summary = "2D fast Fourier transform.";
4082
4083  let description = [{
4084Computes the 2-dimensional discrete Fourier transform over the inner-most
40852 dimensions of `input`.
4086  }];
4087
4088  let arguments = (ins
4089    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
4090  );
4091
4092  let results = (outs
4093    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 2
4094  dimensions of `input` are replaced with their 2D Fourier transform.
4095
4096@compatibility(numpy)
4097Equivalent to np.fft.fft2
4098@end_compatibility}]>:$output
4099  );
4100
4101  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
4102}
4103
4104def TF_FFT3DOp : TF_Op<"FFT3D", [NoSideEffect]> {
4105  let summary = "3D fast Fourier transform.";
4106
4107  let description = [{
4108Computes the 3-dimensional discrete Fourier transform over the inner-most 3
4109dimensions of `input`.
4110  }];
4111
4112  let arguments = (ins
4113    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
4114  );
4115
4116  let results = (outs
4117    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 3
4118  dimensions of `input` are replaced with their 3D Fourier transform.
4119
4120@compatibility(numpy)
4121Equivalent to np.fft.fftn with 3 dimensions.
4122@end_compatibility}]>:$output
4123  );
4124
4125  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
4126}
4127
4128def TF_FakeParamOp : TF_Op<"FakeParam", [NoSideEffect, TF_NoConstantFold]> {
4129  let summary = [{
4130  This op is used as a placeholder in If branch functions. It doesn't provide a
  valid output when run, so it must either be removed (e.g. replaced with a
4132  function input) or guaranteed not to be used (e.g. if mirroring an
4133  intermediate output needed for the gradient computation of the other branch).
4134  }];
4135
4136  let arguments = (ins
4137    TF_ShapeAttr:$shape
4138  );
4139
4140  let results = (outs
4141    Res<TF_Tensor, [{    \"Fake\" output value. This should not be consumed by another op.}]>:$output
4142  );
4143
4144  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
4145}
4146
4147def TF_FakeQuantWithMinMaxArgsOp : TF_Op<"FakeQuantWithMinMaxArgs", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
4148  let summary = [{
Fake-quantize the 'inputs' tensor of type float to an 'outputs' tensor of the same type.
4150  }];
4151
4152  let description = [{
4153Attributes
4154
4155*   `[min; max]` define the clamping range for the `inputs` data.
4156*   `inputs` values are quantized into the quantization range (
4157`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
4158when it is true) and then de-quantized and output as floats in `[min; max]`
4159interval.
4160*   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
4161
4162Before quantization, `min` and `max` values are adjusted with the following
4163logic.
4164It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
4165the behavior can be unexpected:
4166
4167*   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
4168*   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
4169*   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
4170`min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
4171
4172Quantization is called fake since the output is still in floating point.
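
As an illustrative sketch via the public
`tf.quantization.fake_quant_with_min_max_args` wrapper:

```python
import tensorflow as tf

x = tf.constant([-7.0, -3.15, 0.0, 3.15, 10.0])
y = tf.quantization.fake_quant_with_min_max_args(x, min=-6.0, max=6.0)
# Inputs are clamped to [-6, 6] and snapped to the nearest step of the
# 8-bit quantization grid; the result is still float32.
```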
4173  }];
4174
4175  let arguments = (ins
4176    TF_Float32Tensor:$inputs,
4177
4178    DefaultValuedAttr<F32Attr, "-6.0f">:$min,
4179    DefaultValuedAttr<F32Attr, "6.0f">:$max,
4180    DefaultValuedAttr<I64Attr, "8">:$num_bits,
4181    DefaultValuedAttr<BoolAttr, "false">:$narrow_range
4182  );
4183
4184  let results = (outs
4185    TF_Float32Tensor:$outputs
4186  );
4187
4188  let verifier = [{
4189    return Verify(*this);
4190  }];
4191}
4192
4193def TF_FakeQuantWithMinMaxArgsGradientOp : TF_Op<"FakeQuantWithMinMaxArgsGradient", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
4194  let summary = "Compute gradients for a FakeQuantWithMinMaxArgs operation.";
4195
4196  let arguments = (ins
4197    Arg<TF_Float32Tensor, [{Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.}]>:$gradients,
4198    Arg<TF_Float32Tensor, [{Values passed as inputs to the FakeQuantWithMinMaxArgs operation.}]>:$inputs,
4199
4200    DefaultValuedAttr<F32Attr, "-6.0f">:$min,
4201    DefaultValuedAttr<F32Attr, "6.0f">:$max,
4202    DefaultValuedAttr<I64Attr, "8">:$num_bits,
4203    DefaultValuedAttr<BoolAttr, "false">:$narrow_range
4204  );
4205
4206  let results = (outs
4207    Res<TF_Float32Tensor, [{Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
4208`gradients * (inputs >= min && inputs <= max)`.}]>:$backprops
4209  );
4210}
4211
4212def TF_FakeQuantWithMinMaxVarsOp : TF_Op<"FakeQuantWithMinMaxVars", [NoSideEffect]> {
4213  let summary = [{
Fake-quantize the 'inputs' tensor of type float via global float scalars `min` and `max`.
4215  }];
4216
4217  let description = [{
Fake-quantize the `inputs` tensor of type float via global float scalars
`min` and `max` to an `outputs` tensor of the same shape as `inputs`.
4220
4221Attributes
4222
4223*   `[min; max]` define the clamping range for the `inputs` data.
4224*   `inputs` values are quantized into the quantization range (
4225`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
4226when it is true) and then de-quantized and output as floats in `[min; max]`
4227interval.
4228*   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
4229
4230Before quantization, `min` and `max` values are adjusted with the following
4231logic.
4232It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
4233the behavior can be unexpected:
4234
4235*   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
4236*   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
4237*   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
4238`min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
4239
4240This operation has a gradient and thus allows for training `min` and `max`
4241values.
4242  }];
4243
4244  let arguments = (ins
4245    TF_Float32Tensor:$inputs,
4246    TF_Float32Tensor:$min,
4247    TF_Float32Tensor:$max,
4248
4249    DefaultValuedAttr<I64Attr, "8">:$num_bits,
4250    DefaultValuedAttr<BoolAttr, "false">:$narrow_range
4251  );
4252
4253  let results = (outs
4254    TF_Float32Tensor:$outputs
4255  );
4256
4257  let verifier = [{
4258    return Verify(*this);
4259  }];
4260}
4261
4262def TF_FakeQuantWithMinMaxVarsGradientOp : TF_Op<"FakeQuantWithMinMaxVarsGradient", [NoSideEffect]> {
4263  let summary = "Compute gradients for a FakeQuantWithMinMaxVars operation.";
4264
4265  let arguments = (ins
4266    Arg<TF_Float32Tensor, [{Backpropagated gradients above the FakeQuantWithMinMaxVars operation.}]>:$gradients,
4267    Arg<TF_Float32Tensor, [{Values passed as inputs to the FakeQuantWithMinMaxVars operation.
4268min, max: Quantization interval, scalar floats.}]>:$inputs,
4269    TF_Float32Tensor:$min,
4270    TF_Float32Tensor:$max,
4271
4272    DefaultValuedAttr<I64Attr, "8">:$num_bits,
4273    DefaultValuedAttr<BoolAttr, "false">:$narrow_range
4274  );
4275
4276  let results = (outs
4277    Res<TF_Float32Tensor, [{Backpropagated gradients w.r.t. inputs:
4278`gradients * (inputs >= min && inputs <= max)`.}]>:$backprops_wrt_input,
4279    Res<TF_Float32Tensor, [{Backpropagated gradients w.r.t. min parameter:
4280`sum(gradients * (inputs < min))`.}]>:$backprop_wrt_min,
4281    Res<TF_Float32Tensor, [{Backpropagated gradients w.r.t. max parameter:
4282`sum(gradients * (inputs > max))`.}]>:$backprop_wrt_max
4283  );
4284}
4285
4286def TF_FakeQuantWithMinMaxVarsPerChannelOp : TF_Op<"FakeQuantWithMinMaxVarsPerChannel", [NoSideEffect]> {
4287  let summary = [{
Fake-quantize the 'inputs' tensor of type float via per-channel floats `min` and `max`.
4289  }];
4290
4291  let description = [{
Fake-quantize the `inputs` tensor of type float, with one of the shapes `[d]`,
`[b, d]`, or `[b, h, w, d]`, via per-channel floats `min` and `max` of shape
`[d]`, to an `outputs` tensor of the same shape as `inputs`.
4295
4296Attributes
4297
4298*   `[min; max]` define the clamping range for the `inputs` data.
4299*   `inputs` values are quantized into the quantization range (
4300`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
4301when it is true) and then de-quantized and output as floats in `[min; max]`
4302interval.
4303*   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
4304
4305Before quantization, `min` and `max` values are adjusted with the following
4306logic.
4307It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
4308the behavior can be unexpected:
4309
4310*   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
4311*   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
4312*   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
4313`min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
4314
4315This operation has a gradient and thus allows for training `min` and `max`
4316values.
4317  }];
4318
4319  let arguments = (ins
4320    TF_Float32Tensor:$inputs,
4321    TF_Float32Tensor:$min,
4322    TF_Float32Tensor:$max,
4323
4324    DefaultValuedAttr<I64Attr, "8">:$num_bits,
4325    DefaultValuedAttr<BoolAttr, "false">:$narrow_range
4326  );
4327
4328  let results = (outs
4329    TF_Float32Tensor:$outputs
4330  );
4331
4332  let verifier = [{
4333    return Verify(*this);
4334  }];
4335}
4336
4337def TF_FillOp : TF_Op<"Fill", [NoSideEffect]> {
4338  let summary = "Creates a tensor filled with a scalar value.";
4339
4340  let description = [{
4341This operation creates a tensor of shape `dims` and fills it with `value`.
4342
4343For example:
4344
4345```
4346# Output tensor has shape [2, 3].
4347fill([2, 3], 9) ==> [[9, 9, 9]
4348                     [9, 9, 9]]
4349```
4350
4351`tf.fill` differs from `tf.constant` in a few ways:
4352
4353*   `tf.fill` only supports scalar contents, whereas `tf.constant` supports
4354    Tensor values.
4355*   `tf.fill` creates an Op in the computation graph that constructs the actual
4356    Tensor value at runtime. This is in contrast to `tf.constant` which embeds
4357    the entire Tensor into the graph with a `Const` node.
4358*   Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes
4359    based on other runtime Tensors, unlike `tf.constant`.
4360  }];
4361
4362  let arguments = (ins
4363    Arg<TF_I32OrI64Tensor, [{1-D. Represents the shape of the output tensor.}]>:$dims,
4364    Arg<TF_Tensor, [{0-D (scalar). Value to fill the returned tensor.
4365
4366@compatibility(numpy)
4367Equivalent to np.full
4368@end_compatibility}]>:$value
4369  );
4370
4371  let results = (outs
4372    TF_Tensor:$output
4373  );
4374
4375  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
4376  TF_DerivedOperandTypeAttr index_type = TF_DerivedOperandTypeAttr<0>;
4377
4378  let verifier = [{
4379    return Verify(*this);
4380  }];
4381
4382  let builders = [
4383    OpBuilder<(ins "Value":$dims, "Value":$value)>
4384  ];
4385}
4386
4387def TF_FloorOp : TF_Op<"Floor", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
4388  let summary = "Returns element-wise largest integer not greater than x.";
4389
4390  let arguments = (ins
4391    TF_FloatTensor:$x
4392  );
4393
4394  let results = (outs
4395    TF_FloatTensor:$y
4396  );
4397
4398  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4399}
4400
4401def TF_FloorDivOp : TF_Op<"FloorDiv", [NoSideEffect, ResultsBroadcastableShape]>,
4402                    WithBroadcastableBinOpBuilder {
4403  let summary = "Returns x // y element-wise.";
4404
4405  let description = [{
4406*NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
4407[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
4408  }];
4409
4410  let arguments = (ins
4411    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
4412    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
4413  );
4414
4415  let results = (outs
4416    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
4417  );
4418
4419  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4420}
4421
4422def TF_FloorModOp : TF_Op<"FloorMod", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
4423                    WithBroadcastableBinOpBuilder {
  let summary = "Returns element-wise remainder of division.";

  let description = [{
When `x < 0` xor `y < 0` is true, this follows Python semantics in that the
result here is consistent with a flooring divide. E.g.
`floor(x / y) * y + mod(x, y) = x`.
4431
4432*NOTE*: `FloorMod` supports broadcasting. More about broadcasting
4433[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
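
For example, a sketch via `tf.math.floormod` (results follow Python's `%`):

```python
import tensorflow as tf

tf.math.floormod(tf.constant([7, -7]), tf.constant([3, 3]))
# ==> [1, 2], matching Python's 7 % 3 and -7 % 3
```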
4434  }];
4435
4436  let arguments = (ins
4437    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64, TF_Uint64]>:$x,
4438    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64, TF_Uint64]>:$y
4439  );
4440
4441  let results = (outs
4442    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64, TF_Uint64]>:$z
4443  );
4444
4445  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4446}
4447
4448def TF_FusedBatchNormOp : TF_Op<"FusedBatchNorm", [NoSideEffect]> {
4449  let summary = "Batch normalization.";
4450
4451  let description = [{
Note that the shape of the 4D Tensors is defined by either "NHWC" or "NCHW".
The size of the 1D Tensors matches the dimension C of the 4D Tensors.
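
A training-mode sketch via `tf.raw_ops.FusedBatchNorm` (an assumption-laden
illustration; `mean` and `variance` are passed empty because they are
inference-only inputs):

```python
import tensorflow as tf

x = tf.random.normal([2, 4, 4, 3])   # NHWC, C = 3
scale = tf.ones([3])
offset = tf.zeros([3])
y, batch_mean, batch_var, _, _ = tf.raw_ops.FusedBatchNorm(
    x=x, scale=scale, offset=offset, mean=[], variance=[],
    is_training=True)
```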
4454  }];
4455
4456  let arguments = (ins
4457    Arg<TF_Float32Tensor, [{A 4D Tensor for input data.}]>:$x,
4458    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
4459    Arg<TF_Float32Tensor, [{A 1D Tensor for offset, to shift to the normalized x.}]>:$offset,
4460    Arg<TF_Float32Tensor, [{A 1D Tensor for population mean. Used for inference only;
4461must be empty for training.}]>:$mean,
4462    Arg<TF_Float32Tensor, [{A 1D Tensor for population variance. Used for inference only;
4463must be empty for training.}]>:$variance,
4464
4465    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
4466    DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
4467    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
4468    DefaultValuedAttr<BoolAttr, "true">:$is_training
4469  );
4470
4471  let results = (outs
4472    Res<TF_Float32Tensor, [{A 4D Tensor for output data.}]>:$y,
4473    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be used by TensorFlow
4474to compute the running mean.}]>:$batch_mean,
4475    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance, to be used by
4476TensorFlow to compute the running variance.}]>:$batch_variance,
4477    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be reused
4478in the gradient computation.}]>:$reserve_space_1,
4479    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance (inverted variance
4480in the cuDNN case), to be reused in the gradient computation.}]>:$reserve_space_2
4481  );
4482
4483  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4484
4485  let verifier = [{
4486    return Verify(*this);
4487  }];
4488}
4489
4490def TF_FusedBatchNormGradOp : TF_Op<"FusedBatchNormGrad", [NoSideEffect]> {
4491  let summary = "Gradient for batch normalization.";
4492
4493  let description = [{
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
4495The size of 1D Tensors matches the dimension C of the 4D Tensors.
4496  }];
4497
4498  let arguments = (ins
4499    Arg<TF_Float32Tensor, [{A 4D Tensor for the gradient with respect to y.}]>:$y_backprop,
4500    Arg<TF_Float32Tensor, [{A 4D Tensor for input data.}]>:$x,
4501    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
4502    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
4503mean to be reused in gradient computation. When is_training is
4504False, a 1D Tensor for the population mean to be reused in both
45051st and 2nd order gradient computation.}]>:$reserve_space_1,
4506    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
4507variance (inverted variance in the cuDNN case) to be reused in
4508gradient computation. When is_training is False, a 1D Tensor
4509for the population variance to be reused in both 1st and 2nd
4510order gradient computation.}]>:$reserve_space_2,
4511
4512    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
4513    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
4514    DefaultValuedAttr<BoolAttr, "true">:$is_training
4515  );
4516
4517  let results = (outs
4518    Res<TF_Float32Tensor, [{A 4D Tensor for the gradient with respect to x.}]>:$x_backprop,
4519    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to scale.}]>:$scale_backprop,
4520    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to offset.}]>:$offset_backprop,
4521    Res<TF_Float32Tensor, [{Unused placeholder to match the mean input in FusedBatchNorm.}]>:$reserve_space_3,
4522    Res<TF_Float32Tensor, [{Unused placeholder to match the variance input
4523in FusedBatchNorm.}]>:$reserve_space_4
4524  );
4525
4526  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4527}
4528
4529def TF_FusedBatchNormGradV2Op : TF_Op<"FusedBatchNormGradV2", [NoSideEffect]> {
4530  let summary = "Gradient for batch normalization.";
4531
4532  let description = [{
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
4534The size of 1D Tensors matches the dimension C of the 4D Tensors.
4535  }];
4536
4537  let arguments = (ins
4538    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to y.}]>:$y_backprop,
4539    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
4540    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
4541    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
4542mean to be reused in gradient computation. When is_training is
4543False, a 1D Tensor for the population mean to be reused in both
45441st and 2nd order gradient computation.}]>:$reserve_space_1,
4545    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
4546variance (inverted variance in the cuDNN case) to be reused in
4547gradient computation. When is_training is False, a 1D Tensor
4548for the population variance to be reused in both 1st and 2nd
4549order gradient computation.}]>:$reserve_space_2,
4550
4551    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
4552    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
4553    DefaultValuedAttr<BoolAttr, "true">:$is_training
4554  );
4555
4556  let results = (outs
4557    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to x.}]>:$x_backprop,
4558    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to scale.}]>:$scale_backprop,
4559    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to offset.}]>:$offset_backprop,
4560    Res<TF_Float32Tensor, [{Unused placeholder to match the mean input in FusedBatchNorm.}]>:$reserve_space_3,
4561    Res<TF_Float32Tensor, [{Unused placeholder to match the variance input
4562in FusedBatchNorm.}]>:$reserve_space_4
4563  );
4564
4565  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4566  TF_DerivedOperandTypeAttr U = TF_DerivedOperandTypeAttr<3>;
4567}
4568
4569def TF_FusedBatchNormGradV3Op : TF_Op<"FusedBatchNormGradV3", [NoSideEffect]> {
4570  let summary = "Gradient for batch normalization.";
4571
4572  let description = [{
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
4574The size of 1D Tensors matches the dimension C of the 4D Tensors.
4575  }];
4576
4577  let arguments = (ins
4578    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to y.}]>:$y_backprop,
4579    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
4580    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
4581    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
4582mean to be reused in gradient computation. When is_training is
4583False, a 1D Tensor for the population mean to be reused in both
45841st and 2nd order gradient computation.}]>:$reserve_space_1,
4585    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
4586variance (inverted variance in the cuDNN case) to be reused in
4587gradient computation. When is_training is False, a 1D Tensor
4588for the population variance to be reused in both 1st and 2nd
4589order gradient computation.}]>:$reserve_space_2,
4590    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for some intermediate results to be reused
4591in gradient computation. When is_training is False, a dummy empty Tensor will be
4592created.}]>:$reserve_space_3,
4593
4594    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
4595    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NDHWC", "NCDHW"]>, "NHWC">:$data_format,
4596    DefaultValuedAttr<BoolAttr, "true">:$is_training
4597  );
4598
4599  let results = (outs
4600    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to x.}]>:$x_backprop,
4601    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to scale.}]>:$scale_backprop,
4602    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to offset.}]>:$offset_backprop,
4603    Res<TF_Float32Tensor, [{Unused placeholder to match the mean input in FusedBatchNorm.}]>:$reserve_space_4,
4604    Res<TF_Float32Tensor, [{Unused placeholder to match the variance input
4605in FusedBatchNorm.}]>:$reserve_space_5
4606  );
4607
4608  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4609  TF_DerivedOperandTypeAttr U = TF_DerivedOperandTypeAttr<3>;
4610}
4611
4612def TF_FusedBatchNormV2Op : TF_Op<"FusedBatchNormV2", [NoSideEffect]> {
4613  let summary = "Batch normalization.";
4614
4615  let description = [{
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
4617The size of 1D Tensors matches the dimension C of the 4D Tensors.
4618  }];
4619
4620  let arguments = (ins
4621    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
4622    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
4623    Arg<TF_Float32Tensor, [{A 1D Tensor for offset, to shift to the normalized x.}]>:$offset,
4624    Arg<TF_Float32Tensor, [{A 1D Tensor for population mean. Used for inference only;
4625must be empty for training.}]>:$mean,
4626    Arg<TF_Float32Tensor, [{A 1D Tensor for population variance. Used for inference only;
4627must be empty for training.}]>:$variance,
4628
4629    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
4630    DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
4631    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
4632    DefaultValuedAttr<BoolAttr, "true">:$is_training
4633  );
4634
4635  let results = (outs
4636    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for output data.}]>:$y,
4637    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be used by TensorFlow
4638to compute the running mean.}]>:$batch_mean,
4639    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance, to be used by
4640TensorFlow to compute the running variance.}]>:$batch_variance,
4641    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be reused
4642in the gradient computation.}]>:$reserve_space_1,
4643    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance (inverted variance
4644in the cuDNN case), to be reused in the gradient computation.}]>:$reserve_space_2
4645  );
4646
4647  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4648  TF_DerivedOperandTypeAttr U = TF_DerivedOperandTypeAttr<1>;
4649}
4650
4651def TF_FusedBatchNormV3Op : TF_Op<"FusedBatchNormV3", [NoSideEffect]> {
4652  let summary = "Batch normalization.";
4653
4654  let description = [{
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
4656The size of 1D Tensors matches the dimension C of the 4D Tensors.
4657  }];
4658
4659  let arguments = (ins
4660    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
4661    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
4662    Arg<TF_Float32Tensor, [{A 1D Tensor for offset, to shift to the normalized x.}]>:$offset,
4663    Arg<TF_Float32Tensor, [{A 1D Tensor for population mean. Used for inference only;
4664must be empty for training.}]>:$mean,
4665    Arg<TF_Float32Tensor, [{A 1D Tensor for population variance. Used for inference only;
4666must be empty for training.}]>:$variance,
4667
4668    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
4669    DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
4670    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NDHWC", "NCDHW"]>, "NHWC">:$data_format,
4671    DefaultValuedAttr<BoolAttr, "true">:$is_training
4672  );
4673
4674  let results = (outs
4675    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for output data.}]>:$y,
4676    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be used by TensorFlow
4677to compute the running mean.}]>:$batch_mean,
4678    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance, to be used by
4679TensorFlow to compute the running variance.}]>:$batch_variance,
4680    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be reused
4681in the gradient computation.}]>:$reserve_space_1,
4682    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance (inverted variance
4683in the cuDNN case), to be reused in the gradient computation.}]>:$reserve_space_2,
4684    Res<TF_Float32Tensor, [{A 1D Tensor for some intermediate results, to be reused in the gradient
4685computation for better efficiency.}]>:$reserve_space_3
4686  );
4687
4688  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4689  TF_DerivedOperandTypeAttr U = TF_DerivedOperandTypeAttr<1>;
4690}
4691
4692def TF_GatherOp : TF_Op<"Gather", [NoSideEffect]> {
4693  let summary = "Gather slices from `params` according to `indices`.";
4694
4695  let description = [{
4696`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
4697Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
4698
4699```python
4700    # Scalar indices
4701    output[:, ..., :] = params[indices, :, ... :]
4702
4703    # Vector indices
4704    output[i, :, ..., :] = params[indices[i], :, ... :]
4705
4706    # Higher rank indices
4707    output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
4708```
4709
4710If `indices` is a permutation and `len(indices) == params.shape[0]` then
4711this operation will permute `params` accordingly.
4712
4713`validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
4714`indices` are always validated to be within range. If assigned to GPU,
4715out-of-bound indices result in safe but unspecified behavior, which may include
4716raising an error.
4717
4718<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
4719<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
4720</div>
4721  }];
4722
4723  let arguments = (ins
4724    TF_Tensor:$params,
4725    TF_I32OrI64Tensor:$indices,
4726
4727    DefaultValuedAttr<BoolAttr, "true">:$validate_indices
4728  );
4729
4730  let results = (outs
4731    TF_Tensor:$output
4732  );
4733
4734  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
4735  TF_DerivedOperandTypeAttr Tparams = TF_DerivedOperandTypeAttr<0>;
4736}
4737
4738def TF_GatherNdOp : TF_Op<"GatherNd", [NoSideEffect]> {
4739  let summary = [{
4740Gather slices from `params` into a Tensor with shape specified by `indices`.
4741  }];
4742
4743  let description = [{
4744`indices` is a K-dimensional integer tensor, best thought of as a
4745(K-1)-dimensional tensor of indices into `params`, where each element defines a
4746slice of `params`:
4747
4748    output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
4749
4750Whereas in `tf.gather` `indices` defines slices into the `axis`
4751dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
4752first `N` dimensions of `params`, where `N = indices.shape[-1]`.
4753
4754The last dimension of `indices` can be at most the rank of
4755`params`:
4756
4757    indices.shape[-1] <= params.rank
4758
4759The last dimension of `indices` corresponds to elements
4760(if `indices.shape[-1] == params.rank`) or slices
4761(if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
4762of `params`.  The output tensor has shape
4763
4764    indices.shape[:-1] + params.shape[indices.shape[-1]:]
4765
4766Note that on CPU, if an out of bound index is found, an error is returned.
4767On GPU, if an out of bound index is found, a 0 is stored in the
4768corresponding output value.
4769
Some examples follow.
4771
4772Simple indexing into a matrix:
4773
4774```python
4775    indices = [[0, 0], [1, 1]]
4776    params = [['a', 'b'], ['c', 'd']]
4777    output = ['a', 'd']
4778```
4779
4780Slice indexing into a matrix:
4781
4782```python
4783    indices = [[1], [0]]
4784    params = [['a', 'b'], ['c', 'd']]
4785    output = [['c', 'd'], ['a', 'b']]
4786```
4787
4788Indexing into a 3-tensor:
4789
4790```python
4791    indices = [[1]]
4792    params = [[['a0', 'b0'], ['c0', 'd0']],
4793              [['a1', 'b1'], ['c1', 'd1']]]
4794    output = [[['a1', 'b1'], ['c1', 'd1']]]
4795
4796
4797    indices = [[0, 1], [1, 0]]
4798    params = [[['a0', 'b0'], ['c0', 'd0']],
4799              [['a1', 'b1'], ['c1', 'd1']]]
4800    output = [['c0', 'd0'], ['a1', 'b1']]
4801
4802
4803    indices = [[0, 0, 1], [1, 0, 1]]
4804    params = [[['a0', 'b0'], ['c0', 'd0']],
4805              [['a1', 'b1'], ['c1', 'd1']]]
4806    output = ['b0', 'b1']
4807```
4808
4809Batched indexing into a matrix:
4810
4811```python
4812    indices = [[[0, 0]], [[0, 1]]]
4813    params = [['a', 'b'], ['c', 'd']]
4814    output = [['a'], ['b']]
4815```
4816
4817Batched slice indexing into a matrix:
4818
4819```python
4820    indices = [[[1]], [[0]]]
4821    params = [['a', 'b'], ['c', 'd']]
4822    output = [[['c', 'd']], [['a', 'b']]]
4823```
4824
4825Batched indexing into a 3-tensor:
4826
4827```python
4828    indices = [[[1]], [[0]]]
4829    params = [[['a0', 'b0'], ['c0', 'd0']],
4830              [['a1', 'b1'], ['c1', 'd1']]]
4831    output = [[[['a1', 'b1'], ['c1', 'd1']]],
4832              [[['a0', 'b0'], ['c0', 'd0']]]]
4833
4834    indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
4835    params = [[['a0', 'b0'], ['c0', 'd0']],
4836              [['a1', 'b1'], ['c1', 'd1']]]
4837    output = [[['c0', 'd0'], ['a1', 'b1']],
4838              [['a0', 'b0'], ['c1', 'd1']]]
4839
4840
4841    indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
4842    params = [[['a0', 'b0'], ['c0', 'd0']],
4843              [['a1', 'b1'], ['c1', 'd1']]]
4844    output = [['b0', 'b1'], ['d0', 'c1']]
4845```
4846
4847See also `tf.gather` and `tf.batch_gather`.
4848  }];
4849
4850  let arguments = (ins
4851    Arg<TF_Tensor, [{The tensor from which to gather values.}]>:$params,
4852    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices
4853  );
4854
4855  let results = (outs
4856    Res<TF_Tensor, [{Values from `params` gathered from indices given by `indices`, with
4857shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.}]>:$output
4858  );
4859
4860  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
4861  TF_DerivedOperandTypeAttr Tparams = TF_DerivedOperandTypeAttr<0>;
4862}
4863
4864def TF_GatherV2Op : TF_Op<"GatherV2", [NoSideEffect]> {
4865  let summary = [{
4866Gather slices from `params` axis `axis` according to `indices`.
4867  }];
4868
4869  let description = [{
4870`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
4871Produces an output tensor with shape `params.shape[:axis] +
4872indices.shape[batch_dims:] + params.shape[axis + 1:]` where:
4873
4874```python
4875    # Scalar indices (output is rank(params) - 1).
4876    output[a_0, ..., a_n, b_0, ..., b_n] =
4877      params[a_0, ..., a_n, indices, b_0, ..., b_n]
4878
4879    # Vector indices (output is rank(params)).
4880    output[a_0, ..., a_n, i, b_0, ..., b_n] =
4881      params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
4882
4883    # Higher rank indices (output is rank(params) + rank(indices) - 1).
4884    output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
4885      params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
4886```
4887
4888<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
4889<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
4890</div>
4891
4892Note that on CPU, if an out of bound index is found, an error is returned.
4893On GPU, if an out of bound index is found, a 0 is stored in the
4894corresponding output value.
4895
4896See also `tf.batch_gather` and `tf.gather_nd`.
4897  }];
4898
4899  let arguments = (ins
4900    Arg<TF_Tensor, [{The tensor from which to gather values. Must be at least rank
4901`axis + 1`.}]>:$params,
4902    Arg<TF_I32OrI64Tensor, [{Index tensor. Must be in range `[0, params.shape[axis])`.}]>:$indices,
4903    Arg<TF_I32OrI64Tensor, [{The axis in `params` to gather `indices` from. Defaults to the first
4904dimension. Supports negative indexes.}]>:$axis,
4905
4906    DefaultValuedAttr<I64Attr, "0">:$batch_dims
4907  );
4908
4909  let results = (outs
4910    Res<TF_Tensor, [{Values from `params` gathered from indices given by `indices`, with
4911shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.}]>:$output
4912  );
4913
4914  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
4915  TF_DerivedOperandTypeAttr Tparams = TF_DerivedOperandTypeAttr<0>;
4916  TF_DerivedOperandTypeAttr Taxis = TF_DerivedOperandTypeAttr<2>;
4917
4918  let verifier = [{
4919    return Verify(*this);
4920  }];
4921}
4922
4923def TF_GreaterOp : TF_Op<"Greater", [NoSideEffect, ResultsBroadcastableShape]>,
4924                   WithBroadcastableCmpOpBuilder {
4925  let summary = "Returns the truth value of (x > y) element-wise.";
4926
4927  let description = [{
4928*NOTE*: `Greater` supports broadcasting. More about broadcasting
4929[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
4930
4931Example:
4932
4933```python
4934x = tf.constant([5, 4, 6])
4935y = tf.constant([5, 2, 5])
4936tf.math.greater(x, y) ==> [False, True, True]
4937
4938x = tf.constant([5, 4, 6])
4939y = tf.constant([5])
4940tf.math.greater(x, y) ==> [False, False, True]
4941```
4942  }];
4943
4944  let arguments = (ins
4945    TF_IntOrFpTensor:$x,
4946    TF_IntOrFpTensor:$y
4947  );
4948
4949  let results = (outs
4950    TF_BoolTensor:$z
4951  );
4952
4953  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4954}
4955
4956def TF_GreaterEqualOp : TF_Op<"GreaterEqual", [NoSideEffect, ResultsBroadcastableShape]>,
4957                        WithBroadcastableCmpOpBuilder {
4958  let summary = "Returns the truth value of (x >= y) element-wise.";
4959
4960  let description = [{
4961*NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
4962[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
4963
4964Example:
4965
4966```python
4967x = tf.constant([5, 4, 6, 7])
4968y = tf.constant([5, 2, 5, 10])
4969tf.math.greater_equal(x, y) ==> [True, True, True, False]
4970
4971x = tf.constant([5, 4, 6, 7])
4972y = tf.constant([5])
4973tf.math.greater_equal(x, y) ==> [True, False, True, True]
4974```
4975  }];
4976
4977  let arguments = (ins
4978    TF_IntOrFpTensor:$x,
4979    TF_IntOrFpTensor:$y
4980  );
4981
4982  let results = (outs
4983    TF_BoolTensor:$z
4984  );
4985
4986  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4987}
4988
4989def TF_HSVToRGBOp : TF_Op<"HSVToRGB", [NoSideEffect]> {
4990  let summary = "Convert one or more images from HSV to RGB.";
4991
4992  let description = [{
4993Outputs a tensor of the same shape as the `images` tensor, containing the RGB
values of the pixels. The output is only well defined if the values in `images`
are in `[0,1]`.
4996
4997See `rgb_to_hsv` for a description of the HSV encoding.
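
For example, via the Python-level wrapper `tf.image.hsv_to_rgb` (which lowers
to this op):

```python
hsv = tf.constant([[0.0, 1.0, 1.0]])  # hue=0, full saturation and value: pure red
tf.image.hsv_to_rgb(hsv)              # ==> [[1.0, 0.0, 0.0]]
```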
4998  }];
4999
5000  let arguments = (ins
5001    Arg<TF_FloatTensor, [{1-D or higher rank. HSV data to convert. Last dimension must be size 3.}]>:$images
5002  );
5003
5004  let results = (outs
5005    Res<TF_FloatTensor, [{`images` converted to RGB.}]>:$output
5006  );
5007
5008  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5009}
5010
5011def TF_HashTableV2Op : TF_Op<"HashTableV2", []> {
5012  let summary = "Creates a non-initialized hash table.";
5013
5014  let description = [{
5015This op creates a hash table, specifying the type of its keys and values.
5016Before using the table you will have to initialize it.  After initialization the
5017table will be immutable.
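
A minimal usage sketch via the higher-level `tf.lookup` wrappers, which create
and initialize such a table:

```python
keys = tf.constant(["a", "b"])
values = tf.constant([1, 2], dtype=tf.int64)
table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(keys, values), default_value=-1)
table.lookup(tf.constant(["a", "x"]))  # ==> [1, -1]
```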
5018  }];
5019
5020  let arguments = (ins
5021    StrAttr:$container,
5022    StrAttr:$shared_name,
5023    DefaultValuedAttr<BoolAttr, "false">:$use_node_name_sharing,
5024    TypeAttr:$key_dtype,
5025    TypeAttr:$value_dtype
5026  );
5027
5028  let results = (outs
5029    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
5030  );
5031}
5032
5033def TF_IFFTOp : TF_Op<"IFFT", [NoSideEffect]> {
5034  let summary = "Inverse fast Fourier transform.";
5035
5036  let description = [{
5037Computes the inverse 1-dimensional discrete Fourier transform over the
5038inner-most dimension of `input`.
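
For example, via `tf.signal.ifft`:

```python
x = tf.constant([1 + 0j, 0j, 0j, 0j], dtype=tf.complex64)
tf.signal.ifft(x)  # ==> [0.25+0j, 0.25+0j, 0.25+0j, 0.25+0j]
```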
5039  }];
5040
5041  let arguments = (ins
5042    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
5043  );
5044
5045  let results = (outs
5046    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most
5047  dimension of `input` is replaced with its inverse 1D Fourier transform.
5048
5049@compatibility(numpy)
5050Equivalent to np.fft.ifft
5051@end_compatibility}]>:$output
5052  );
5053
5054  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5055}
5056
5057def TF_IFFT2DOp : TF_Op<"IFFT2D", [NoSideEffect]> {
5058  let summary = "Inverse 2D fast Fourier transform.";
5059
5060  let description = [{
5061Computes the inverse 2-dimensional discrete Fourier transform over the
5062inner-most 2 dimensions of `input`.
5063  }];
5064
5065  let arguments = (ins
5066    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
5067  );
5068
5069  let results = (outs
5070    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 2
5071  dimensions of `input` are replaced with their inverse 2D Fourier transform.
5072
5073@compatibility(numpy)
5074Equivalent to np.fft.ifft2
5075@end_compatibility}]>:$output
5076  );
5077
5078  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5079}
5080
5081def TF_IFFT3DOp : TF_Op<"IFFT3D", [NoSideEffect]> {
5082  let summary = "Inverse 3D fast Fourier transform.";
5083
5084  let description = [{
5085Computes the inverse 3-dimensional discrete Fourier transform over the
5086inner-most 3 dimensions of `input`.
5087  }];
5088
5089  let arguments = (ins
5090    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
5091  );
5092
5093  let results = (outs
5094    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 3
5095  dimensions of `input` are replaced with their inverse 3D Fourier transform.
5096
5097@compatibility(numpy)
5098Equivalent to np.fft.ifftn with 3 dimensions.
5099@end_compatibility}]>:$output
5100  );
5101
5102  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5103}
5104
5105def TF_IRFFTOp : TF_Op<"IRFFT", [NoSideEffect]> {
5106  let summary = "Inverse real-valued fast Fourier transform.";
5107
5108  let description = [{
5109Computes the inverse 1-dimensional discrete Fourier transform of a real-valued
5110signal over the inner-most dimension of `input`.
5111
5112The inner-most dimension of `input` is assumed to be the result of `RFFT`: the
5113`fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If
5114`fft_length` is not provided, it is computed from the size of the inner-most
5115dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to
5116compute `input` is odd, it should be provided since it cannot be inferred
5117properly.
5118
5119Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller
5120than the corresponding dimension of `input`, the dimension is cropped. If it is
5121larger, the dimension is padded with zeros.
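
A round-trip sketch via the Python wrappers; here `fft_length` is inferred as
`2 * (3 - 1) = 4` from the 3 complex bins:

```python
signal = tf.constant([1.0, 2.0, 3.0, 4.0])
spectrum = tf.signal.rfft(signal)  # 4 // 2 + 1 = 3 complex bins
tf.signal.irfft(spectrum)          # ==> [1.0, 2.0, 3.0, 4.0] (up to float error)
```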
5122  }];
5123
5124  let arguments = (ins
5125    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input,
5126    Arg<TF_Int32Tensor, [{An int32 tensor of shape [1]. The FFT length.}]>:$fft_length
5127  );
5128
5129  let results = (outs
5130    Res<TF_F32OrF64Tensor, [{A float32 tensor of the same rank as `input`. The inner-most
5131  dimension of `input` is replaced with the `fft_length` samples of its inverse
5132  1D Fourier transform.
5133
5134@compatibility(numpy)
5135Equivalent to np.fft.irfft
5136@end_compatibility}]>:$output
5137  );
5138
5139  TF_DerivedResultTypeAttr Treal = TF_DerivedResultTypeAttr<0>;
5140  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5141}
5142
5143def TF_IRFFT2DOp : TF_Op<"IRFFT2D", [NoSideEffect]> {
5144  let summary = "Inverse 2D real-valued fast Fourier transform.";
5145
5146  let description = [{
5147Computes the inverse 2-dimensional discrete Fourier transform of a real-valued
5148signal over the inner-most 2 dimensions of `input`.
5149
5150The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:
5151The inner-most dimension contains the `fft_length / 2 + 1` unique components of
5152the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
5153from the size of the inner-most 2 dimensions of `input`. If the FFT length used
5154to compute `input` is odd, it should be provided since it cannot be inferred
5155properly.
5156
5157Along each axis `IRFFT2D` is computed on, if `fft_length` (or
5158`fft_length / 2 + 1` for the inner-most dimension) is smaller than the
5159corresponding dimension of `input`, the dimension is cropped. If it is larger,
5160the dimension is padded with zeros.
5161  }];
5162
5163  let arguments = (ins
5164    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input,
5165    Arg<TF_Int32Tensor, [{An int32 tensor of shape [2]. The FFT length for each dimension.}]>:$fft_length
5166  );
5167
5168  let results = (outs
5169    Res<TF_F32OrF64Tensor, [{A float32 tensor of the same rank as `input`. The inner-most 2
5170  dimensions of `input` are replaced with the `fft_length` samples of their
5171  inverse 2D Fourier transform.
5172
5173@compatibility(numpy)
5174Equivalent to np.fft.irfft2
5175@end_compatibility}]>:$output
5176  );
5177
5178  TF_DerivedResultTypeAttr Treal = TF_DerivedResultTypeAttr<0>;
5179  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5180}
5181
5182def TF_IRFFT3DOp : TF_Op<"IRFFT3D", [NoSideEffect]> {
5183  let summary = "Inverse 3D real-valued fast Fourier transform.";
5184
5185  let description = [{
5186Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
5187signal over the inner-most 3 dimensions of `input`.
5188
5189The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
5190The inner-most dimension contains the `fft_length / 2 + 1` unique components of
5191the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
5192from the size of the inner-most 3 dimensions of `input`. If the FFT length used
5193to compute `input` is odd, it should be provided since it cannot be inferred
5194properly.
5195
5196Along each axis `IRFFT3D` is computed on, if `fft_length` (or
5197`fft_length / 2 + 1` for the inner-most dimension) is smaller than the
5198corresponding dimension of `input`, the dimension is cropped. If it is larger,
5199the dimension is padded with zeros.
5200  }];
5201
5202  let arguments = (ins
5203    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input,
5204    Arg<TF_Int32Tensor, [{An int32 tensor of shape [3]. The FFT length for each dimension.}]>:$fft_length
5205  );
5206
5207  let results = (outs
5208    Res<TF_F32OrF64Tensor, [{A float32 tensor of the same rank as `input`. The inner-most 3
5209  dimensions of `input` are replaced with the `fft_length` samples of their
5210  inverse 3D real Fourier transform.
5211
5212@compatibility(numpy)
Equivalent to np.fft.irfftn with 3 dimensions.
5214@end_compatibility}]>:$output
5215  );
5216
5217  TF_DerivedResultTypeAttr Treal = TF_DerivedResultTypeAttr<0>;
5218  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5219}
5220
5221def TF_IdentityNOp : TF_Op<"IdentityN", [NoSideEffect]> {
  let summary = [{
Returns a list of tensors with the same shapes and contents as the input tensors.
  }];

  let description = [{
5229This op can be used to override the gradient for complicated functions. For
5230example, suppose y = f(x) and we wish to apply a custom function g for backprop
5231such that dx = g(dy). In Python,
5232
5233```python
5234with tf.get_default_graph().gradient_override_map(
5235    {'IdentityN': 'OverrideGradientWithG'}):
5236  y, _ = identity_n([f(x), x])
5237
5238@tf.RegisterGradient('OverrideGradientWithG')
5239def ApplyG(op, dy, _):
5240  return [None, g(dy)]  # Do not backprop to f(x).
5241```
5242  }];
5243
5244  let arguments = (ins
5245    Variadic<TF_Tensor>:$input
5246  );
5247
5248  let results = (outs
5249    Variadic<TF_Tensor>:$output
5250  );
5251
5252  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<0>;
5253}
5254
5255def TF_IgammaOp : TF_Op<"Igamma", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
5256                  WithBroadcastableBinOpBuilder {
5257  let summary = [{
5258Compute the lower regularized incomplete Gamma function `P(a, x)`.
5259  }];
5260
5261  let description = [{
5262The lower regularized incomplete Gamma function is defined as:
5263
5264
5265\\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
5266
5267where
5268
5269\\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\)
5270
5271is the lower incomplete Gamma function.
5272
Note, above `Q(a, x)` (`Igammac`) is the upper regularized incomplete
Gamma function.
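
For instance, since \\(P(1, x) = 1 - e^{-x}\\):

```python
tf.math.igamma(tf.constant(1.0), tf.constant(1.0))  # ==> ~0.63212055 = 1 - exp(-1)
```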
5275  }];
5276
5277  let arguments = (ins
5278    TF_F32OrF64Tensor:$a,
5279    TF_F32OrF64Tensor:$x
5280  );
5281
5282  let results = (outs
5283    TF_F32OrF64Tensor:$z
5284  );
5285
5286  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5287}
5288
5289def TF_IgammaGradAOp : TF_Op<"IgammaGradA", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
5290                       WithBroadcastableBinOpBuilder {
5291  let summary = "Computes the gradient of `igamma(a, x)` wrt `a`.";
5292
5293  let arguments = (ins
5294    TF_F32OrF64Tensor:$a,
5295    TF_F32OrF64Tensor:$x
5296  );
5297
5298  let results = (outs
5299    TF_F32OrF64Tensor:$z
5300  );
5301
5302  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5303}
5304
5305def TF_IgammacOp : TF_Op<"Igammac", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
5306                   WithBroadcastableBinOpBuilder {
5307  let summary = [{
5308Compute the upper regularized incomplete Gamma function `Q(a, x)`.
5309  }];
5310
5311  let description = [{
5312The upper regularized incomplete Gamma function is defined as:
5313
5314\\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
5315
5316where
5317
\\(Gamma(a, x) = \\int_{x}^{\\infty} t^{a-1} exp(-t) dt\\)

is the upper incomplete Gamma function.
5321
Note, above `P(a, x)` (`Igamma`) is the lower regularized incomplete
Gamma function.
5324  }];
5325
5326  let arguments = (ins
5327    TF_F32OrF64Tensor:$a,
5328    TF_F32OrF64Tensor:$x
5329  );
5330
5331  let results = (outs
5332    TF_F32OrF64Tensor:$z
5333  );
5334
5335  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5336}
5337
5338def TF_ImagOp : TF_Op<"Imag", [NoSideEffect, SameOperandsAndResultShape]> {
5339  let summary = "Returns the imaginary part of a complex number.";
5340
5341  let description = [{
5342Given a tensor `input` of complex numbers, this operation returns a tensor of
5343type `float` that is the imaginary part of each element in `input`. All
5344elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
5345is the real part and *b* is the imaginary part returned by this operation.
5346
5347For example:
5348
5349```
5350# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
5351tf.imag(input) ==> [4.75, 5.75]
5352```
5353  }];
5354
5355  let arguments = (ins
5356    TensorOf<[TF_Complex128, TF_Complex64]>:$input
5357  );
5358
5359  let results = (outs
5360    TF_F32OrF64Tensor:$output
5361  );
5362
5363  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5364  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
5365}
5366
5367def TF_InTopKV2Op : TF_Op<"InTopKV2", [NoSideEffect]> {
5368  let summary = "Says whether the targets are in the top `K` predictions.";
5369
5370  let description = [{
This outputs a `batch_size` bool array; an entry `out[i]` is `true` if the
5372prediction for the target class is among the top `k` predictions among
5373all predictions for example `i`. Note that the behavior of `InTopK` differs
5374from the `TopK` op in its handling of ties; if multiple classes have the
5375same prediction value and straddle the top-`k` boundary, all of those
5376classes are considered to be in the top `k`.
5377
5378More formally, let
5379
5380  \\(predictions_i\\) be the predictions for all classes for example `i`,
5381  \\(targets_i\\) be the target class for example `i`,
5382  \\(out_i\\) be the output for example `i`,
5383
5384$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
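
For example, via `tf.math.in_top_k`:

```python
predictions = tf.constant([[0.1, 0.8, 0.1],
                           [0.3, 0.3, 0.4]])
targets = tf.constant([1, 0], dtype=tf.int32)
tf.math.in_top_k(targets, predictions, k=1)  # ==> [True, False]
```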
5385  }];
5386
5387  let arguments = (ins
5388    Arg<TF_Float32Tensor, [{A `batch_size` x `classes` tensor.}]>:$predictions,
5389    Arg<TF_I32OrI64Tensor, [{A `batch_size` vector of class ids.}]>:$targets,
5390    Arg<TF_I32OrI64Tensor, [{Number of top elements to look at for computing precision.}]>:$k
5391  );
5392
5393  let results = (outs
5394    Res<TF_BoolTensor, [{Computed precision at `k` as a `bool Tensor`.}]>:$precision
5395  );
5396
5397  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
5398}
5399
5400def TF_InfeedDequeueOp : TF_Op<"InfeedDequeue", []> {
5401  let summary = [{
5402A placeholder op for a value that will be fed into the computation.
5403  }];
5404
5405  let arguments = (ins
5406    TF_ShapeAttr:$shape
5407  );
5408
5409  let results = (outs
5410    Res<TF_Tensor, [{A tensor that will be provided using the infeed mechanism.}]>:$output
5411  );
5412
5413  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
5414}
5415
5416def TF_InitializeTableV2Op : TF_Op<"InitializeTableV2", []> {
5417  let summary = [{
5418Table initializer that takes two tensors for keys and values respectively.
5419  }];
5420
5421  let arguments = (ins
5422    Arg<TF_ResourceTensor, [{Handle to a table which will be initialized.}], [TF_LookupTableWrite]>:$table_handle,
5423    Arg<TF_Tensor, [{Keys of type Tkey.}]>:$keys,
5424    Arg<TF_Tensor, [{Values of type Tval.}]>:$values
5425  );
5426
5427  let results = (outs);
5428
5429  TF_DerivedOperandTypeAttr Tval = TF_DerivedOperandTypeAttr<2>;
5430  TF_DerivedOperandTypeAttr Tkey = TF_DerivedOperandTypeAttr<1>;
5431}
5432
5433def TF_InplaceAddOp : TF_Op<"InplaceAdd", [NoSideEffect, TF_AllTypesMatch<["x", "y"]>]> {
5434  let summary = "Adds v into specified rows of x.";
5435
5436  let description = [{
Computes `y = x; y[i, :] += v; return y`.
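
A small sketch via `tf.raw_ops.InplaceAdd`:

```python
x = tf.zeros([3, 2], dtype=tf.int32)
i = tf.constant([0, 2], dtype=tf.int32)
v = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)
tf.raw_ops.InplaceAdd(x=x, i=i, v=v)
# ==> [[1, 1], [0, 0], [2, 2]]
```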
5438  }];
5439
5440  let arguments = (ins
5441    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$x,
5442    Arg<TF_Int32Tensor, [{A vector. Indices into the left-most dimension of `x`.}]>:$i,
5443    Arg<TF_Tensor, [{A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.}]>:$v
5444  );
5445
5446  let results = (outs
5447    Res<TF_Tensor, [{A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.}]>:$y
5448  );
5449
5450  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5451}
5452
5453def TF_InplaceUpdateOp : TF_Op<"InplaceUpdate", [NoSideEffect]> {
5454  let summary = "Updates specified rows 'i' with values 'v'.";
5455
5456  let description = [{
5457Computes `x[i, :] = v; return x`.
5458
Originally this function is mutative; however, for compilation we make this
operation create and operate on a copy of `x`.
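
A small sketch via `tf.raw_ops.InplaceUpdate`:

```python
x = tf.constant([[1, 1], [2, 2], [3, 3]])
i = tf.constant([0, 2], dtype=tf.int32)
v = tf.constant([[9, 9], [8, 8]])
tf.raw_ops.InplaceUpdate(x=x, i=i, v=v)
# ==> [[9, 9], [2, 2], [8, 8]]
```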
5461  }];
5462
5463  let arguments = (ins
5464    Arg<TF_Tensor, [{A tensor of type `T`.}]>:$x,
5465    Arg<TF_Int32Tensor, [{A vector. Indices into the left-most dimension of `x`.}]>:$i,
5466    Arg<TF_Tensor, [{A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.}]>:$v
5467  );
5468
5469  let results = (outs
5470    Res<TF_Tensor, [{A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.}]>:$y
5471  );
5472
5473  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5474}
5475
5476def TF_InvOp : TF_Op<"Inv", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
5477  let summary = "Computes the reciprocal of x element-wise.";
5478
5479  let description = [{
5480I.e., \\(y = 1 / x\\).
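
For example, via `tf.math.reciprocal`, the Python-level name for this
computation:

```python
tf.math.reciprocal(tf.constant([2.0, 4.0, 0.5]))  # ==> [0.5, 0.25, 2.0]
```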
5481  }];
5482
5483  let arguments = (ins
5484    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
5485  );
5486
5487  let results = (outs
5488    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
5489  );
5490
5491  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5492}
5493
5494def TF_InvertOp : TF_Op<"Invert", [Involution, NoSideEffect, SameOperandsAndResultType]> {
5495  let summary = [{
5496Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes 10101010.
5497  }];
5498
5499  let description = [{
5500Flip each bit of supported types.  For example, type `int8` (decimal 2) binary 00000010 becomes (decimal -3) binary 11111101.
5501This operation is performed on each element of the tensor argument `x`.
5502
5503Example:
5504```python
5505import tensorflow as tf
5506from tensorflow.python.ops import bitwise_ops
5507
5508# flip 2 (00000010) to -3 (11111101)
5509tf.assert_equal(-3, bitwise_ops.invert(2))
5510
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
              tf.uint8, tf.uint16, tf.uint32, tf.uint64]
5513
5514inputs = [0, 5, 3, 14]
5515for dtype in dtype_list:
5516  # Because of issues with negative numbers, let's test this indirectly.
5517  # 1. invert(a) and a = 0
5518  # 2. invert(a) or a = invert(0)
5519  input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype)
5520  not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and(
5521                                      input_tensor, bitwise_ops.invert(input_tensor)),
5522                                    bitwise_ops.bitwise_or(
5523                                      input_tensor, bitwise_ops.invert(input_tensor)),
5524                                    bitwise_ops.invert(
5525                                      tf.constant(0, dtype=dtype))]
5526
5527  expected = tf.constant([0, 0, 0, 0], dtype=tf.float32)
5528  tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected)
5529
5530  expected = tf.cast([not_0] * 4, tf.float32)
5531  tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected)
5532
5533  # For unsigned dtypes let's also check the result directly.
5534  if dtype.is_unsigned:
5535    inverted = bitwise_ops.invert(input_tensor)
5536    expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32)
5537    tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32))
5538```
5539  }];
5540
5541  let arguments = (ins
5542    TF_IntTensor:$x
5543  );
5544
5545  let results = (outs
5546    TF_IntTensor:$y
5547  );
5548
5549  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5550}
5551
5552def TF_InvertPermutationOp : TF_Op<"InvertPermutation", [NoSideEffect]> {
5553  let summary = "Computes the inverse permutation of a tensor.";
5554
5555  let description = [{
5556This operation computes the inverse of an index permutation. It takes a 1-D
5557integer tensor `x`, which represents the indices of a zero-based array, and
5558swaps each value with its index position. In other words, for an output tensor
5559`y` and an input tensor `x`, this operation computes the following:
5560
5561`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
5562
5563The values must include 0. There can be no duplicate values or negative values.
5564
5565For example:
5566
5567```
5568# tensor `x` is [3, 4, 0, 2, 1]
5569invert_permutation(x) ==> [2, 4, 3, 0, 1]
5570```
5571  }];
5572
5573  let arguments = (ins
5574    Arg<TF_I32OrI64Tensor, [{1-D.}]>:$x
5575  );
5576
5577  let results = (outs
5578    Res<TF_I32OrI64Tensor, [{1-D.}]>:$y
5579  );
5580
5581  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5582
5583  let verifier = [{
5584    return Verify(*this);
5585  }];
5586}
5587
5588def TF_IsFiniteOp : TF_Op<"IsFinite", [NoSideEffect, SameOperandsAndResultShape]> {
5589  let summary = "Returns which elements of x are finite.";
5590
5591  let description = [{
5592@compatibility(numpy)
5593Equivalent to np.isfinite
5594@end_compatibility
5595
5596Example:
5597
5598```python
5599x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan])
5600tf.math.is_finite(x) ==> [True, True, True, False, False]
5601```
5602  }];
5603
5604  let arguments = (ins
5605    TF_FloatTensor:$x
5606  );
5607
5608  let results = (outs
5609    TF_BoolTensor:$y
5610  );
5611
5612  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5613}
5614
5615def TF_IsInfOp : TF_Op<"IsInf", [NoSideEffect, SameOperandsAndResultShape]> {
5616  let summary = "Returns which elements of x are Inf.";
5617
5618  let description = [{
5619@compatibility(numpy)
5620Equivalent to np.isinf
5621@end_compatibility
5622
5623Example:
5624
5625```python
5626x = tf.constant([5.0, np.inf, 6.8, np.inf])
5627tf.math.is_inf(x) ==> [False, True, False, True]
5628```
5629  }];
5630
5631  let arguments = (ins
5632    TF_FloatTensor:$x
5633  );
5634
5635  let results = (outs
5636    TF_BoolTensor:$y
5637  );
5638
5639  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5640}
5641
5642def TF_IsNanOp : TF_Op<"IsNan", [NoSideEffect, SameOperandsAndResultShape]> {
5643  let summary = "Returns which elements of x are NaN.";
5644
5645  let description = [{
5646@compatibility(numpy)
5647Equivalent to np.isnan
5648@end_compatibility
5649
5650Example:
5651
5652```python
5653x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf])
5654tf.math.is_nan(x) ==> [False, True, False, True, False]
5655```
5656  }];
5657
5658  let arguments = (ins
5659    TF_FloatTensor:$x
5660  );
5661
5662  let results = (outs
5663    TF_BoolTensor:$y
5664  );
5665
5666  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5667}
5668
5669def TF_IteratorOp : TF_Op<"Iterator", []> {
5670  let summary = "A container for an iterator resource.";
5671
5672  let arguments = (ins
5673    StrAttr:$shared_name,
5674    StrAttr:$container,
5675    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
5676    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
5677  );
5678
5679  let results = (outs
5680    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to a "MakeIterator"
5681or "IteratorGetNext" op.}], [TF_DatasetIteratorAlloc]>:$handle
5682  );
5683}
5684
5685def TF_IteratorFromStringHandleOp : TF_Op<"IteratorFromStringHandle", []> {
5686  let summary = [{
5687Converts the given string representing a handle to an iterator to a resource.
5688  }];
5689
5690  let arguments = (ins
5691    Arg<TF_StrTensor, [{A string representation of the given handle.}]>:$string_handle,
5692
5693    DefaultValuedAttr<TypeArrayAttr, "{}">:$output_types,
5694    DefaultValuedAttr<TF_ShapeAttrArray, "{}">:$output_shapes
5695  );
5696
5697  let results = (outs
5698    Res<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorAlloc]>:$resource_handle
5699  );
5700}
5701
5702def TF_IteratorFromStringHandleV2Op : TF_Op<"IteratorFromStringHandleV2", []> {
5703  let summary = "";
5704
5705  let arguments = (ins
5706    TF_StrTensor:$string_handle,
5707
5708    DefaultValuedAttr<TypeArrayAttr, "{}">:$output_types,
5709    DefaultValuedAttr<TF_ShapeAttrArray, "{}">:$output_shapes
5710  );
5711
5712  let results = (outs
5713    Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$resource_handle
5714  );
5715}
5716
5717def TF_IteratorGetNextOp : TF_Op<"IteratorGetNext", []> {
5718  let summary = "Gets the next output from the given iterator .";
5719
5720  let arguments = (ins
5721    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$iterator
5722  );
5723
5724  let results = (outs
5725    Variadic<TF_Tensor>:$components
5726  );
5727
5728  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
5729  TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
5730}
5731
5732def TF_IteratorGetNextAsOptionalOp : TF_Op<"IteratorGetNextAsOptional", []> {
5733  let summary = [{
5734Gets the next output from the given iterator as an Optional variant.
5735  }];
5736
5737  let arguments = (ins
5738    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$iterator,
5739
5740    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
5741    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
5742  );
5743
5744  let results = (outs
5745    TF_VariantTensor:$optional
5746  );
5747}
5748
5749def TF_IteratorGetNextSyncOp : TF_Op<"IteratorGetNextSync", []> {
5750  let summary = "Gets the next output from the given iterator.";
5751
5752  let description = [{
This operation is a synchronous version of IteratorGetNext. It should only be used
5754in situations where the iterator does not block the calling thread, or where
5755the calling thread is not a member of the thread pool used to execute parallel
5756operations (e.g. in eager mode).
5757  }];
5758
5759  let arguments = (ins
5760    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$iterator
5761  );
5762
5763  let results = (outs
5764    Variadic<TF_Tensor>:$components
5765  );
5766
5767  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
5768  TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
5769}
5770
5771def TF_IteratorToStringHandleOp : TF_Op<"IteratorToStringHandle", []> {
5772  let summary = [{
5773Converts the given `resource_handle` representing an iterator to a string.
5774  }];
5775
5776  let arguments = (ins
5777    Arg<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorRead]>:$resource_handle
5778  );
5779
5780  let results = (outs
5781    Res<TF_StrTensor, [{A string representation of the given handle.}]>:$string_handle
5782  );
5783}
5784
5785def TF_IteratorV2Op : TF_Op<"IteratorV2", []> {
5786  let summary = "";
5787
5788  let arguments = (ins
5789    StrAttr:$shared_name,
5790    StrAttr:$container,
5791    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
5792    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
5793  );
5794
5795  let results = (outs
5796    Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle
5797  );
5798}
5799
5800def TF_KthOrderStatisticOp : TF_Op<"KthOrderStatistic", [NoSideEffect]> {
5801  let summary = "Computes the Kth order statistic of a data set. The current";
5802
5803  let description = [{
The current implementation uses a binary search requiring exactly 32 passes
over the input data. The running time is linear with respect to input
size. The median-of-medians algorithm is probably faster, but is
difficult to implement efficiently in XLA. The implementation imposes
a total ordering on floats. The ordering is consistent with the usual
partial order. Positive NaNs are greater than positive
infinity. Negative NaNs are less than negative infinity. NaNs with
distinct payloads are treated as distinct. Subnormal numbers are
preserved (not flushed to zero). Positive infinity is greater than all
numbers. Negative infinity is less than all numbers. Positive zero is
greater than negative zero. There are fewer than k values greater than
the kth order statistic, and at least k values greater than or equal to
it. For example, for `k = 2` and input `[3, 1, 4, 1, 5]`, the result is 4:
only one value exceeds 4, while two values are greater than or equal to 4.
The semantics are not the same as top_k_unique.
5818  }];
5819
5820  let arguments = (ins
5821    TF_Float32Tensor:$input,
5822
5823    I64Attr:$k
5824  );
5825
5826  let results = (outs
5827    TF_Float32Tensor:$output
5828  );
5829}
5830
5831def TF_L2LossOp : TF_Op<"L2Loss", [NoSideEffect]> {
5832  let summary = "L2 Loss.";
5833
5834  let description = [{
5835Computes half the L2 norm of a tensor without the `sqrt`:
5836
5837    output = sum(t ** 2) / 2
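
For example, via `tf.nn.l2_loss`:

```python
t = tf.constant([1.0, 2.0, 3.0])
tf.nn.l2_loss(t)  # ==> (1 + 4 + 9) / 2 = 7.0
```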
5838  }];
5839
5840  let arguments = (ins
5841    Arg<TF_FloatTensor, [{Typically 2-D, but may have any dimensions.}]>:$t
5842  );
5843
5844  let results = (outs
5845    Res<TF_FloatTensor, [{0-D.}]>:$output
5846  );
5847
5848  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5849}
5850
5851def TF_LRNOp : TF_Op<"LRN", [NoSideEffect]> {
5852  let summary = "Local Response Normalization.";
5853
5854  let description = [{
5855The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
5856dimension), and each vector is normalized independently.  Within a given vector,
5857each component is divided by the weighted, squared sum of inputs within
5858`depth_radius`.  In detail,
5859
5860    sqr_sum[a, b, c, d] =
5861        sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
5862    output = input / (bias + alpha * sqr_sum) ** beta
5863
5864For details, see [Krizhevsky et al., ImageNet classification with deep
5865convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
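
A usage sketch via the Python wrapper `tf.nn.local_response_normalization`
(output values elided; the attribute names mirror this op's):

```python
# Shape [1, 1, 1, 4] in NHWC; normalization runs along the last (depth) axis.
x = tf.constant([[[[1.0, 2.0, 3.0, 4.0]]]])
tf.nn.local_response_normalization(
    x, depth_radius=1, bias=1.0, alpha=1.0, beta=0.5)
```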
5866  }];
5867
5868  let arguments = (ins
5869    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D.}]>:$input,
5870
5871    DefaultValuedAttr<I64Attr, "5">:$depth_radius,
5872    DefaultValuedAttr<F32Attr, "1.0f">:$bias,
5873    DefaultValuedAttr<F32Attr, "1.0f">:$alpha,
5874    DefaultValuedAttr<F32Attr, "0.5f">:$beta
5875  );
5876
5877  let results = (outs
5878    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$output
5879  );
5880
5881  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5882}
5883
5884def TF_LRNGradOp : TF_Op<"LRNGrad", [NoSideEffect]> {
5885  let summary = "Gradients for Local Response Normalization.";
5886
5887  let arguments = (ins
5888    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$input_grads,
5889    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$input_image,
5890    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$output_image,
5891
5892    DefaultValuedAttr<I64Attr, "5">:$depth_radius,
5893    DefaultValuedAttr<F32Attr, "1.0f">:$bias,
5894    DefaultValuedAttr<F32Attr, "1.0f">:$alpha,
5895    DefaultValuedAttr<F32Attr, "0.5f">:$beta
5896  );
5897
5898  let results = (outs
5899    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The gradients for LRN.}]>:$output
5900  );
5901
5902  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5903}
5904
5905def TF_LeakyReluOp : TF_Op<"LeakyRelu", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
5906  let summary = "Computes rectified linear: `max(features, features * alpha)`.";
5907
5908  let arguments = (ins
5909    TF_FloatTensor:$features,
5910
5911    DefaultValuedAttr<F32Attr, "0.2f">:$alpha
5912  );
5913
5914  let results = (outs
5915    TF_FloatTensor:$activations
5916  );
5917
5918  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5919}
5920
5921def TF_LeakyReluGradOp : TF_Op<"LeakyReluGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
5922  let summary = [{
5923Computes rectified linear gradients for a LeakyRelu operation.
5924  }];
5925
5926  let arguments = (ins
5927    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding LeakyRelu operation.}]>:$gradients,
5928    Arg<TF_FloatTensor, [{The features passed as input to the corresponding LeakyRelu operation,
5929OR the outputs of that operation (both work equivalently).}]>:$features,
5930
5931    DefaultValuedAttr<F32Attr, "0.2f">:$alpha
5932  );
5933
5934  let results = (outs
5935    Res<TF_FloatTensor, [{`gradients * (features > 0) + alpha * gradients * (features <= 0)`.}]>:$backprops
5936  );
5937
5938  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5939}
5940
5941def TF_LeftShiftOp : TF_Op<"LeftShift", [NoSideEffect, ResultsBroadcastableShape]>,
5942                     WithBroadcastableBinOpBuilder {
5943  let summary = "Elementwise computes the bitwise left-shift of `x` and `y`.";
5944
5945  let description = [{
If `y` is negative, or greater than or equal to the width of `x` in bits, the
result is implementation defined.
5948
5949Example:
5950
5951```python
5952import tensorflow as tf
5953from tensorflow.python.ops import bitwise_ops
5954import numpy as np
5955dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]
5956
5957for dtype in dtype_list:
5958  lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
5959  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
5960
5961  left_shift_result = bitwise_ops.left_shift(lhs, rhs)
5962
5963  print(left_shift_result)
5964
5965# This will print:
5966# tf.Tensor([ -32   -5 -128    0], shape=(4,), dtype=int8)
5967# tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int16)
5968# tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int32)
5969# tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int64)
5970
5971lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
5972rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
5973bitwise_ops.left_shift(lhs, rhs)
5974# <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>
5975```
5976  }];
5977
5978  let arguments = (ins
5979    TF_IntTensor:$x,
5980    TF_IntTensor:$y
5981  );
5982
5983  let results = (outs
5984    TF_IntTensor:$z
5985  );
5986
5987  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5988}
5989
5990def TF_LessOp : TF_Op<"Less", [NoSideEffect, ResultsBroadcastableShape]>,
5991                WithBroadcastableCmpOpBuilder {
5992  let summary = "Returns the truth value of (x < y) element-wise.";
5993
5994  let description = [{
5995*NOTE*: `Less` supports broadcasting. More about broadcasting
5996[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
5997
5998Example:
5999
6000```python
6001x = tf.constant([5, 4, 6])
6002y = tf.constant([5])
6003tf.math.less(x, y) ==> [False, True, False]
6004
6005x = tf.constant([5, 4, 6])
6006y = tf.constant([5, 6, 7])
6007tf.math.less(x, y) ==> [False, True, True]
6008```
6009  }];
6010
6011  let arguments = (ins
6012    TF_IntOrFpTensor:$x,
6013    TF_IntOrFpTensor:$y
6014  );
6015
6016  let results = (outs
6017    TF_BoolTensor:$z
6018  );
6019
6020  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6021}
6022
6023def TF_LessEqualOp : TF_Op<"LessEqual", [NoSideEffect, ResultsBroadcastableShape]>,
6024                     WithBroadcastableCmpOpBuilder {
6025  let summary = "Returns the truth value of (x <= y) element-wise.";
6026
6027  let description = [{
6028*NOTE*: `LessEqual` supports broadcasting. More about broadcasting
6029[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
6030
6031Example:
6032
6033```python
6034x = tf.constant([5, 4, 6])
6035y = tf.constant([5])
6036tf.math.less_equal(x, y) ==> [True, True, False]
6037
6038x = tf.constant([5, 4, 6])
6039y = tf.constant([5, 6, 6])
6040tf.math.less_equal(x, y) ==> [True, True, True]
6041```
6042  }];
6043
6044  let arguments = (ins
6045    TF_IntOrFpTensor:$x,
6046    TF_IntOrFpTensor:$y
6047  );
6048
6049  let results = (outs
6050    TF_BoolTensor:$z
6051  );
6052
6053  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6054}
6055
6056def TF_LgammaOp : TF_Op<"Lgamma", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
6057  let summary = [{
6058Computes the log of the absolute value of `Gamma(x)` element-wise.
6059  }];
6060
6061  let description = [{
6062For positive numbers, this function computes log((input - 1)!) for every element in the tensor.
6063  `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539`
6064
6065Example:
6066
6067```python
6068x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6])
6069tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685]
6070```
6071  }];
6072
6073  let arguments = (ins
6074    TF_FloatTensor:$x
6075  );
6076
6077  let results = (outs
6078    TF_FloatTensor:$y
6079  );
6080
6081  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6082}
6083
6084def TF_LinSpaceOp : TF_Op<"LinSpace", [NoSideEffect]> {
6085  let summary = "Generates values in an interval.";
6086
6087  let description = [{
A sequence of `num` evenly-spaced values is generated beginning at `start`.
If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`,
so that the last one is exactly `stop`.
6091
6092For example:
6093
6094```
6095tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
6096```
6097  }];
6098
6099  let arguments = (ins
6100    Arg<TF_FloatTensor, [{0-D tensor. First entry in the range.}]>:$start,
6101    Arg<TF_FloatTensor, [{0-D tensor. Last entry in the range.}]>:$stop,
6102    Arg<TF_I32OrI64Tensor, [{0-D tensor. Number of values to generate.}]>:$num
6103  );
6104
6105  let results = (outs
6106    Res<TF_FloatTensor, [{1-D. The generated values.}]>:$output
6107  );
6108
6109  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6110  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<2>;
6111}
6112
6113def TF_ListDiffOp : TF_Op<"ListDiff", [NoSideEffect]> {
6114  let summary = [{
6115Computes the difference between two lists of numbers or strings.
6116  }];
6117
6118  let description = [{
6119Given a list `x` and a list `y`, this operation returns a list `out` that
6120represents all values that are in `x` but not in `y`. The returned list `out`
6121is sorted in the same order that the numbers appear in `x` (duplicates are
6122preserved). This operation also returns a list `idx` that represents the
6123position of each `out` element in `x`. In other words:
6124
6125`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
6126
6127For example, given this input:
6128
6129```
6130x = [1, 2, 3, 4, 5, 6]
6131y = [1, 3, 5]
6132```
6133
6134This operation would return:
6135
6136```
6137out ==> [2, 4, 6]
6138idx ==> [1, 3, 5]
6139```
6140  }];
6141
6142  let arguments = (ins
6143    Arg<TF_Tensor, [{1-D. Values to keep.}]>:$x,
6144    Arg<TF_Tensor, [{1-D. Values to remove.}]>:$y
6145  );
6146
6147  let results = (outs
6148    Res<TF_Tensor, [{1-D. Values present in `x` but not in `y`.}]>:$out,
6149    Res<TF_I32OrI64Tensor, [{1-D. Positions of `x` values preserved in `out`.}]>:$idx
6150  );
6151
6152  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6153  TF_DerivedResultTypeAttr out_idx = TF_DerivedResultTypeAttr<1>;
6154}
6155
6156def TF_LoadTPUEmbeddingADAMParametersOp : TF_Op<"LoadTPUEmbeddingADAMParameters", [TF_TPUEmbeddingSideEffect]> {
6157  let summary = "Load ADAM embedding parameters.";
6158
6159  let description = [{
6160An op that loads optimization parameters into HBM for embedding. Must be
6161preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6162embedding table configuration. For example, this op is used to install
6163parameters that are loaded from a checkpoint before a training loop is
6164executed.
6165  }];
6166
6167  let arguments = (ins
6168    Arg<TF_Float32Tensor, [{Value of parameters used in the ADAM optimization algorithm.}]>:$parameters,
6169    Arg<TF_Float32Tensor, [{Value of momenta used in the ADAM optimization algorithm.}]>:$momenta,
6170    Arg<TF_Float32Tensor, [{Value of velocities used in the ADAM optimization algorithm.}]>:$velocities,
6171
6172    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6173    StrAttr:$table_name,
6174    I64Attr:$num_shards,
6175    I64Attr:$shard_id,
6176    StrAttr:$config
6177  );
6178
6179  let results = (outs);
6180}
6181
6182def TF_LoadTPUEmbeddingADAMParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingADAMParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6183  let summary = "Load ADAM embedding parameters with debug support.";
6184
6185  let description = [{
6186An op that loads optimization parameters into HBM for embedding. Must be
6187preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6188embedding table configuration. For example, this op is used to install
6189parameters that are loaded from a checkpoint before a training loop is
6190executed.
6191  }];
6192
6193  let arguments = (ins
6194    Arg<TF_Float32Tensor, [{Value of parameters used in the ADAM optimization algorithm.}]>:$parameters,
6195    Arg<TF_Float32Tensor, [{Value of momenta used in the ADAM optimization algorithm.}]>:$momenta,
6196    Arg<TF_Float32Tensor, [{Value of velocities used in the ADAM optimization algorithm.}]>:$velocities,
6197    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the ADAM optimization algorithm.}]>:$gradient_accumulators,
6198
6199    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6200    StrAttr:$table_name,
6201    I64Attr:$num_shards,
6202    I64Attr:$shard_id,
6203    StrAttr:$config
6204  );
6205
6206  let results = (outs);
6207}
6208
6209def TF_LoadTPUEmbeddingAdadeltaParametersOp : TF_Op<"LoadTPUEmbeddingAdadeltaParameters", [TF_TPUEmbeddingSideEffect]> {
6210  let summary = "Load Adadelta embedding parameters.";
6211
6212  let description = [{
6213An op that loads optimization parameters into HBM for embedding. Must be
6214preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6215embedding table configuration. For example, this op is used to install
6216parameters that are loaded from a checkpoint before a training loop is
6217executed.
6218  }];
6219
6220  let arguments = (ins
6221    Arg<TF_Float32Tensor, [{Value of parameters used in the Adadelta optimization algorithm.}]>:$parameters,
6222    Arg<TF_Float32Tensor, [{Value of accumulators used in the Adadelta optimization algorithm.}]>:$accumulators,
6223    Arg<TF_Float32Tensor, [{Value of updates used in the Adadelta optimization algorithm.}]>:$updates,
6224
6225    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6226    StrAttr:$table_name,
6227    I64Attr:$num_shards,
6228    I64Attr:$shard_id,
6229    StrAttr:$config
6230  );
6231
6232  let results = (outs);
6233}
6234
6235def TF_LoadTPUEmbeddingAdadeltaParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingAdadeltaParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6236  let summary = "Load Adadelta parameters with debug support.";
6237
6238  let description = [{
6239An op that loads optimization parameters into HBM for embedding. Must be
6240preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6241embedding table configuration. For example, this op is used to install
6242parameters that are loaded from a checkpoint before a training loop is
6243executed.
6244  }];
6245
6246  let arguments = (ins
6247    Arg<TF_Float32Tensor, [{Value of parameters used in the Adadelta optimization algorithm.}]>:$parameters,
6248    Arg<TF_Float32Tensor, [{Value of accumulators used in the Adadelta optimization algorithm.}]>:$accumulators,
6249    Arg<TF_Float32Tensor, [{Value of updates used in the Adadelta optimization algorithm.}]>:$updates,
6250    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the Adadelta optimization algorithm.}]>:$gradient_accumulators,
6251
6252    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6253    StrAttr:$table_name,
6254    I64Attr:$num_shards,
6255    I64Attr:$shard_id,
6256    StrAttr:$config
6257  );
6258
6259  let results = (outs);
6260}
6261
6262def TF_LoadTPUEmbeddingAdagradParametersOp : TF_Op<"LoadTPUEmbeddingAdagradParameters", [TF_TPUEmbeddingSideEffect]> {
6263  let summary = "Load Adagrad embedding parameters.";
6264
6265  let description = [{
6266An op that loads optimization parameters into HBM for embedding. Must be
6267preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6268embedding table configuration. For example, this op is used to install
6269parameters that are loaded from a checkpoint before a training loop is
6270executed.
6271  }];
6272
6273  let arguments = (ins
6274    Arg<TF_Float32Tensor, [{Value of parameters used in the Adagrad optimization algorithm.}]>:$parameters,
6275    Arg<TF_Float32Tensor, [{Value of accumulators used in the Adagrad optimization algorithm.}]>:$accumulators,
6276
6277    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6278    StrAttr:$table_name,
6279    I64Attr:$num_shards,
6280    I64Attr:$shard_id,
6281    StrAttr:$config
6282  );
6283
6284  let results = (outs);
6285}
6286
6287def TF_LoadTPUEmbeddingAdagradParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingAdagradParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6288  let summary = "Load Adagrad embedding parameters with debug support.";
6289
6290  let description = [{
6291An op that loads optimization parameters into HBM for embedding. Must be
6292preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6293embedding table configuration. For example, this op is used to install
6294parameters that are loaded from a checkpoint before a training loop is
6295executed.
6296  }];
6297
6298  let arguments = (ins
6299    Arg<TF_Float32Tensor, [{Value of parameters used in the Adagrad optimization algorithm.}]>:$parameters,
6300    Arg<TF_Float32Tensor, [{Value of accumulators used in the Adagrad optimization algorithm.}]>:$accumulators,
6301    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the Adagrad optimization algorithm.}]>:$gradient_accumulators,
6302
6303    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6304    StrAttr:$table_name,
6305    I64Attr:$num_shards,
6306    I64Attr:$shard_id,
6307    StrAttr:$config
6308  );
6309
6310  let results = (outs);
6311}
6312
6313def TF_LoadTPUEmbeddingCenteredRMSPropParametersOp : TF_Op<"LoadTPUEmbeddingCenteredRMSPropParameters", [TF_TPUEmbeddingSideEffect]> {
6314  let summary = "Load centered RMSProp embedding parameters.";
6315
6316  let description = [{
6317An op that loads optimization parameters into HBM for embedding. Must be
6318preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6319embedding table configuration. For example, this op is used to install
6320parameters that are loaded from a checkpoint before a training loop is
6321executed.
6322  }];
6323
6324  let arguments = (ins
6325    Arg<TF_Float32Tensor, [{Value of parameters used in the centered RMSProp optimization algorithm.}]>:$parameters,
6326    Arg<TF_Float32Tensor, [{Value of ms used in the centered RMSProp optimization algorithm.}]>:$ms,
6327    Arg<TF_Float32Tensor, [{Value of mom used in the centered RMSProp optimization algorithm.}]>:$mom,
6328    Arg<TF_Float32Tensor, [{Value of mg used in the centered RMSProp optimization algorithm.}]>:$mg,
6329
6330    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6331    StrAttr:$table_name,
6332    I64Attr:$num_shards,
6333    I64Attr:$shard_id,
6334    StrAttr:$config
6335  );
6336
6337  let results = (outs);
6338}
6339
6340def TF_LoadTPUEmbeddingFTRLParametersOp : TF_Op<"LoadTPUEmbeddingFTRLParameters", [TF_TPUEmbeddingSideEffect]> {
6341  let summary = "Load FTRL embedding parameters.";
6342
6343  let description = [{
6344An op that loads optimization parameters into HBM for embedding. Must be
6345preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6346embedding table configuration. For example, this op is used to install
6347parameters that are loaded from a checkpoint before a training loop is
6348executed.
6349  }];
6350
6351  let arguments = (ins
6352    Arg<TF_Float32Tensor, [{Value of parameters used in the FTRL optimization algorithm.}]>:$parameters,
6353    Arg<TF_Float32Tensor, [{Value of accumulators used in the FTRL optimization algorithm.}]>:$accumulators,
6354    Arg<TF_Float32Tensor, [{Value of linears used in the FTRL optimization algorithm.}]>:$linears,
6355
6356    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6357    StrAttr:$table_name,
6358    I64Attr:$num_shards,
6359    I64Attr:$shard_id,
6360    StrAttr:$config
6361  );
6362
6363  let results = (outs);
6364}
6365
6366def TF_LoadTPUEmbeddingFTRLParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingFTRLParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6367  let summary = "Load FTRL embedding parameters with debug support.";
6368
6369  let description = [{
6370An op that loads optimization parameters into HBM for embedding. Must be
6371preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6372embedding table configuration. For example, this op is used to install
6373parameters that are loaded from a checkpoint before a training loop is
6374executed.
6375  }];
6376
6377  let arguments = (ins
6378    Arg<TF_Float32Tensor, [{Value of parameters used in the FTRL optimization algorithm.}]>:$parameters,
6379    Arg<TF_Float32Tensor, [{Value of accumulators used in the FTRL optimization algorithm.}]>:$accumulators,
6380    Arg<TF_Float32Tensor, [{Value of linears used in the FTRL optimization algorithm.}]>:$linears,
6381    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the FTRL optimization algorithm.}]>:$gradient_accumulators,
6382
6383    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6384    StrAttr:$table_name,
6385    I64Attr:$num_shards,
6386    I64Attr:$shard_id,
6387    StrAttr:$config
6388  );
6389
6390  let results = (outs);
6391}
6392
6393def TF_LoadTPUEmbeddingMDLAdagradLightParametersOp : TF_Op<"LoadTPUEmbeddingMDLAdagradLightParameters", [TF_TPUEmbeddingSideEffect]> {
6394  let summary = "Load MDL Adagrad Light embedding parameters.";
6395
6396  let description = [{
6397An op that loads optimization parameters into HBM for embedding. Must be
6398preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6399embedding table configuration. For example, this op is used to install
6400parameters that are loaded from a checkpoint before a training loop is
6401executed.
6402  }];
6403
6404  let arguments = (ins
6405    Arg<TF_Float32Tensor, [{Value of parameters used in the MDL Adagrad Light optimization algorithm.}]>:$parameters,
6406    Arg<TF_Float32Tensor, [{Value of accumulators used in the MDL Adagrad Light optimization algorithm.}]>:$accumulators,
6407    Arg<TF_Float32Tensor, [{Value of weights used in the MDL Adagrad Light optimization algorithm.}]>:$weights,
6408    Arg<TF_Float32Tensor, [{Value of benefits used in the MDL Adagrad Light optimization algorithm.}]>:$benefits,
6409
6410    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6411    StrAttr:$table_name,
6412    I64Attr:$num_shards,
6413    I64Attr:$shard_id,
6414    StrAttr:$config
6415  );
6416
6417  let results = (outs);
6418}
6419
6420def TF_LoadTPUEmbeddingMomentumParametersOp : TF_Op<"LoadTPUEmbeddingMomentumParameters", [TF_TPUEmbeddingSideEffect]> {
6421  let summary = "Load Momentum embedding parameters.";
6422
6423  let description = [{
6424An op that loads optimization parameters into HBM for embedding. Must be
6425preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6426embedding table configuration. For example, this op is used to install
6427parameters that are loaded from a checkpoint before a training loop is
6428executed.
6429  }];
6430
6431  let arguments = (ins
6432    Arg<TF_Float32Tensor, [{Value of parameters used in the Momentum optimization algorithm.}]>:$parameters,
6433    Arg<TF_Float32Tensor, [{Value of momenta used in the Momentum optimization algorithm.}]>:$momenta,
6434
6435    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6436    StrAttr:$table_name,
6437    I64Attr:$num_shards,
6438    I64Attr:$shard_id,
6439    StrAttr:$config
6440  );
6441
6442  let results = (outs);
6443}
6444
6445def TF_LoadTPUEmbeddingMomentumParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingMomentumParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6446  let summary = "Load Momentum embedding parameters with debug support.";
6447
6448  let description = [{
6449An op that loads optimization parameters into HBM for embedding. Must be
6450preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6451embedding table configuration. For example, this op is used to install
6452parameters that are loaded from a checkpoint before a training loop is
6453executed.
6454  }];
6455
6456  let arguments = (ins
6457    Arg<TF_Float32Tensor, [{Value of parameters used in the Momentum optimization algorithm.}]>:$parameters,
6458    Arg<TF_Float32Tensor, [{Value of momenta used in the Momentum optimization algorithm.}]>:$momenta,
6459    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the Momentum optimization algorithm.}]>:$gradient_accumulators,
6460
6461    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6462    StrAttr:$table_name,
6463    I64Attr:$num_shards,
6464    I64Attr:$shard_id,
6465    StrAttr:$config
6466  );
6467
6468  let results = (outs);
6469}
6470
6471def TF_LoadTPUEmbeddingProximalAdagradParametersOp : TF_Op<"LoadTPUEmbeddingProximalAdagradParameters", [TF_TPUEmbeddingSideEffect]> {
6472  let summary = "Load proximal Adagrad embedding parameters.";
6473
6474  let description = [{
6475An op that loads optimization parameters into HBM for embedding. Must be
6476preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6477embedding table configuration. For example, this op is used to install
6478parameters that are loaded from a checkpoint before a training loop is
6479executed.
6480  }];
6481
6482  let arguments = (ins
6483    Arg<TF_Float32Tensor, [{Value of parameters used in the proximal Adagrad optimization algorithm.}]>:$parameters,
6484    Arg<TF_Float32Tensor, [{Value of accumulators used in the proximal Adagrad optimization algorithm.}]>:$accumulators,
6485
6486    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6487    StrAttr:$table_name,
6488    I64Attr:$num_shards,
6489    I64Attr:$shard_id,
6490    StrAttr:$config
6491  );
6492
6493  let results = (outs);
6494}
6495
6496def TF_LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6497  let summary = [{
6498Load proximal Adagrad embedding parameters with debug support.
6499  }];
6500
6501  let description = [{
6502An op that loads optimization parameters into HBM for embedding. Must be
6503preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6504embedding table configuration. For example, this op is used to install
6505parameters that are loaded from a checkpoint before a training loop is
6506executed.
6507  }];
6508
6509  let arguments = (ins
6510    Arg<TF_Float32Tensor, [{Value of parameters used in the proximal Adagrad optimization algorithm.}]>:$parameters,
6511    Arg<TF_Float32Tensor, [{Value of accumulators used in the proximal Adagrad optimization algorithm.}]>:$accumulators,
6512    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the proximal Adagrad optimization algorithm.}]>:$gradient_accumulators,
6513
6514    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6515    StrAttr:$table_name,
6516    I64Attr:$num_shards,
6517    I64Attr:$shard_id,
6518    StrAttr:$config
6519  );
6520
6521  let results = (outs);
6522}
6523
6524def TF_LoadTPUEmbeddingProximalYogiParametersOp : TF_Op<"LoadTPUEmbeddingProximalYogiParameters", [TF_TPUEmbeddingSideEffect]> {
6525  let summary = "";
6526
6527  let arguments = (ins
6528    TF_Float32Tensor:$parameters,
6529    TF_Float32Tensor:$v,
6530    TF_Float32Tensor:$m,
6531
6532    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6533    StrAttr:$table_name,
6534    I64Attr:$num_shards,
6535    I64Attr:$shard_id,
6536    StrAttr:$config
6537  );
6538
6539  let results = (outs);
6540}
6541
6542def TF_LoadTPUEmbeddingProximalYogiParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingProximalYogiParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6543  let summary = "";
6544
6545  let arguments = (ins
6546    TF_Float32Tensor:$parameters,
6547    TF_Float32Tensor:$v,
6548    TF_Float32Tensor:$m,
6549    TF_Float32Tensor:$gradient_accumulators,
6550
6551    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6552    StrAttr:$table_name,
6553    I64Attr:$num_shards,
6554    I64Attr:$shard_id,
6555    StrAttr:$config
6556  );
6557
6558  let results = (outs);
6559}
6560
6561def TF_LoadTPUEmbeddingRMSPropParametersOp : TF_Op<"LoadTPUEmbeddingRMSPropParameters", [TF_TPUEmbeddingSideEffect]> {
6562  let summary = "Load RMSProp embedding parameters.";
6563
6564  let description = [{
6565An op that loads optimization parameters into HBM for embedding. Must be
6566preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6567embedding table configuration. For example, this op is used to install
6568parameters that are loaded from a checkpoint before a training loop is
6569executed.
6570  }];
6571
6572  let arguments = (ins
6573    Arg<TF_Float32Tensor, [{Value of parameters used in the RMSProp optimization algorithm.}]>:$parameters,
6574    Arg<TF_Float32Tensor, [{Value of ms used in the RMSProp optimization algorithm.}]>:$ms,
6575    Arg<TF_Float32Tensor, [{Value of mom used in the RMSProp optimization algorithm.}]>:$mom,
6576
6577    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6578    StrAttr:$table_name,
6579    I64Attr:$num_shards,
6580    I64Attr:$shard_id,
6581    StrAttr:$config
6582  );
6583
6584  let results = (outs);
6585}
6586
6587def TF_LoadTPUEmbeddingRMSPropParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingRMSPropParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6588  let summary = "Load RMSProp embedding parameters with debug support.";
6589
6590  let description = [{
6591An op that loads optimization parameters into HBM for embedding. Must be
6592preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6593embedding table configuration. For example, this op is used to install
6594parameters that are loaded from a checkpoint before a training loop is
6595executed.
6596  }];
6597
6598  let arguments = (ins
6599    Arg<TF_Float32Tensor, [{Value of parameters used in the RMSProp optimization algorithm.}]>:$parameters,
6600    Arg<TF_Float32Tensor, [{Value of ms used in the RMSProp optimization algorithm.}]>:$ms,
6601    Arg<TF_Float32Tensor, [{Value of mom used in the RMSProp optimization algorithm.}]>:$mom,
6602    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the RMSProp optimization algorithm.}]>:$gradient_accumulators,
6603
6604    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6605    StrAttr:$table_name,
6606    I64Attr:$num_shards,
6607    I64Attr:$shard_id,
6608    StrAttr:$config
6609  );
6610
6611  let results = (outs);
6612}
6613
6614def TF_LoadTPUEmbeddingStochasticGradientDescentParametersOp : TF_Op<"LoadTPUEmbeddingStochasticGradientDescentParameters", [TF_TPUEmbeddingSideEffect]> {
6615  let summary = "Load SGD embedding parameters.";
6616
6617  let description = [{
6618An op that loads optimization parameters into HBM for embedding. Must be
6619preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6620embedding table configuration. For example, this op is used to install
6621parameters that are loaded from a checkpoint before a training loop is
6622executed.
6623  }];
6624
6625  let arguments = (ins
6626    Arg<TF_Float32Tensor, [{Value of parameters used in the stochastic gradient descent optimization algorithm.}]>:$parameters,
6627
6628    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6629    StrAttr:$table_name,
6630    I64Attr:$num_shards,
6631    I64Attr:$shard_id,
6632    StrAttr:$config
6633  );
6634
6635  let results = (outs);
6636}
6637
6638def TF_LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6639  let summary = "Load SGD embedding parameters.";
6640
6641  let description = [{
6642An op that loads optimization parameters into HBM for embedding. Must be
6643preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6644embedding table configuration. For example, this op is used to install
6645parameters that are loaded from a checkpoint before a training loop is
6646executed.
6647  }];
6648
6649  let arguments = (ins
6650    Arg<TF_Float32Tensor, [{Value of parameters used in the stochastic gradient descent optimization algorithm.}]>:$parameters,
    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the stochastic gradient descent optimization algorithm.}]>:$gradient_accumulators,
6652
6653    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6654    StrAttr:$table_name,
6655    I64Attr:$num_shards,
6656    I64Attr:$shard_id,
6657    StrAttr:$config
6658  );
6659
6660  let results = (outs);
6661}
6662
6663def TF_LogOp : TF_Op<"Log", [NoSideEffect, SameOperandsAndResultType]> {
6664  let summary = "Computes natural logarithm of x element-wise.";
6665
6666  let description = [{
6667I.e., \\(y = \log_e x\\).
6668
6669Example:
6670
6671```python
6672x = tf.constant([0, 0.5, 1, 5])
6673tf.math.log(x) ==> [-inf, -0.6931472,  0. ,  1.609438]
6674```
6675  }];
6676
6677  let arguments = (ins
6678    TF_FpOrComplexTensor:$x
6679  );
6680
6681  let results = (outs
6682    TF_FpOrComplexTensor:$y
6683  );
6684
6685  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6686}
6687
6688def TF_Log1pOp : TF_Op<"Log1p", [NoSideEffect, SameOperandsAndResultType, TF_CwiseUnary]> {
6689  let summary = "Computes natural logarithm of (1 + x) element-wise.";
6690
6691  let description = [{
6692I.e., \\(y = \log_e (1 + x)\\).
6693
6694Example:
6695
6696```python
6697x = tf.constant([0, 0.5, 1, 5])
6698tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595]
6699```
6700  }];
6701
6702  let arguments = (ins
6703    TF_FpOrComplexTensor:$x
6704  );
6705
6706  let results = (outs
6707    TF_FpOrComplexTensor:$y
6708  );
6709
6710  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6711}
6712
6713def TF_LogSoftmaxOp : TF_Op<"LogSoftmax", [NoSideEffect, SameOperandsAndResultType]> {
6714  let summary = "Computes log softmax activations.";
6715
6716  let description = [{
6717For each batch `i` and class `j` we have
6718
6719    logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
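
Example, using the Python wrapper `tf.nn.log_softmax` (illustrative; values
are approximate):

```python
logits = tf.constant([[1.0, 2.0, 3.0]])
tf.nn.log_softmax(logits)  # ==> [[-2.4076059, -1.4076059, -0.4076059]]
```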
6720  }];
6721
6722  let arguments = (ins
6723    Arg<TF_FloatTensor, [{2-D with shape `[batch_size, num_classes]`.}]>:$logits
6724  );
6725
6726  let results = (outs
6727    Res<TF_FloatTensor, [{Same shape as `logits`.}]>:$logsoftmax
6728  );
6729
6730  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6731}
6732
6733def TF_LogicalAndOp : TF_Op<"LogicalAnd", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
6734                      WithBroadcastableBinOpBuilder {
6735  let summary = "Returns the truth value of x AND y element-wise.";
6736
6737  let description = [{
6738*NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
6739[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
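
Example (illustrative):

```python
x = tf.constant([True, True, False])
y = tf.constant([True, False, False])
tf.math.logical_and(x, y)  # ==> [True, False, False]
```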
6740  }];
6741
6742  let arguments = (ins
6743    TF_BoolTensor:$x,
6744    TF_BoolTensor:$y
6745  );
6746
6747  let results = (outs
6748    TF_BoolTensor:$z
6749  );
6750}
6751
6752def TF_LogicalNotOp : TF_Op<"LogicalNot", [Involution, NoSideEffect, SameOperandsAndResultType]> {
6753  let summary = "Returns the truth value of `NOT x` element-wise.";
6754
6755  let arguments = (ins
6756    Arg<TF_BoolTensor, [{A `Tensor` of type `bool`.}]>:$x
6757  );
6758
6759  let results = (outs
6760    Res<TF_BoolTensor, [{A `Tensor` of type `bool` with the same shape as `x`. The logical negation of `x`.}]>:$y
6761  );
6762}
6763
6764def TF_LogicalOrOp : TF_Op<"LogicalOr", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
6765                     WithBroadcastableBinOpBuilder {
6766  let summary = "Returns the truth value of x OR y element-wise.";
6767
6768  let description = [{
6769*NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
6770[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
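
Example (illustrative):

```python
x = tf.constant([True, True, False])
y = tf.constant([True, False, False])
tf.math.logical_or(x, y)  # ==> [True, True, False]
```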
6771  }];
6772
6773  let arguments = (ins
6774    TF_BoolTensor:$x,
6775    TF_BoolTensor:$y
6776  );
6777
6778  let results = (outs
6779    TF_BoolTensor:$z
6780  );
6781}
6782
6783def TF_LookupTableExportV2Op : TF_Op<"LookupTableExportV2", []> {
6784  let summary = "Outputs all keys and values in the table.";
6785
6786  let arguments = (ins
6787    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableRead]>:$table_handle
6788  );
6789
6790  let results = (outs
6791    Res<TF_Tensor, [{Vector of all keys present in the table.}]>:$keys,
6792    Res<TF_Tensor, [{Tensor of all values in the table. Indexed in parallel with `keys`.}]>:$values
6793  );
6794
6795  TF_DerivedResultTypeAttr Tkeys = TF_DerivedResultTypeAttr<0>;
6796  TF_DerivedResultTypeAttr Tvalues = TF_DerivedResultTypeAttr<1>;
6797}
6798
6799def TF_LookupTableFindV2Op : TF_Op<"LookupTableFindV2", []> {
6800  let summary = "Looks up keys in a table, outputs the corresponding values.";
6801
6802  let description = [{
The tensor `keys` must be of the same type as the keys of the table.
6804The output `values` is of the type of the table values.
6805
6806The scalar `default_value` is the value output for keys not present in the
6807table. It must also be of the same type as the table values.
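
For example, using the higher-level `tf.lookup` API, whose `lookup` method
lowers to this op (illustrative):

```python
keys = tf.constant(["a", "b"])
values = tf.constant([1, 2], dtype=tf.int64)
table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(keys, values), default_value=-1)
table.lookup(tf.constant(["a", "x"]))  # ==> [1, -1]
```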
6808  }];
6809
6810  let arguments = (ins
6811    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableRead]>:$table_handle,
6812    Arg<TF_Tensor, [{Any shape.  Keys to look up.}]>:$keys,
6813    TF_Tensor:$default_value
6814  );
6815
6816  let results = (outs
    Res<TF_Tensor, [{Same shape as `keys`.  Values found in the table, or `default_value`
for missing keys.}]>:$values
6819  );
6820
6821  TF_DerivedOperandTypeAttr Tin = TF_DerivedOperandTypeAttr<1>;
6822  TF_DerivedOperandTypeAttr Tout = TF_DerivedOperandTypeAttr<2>;
6823}
6824
6825def TF_LookupTableImportV2Op : TF_Op<"LookupTableImportV2", []> {
6826  let summary = [{
6827Replaces the contents of the table with the specified keys and values.
6828  }];
6829
6830  let description = [{
6831The tensor `keys` must be of the same type as the keys of the table.
6832The tensor `values` must be of the type of the table values.
6833  }];
6834
6835  let arguments = (ins
6836    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableWrite]>:$table_handle,
6837    Arg<TF_Tensor, [{Any shape.  Keys to look up.}]>:$keys,
6838    Arg<TF_Tensor, [{Values to associate with keys.}]>:$values
6839  );
6840
6841  let results = (outs);
6842
6843  TF_DerivedOperandTypeAttr Tin = TF_DerivedOperandTypeAttr<1>;
6844  TF_DerivedOperandTypeAttr Tout = TF_DerivedOperandTypeAttr<2>;
6845}
6846
6847def TF_LookupTableInsertV2Op : TF_Op<"LookupTableInsertV2", []> {
6848  let summary = "Updates the table to associates keys with values.";
6849
6850  let description = [{
6851The tensor `keys` must be of the same type as the keys of the table.
6852The tensor `values` must be of the type of the table values.
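
For example, using `tf.lookup.experimental.MutableHashTable`, whose `insert`
method lowers to this op (illustrative):

```python
table = tf.lookup.experimental.MutableHashTable(
    key_dtype=tf.string, value_dtype=tf.int64, default_value=-1)
table.insert(tf.constant(["a", "b"]), tf.constant([1, 2], dtype=tf.int64))
table.lookup(tf.constant(["b"]))  # ==> [2]
```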
6853  }];
6854
6855  let arguments = (ins
6856    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableWrite]>:$table_handle,
6857    Arg<TF_Tensor, [{Any shape.  Keys to look up.}]>:$keys,
6858    Arg<TF_Tensor, [{Values to associate with keys.}]>:$values
6859  );
6860
6861  let results = (outs);
6862
6863  TF_DerivedOperandTypeAttr Tin = TF_DerivedOperandTypeAttr<1>;
6864  TF_DerivedOperandTypeAttr Tout = TF_DerivedOperandTypeAttr<2>;
6865}
6866
6867def TF_LookupTableRemoveV2Op : TF_Op<"LookupTableRemoveV2", []> {
6868  let summary = "Removes keys and its associated values from a table.";
6869
6870  let description = [{
The tensor `keys` must be of the same type as the keys of the table. Keys not
6872already in the table are silently ignored.
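
For example, using `tf.lookup.experimental.MutableHashTable`, whose `remove`
method lowers to this op (illustrative):

```python
table = tf.lookup.experimental.MutableHashTable(
    key_dtype=tf.string, value_dtype=tf.int64, default_value=-1)
table.insert(tf.constant(["a", "b"]), tf.constant([1, 2], dtype=tf.int64))
table.remove(tf.constant(["a", "missing"]))  # absent keys are ignored
table.lookup(tf.constant(["a", "b"]))  # ==> [-1, 2]
```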
6873  }];
6874
6875  let arguments = (ins
6876    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableWrite]>:$table_handle,
6877    Arg<TF_Tensor, [{Any shape.  Keys of the elements to remove.}]>:$keys
6878  );
6879
6880  let results = (outs);
6881
6882  TF_DerivedOperandTypeAttr Tin = TF_DerivedOperandTypeAttr<1>;
6883}
6884
6885def TF_LookupTableSizeV2Op : TF_Op<"LookupTableSizeV2", []> {
6886  let summary = "Computes the number of elements in the given table.";
6887
6888  let arguments = (ins
6889    Arg<TF_ResourceTensor, [{Handle to the table.}]>:$table_handle
6890  );
6891
6892  let results = (outs
    Res<TF_Int64Tensor, [{Scalar that contains the number of elements in the table.}]>:$size
6894  );
6895}
6896
6897def TF_LowerBoundOp : TF_Op<"LowerBound", [NoSideEffect]> {
6898  let summary = [{
Applies lower_bound(sorted_inputs, values) along each row.
6900  }];
6901
6902  let description = [{
6903Each set of rows with the same index in (sorted_inputs, values) is treated
6904independently.  The resulting row is the equivalent of calling
6905`np.searchsorted(sorted_inputs, values, side='left')`.
6906
6907The result is not a global index to the entire
6908`Tensor`, but rather just the index in the last dimension.
6909
6910A 2-D example:
6911  sorted_sequence = [[0, 3, 9, 9, 10],
6912                     [1, 2, 3, 4, 5]]
6913  values = [[2, 4, 9],
6914            [0, 2, 6]]
6915
6916  result = LowerBound(sorted_sequence, values)
6917
6918  result == [[1, 2, 2],
6919             [0, 1, 5]]
6920  }];
6921
6922  let arguments = (ins
6923    Arg<TF_Tensor, [{2-D Tensor where each row is ordered.}]>:$sorted_inputs,
    Arg<TF_Tensor, [{2-D Tensor with the same number of rows as `sorted_inputs`. Contains
the values that will be searched for in `sorted_inputs`.}]>:$values
6926  );
6927
6928  let results = (outs
6929    Res<TF_I32OrI64Tensor, [{A `Tensor` with the same shape as `values`.  It contains the first scalar index
6930into the last dimension where values can be inserted without changing the
6931ordered property.}]>:$output
6932  );
6933
6934  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6935  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
6936}
6937
6938def TF_MakeIteratorOp : TF_Op<"MakeIterator", []> {
6939  let summary = [{
6940Makes a new iterator from the given `dataset` and stores it in `iterator`.
6941  }];
6942
6943  let description = [{
6944This operation may be executed multiple times. Each execution will reset the
6945iterator in `iterator` to the first element of `dataset`.
6946  }];
6947
6948  let arguments = (ins
6949    TF_VariantTensor:$dataset,
6950    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorWrite]>:$iterator
6951  );
6952
6953  let results = (outs);
6954}
6955
6956def TF_MakeUniqueOp : TF_Op<"MakeUnique", [NoSideEffect]> {
  let summary = [{
Make all elements in the non-Batch dimension unique, but \"close\" to their
initial value.
  }];

  let description = [{
Never returns a sub-normal number. Never returns zero. The sign of each
input element is always identical to the sign of the corresponding output
element. Behavior for infinite elements is undefined. Behavior for subnormal
elements is undefined.
  }];
6967
6968  let arguments = (ins
6969    TF_Float32Tensor:$input
6970  );
6971
6972  let results = (outs
6973    TF_Float32Tensor:$output
6974  );
6975}
6976
6977def TF_MatMulOp : TF_Op<"MatMul", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
6978  let summary = [{
6979Multiply the matrix "a" by the matrix "b".
6980  }];
6981
6982  let description = [{
The inputs must be two-dimensional matrices and the inner dimension of
"a" (after being transposed if transpose_a is true) must match the
outer dimension of "b" (after being transposed if transpose_b is
true).
6987
6988*Note*: The default kernel implementation for MatMul on GPUs uses
6989cublas.
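
Example, using the Python wrapper `tf.linalg.matmul` (illustrative):

```python
a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([[5.0, 6.0], [7.0, 8.0]])
tf.linalg.matmul(a, b)                    # ==> [[19., 22.], [43., 50.]]
tf.linalg.matmul(a, b, transpose_a=True)  # ==> [[26., 30.], [38., 44.]]
```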
6990  }];
6991
6992  let arguments = (ins
6993    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$a,
6994    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$b,
6995
6996    DefaultValuedAttr<BoolAttr, "false">:$transpose_a,
6997    DefaultValuedAttr<BoolAttr, "false">:$transpose_b
6998  );
6999
7000  let results = (outs
7001    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$product
7002  );
7003
7004  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7005}
7006
7007def TF_MatrixBandPartOp : TF_Op<"MatrixBandPart", [NoSideEffect, TF_AllTypesMatch<["input", "band"]>]> {
7008  let summary = [{
Copy a tensor, setting everything outside a central band in each innermost matrix to zero.
7010  }];
7011
7012  let description = [{
7013The `band` part is computed as follows:
7014Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
7015tensor with the same shape where
7016
7017`band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
7018
7019The indicator function
7020
`in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
                 (num_upper < 0 || (n-m) <= num_upper)`.
7023
7024For example:
7025
7026```
7027# if 'input' is [[ 0,  1,  2, 3]
7028#                [-1,  0,  1, 2]
7029#                [-2, -1,  0, 1]
7030#                [-3, -2, -1, 0]],
7031
7032tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
7033                                       [-1,  0,  1, 2]
7034                                       [ 0, -1,  0, 1]
7035                                       [ 0,  0, -1, 0]],
7036
7037tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
7038                                      [-1,  0,  1, 0]
7039                                      [-2, -1,  0, 1]
7040                                      [ 0, -2, -1, 0]]
7041```
7042
7043Useful special cases:
7044
7045```
7046 tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
7047 tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
7048 tf.matrix_band_part(input, 0, 0) ==> Diagonal.
7049```
7050  }];
7051
7052  let arguments = (ins
7053    Arg<TF_Tensor, [{Rank `k` tensor.}]>:$input,
7054    Arg<TF_I32OrI64Tensor, [{0-D tensor. Number of subdiagonals to keep. If negative, keep entire
7055lower triangle.}]>:$num_lower,
7056    Arg<TF_I32OrI64Tensor, [{0-D tensor. Number of superdiagonals to keep. If negative, keep
7057entire upper triangle.}]>:$num_upper
7058  );
7059
7060  let results = (outs
7061    Res<TF_Tensor, [{Rank `k` tensor of the same shape as input. The extracted banded tensor.}]>:$band
7062  );
7063
7064  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7065  TF_DerivedOperandTypeAttr Tindex = TF_DerivedOperandTypeAttr<1>;
7066
7067  let verifier = [{
7068    return Verify(*this);
7069  }];
7070}
7071
7072def TF_MatrixDiagOp : TF_Op<"MatrixDiag", [NoSideEffect]> {
7073  let summary = [{
Returns a batched diagonal tensor with given batched diagonal values.
7075  }];
7076
7077  let description = [{
7078Given a `diagonal`, this operation returns a tensor with the `diagonal` and
7079everything else padded with zeros. The diagonal is computed as follows:
7080
7081Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
7083
7084`output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
7085
7086For example:
7087
7088```
# 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]] and diagonal.shape = (2, 4)
7092
7093tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
7094                                     [0, 2, 0, 0]
7095                                     [0, 0, 3, 0]
7096                                     [0, 0, 0, 4]],
7097                                    [[5, 0, 0, 0]
7098                                     [0, 6, 0, 0]
7099                                     [0, 0, 7, 0]
7100                                     [0, 0, 0, 8]]]
7101
# which has shape (2, 4, 4)
7103```
7104  }];
7105
7106  let arguments = (ins
7107    Arg<TF_Tensor, [{Rank `k`, where `k >= 1`.}]>:$diagonal
7108  );
7109
7110  let results = (outs
7111    Res<TF_Tensor, [{Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.}]>:$output
7112  );
7113
7114  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7115}
7116
7117def TF_MatrixDiagPartV3Op : TF_Op<"MatrixDiagPartV3", [NoSideEffect]> {
7118  let summary = "Returns the batched diagonal part of a batched tensor.";
7119
7120  let description = [{
7121Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
7122`input`.
7123
7124Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
7125Let `max_diag_len` be the maximum length among all diagonals to be extracted,
7126`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
7127Let `num_diags` be the number of diagonals to extract,
7128`num_diags = k[1] - k[0] + 1`.
7129
7130If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
7131`[I, J, ..., L, max_diag_len]` and values:
7132
7133```
7134diagonal[i, j, ..., l, n]
7135  = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
7136    padding_value                 ; otherwise.
7137```
7138where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
7139
7140Otherwise, the output tensor has rank `r` with dimensions
7141`[I, J, ..., L, num_diags, max_diag_len]` with values:
7142
7143```
7144diagonal[i, j, ..., l, m, n]
7145  = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
7146    padding_value                 ; otherwise.
7147```
7148where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`.
7149
7150`offset` is zero except when the alignment of the diagonal is to the right.
7151```
7152offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
7153                                           and `d >= 0`) or
7154                                         (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
7155                                           and `d <= 0`)
7156         0                          ; otherwise
7157```
7158where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
7159
7160The input must be at least a matrix.
7161
7162For example:
7163
7164```
7165input = np.array([[[1, 2, 3, 4],  # Input shape: (2, 3, 4)
7166                   [5, 6, 7, 8],
7167                   [9, 8, 7, 6]],
7168                  [[5, 4, 3, 2],
7169                   [1, 2, 3, 4],
7170                   [5, 6, 7, 8]]])
7171
7172# A main diagonal from each batch.
7173tf.matrix_diag_part(input) ==> [[1, 6, 7],  # Output shape: (2, 3)
7174                                [5, 2, 7]]
7175
7176# A superdiagonal from each batch.
7177tf.matrix_diag_part(input, k = 1)
7178  ==> [[2, 7, 6],  # Output shape: (2, 3)
7179       [4, 3, 8]]
7180
7181# A band from each batch.
7182tf.matrix_diag_part(input, k = (-1, 2))
7183  ==> [[[0, 3, 8],  # Output shape: (2, 4, 3)
7184        [2, 7, 6],
7185        [1, 6, 7],
7186        [5, 8, 0]],
7187       [[0, 3, 4],
7188        [4, 3, 8],
7189        [5, 2, 7],
7190        [1, 6, 0]]]
7191
7192# LEFT_RIGHT alignment.
7193tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT")
7194  ==> [[[3, 8, 0],  # Output shape: (2, 4, 3)
7195        [2, 7, 6],
7196        [1, 6, 7],
7197        [0, 5, 8]],
7198       [[3, 4, 0],
7199        [4, 3, 8],
7200        [5, 2, 7],
7201        [0, 1, 6]]]
7202
7203# max_diag_len can be shorter than the main diagonal.
7204tf.matrix_diag_part(input, k = (-2, -1))
7205  ==> [[[5, 8],
7206        [9, 0]],
7207       [[1, 6],
7208        [5, 0]]]
7209
7210# padding_value = 9
7211tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
7212  ==> [[[9, 9, 4],  # Output shape: (2, 3, 3)
7213        [9, 3, 8],
7214        [2, 7, 6]],
7215       [[9, 9, 2],
7216        [9, 3, 4],
7217        [4, 3, 8]]]
7218
7219```
7220  }];
7221
7222  let arguments = (ins
7223    Arg<TF_Tensor, [{Rank `r` tensor where `r >= 2`.}]>:$input,
7224    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
7225diagonal, and negative value means subdiagonals. `k` can be a single integer
7226(for a single diagonal) or a pair of integers specifying the low and high ends
7227of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
7228    Arg<TF_Tensor, [{The value to fill the area outside the specified diagonal band with.
7229Default is 0.}]>:$padding_value,
7230
7231    DefaultValuedAttr<TF_AnyStrAttrOf<["LEFT_RIGHT", "RIGHT_LEFT", "LEFT_LEFT", "RIGHT_RIGHT"]>, "RIGHT_LEFT">:$align
7232  );
7233
7234  let results = (outs
7235    Res<TF_Tensor, [{The extracted diagonal(s).}]>:$diagonal
7236  );
7237
7238  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7239}
7240
7241def TF_MatrixDiagV2Op : TF_Op<"MatrixDiagV2", [NoSideEffect]> {
7242  let summary = [{
7243Returns a batched diagonal tensor with given batched diagonal values.
7244  }];
7245
7246  let description = [{
7247Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
7248diagonals of a matrix, with everything else padded with `padding`. `num_rows`
7249and `num_cols` specify the dimension of the innermost matrix of the output. If
neither is specified, the op assumes the innermost matrix is square and infers
7251its size from `k` and the innermost dimension of `diagonal`. If only one of them
7252is specified, the op assumes the unspecified value is the smallest possible
7253based on other criteria.
7254
7255Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has
7256rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one
7257diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank
7258`r` with shape `[I, J, ..., L, num_rows, num_cols]`.
7259
The second innermost dimension of `diagonal` has double meaning.
When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size
`[I, J, ..., M]`, and the output tensor is:
7263
7264```
7265output[i, j, ..., l, m, n]
7266  = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
7267    padding_value                             ; otherwise
7268```
7269
7270Otherwise, `M` is treated as the number of diagonals for the matrix in the
7271same batch (`M = k[1]-k[0]+1`), and the output tensor is:
7272
7273```
7274output[i, j, ..., l, m, n]
7275  = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
7276    padding_value                                     ; otherwise
7277```
7278where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
7279
7280For example:
7281
7282```
7283# The main diagonal.
7284diagonal = np.array([[1, 2, 3, 4],            # Input shape: (2, 4)
7285                     [5, 6, 7, 8]])
7286tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0],  # Output shape: (2, 4, 4)
7287                               [0, 2, 0, 0],
7288                               [0, 0, 3, 0],
7289                               [0, 0, 0, 4]],
7290                              [[5, 0, 0, 0],
7291                               [0, 6, 0, 0],
7292                               [0, 0, 7, 0],
7293                               [0, 0, 0, 8]]]
7294
7295# A superdiagonal (per batch).
7296diagonal = np.array([[1, 2, 3],  # Input shape: (2, 3)
7297                     [4, 5, 6]])
7298tf.matrix_diag(diagonal, k = 1)
7299  ==> [[[0, 1, 0, 0],  # Output shape: (2, 4, 4)
7300        [0, 0, 2, 0],
7301        [0, 0, 0, 3],
7302        [0, 0, 0, 0]],
7303       [[0, 4, 0, 0],
7304        [0, 0, 5, 0],
7305        [0, 0, 0, 6],
7306        [0, 0, 0, 0]]]
7307
7308# A band of diagonals.
7309diagonals = np.array([[[1, 2, 3],  # Input shape: (2, 2, 3)
7310                       [4, 5, 0]],
7311                      [[6, 7, 9],
7312                       [9, 1, 0]]])
7313tf.matrix_diag(diagonals, k = (-1, 0))
7314  ==> [[[1, 0, 0],  # Output shape: (2, 3, 3)
7315        [4, 2, 0],
7316        [0, 5, 3]],
7317       [[6, 0, 0],
7318        [9, 7, 0],
7319        [0, 1, 9]]]
7320
7321# Rectangular matrix.
7322diagonal = np.array([1, 2])  # Input shape: (2)
7323tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
7324  ==> [[0, 0, 0, 0],  # Output shape: (3, 4)
7325       [1, 0, 0, 0],
7326       [0, 2, 0, 0]]
7327
7328# Rectangular matrix with inferred num_cols and padding_value = 9.
7329tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
7330  ==> [[9, 9],  # Output shape: (3, 2)
7331       [1, 9],
7332       [9, 2]]
7333```
7334  }];
7335
7336  let arguments = (ins
7337    Arg<TF_Tensor, [{Rank `r`, where `r >= 1`}]>:$diagonal,
7338    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
7339diagonal, and negative value means subdiagonals. `k` can be a single integer
7340(for a single diagonal) or a pair of integers specifying the low and high ends
7341of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
7342    Arg<TF_Int32Tensor, [{The number of rows of the output matrix. If it is not provided, the op assumes
7343the output matrix is a square matrix and infers the matrix size from k and the
7344innermost dimension of `diagonal`.}]>:$num_rows,
7345    Arg<TF_Int32Tensor, [{The number of columns of the output matrix. If it is not provided, the op
7346assumes the output matrix is a square matrix and infers the matrix size from
7347k and the innermost dimension of `diagonal`.}]>:$num_cols,
7348    Arg<TF_Tensor, [{The number to fill the area outside the specified diagonal band with.
7349Default is 0.}]>:$padding_value
7350  );
7351
7352  let results = (outs
7353    Res<TF_Tensor, [{Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.}]>:$output
7354  );
7355
7356  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7357}
7358
7359def TF_MatrixDiagV3Op : TF_Op<"MatrixDiagV3", [NoSideEffect]> {
7360  let summary = [{
7361Returns a batched diagonal tensor with given batched diagonal values.
7362  }];
7363
7364  let description = [{
7365Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
7366diagonals of a matrix, with everything else padded with `padding`. `num_rows`
7367and `num_cols` specify the dimension of the innermost matrix of the output. If
neither is specified, the op assumes the innermost matrix is square and infers
7369its size from `k` and the innermost dimension of `diagonal`. If only one of them
7370is specified, the op assumes the unspecified value is the smallest possible
7371based on other criteria.
7372
7373Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has
7374rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one
7375diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank
7376`r` with shape `[I, J, ..., L, num_rows, num_cols]`.
7377
The second innermost dimension of `diagonal` has double meaning.
When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size
`[I, J, ..., M]`, and the output tensor is:
7381
7382```
7383output[i, j, ..., l, m, n]
7384  = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
7385    padding_value                             ; otherwise
7386```
7387
7388Otherwise, `M` is treated as the number of diagonals for the matrix in the
7389same batch (`M = k[1]-k[0]+1`), and the output tensor is:
7390
7391```
7392output[i, j, ..., l, m, n]
7393  = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
7394    padding_value                                     ; otherwise
7395```
where `d = n - m`, `diag_index = k[1] - d`, and
`index_in_diag = n - max(d, 0) + offset`.
7398
7399`offset` is zero except when the alignment of the diagonal is to the right.
7400```
7401offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
7402                                           and `d >= 0`) or
7403                                         (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
7404                                           and `d <= 0`)
7405         0                          ; otherwise
7406```
7407where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
7408
7409For example:
7410
7411```
7412# The main diagonal.
7413diagonal = np.array([[1, 2, 3, 4],            # Input shape: (2, 4)
7414                     [5, 6, 7, 8]])
7415tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0],  # Output shape: (2, 4, 4)
7416                               [0, 2, 0, 0],
7417                               [0, 0, 3, 0],
7418                               [0, 0, 0, 4]],
7419                              [[5, 0, 0, 0],
7420                               [0, 6, 0, 0],
7421                               [0, 0, 7, 0],
7422                               [0, 0, 0, 8]]]
7423
7424# A superdiagonal (per batch).
7425diagonal = np.array([[1, 2, 3],  # Input shape: (2, 3)
7426                     [4, 5, 6]])
7427tf.matrix_diag(diagonal, k = 1)
7428  ==> [[[0, 1, 0, 0],  # Output shape: (2, 4, 4)
7429        [0, 0, 2, 0],
7430        [0, 0, 0, 3],
7431        [0, 0, 0, 0]],
7432       [[0, 4, 0, 0],
7433        [0, 0, 5, 0],
7434        [0, 0, 0, 6],
7435        [0, 0, 0, 0]]]
7436
7437# A tridiagonal band (per batch).
7438diagonals = np.array([[[0, 8, 9],  # Input shape: (2, 2, 3)
7439                       [1, 2, 3],
7440                       [4, 5, 0]],
7441                      [[0, 2, 3],
7442                       [6, 7, 9],
7443                       [9, 1, 0]]])
7444tf.matrix_diag(diagonals, k = (-1, 1))
7445  ==> [[[1, 8, 0],  # Output shape: (2, 3, 3)
7446        [4, 2, 9],
7447        [0, 5, 3]],
7448       [[6, 2, 0],
7449        [9, 7, 3],
7450        [0, 1, 9]]]
7451
7452# LEFT_RIGHT alignment.
7453diagonals = np.array([[[8, 9, 0],  # Input shape: (2, 2, 3)
7454                       [1, 2, 3],
7455                       [0, 4, 5]],
7456                      [[2, 3, 0],
7457                       [6, 7, 9],
7458                       [0, 9, 1]]])
7459tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT")
7460  ==> [[[1, 8, 0],  # Output shape: (2, 3, 3)
7461        [4, 2, 9],
7462        [0, 5, 3]],
7463       [[6, 2, 0],
7464        [9, 7, 3],
7465        [0, 1, 9]]]
7466
7467# Rectangular matrix.
7468diagonal = np.array([1, 2])  # Input shape: (2)
7469tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
7470  ==> [[0, 0, 0, 0],  # Output shape: (3, 4)
7471       [1, 0, 0, 0],
7472       [0, 2, 0, 0]]
7473
7474# Rectangular matrix with inferred num_cols and padding_value = 9.
7475tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
7476  ==> [[9, 9],  # Output shape: (3, 2)
7477       [1, 9],
7478       [9, 2]]
7479
7480```
7481  }];
7482
7483  let arguments = (ins
7484    Arg<TF_Tensor, [{Rank `r`, where `r >= 1`}]>:$diagonal,
7485    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
7486diagonal, and negative value means subdiagonals. `k` can be a single integer
7487(for a single diagonal) or a pair of integers specifying the low and high ends
7488of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
7489    Arg<TF_Int32Tensor, [{The number of rows of the output matrix. If it is not provided, the op assumes
7490the output matrix is a square matrix and infers the matrix size from k and the
7491innermost dimension of `diagonal`.}]>:$num_rows,
7492    Arg<TF_Int32Tensor, [{The number of columns of the output matrix. If it is not provided, the op
7493assumes the output matrix is a square matrix and infers the matrix size from
7494k and the innermost dimension of `diagonal`.}]>:$num_cols,
7495    Arg<TF_Tensor, [{The number to fill the area outside the specified diagonal band with.
7496Default is 0.}]>:$padding_value,
7497
7498    DefaultValuedAttr<TF_AnyStrAttrOf<["LEFT_RIGHT", "RIGHT_LEFT", "LEFT_LEFT", "RIGHT_RIGHT"]>, "RIGHT_LEFT">:$align
7499  );
7500
7501  let results = (outs
7502    Res<TF_Tensor, [{Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.}]>:$output
7503  );
7504
7505  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7506}
7507
7508def TF_MatrixInverseOp : TF_Op<"MatrixInverse", [NoSideEffect]> {
7509  let summary = [{
7510Computes the inverse of one or more square invertible matrices or their adjoints (conjugate transposes).
7511  }];
7512
7513  let description = [{
7514The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
7515form square matrices. The output is a tensor of the same shape as the input
7516containing the inverse for all input submatrices `[..., :, :]`.
7517
7518The op uses LU decomposition with partial pivoting to compute the inverses.
7519
7520If a matrix is not invertible there is no guarantee what the op does. It
7521may detect the condition and raise an exception or it may simply return a
7522garbage result.
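
As an illustration, a minimal sketch using the public `tf.linalg.inv` wrapper
(assumed to lower to this op); the values shown are illustrative:

```
x = tf.constant([[1., 2.],
                 [3., 4.]])
tf.linalg.inv(x)
# ==> [[-2. ,  1. ],
#      [ 1.5, -0.5]]
```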
7523  }];
7524
7525  let arguments = (ins
7526    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$input,
7527
7528    DefaultValuedAttr<BoolAttr, "false">:$adjoint
7529  );
7530
7531  let results = (outs
7532    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.
7533
7534@compatibility(numpy)
7535Equivalent to np.linalg.inv
7536@end_compatibility}]>:$output
7537  );
7538
7539  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7540}
7541
7542def TF_MatrixSetDiagOp : TF_Op<"MatrixSetDiag", [NoSideEffect]> {
7543  let summary = [{
7544Returns a batched matrix tensor with new batched diagonal values.
7545  }];
7546
7547  let description = [{
7548Given `input` and `diagonal`, this operation returns a tensor with the
7549same shape and values as `input`, except for the main diagonal of the
7550innermost matrices.  These will be overwritten by the values in `diagonal`.
7551
7552The output is computed as follows:
7553
7554Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
7555`k` dimensions `[I, J, K, ..., min(M, N)]`.  Then the output is a
7556tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
7557
7558  * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
7559  * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
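
For instance, a small sketch with the public `tf.linalg.set_diag` wrapper,
which is assumed to lower to this op:

```
input = tf.fill([3, 4], 7)
diagonal = tf.constant([1, 2, 3])
tf.linalg.set_diag(input, diagonal)
# ==> [[1, 7, 7, 7],
#      [7, 2, 7, 7],
#      [7, 7, 3, 7]]
```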
7560  }];
7561
7562  let arguments = (ins
7563    Arg<TF_Tensor, [{Rank `k+1`, where `k >= 1`.}]>:$input,
7564    Arg<TF_Tensor, [{Rank `k`, where `k >= 1`.}]>:$diagonal
7565  );
7566
7567  let results = (outs
7568    Res<TF_Tensor, [{Rank `k+1`, with `output.shape = input.shape`.}]>:$output
7569  );
7570
7571  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7572}
7573
7574def TF_MatrixSetDiagV2Op : TF_Op<"MatrixSetDiagV2", [NoSideEffect]> {
7575  let summary = [{
7576Returns a batched matrix tensor with new batched diagonal values.
7577  }];
7578
7579  let description = [{
7580Given `input` and `diagonal`, this operation returns a tensor with the
7581same shape and values as `input`, except for the specified diagonals of the
7582innermost matrices. These will be overwritten by the values in `diagonal`.
7583
7584`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
7585`k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
7586Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
7587`num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
7588`max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
7589`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
7590
The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
7592If `k` is scalar or `k[0] == k[1]`:
7593
7594```
7595output[i, j, ..., l, m, n]
7596  = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
7597    input[i, j, ..., l, m, n]              ; otherwise
7598```
7599
7600Otherwise,
7601
7602```
7603output[i, j, ..., l, m, n]
7604  = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
7605    input[i, j, ..., l, m, n]                         ; otherwise
7606```
7607where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
7608
7609For example:
7610
7611```
7612# The main diagonal.
7613input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
7614                   [7, 7, 7, 7],
7615                   [7, 7, 7, 7]],
7616                  [[7, 7, 7, 7],
7617                   [7, 7, 7, 7],
7618                   [7, 7, 7, 7]]])
7619diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
7620                     [4, 5, 6]])
tf.matrix_set_diag(input, diagonal)
  ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
        [7, 2, 7, 7],
        [7, 7, 3, 7]],
       [[4, 7, 7, 7],
        [7, 5, 7, 7],
        [7, 7, 6, 7]]]
7627
7628# A superdiagonal (per batch).
tf.matrix_set_diag(input, diagonal, k = 1)
7630  ==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
7631        [7, 7, 2, 7],
7632        [7, 7, 7, 3]],
7633       [[7, 4, 7, 7],
7634        [7, 7, 5, 7],
7635        [7, 7, 7, 6]]]
7636
7637# A band of diagonals.
7638diagonals = np.array([[[1, 2, 3],  # Diagonal shape: (2, 2, 3)
7639                       [4, 5, 0]],
7640                      [[6, 1, 2],
7641                       [3, 4, 0]]])
tf.matrix_set_diag(input, diagonals, k = (-1, 0))
  ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
        [4, 2, 7, 7],
        [7, 5, 3, 7]],
7646       [[6, 7, 7, 7],
7647        [3, 1, 7, 7],
7648        [7, 4, 2, 7]]]
7649
7650```
7651  }];
7652
7653  let arguments = (ins
7654    Arg<TF_Tensor, [{Rank `r+1`, where `r >= 1`.}]>:$input,
    Arg<TF_Tensor, [{Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
`r >= 1`.}]>:$diagonal,
7657    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
7658diagonal, and negative value means subdiagonals. `k` can be a single integer
7659(for a single diagonal) or a pair of integers specifying the low and high ends
7660of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k
7661  );
7662
7663  let results = (outs
7664    Res<TF_Tensor, [{Rank `r+1`, with `output.shape = input.shape`.}]>:$output
7665  );
7666
7667  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7668}
7669
7670def TF_MatrixSetDiagV3Op : TF_Op<"MatrixSetDiagV3", [NoSideEffect]> {
7671  let summary = [{
7672Returns a batched matrix tensor with new batched diagonal values.
7673  }];
7674
7675  let description = [{
7676Given `input` and `diagonal`, this operation returns a tensor with the
7677same shape and values as `input`, except for the specified diagonals of the
7678innermost matrices. These will be overwritten by the values in `diagonal`.
7679
7680`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
7681`k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
7682Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
7683`num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
7684`max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
7685`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
7686
The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
7688If `k` is scalar or `k[0] == k[1]`:
7689
7690```
7691output[i, j, ..., l, m, n]
7692  = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
7693    input[i, j, ..., l, m, n]              ; otherwise
7694```
7695
7696Otherwise,
7697
7698```
7699output[i, j, ..., l, m, n]
7700  = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
7701    input[i, j, ..., l, m, n]                         ; otherwise
7702```
7703where `d = n - m`, `diag_index = k[1] - d`, and
7704`index_in_diag = n - max(d, 0) + offset`.
7705
7706`offset` is zero except when the alignment of the diagonal is to the right.
7707```
7708offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
7709                                           and `d >= 0`) or
7710                                         (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
7711                                           and `d <= 0`)
7712         0                          ; otherwise
7713```
7714where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
7715
7716For example:
7717
7718```
7719# The main diagonal.
7720input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
7721                   [7, 7, 7, 7],
7722                   [7, 7, 7, 7]],
7723                  [[7, 7, 7, 7],
7724                   [7, 7, 7, 7],
7725                   [7, 7, 7, 7]]])
7726diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
7727                     [4, 5, 6]])
7728tf.matrix_set_diag(input, diagonal)
7729  ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
7730        [7, 2, 7, 7],
7731        [7, 7, 3, 7]],
7732       [[4, 7, 7, 7],
7733        [7, 5, 7, 7],
7734        [7, 7, 6, 7]]]
7735
7736# A superdiagonal (per batch).
7737tf.matrix_set_diag(input, diagonal, k = 1)
7738  ==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
7739        [7, 7, 2, 7],
7740        [7, 7, 7, 3]],
7741       [[7, 4, 7, 7],
7742        [7, 7, 5, 7],
7743        [7, 7, 7, 6]]]
7744
7745# A band of diagonals.
7746diagonals = np.array([[[0, 9, 1],  # Diagonal shape: (2, 4, 3)
7747                       [6, 5, 8],
7748                       [1, 2, 3],
7749                       [4, 5, 0]],
7750                      [[0, 1, 2],
7751                       [5, 6, 4],
7752                       [6, 1, 2],
7753                       [3, 4, 0]]])
7754tf.matrix_set_diag(input, diagonals, k = (-1, 2))
7755  ==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
7756        [4, 2, 5, 1],
7757        [7, 5, 3, 8]],
7758       [[6, 5, 1, 7],
7759        [3, 1, 6, 2],
7760        [7, 4, 2, 4]]]
7761
7762# LEFT_RIGHT alignment.
7763diagonals = np.array([[[9, 1, 0],  # Diagonal shape: (2, 4, 3)
7764                       [6, 5, 8],
7765                       [1, 2, 3],
7766                       [0, 4, 5]],
7767                      [[1, 2, 0],
7768                       [5, 6, 4],
7769                       [6, 1, 2],
7770                       [0, 3, 4]]])
7771tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT")
7772  ==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
7773        [4, 2, 5, 1],
7774        [7, 5, 3, 8]],
7775       [[6, 5, 1, 7],
7776        [3, 1, 6, 2],
7777        [7, 4, 2, 4]]]
7778
7779```
7780  }];
7781
7782  let arguments = (ins
7783    Arg<TF_Tensor, [{Rank `r+1`, where `r >= 1`.}]>:$input,
    Arg<TF_Tensor, [{Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
`r >= 1`.}]>:$diagonal,
7786    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
7787diagonal, and negative value means subdiagonals. `k` can be a single integer
7788(for a single diagonal) or a pair of integers specifying the low and high ends
7789of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
7790
7791    DefaultValuedAttr<TF_AnyStrAttrOf<["LEFT_RIGHT", "RIGHT_LEFT", "LEFT_LEFT", "RIGHT_RIGHT"]>, "RIGHT_LEFT">:$align
7792  );
7793
7794  let results = (outs
7795    Res<TF_Tensor, [{Rank `r+1`, with `output.shape = input.shape`.}]>:$output
7796  );
7797
7798  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7799}
7800
7801def TF_MatrixSolveOp : TF_Op<"MatrixSolve", [NoSideEffect]> {
7802  let summary = "Solves systems of linear equations.";
7803
7804  let description = [{
`matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
form square matrices. `rhs` is a tensor of shape `[..., M, K]`. The `output` is
a tensor of shape `[..., M, K]`.  If `adjoint` is `False` then each output matrix
7808satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
7809If `adjoint` is `True` then each output matrix satisfies
7810`adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
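
For instance, assuming the public `tf.linalg.solve` wrapper (which is assumed
to lower to this op); the values shown are illustrative:

```
matrix = tf.constant([[2., 0.],
                      [0., 4.]])
rhs = tf.constant([[2.],
                   [8.]])
tf.linalg.solve(matrix, rhs)  # solves matrix * output = rhs
# ==> [[1.],
#      [2.]]
```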
7811  }];
7812
7813  let arguments = (ins
7814    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$matrix,
7815    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$rhs,
7816
7817    DefaultValuedAttr<BoolAttr, "false">:$adjoint
7818  );
7819
7820  let results = (outs
7821    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$output
7822  );
7823
7824  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7825}
7826
7827def TF_MatrixTriangularSolveOp : TF_Op<"MatrixTriangularSolve", [NoSideEffect]> {
7828  let summary = [{
7829Solves systems of linear equations with upper or lower triangular matrices by backsubstitution.
7830  }];
7831
7832  let description = [{
7833`matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
7834square matrices. If `lower` is `True` then the strictly upper triangular part
7835of each inner-most matrix is assumed to be zero and not accessed.
7836If `lower` is False then the strictly lower triangular part of each inner-most
7837matrix is assumed to be zero and not accessed.
7838`rhs` is a tensor of shape `[..., M, N]`.
7839
The output is a tensor of shape `[..., M, N]`. If `adjoint` is
`False` then the innermost matrices in `output` satisfy matrix equations
`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
If `adjoint` is `True` then the innermost matrices in
`output` satisfy matrix equations
`adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
7846
7847Note, the batch shapes for the inputs only need to broadcast.
7848
7849Example:
7850```python
7851
7852a = tf.constant([[3,  0,  0,  0],
7853                 [2,  1,  0,  0],
7854                 [1,  0,  1,  0],
7855                 [1,  1,  1,  1]], dtype=tf.float32)
7856
7857b = tf.constant([[4],
7858                 [2],
7859                 [4],
7860                 [2]], dtype=tf.float32)
7861
7862x = tf.linalg.triangular_solve(a, b, lower=True)
7863x
7864# <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
7865# array([[ 1.3333334 ],
7866#        [-0.66666675],
7867#        [ 2.6666665 ],
7868#        [-1.3333331 ]], dtype=float32)>
7869
7870# in python3 one can use `a@x`
7871tf.matmul(a, x)
7872# <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
7873# array([[4.       ],
7874#        [2.       ],
7875#        [4.       ],
7876#        [1.9999999]], dtype=float32)>
7877```
7878  }];
7879
7880  let arguments = (ins
7881    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$matrix,
7882    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$rhs,
7883
7884    DefaultValuedAttr<BoolAttr, "true">:$lower,
7885    DefaultValuedAttr<BoolAttr, "false">:$adjoint
7886  );
7887
7888  let results = (outs
7889    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$output
7890  );
7891
7892  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7893}
7894
7895def TF_MaxOp : TF_Op<"Max", [NoSideEffect]> {
7896  let summary = [{
7897Computes the maximum of elements across dimensions of a tensor.
7898  }];
7899
7900  let description = [{
7901Reduces `input` along the dimensions given in `axis`. Unless
7902`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
7903`axis`. If `keep_dims` is true, the reduced dimensions are
7904retained with length 1.
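
A short sketch via the public `tf.reduce_max` wrapper, assumed to lower to
this op; the values shown are illustrative:

```
x = tf.constant([[1, 5, 3],
                 [4, 2, 6]])
tf.reduce_max(x, axis=1)                 # ==> [5, 6]
tf.reduce_max(x, axis=1, keepdims=True)  # ==> [[5], [6]]
```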
7905  }];
7906
7907  let arguments = (ins
7908    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
7909    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
7910`[-rank(input), rank(input))`.}]>:$reduction_indices,
7911
7912    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
7913  );
7914
7915  let results = (outs
7916    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
7917  );
7918
7919  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7920  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
7921
7922  let builders = [
7923    OpBuilder<(ins "Value":$input, "Value":$reduction_indices,
7924      "BoolAttr":$keep_dims)>
7925  ];
7926}
7927
7928def TF_MaxPoolOp : TF_Op<"MaxPool", [NoSideEffect]> {
7929  let summary = "Performs max pooling on the input.";
7930
7931  let arguments = (ins
7932    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{4-D input to pool over.}]>:$input,
7933
7934    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
7935    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
7936    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
7937    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
7938    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NCHW_VECT_C"]>, "NHWC">:$data_format
7939  );
7940
7941  let results = (outs
7942    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{The max pooled output tensor.}]>:$output
7943  );
7944
7945  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7946}
7947
7948def TF_MaxPool3DOp : TF_Op<"MaxPool3D", [NoSideEffect]> {
7949  let summary = "Performs 3D max pooling on the input.";
7950
7951  let arguments = (ins
7952    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{Shape `[batch, depth, rows, cols, channels]` tensor to pool over.}]>:$input,
7953
7954    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
7955    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
7956    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
7957    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format
7958  );
7959
7960  let results = (outs
7961    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The max pooled output tensor.}]>:$output
7962  );
7963
7964  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7965}
7966
7967def TF_MaxPool3DGradOp : TF_Op<"MaxPool3DGrad", [NoSideEffect]> {
7968  let summary = "Computes gradients of 3D max pooling function.";
7969
7970  let arguments = (ins
7971    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The original input tensor.}]>:$orig_input,
7972    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The original output tensor.}]>:$orig_output,
7973    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{Output backprop of shape `[batch, depth, rows, cols, channels]`.}]>:$grad,
7974
7975    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
7976    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
7977    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
7978    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format
7979  );
7980
7981  let results = (outs
7982    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$output
7983  );
7984
7985  TF_DerivedOperandTypeAttr TInput = TF_DerivedOperandTypeAttr<0>;
7986  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
7987}
7988
7989def TF_MaxPool3DGradGradOp : TF_Op<"MaxPool3DGradGrad", [NoSideEffect]> {
7990  let summary = "Computes second-order gradients of the maxpooling function.";
7991
7992  let arguments = (ins
7993    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
7994    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
7995    Arg<TF_IntOrFpTensor, [{Output backprop of shape `[batch, depth, rows, cols, channels]`.}]>:$grad,
7996
7997    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
7998    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
7999    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8000    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format
8001  );
8002
8003  let results = (outs
8004    Res<TF_IntOrFpTensor, [{Gradients of gradients w.r.t. the input to `max_pool`.}]>:$output
8005  );
8006
8007  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8008}
8009
8010def TF_MaxPoolGradOp : TF_Op<"MaxPoolGrad", [NoSideEffect]> {
8011  let summary = "Computes gradients of the maxpooling function.";
8012
8013  let arguments = (ins
8014    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
8015    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
8016    Arg<TF_IntOrFpTensor, [{4-D.  Gradients w.r.t. the output of `max_pool`.}]>:$grad,
8017
8018    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
8019    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
8020    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
8021    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
8022    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
8023  );
8024
8025  let results = (outs
8026    Res<TF_IntOrFpTensor, [{Gradients w.r.t. the input to `max_pool`.}]>:$output
8027  );
8028
8029  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8030
8031  let verifier = [{
8032    return Verify(*this);
8033  }];
8034}
8035
8036def TF_MaxPoolGradGradOp : TF_Op<"MaxPoolGradGrad", [NoSideEffect]> {
8037  let summary = "Computes second-order gradients of the maxpooling function.";
8038
8039  let arguments = (ins
8040    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
8041    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
8042    Arg<TF_IntOrFpTensor, [{4-D.  Gradients of gradients w.r.t. the input of `max_pool`.}]>:$grad,
8043
8044    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
8045    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
8046    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8047    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
8048  );
8049
8050  let results = (outs
8051    Res<TF_IntOrFpTensor, [{Gradients of gradients w.r.t. the input to `max_pool`.}]>:$output
8052  );
8053
8054  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8055}
8056
8057def TF_MaxPoolGradGradV2Op : TF_Op<"MaxPoolGradGradV2", [NoSideEffect]> {
8058  let summary = "Computes second-order gradients of the maxpooling function.";
8059
8060  let arguments = (ins
8061    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
8062    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
8063    Arg<TF_IntOrFpTensor, [{4-D.  Gradients of gradients w.r.t. the input of `max_pool`.}]>:$grad,
8064    Arg<TF_Int32Tensor, [{The size of the window for each dimension of the input tensor.}]>:$ksize,
8065    Arg<TF_Int32Tensor, [{The stride of the sliding window for each dimension of the
8066input tensor.}]>:$strides,
8067
8068    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8069    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
8070  );
8071
8072  let results = (outs
8073    Res<TF_IntOrFpTensor, [{Gradients of gradients w.r.t. the input to `max_pool`.}]>:$output
8074  );
8075
8076  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8077}
8078
8079def TF_MaxPoolGradV2Op : TF_Op<"MaxPoolGradV2", [NoSideEffect]> {
8080  let summary = "Computes gradients of the maxpooling function.";
8081
8082  let arguments = (ins
8083    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
8084    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
8085    Arg<TF_IntOrFpTensor, [{4-D.  Gradients w.r.t. the output of `max_pool`.}]>:$grad,
8086    Arg<TF_Int32Tensor, [{The size of the window for each dimension of the input tensor.}]>:$ksize,
8087    Arg<TF_Int32Tensor, [{The stride of the sliding window for each dimension of the
8088input tensor.}]>:$strides,
8089
8090    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8091    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
8092  );
8093
8094  let results = (outs
8095    Res<TF_IntOrFpTensor, [{Gradients w.r.t. the input to `max_pool`.}]>:$output
8096  );
8097
8098  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8099}
8100
8101def TF_MaxPoolV2Op : TF_Op<"MaxPoolV2", [NoSideEffect]> {
8102  let summary = "Performs max pooling on the input.";
8103
8104  let arguments = (ins
8105    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{4-D input to pool over.}]>:$input,
8106    Arg<TF_Int32Tensor, [{The size of the window for each dimension of the input tensor.}]>:$ksize,
8107    Arg<TF_Int32Tensor, [{The stride of the sliding window for each dimension of the
8108input tensor.}]>:$strides,
8109
8110    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8111    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NCHW_VECT_C"]>, "NHWC">:$data_format
8112  );
8113
8114  let results = (outs
8115    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{The max pooled output tensor.}]>:$output
8116  );
8117
8118  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8119}
8120
8121def TF_MeanOp : TF_Op<"Mean", [NoSideEffect]> {
8122  let summary = "Computes the mean of elements across dimensions of a tensor.";
8123
8124  let description = [{
8125Reduces `input` along the dimensions given in `axis`. Unless
8126`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
8127`axis`. If `keep_dims` is true, the reduced dimensions are
8128retained with length 1.
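
An illustrative sketch via the public `tf.reduce_mean` wrapper, assumed to
lower to this op:

```
x = tf.constant([[1., 2.],
                 [3., 4.]])
tf.reduce_mean(x)          # ==> 2.5
tf.reduce_mean(x, axis=0)  # ==> [2., 3.]
```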
8129  }];
8130
8131  let arguments = (ins
8132    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
8133    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
8134`[-rank(input), rank(input))`.}]>:$reduction_indices,
8135
8136    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
8137  );
8138
8139  let results = (outs
8140    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
8141  );
8142
8143  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8144  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
8145}
8146
8147def TF_MergeSummaryOp : TF_Op<"MergeSummary", [NoSideEffect, SameOperandsAndResultType]> {
8148  let summary = "Merges summaries.";
8149
8150  let description = [{
8151This op creates a
8152[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
8153protocol buffer that contains the union of all the values in the input
8154summaries.
8155
8156When the Op is run, it reports an `InvalidArgument` error if multiple values
8157in the summaries to merge use the same tag.
8158  }];
8159
8160  let arguments = (ins
8161    Arg<Variadic<TF_StrTensor>, [{Can be of any shape.  Each must contain serialized `Summary` protocol
8162buffers.}]>:$inputs
8163  );
8164
8165  let results = (outs
8166    Res<TF_StrTensor, [{Scalar. Serialized `Summary` protocol buffer.}]>:$summary
8167  );
8168
8169  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
8170}
8171
8172def TF_MergeV2CheckpointsOp : TF_Op<"MergeV2Checkpoints", []> {
8173  let summary = [{
V2 format specific: merges the metadata files of sharded checkpoints.
  }];

  let description = [{
The result is one logical checkpoint, with one physical metadata file and
renamed data files.
8180
8181Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
8182
If `delete_old_dirs` is true, attempts to recursively delete the dirname of
each path in the input `checkpoint_prefixes`.  This is useful when those paths
are non-user-facing temporary locations.
8186  }];
8187
8188  let arguments = (ins
8189    Arg<TF_StrTensor, [{prefixes of V2 checkpoints to merge.}]>:$checkpoint_prefixes,
8190    Arg<TF_StrTensor, [{scalar.  The desired final prefix.  Allowed to be the same
8191as one of the checkpoint_prefixes.}]>:$destination_prefix,
8192
8193    DefaultValuedAttr<BoolAttr, "true">:$delete_old_dirs
8194  );
8195
8196  let results = (outs);
8197}
8198
8199def TF_MinOp : TF_Op<"Min", [NoSideEffect]> {
8200  let summary = [{
8201Computes the minimum of elements across dimensions of a tensor.
8202  }];
8203
8204  let description = [{
8205Reduces `input` along the dimensions given in `axis`. Unless
8206`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
8207`axis`. If `keep_dims` is true, the reduced dimensions are
8208retained with length 1.
8209  }];
8210
8211  let arguments = (ins
8212    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
8213    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
8214`[-rank(input), rank(input))`.}]>:$reduction_indices,
8215
8216    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
8217  );
8218
8219  let results = (outs
8220    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
8221  );
8222
8223  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8224  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
8225}
8226
8227def TF_MinimumOp : TF_Op<"Minimum", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
8228                   WithBroadcastableBinOpBuilder {
8229  let summary = "Returns the min of x and y (i.e. x < y ? x : y) element-wise.";
8230
8231  let description = [{
8232*NOTE*: `Minimum` supports broadcasting. More about broadcasting
8233[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
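
For instance, with the public `tf.minimum` wrapper (assumed to lower to this
op); the values shown are illustrative:

```
x = tf.constant([1., 4., 9.])
y = tf.constant([3.])  # broadcast against every element of `x`
tf.minimum(x, y)       # ==> [1., 3., 3.]
```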
8234  }];
8235
8236  let arguments = (ins
8237    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Uint8]>:$x,
8238    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Uint8]>:$y
8239  );
8240
8241  let results = (outs
8242    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Uint8]>:$z
8243  );
8244
8245  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8246}
8247
8248def TF_MirrorPadOp : TF_Op<"MirrorPad", [NoSideEffect, TF_OperandHasRank<1, 2>]> {
8249  let summary = "Pads a tensor with mirrored values.";
8250
8251  let description = [{
This operation pads `input` with mirrored values according to the `paddings`
you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
how many values to add before the contents of `input` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of `input`
in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
than `input.dim_size(D)` if `copy_border` is true, or no greater than
`input.dim_size(D) - 1` if `copy_border` is false.
8260
8261The padded size of each dimension D of the output is:
8262
8263`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
8264
8265For example:
8266
8267```
8268# 't' is [[1, 2, 3], [4, 5, 6]].
# 'paddings' is [[1, 1], [2, 2]].
8270# 'mode' is SYMMETRIC.
8271# rank of 't' is 2.
8272pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
8273                      [2, 1, 1, 2, 3, 3, 2]
8274                      [5, 4, 4, 5, 6, 6, 5]
8275                      [5, 4, 4, 5, 6, 6, 5]]
8276```
8277  }];
8278
8279  let arguments = (ins
8280    Arg<TF_Tensor, [{The input tensor to be padded.}]>:$input,
8281    Arg<TF_I32OrI64Tensor, [{A two-column matrix specifying the padding sizes. The number of
8282rows must be the same as the rank of `input`.}]>:$paddings,
8283
8284    TF_AnyStrAttrOf<["REFLECT", "SYMMETRIC"]>:$mode
8285  );
8286
8287  let results = (outs
8288    Res<TF_Tensor, [{The padded tensor.}]>:$output
8289  );
8290
8291  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8292  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<1>;
8293}
8294
8295def TF_MirrorPadGradOp : TF_Op<"MirrorPadGrad", [NoSideEffect, TF_OperandHasRank<1, 2>]> {
8296  let summary = [{
8297Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
8298  }];
8299
8300  let description = [{
8301This operation folds the padded areas of `input` by `MirrorPad` according to the
8302`paddings` you specify. `paddings` must be the same as `paddings` argument
8303given to the corresponding `MirrorPad` op.
8304
8305The folded size of each dimension D of the output is:
8306
8307`input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
8308
8309For example:
8310
8311```
8312# 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
# 'paddings' is [[0, 1], [0, 1]].
8314# 'mode' is SYMMETRIC.
8315# rank of 't' is 2.
8316pad(t, paddings) ==> [[ 1,  5]
8317                      [11, 28]]
8318```
8319  }];
8320
8321  let arguments = (ins
8322    Arg<TF_Tensor, [{The input tensor to be folded.}]>:$input,
8323    Arg<TF_I32OrI64Tensor, [{A two-column matrix specifying the padding sizes. The number of
8324rows must be the same as the rank of `input`.}]>:$paddings,
8325
8326    TF_AnyStrAttrOf<["REFLECT", "SYMMETRIC"]>:$mode
8327  );
8328
8329  let results = (outs
8330    Res<TF_Tensor, [{The folded tensor.}]>:$output
8331  );
8332
8333  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8334  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<1>;
8335}
8336
8337def TF_MlirLocalVarOp : TF_Op<"MlirLocalVarOp", []> {
8338  let summary = "Creates a handle to an in-scope variable.";
8339
8340  let description = [{
Used by internal passes as a temporary representation of local state, which
will eventually be removed.
8343  }];
8344
8345  let arguments = (ins);
8346
8347  let results = (outs
8348    Res<TF_ResourceTensor, "", [TF_VariableAlloc]>:$resource
8349  );
8350}
8351
8352def TF_MlirPassthroughOp : TF_Op<"MlirPassthroughOp", [NoSideEffect]> {
8353  let summary = [{
8354Wraps an arbitrary MLIR computation expressed as a module with a main() function.
8355  }];
8356
8357  let description = [{
This operation does not have an associated kernel and is not intended to be
executed in a regular TensorFlow session. Instead it is intended to be used for
testing or for special cases where a user intends to pass a custom MLIR
computation through a TensorFlow graph, with the intent of having custom
tooling process it downstream (when targeting a different environment, like
TensorFlow Lite for example).
The MLIR module is expected to have a main() function that will be used as an
entry point. The inputs to the operation will be passed as arguments to the
main() function, and the returned values of the main() function are mapped to
the outputs.
8368Example usage:
8369
8370```
8371import tensorflow as tf
8372from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op
8373
mlir_module = '''
func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {
   %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>
   return %add : tensor<10x10xf32>
8378}
8379'''
8380
8381@tf.function
8382def foo(x, y):
8383  return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])
8384
8385graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()
8386```
8387  }];
8388
8389  let arguments = (ins
8390    Variadic<TF_Tensor>:$inputs,
8391
8392    StrAttr:$mlir_module
8393  );
8394
8395  let results = (outs
8396    Variadic<TF_Tensor>:$outputs
8397  );
8398
8399  TF_DerivedOperandTypeListAttr Tinputs = TF_DerivedOperandTypeListAttr<0>;
8400  TF_DerivedResultTypeListAttr Toutputs = TF_DerivedResultTypeListAttr<0>;
8401}
8402
8403def TF_ModOp : TF_Op<"Mod", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
8404               WithBroadcastableBinOpBuilder {
8405  let summary = [{
Returns element-wise remainder of division.
  }];

  let description = [{
This emulates C semantics in that the result is consistent with a truncating
divide, e.g. `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
8412
8413*NOTE*: `Mod` supports broadcasting. More about broadcasting
8414[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
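
A small sketch with the public `tf.truncatemod` wrapper (assumed to lower to
this op); note that the result takes the sign of `x`:

```
x = tf.constant([ 7, -7,  7, -7])
y = tf.constant([ 3,  3, -3, -3])
tf.truncatemod(x, y)  # ==> [1, -1, 1, -1]
```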
8415  }];
8416
8417  let arguments = (ins
8418    TF_FpOrI32OrI64Tensor:$x,
8419    TF_FpOrI32OrI64Tensor:$y
8420  );
8421
8422  let results = (outs
8423    TF_FpOrI32OrI64Tensor:$z
8424  );
8425
8426  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8427}
8428
8429def TF_ModelDatasetOp : TF_Op<"ModelDataset", [NoSideEffect]> {
8430  let summary = "Identity transformation that models performance.";
8431
8436  let arguments = (ins
8437    Arg<TF_VariantTensor, [{A variant tensor representing the input dataset.}]>:$input_dataset,
8438
8439    DefaultValuedAttr<I64Attr, "0">:$algorithm,
8440    DefaultValuedAttr<I64Attr, "0">:$cpu_budget,
8441    DefaultValuedAttr<I64Attr, "0">:$ram_budget,
8442    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
8443    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
8444  );
8445
8446  let results = (outs
8447    TF_VariantTensor:$handle
8448  );
8449}
8450
8451def TF_MulOp : TF_Op<"Mul", [Commutative, NoSideEffect, ResultsBroadcastableShape, TF_CwiseBinary, TF_SameOperandsAndResultElementTypeResolveRef]>,
8452               WithBroadcastableBinOpBuilder {
8453  let summary = "Returns x * y element-wise.";
8454
8455  let description = [{
8456*NOTE*: `Multiply` supports broadcasting. More about broadcasting
8457[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
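
An illustrative sketch via the public `tf.multiply` wrapper (assumed to lower
to this op):

```
x = tf.constant([[1, 2, 3]])   # shape (1, 3)
y = tf.constant([[10], [20]])  # shape (2, 1)
tf.multiply(x, y)              # broadcasts to shape (2, 3)
# ==> [[10, 20, 30],
#      [20, 40, 60]]
```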
8458  }];
8459
8460  let arguments = (ins
8461    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
8462    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
8463  );
8464
8465  let results = (outs
8466    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
8467  );
8468
8469  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8470}
8471
8472def TF_MulNoNanOp : TF_Op<"MulNoNan", [NoSideEffect, ResultsBroadcastableShape]>,
8473                    WithBroadcastableBinOpBuilder {
8474  let summary = [{
Returns x * y element-wise. Returns zero if y is zero, even if x is infinite or NaN.
8476  }];
8477
8478  let description = [{
8479*NOTE*: `MulNoNan` supports broadcasting. More about broadcasting
8480[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
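
A small sketch with the public `tf.math.multiply_no_nan` wrapper (assumed to
lower to this op); the values shown are illustrative:

```
x = tf.constant([float("inf"), float("nan"), 2.])
y = tf.constant([0., 0., 3.])
tf.math.multiply_no_nan(x, y)  # ==> [0., 0., 6.]
```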
8481  }];
8482
8483  let arguments = (ins
8484    TF_FpOrComplexTensor:$x,
8485    TF_FpOrComplexTensor:$y
8486  );
8487
8488  let results = (outs
8489    TF_FpOrComplexTensor:$z
8490  );
8491
8492  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8493}
8494
8495def TF_MultiDeviceIteratorOp : TF_Op<"MultiDeviceIterator", []> {
8496  let summary = "Creates a MultiDeviceIterator resource.";
8497
8498  let arguments = (ins
8499    Confined<StrArrayAttr, [ArrayMinCount<1>]>:$devices,
8500    StrAttr:$shared_name,
8501    StrAttr:$container,
8502    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
8503    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
8504  );
8505
8506  let results = (outs
8507    Res<TF_ResourceTensor, [{Handle to the resource created.}], [TF_DatasetIteratorAlloc]>:$handle
8508  );
8509}
8510
8511def TF_MultiDeviceIteratorFromStringHandleOp : TF_Op<"MultiDeviceIteratorFromStringHandle", []> {
8512  let summary = [{
8513Generates a MultiDeviceIterator resource from its provided string handle.
8514  }];
8515
8516  let arguments = (ins
8517    Arg<TF_StrTensor, [{String representing the resource.}]>:$string_handle,
8518
8519    DefaultValuedAttr<TypeArrayAttr, "{}">:$output_types,
8520    DefaultValuedAttr<TF_ShapeAttrArray, "{}">:$output_shapes
8521  );
8522
8523  let results = (outs
8524    Res<TF_ResourceTensor, [{A MultiDeviceIterator resource.}], [TF_DatasetIteratorAlloc]>:$multi_device_iterator
8525  );
8526}
8527
8528def TF_MultiDeviceIteratorGetNextFromShardOp : TF_Op<"MultiDeviceIteratorGetNextFromShard", []> {
8529  let summary = "Gets next element for the provided shard number.";
8530
8531  let arguments = (ins
8532    Arg<TF_ResourceTensor, [{A MultiDeviceIterator resource.}], [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$multi_device_iterator,
8533    Arg<TF_Int32Tensor, [{Integer representing which shard to fetch data for.}]>:$shard_num,
8534    Arg<TF_Int64Tensor, [{Which incarnation of the MultiDeviceIterator is running.}]>:$incarnation_id
8535  );
8536
8537  let results = (outs
8538    Res<Variadic<TF_Tensor>, [{Result of the get_next on the dataset.}]>:$components
8539  );
8540
8541  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
8542  TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
8543}
8544
8545def TF_MultiDeviceIteratorInitOp : TF_Op<"MultiDeviceIteratorInit", []> {
8546  let summary = "Initializes the multi device iterator with the given dataset.";
8547
8548  let arguments = (ins
8549    Arg<TF_VariantTensor, [{Dataset to be iterated upon.}]>:$dataset,
8550    Arg<TF_ResourceTensor, [{A MultiDeviceIteratorResource.}], [TF_DatasetIteratorWrite]>:$multi_device_iterator,
8551    Arg<TF_Int64Tensor, [{The maximum size of the host side per device buffer to keep.}]>:$max_buffer_size
8552  );
8553
8554  let results = (outs
8555    Res<TF_Int64Tensor, [{An int64 indicating which incarnation of the MultiDeviceIterator
8556is running.}]>:$incarnation_id
8557  );
8558}
8559
8560def TF_MultiDeviceIteratorToStringHandleOp : TF_Op<"MultiDeviceIteratorToStringHandle", []> {
8561  let summary = "Produces a string handle for the given MultiDeviceIterator.";
8562
8563  let arguments = (ins
8564    Arg<TF_ResourceTensor, [{A MultiDeviceIterator resource.}], [TF_DatasetIteratorRead]>:$multi_device_iterator
8565  );
8566
8567  let results = (outs
8568    Res<TF_StrTensor, [{A string representing the resource.}]>:$string_handle
8569  );
8570}
8571
8572def TF_MultinomialOp : TF_Op<"Multinomial", [TF_CannotDuplicate]> {
8573  let summary = "Draws samples from a multinomial distribution.";
8574
8575  let arguments = (ins
8576    Arg<TF_IntOrFpTensor, [{2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
8577represents the unnormalized log probabilities for all classes.}]>:$logits,
8578    Arg<TF_Int32Tensor, [{0-D.  Number of independent samples to draw for each row slice.}]>:$num_samples,
8579
8580    DefaultValuedAttr<I64Attr, "0">:$seed,
8581    DefaultValuedAttr<I64Attr, "0">:$seed2
8582  );
8583
8584  let results = (outs
8585    Res<TF_I32OrI64Tensor, [{2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
8586contains the drawn class labels with range `[0, num_classes)`.}]>:$output
8587  );
8588
8589  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8590  TF_DerivedResultTypeAttr output_dtype = TF_DerivedResultTypeAttr<0>;
8591}
8592
8593def TF_MutableDenseHashTableV2Op : TF_Op<"MutableDenseHashTableV2", []> {
8594  let summary = [{
8595Creates an empty hash table that uses tensors as the backing store.
8596  }];
8597
8598  let description = [{
It uses "open addressing" with quadratic probing to resolve
8600collisions.
8601
8602This op creates a mutable hash table, specifying the type of its keys and
8603values. Each value must be a scalar. Data can be inserted into the table using
8604the insert operations. It does not support the initialization operation.
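
An illustrative sketch via the public `tf.lookup.experimental.DenseHashTable`
wrapper, which is assumed to be backed by this op; the dtypes and values shown
are illustrative:

```
table = tf.lookup.experimental.DenseHashTable(
    key_dtype=tf.int64, value_dtype=tf.int64,
    default_value=-1, empty_key=0, deleted_key=-1)
table.insert(tf.constant([1, 2], dtype=tf.int64),
             tf.constant([10, 20], dtype=tf.int64))
table.lookup(tf.constant([1, 3], dtype=tf.int64))  # ==> [10, -1]
```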
8605  }];
8606
8607  let arguments = (ins
8608    Arg<TF_Tensor, [{The key used to represent empty key buckets internally. Must not
8609be used in insert or lookup operations.}]>:$empty_key,
8610    TF_Tensor:$deleted_key,
8611
8612    StrAttr:$container,
8613    StrAttr:$shared_name,
8614    DefaultValuedAttr<BoolAttr, "false">:$use_node_name_sharing,
8615    TypeAttr:$value_dtype,
8616    DefaultValuedAttr<TF_ShapeAttr, "llvm::ArrayRef<int64_t>({})">:$value_shape,
8617    DefaultValuedAttr<I64Attr, "131072">:$initial_num_buckets,
8618    DefaultValuedAttr<F32Attr, "0.8f">:$max_load_factor
8619  );
8620
8621  let results = (outs
8622    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
8623  );
8624
8625  TF_DerivedOperandTypeAttr key_dtype = TF_DerivedOperandTypeAttr<0>;
8626}
8627
8628def TF_MutableHashTableOfTensorsV2Op : TF_Op<"MutableHashTableOfTensorsV2", []> {
8629  let summary = "Creates an empty hash table.";
8630
8631  let description = [{
8632This op creates a mutable hash table, specifying the type of its keys and
8633values. Each value must be a vector. Data can be inserted into the table using
8634the insert operations. It does not support the initialization operation.
8635  }];
8636
8637  let arguments = (ins
8638    StrAttr:$container,
8639    StrAttr:$shared_name,
8640    DefaultValuedAttr<BoolAttr, "false">:$use_node_name_sharing,
8641    TypeAttr:$key_dtype,
8642    TypeAttr:$value_dtype,
8643    DefaultValuedAttr<TF_ShapeAttr, "llvm::ArrayRef<int64_t>({})">:$value_shape
8644  );
8645
8646  let results = (outs
8647    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
8648  );
8649}
8650
8651def TF_MutableHashTableV2Op : TF_Op<"MutableHashTableV2", []> {
8652  let summary = "Creates an empty hash table.";
8653
8654  let description = [{
8655This op creates a mutable hash table, specifying the type of its keys and
8656values. Each value must be a scalar. Data can be inserted into the table using
8657the insert operations. It does not support the initialization operation.
8658  }];
8659
8660  let arguments = (ins
8661    StrAttr:$container,
8662    StrAttr:$shared_name,
8663    DefaultValuedAttr<BoolAttr, "false">:$use_node_name_sharing,
8664    TypeAttr:$key_dtype,
8665    TypeAttr:$value_dtype
8666  );
8667
8668  let results = (outs
8669    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
8670  );
8671}
8672
8673def TF_NdtriOp : TF_Op<"Ndtri", [NoSideEffect]> {
8674  let summary = "";
8675
8676  let arguments = (ins
8677    TF_FloatTensor:$x
8678  );
8679
8680  let results = (outs
8681    TF_FloatTensor:$y
8682  );
8683
8684  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8685}
8686
8687def TF_NegOp : TF_Op<"Neg", [Involution, NoSideEffect, SameOperandsAndResultType, TF_CwiseUnary]> {
8688  let summary = "Computes numerical negative value element-wise.";
8689
8690  let description = [{
8691I.e., \\(y = -x\\).
8692  }];
8693
8694  let arguments = (ins
8695    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
8696  );
8697
8698  let results = (outs
8699    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
8700  );
8701
8702  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8703}
8704
8705def TF_NextAfterOp : TF_Op<"NextAfter", [NoSideEffect, ResultsBroadcastableShape]>,
8706                     WithBroadcastableBinOpBuilder {
8707  let summary = [{
8708Returns the next representable value of `x1` in the direction of `x2`, element-wise.
8709  }];
8710
8711  let description = [{
8712This operation returns the same result as the C++ std::nextafter function.
8713
8714It can also return a subnormal number.
8715
8716@compatibility(cpp)
8717Equivalent to C++ std::nextafter function.
8718@end_compatibility
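
A small sketch with the public `tf.math.nextafter` wrapper (assumed to lower
to this op):

```
# The smallest representable float32 strictly greater than 1.0.
tf.math.nextafter(1., 2.)  # ==> 1.0000001
```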
8719  }];
8720
8721  let arguments = (ins
8722    TF_F32OrF64Tensor:$x1,
8723    TF_F32OrF64Tensor:$x2
8724  );
8725
8726  let results = (outs
8727    TF_F32OrF64Tensor:$output
8728  );
8729
8730  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8731}
8732
8733def TF_NoOp : TF_Op<"NoOp", [NoSideEffect]> {
8734  let summary = "Does nothing. Only useful as a placeholder for control edges.";
8735
8736  let arguments = (ins);
8737
8738  let results = (outs);
8739}
8740
8741def TF_NonMaxSuppressionV3Op : TF_Op<"NonMaxSuppressionV3", [NoSideEffect]> {
8742  let summary = [{
Greedily selects a subset of bounding boxes in descending order of score.
  }];

  let description = [{
Prunes away boxes that have high intersection-over-union (IOU) overlap
8748with previously selected boxes.  Bounding boxes with score less than
8749`score_threshold` are removed.  Bounding boxes are supplied as
8750[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
8751diagonal pair of box corners and the coordinates can be provided as normalized
8752(i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
8753is agnostic to where the origin is in the coordinate system and more
8754generally is invariant to orthogonal transformations and translations
of the coordinate system; thus translating or reflecting the coordinate
system results in the same boxes being selected by the algorithm.
8757The output of this operation is a set of integers indexing into the input
8758collection of bounding boxes representing the selected boxes.  The bounding
8759box coordinates corresponding to the selected indices can then be obtained
using the `tf.gather` operation.  For example:
8761  selected_indices = tf.image.non_max_suppression_v2(
8762      boxes, scores, max_output_size, iou_threshold, score_threshold)
8763  selected_boxes = tf.gather(boxes, selected_indices)
8764  }];
8765
8766  let arguments = (ins
8767    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 2-D float tensor of shape `[num_boxes, 4]`.}]>:$boxes,
8768    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[num_boxes]` representing a single
8769score corresponding to each box (each row of boxes).}]>:$scores,
8770    Arg<TF_Int32Tensor, [{A scalar integer tensor representing the maximum number of
8771boxes to be selected by non max suppression.}]>:$max_output_size,
8772    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding whether
8773boxes overlap too much with respect to IOU.}]>:$iou_threshold,
8774    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding when to remove
8775boxes based on score.}]>:$score_threshold
8776  );
8777
8778  let results = (outs
8779    Res<TF_Int32Tensor, [{A 1-D integer tensor of shape `[M]` representing the selected
8780indices from the boxes tensor, where `M <= max_output_size`.}]>:$selected_indices
8781  );
8782
8783  TF_DerivedOperandTypeAttr T_threshold = TF_DerivedOperandTypeAttr<3>;
8784  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8785}
8786
8787def TF_NonMaxSuppressionV4Op : TF_Op<"NonMaxSuppressionV4", [NoSideEffect]> {
8788  let summary = [{
Greedily selects a subset of bounding boxes in descending order of score.
  }];

  let description = [{
Prunes away boxes that have high intersection-over-union (IOU) overlap
8794with previously selected boxes.  Bounding boxes with score less than
8795`score_threshold` are removed.  Bounding boxes are supplied as
8796[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
8797diagonal pair of box corners and the coordinates can be provided as normalized
8798(i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
8799is agnostic to where the origin is in the coordinate system and more
8800generally is invariant to orthogonal transformations and translations
of the coordinate system; thus translating or reflecting the coordinate
system results in the same boxes being selected by the algorithm.
8803The output of this operation is a set of integers indexing into the input
8804collection of bounding boxes representing the selected boxes.  The bounding
8805box coordinates corresponding to the selected indices can then be obtained
using the `tf.gather` operation.  For example:
8807  selected_indices = tf.image.non_max_suppression_v2(
8808      boxes, scores, max_output_size, iou_threshold, score_threshold)
8809  selected_boxes = tf.gather(boxes, selected_indices)
8810  }];
8811
8812  let arguments = (ins
8813    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 2-D float tensor of shape `[num_boxes, 4]`.}]>:$boxes,
8814    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[num_boxes]` representing a single
8815score corresponding to each box (each row of boxes).}]>:$scores,
8816    Arg<TF_Int32Tensor, [{A scalar integer tensor representing the maximum number of
8817boxes to be selected by non max suppression.}]>:$max_output_size,
8818    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding whether
8819boxes overlap too much with respect to IOU.}]>:$iou_threshold,
8820    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding when to remove
8821boxes based on score.}]>:$score_threshold,
8822
8823    DefaultValuedAttr<BoolAttr, "false">:$pad_to_max_output_size
8824  );
8825
8826  let results = (outs
8827    Res<TF_Int32Tensor, [{A 1-D integer tensor of shape `[M]` representing the selected
8828indices from the boxes tensor, where `M <= max_output_size`.}]>:$selected_indices,
8829    Res<TF_Int32Tensor, [{A 0-D integer tensor representing the number of valid elements in
8830`selected_indices`, with the valid elements appearing first.}]>:$valid_outputs
8831  );
8832
8833  TF_DerivedOperandTypeAttr T_threshold = TF_DerivedOperandTypeAttr<3>;
8834  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8835}
8836
8837def TF_NonMaxSuppressionV5Op : TF_Op<"NonMaxSuppressionV5", [NoSideEffect]> {
8838  let summary = [{
Greedily selects a subset of bounding boxes in descending order of score.
  }];

  let description = [{
Prunes away boxes that have high intersection-over-union (IOU) overlap
8844with previously selected boxes.  Bounding boxes with score less than
8845`score_threshold` are removed.  Bounding boxes are supplied as
8846[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
8847diagonal pair of box corners and the coordinates can be provided as normalized
8848(i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
8849is agnostic to where the origin is in the coordinate system and more
8850generally is invariant to orthogonal transformations and translations
of the coordinate system; thus translations or reflections of the coordinate
8852system result in the same boxes being selected by the algorithm.
8853The output of this operation is a set of integers indexing into the input
8854collection of bounding boxes representing the selected boxes.  The bounding
8855box coordinates corresponding to the selected indices can then be obtained
using the `tf.gather` operation.  For example:
  selected_indices = tf.image.non_max_suppression(
8858      boxes, scores, max_output_size, iou_threshold, score_threshold)
8859  selected_boxes = tf.gather(boxes, selected_indices)
This op also supports a Soft-NMS (with Gaussian weighting) mode (cf.
Bodla et al., https://arxiv.org/abs/1704.04503) where boxes reduce the score
8862of other overlapping boxes instead of directly causing them to be pruned.
8863To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be
8864larger than 0.
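
As a minimal sketch of the Soft-NMS mode, assuming the
`tf.image.non_max_suppression_with_scores` Python wrapper (which exposes this
op's `soft_nms_sigma` input and its extra `selected_scores` output):

```python
import tensorflow as tf

boxes = tf.constant([[0.0, 0.0, 1.0, 1.0],
                     [0.0, 0.1, 1.0, 1.1]])
scores = tf.constant([0.9, 0.8])

# With soft_nms_sigma > 0, overlapping boxes have their scores decayed
# instead of being pruned outright.
selected_indices, selected_scores = tf.image.non_max_suppression_with_scores(
    boxes, scores, max_output_size=2, iou_threshold=0.5,
    score_threshold=0.0, soft_nms_sigma=0.5)
selected_boxes = tf.gather(boxes, selected_indices)
```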
8865  }];
8866
8867  let arguments = (ins
8868    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 2-D float tensor of shape `[num_boxes, 4]`.}]>:$boxes,
8869    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[num_boxes]` representing a single
8870score corresponding to each box (each row of boxes).}]>:$scores,
8871    Arg<TF_Int32Tensor, [{A scalar integer tensor representing the maximum number of
8872boxes to be selected by non max suppression.}]>:$max_output_size,
8873    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding whether
8874boxes overlap too much with respect to IOU.}]>:$iou_threshold,
8875    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding when to remove
8876boxes based on score.}]>:$score_threshold,
    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the sigma parameter for Soft NMS; see Bodla et
al. (cf. https://arxiv.org/abs/1704.04503).  When `soft_nms_sigma=0.0` (the
default), we fall back to standard (hard) NMS.}]>:$soft_nms_sigma,
8880
8881    DefaultValuedAttr<BoolAttr, "false">:$pad_to_max_output_size
8882  );
8883
8884  let results = (outs
8885    Res<TF_Int32Tensor, [{A 1-D integer tensor of shape `[M]` representing the selected
8886indices from the boxes tensor, where `M <= max_output_size`.}]>:$selected_indices,
8887    Res<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[M]` representing the corresponding
8888scores for each selected box, where `M <= max_output_size`.  Scores only differ
8889from corresponding input scores when using Soft NMS (i.e. when
`soft_nms_sigma>0`).}]>:$selected_scores,
8891    Res<TF_Int32Tensor, [{A 0-D integer tensor representing the number of valid elements in
8892`selected_indices`, with the valid elements appearing first.}]>:$valid_outputs
8893  );
8894
8895  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8896}
8897
8898def TF_NotEqualOp : TF_Op<"NotEqual", [Commutative, NoSideEffect]> {
8899  let summary = "Returns the truth value of (x != y) element-wise.";
8900
8901  let description = [{
8902*NOTE*: `NotEqual` supports broadcasting. More about broadcasting
8903[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
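
For example, broadcasting a scalar against a vector (a sketch using the
`tf.not_equal` wrapper):

```python
import tensorflow as tf

x = tf.constant([1, 2, 3])
y = tf.constant(2)      # broadcast against every element of `x`
tf.not_equal(x, y)      # ==> [True, False, True]
```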
8904  }];
8905
8906  let arguments = (ins
8907    TF_Tensor:$x,
8908    TF_Tensor:$y,
8909
8910    DefaultValuedAttr<BoolAttr, "true">:$incompatible_shape_error
8911  );
8912
8913  let results = (outs
8914    TF_BoolTensor:$z
8915  );
8916
8917  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8918
8919  let builders = [
8920    OpBuilder<(ins "Value":$x, "Value":$y,
8921      "BoolAttr":$incompatible_shape_error)>
8922  ];
8923
8924  let verifier = [{
8925    return Verify(*this);
8926  }];
8927}
8928
8929def TF_OneHotOp : TF_Op<"OneHot", [NoSideEffect]> {
8930  let summary = "Returns a one-hot tensor.";
8931
8932  let description = [{
8933The locations represented by indices in `indices` take value `on_value`,
8934while all other locations take value `off_value`.
8935
If the input `indices` has rank `N`, the output will have rank `N+1`.
The new axis is created at dimension `axis` (default: the new axis is
8938appended at the end).
8939
8940If `indices` is a scalar the output shape will be a vector of length `depth`.
8941
8942If `indices` is a vector of length `features`, the output shape will be:
8943```
8944  features x depth if axis == -1
8945  depth x features if axis == 0
8946```
8947
8948If `indices` is a matrix (batch) with shape `[batch, features]`,
8949the output shape will be:
8950```
8951  batch x features x depth if axis == -1
8952  batch x depth x features if axis == 1
8953  depth x batch x features if axis == 0
8954```
8955
8956
8957Examples
8958=========
8959
8960Suppose that
8961```
8962  indices = [0, 2, -1, 1]
8963  depth = 3
8964  on_value = 5.0
8965  off_value = 0.0
8966  axis = -1
8967```
8968
8969Then output is `[4 x 3]`:
8970```
8971output =
8972  [5.0 0.0 0.0]  // one_hot(0)
8973  [0.0 0.0 5.0]  // one_hot(2)
8974  [0.0 0.0 0.0]  // one_hot(-1)
8975  [0.0 5.0 0.0]  // one_hot(1)
8976```
8977
8978Suppose that
8979```
8980  indices = [0, 2, -1, 1]
8981  depth = 3
8982  on_value = 0.0
8983  off_value = 3.0
8984  axis = 0
8985```
8986
8987Then output is `[3 x 4]`:
8988```
8989output =
8990  [0.0 3.0 3.0 3.0]
8991  [3.0 3.0 3.0 0.0]
8992  [3.0 3.0 3.0 3.0]
8993  [3.0 0.0 3.0 3.0]
8994//  ^                one_hot(0)
8995//      ^            one_hot(2)
8996//          ^        one_hot(-1)
8997//              ^    one_hot(1)
8998```
8999
9000Suppose that
9001```
9002  indices = [[0, 2], [1, -1]]
9003  depth = 3
9004  on_value = 1.0
9005  off_value = 0.0
9006  axis = -1
9007```
9008
9009Then output is `[2 x 2 x 3]`:
9010```
9011output =
9012  [
9013    [1.0, 0.0, 0.0]  // one_hot(0)
9014    [0.0, 0.0, 1.0]  // one_hot(2)
9015  ][
9016    [0.0, 1.0, 0.0]  // one_hot(1)
9017    [0.0, 0.0, 0.0]  // one_hot(-1)
9018  ]
9019```
9020  }];
9021
9022  let arguments = (ins
9023    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint8]>, [{A tensor of indices.}]>:$indices,
9024    Arg<TF_Int32Tensor, [{A scalar defining the depth of the one hot dimension.}]>:$depth,
9025    Arg<TF_Tensor, [{A scalar defining the value to fill in output when `indices[j] = i`.}]>:$on_value,
9026    Arg<TF_Tensor, [{A scalar defining the value to fill in output when `indices[j] != i`.}]>:$off_value,
9027
9028    DefaultValuedAttr<I64Attr, "-1">:$axis
9029  );
9030
9031  let results = (outs
9032    Res<TF_Tensor, [{The one-hot tensor.}]>:$output
9033  );
9034
9035  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
9036  TF_DerivedOperandTypeAttr TI = TF_DerivedOperandTypeAttr<0>;
9037
9038  let builders = [
9039    OpBuilder<(ins "Value":$indices, "Value":$depth, "Value":$on_value,
9040      "Value":$off_value, "IntegerAttr":$axis)>
9041  ];
9042
9043  let verifier = [{
9044    return Verify(*this);
9045  }];
9046}
9047
9048def TF_OneShotIteratorOp : TF_Op<"OneShotIterator", []> {
9049  let summary = [{
9050Makes a "one-shot" iterator that can be iterated only once.
9051  }];
9052
9053  let description = [{
9054A one-shot iterator bundles the logic for defining the dataset and
9055the state of the iterator in a single op, which allows simple input
9056pipelines to be defined without an additional initialization
9057("MakeIterator") step.
9058
9059One-shot iterators have the following limitations:
9060
9061* They do not support parameterization: all logic for creating the underlying
9062  dataset must be bundled in the `dataset_factory` function.
9063* They are not resettable. Once a one-shot iterator reaches the end of its
9064  underlying dataset, subsequent "IteratorGetNext" operations on that
9065  iterator will always produce an `OutOfRange` error.
9066
9067For greater flexibility, use "Iterator" and "MakeIterator" to define
9068an iterator using an arbitrary subgraph, which may capture tensors
9069(including fed values) as parameters, and which may be reset multiple
9070times by rerunning "MakeIterator".
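
A minimal graph-mode sketch, assuming the TF1-style
`tf.compat.v1.data.make_one_shot_iterator` wrapper:

```python
import tensorflow as tf

dataset = tf.compat.v1.data.Dataset.range(3)
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
next_element = iterator.get_next()  # yields 0, 1, 2, then OutOfRangeError
```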
9071  }];
9072
9073  let arguments = (ins
9074    SymbolRefAttr:$dataset_factory,
9075    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
9076    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes,
9077    StrAttr:$container,
9078    StrAttr:$shared_name
9079  );
9080
9081  let results = (outs
9082    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to an "IteratorGetNext"
9083op.}], [TF_DatasetIteratorAlloc]>:$handle
9084  );
9085}
9086
9087def TF_OnesLikeOp : TF_Op<"OnesLike", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
9088  let summary = "Returns a tensor of ones with the same shape and type as x.";
9089
9090  let arguments = (ins
9091    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{a tensor of type T.}]>:$x
9092  );
9093
9094  let results = (outs
9095    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{a tensor of the same shape and type as x but filled with ones.}]>:$y
9096  );
9097
9098  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9099}
9100
9101def TF_OptimizeDatasetV2Op : TF_Op<"OptimizeDatasetV2", [NoSideEffect]> {
9102  let summary = [{
9103Creates a dataset by applying related optimizations to `input_dataset`.
9104  }];
9105
9106  let description = [{
9107Creates a dataset by applying related optimizations to `input_dataset`.
9108  }];
9109
9110  let arguments = (ins
9111    Arg<TF_VariantTensor, [{A variant tensor representing the input dataset.}]>:$input_dataset,
9112    Arg<TF_StrTensor, [{A `tf.string` vector `tf.Tensor` identifying user enabled optimizations.}]>:$optimizations_enabled,
9113    Arg<TF_StrTensor, [{A `tf.string` vector `tf.Tensor` identifying user disabled optimizations.}]>:$optimizations_disabled,
    Arg<TF_StrTensor, [{A `tf.string` vector `tf.Tensor` identifying optimizations that are enabled by default.}]>:$optimizations_default,
9115
9116    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
9117    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes,
9118    DefaultValuedAttr<StrArrayAttr, "{}">:$optimization_configs
9119  );
9120
9121  let results = (outs
9122    TF_VariantTensor:$handle
9123  );
9124}
9125
9126def TF_OptionalGetValueOp : TF_Op<"OptionalGetValue", [NoSideEffect]> {
9127  let summary = [{
9128Returns the value stored in an Optional variant or raises an error if none exists.
9129  }];
9130
9131  let arguments = (ins
9132    TF_VariantTensor:$optional
9133  );
9134
9135  let results = (outs
9136    Variadic<TF_Tensor>:$components
9137  );
9138
9139  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
9140  TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
9141}
9142
9143def TF_OptionalHasValueOp : TF_Op<"OptionalHasValue", [NoSideEffect]> {
9144  let summary = [{
9145Returns true if and only if the given Optional variant has a value.
9146  }];
9147
9148  let arguments = (ins
9149    TF_VariantTensor:$optional
9150  );
9151
9152  let results = (outs
9153    TF_BoolTensor:$has_value
9154  );
9155}
9156
9157def TF_OutfeedEnqueueTupleOp : TF_Op<"OutfeedEnqueueTuple", []> {
9158  let summary = "Enqueue multiple Tensor values on the computation outfeed.";
9159
9160  let arguments = (ins
9161    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be inserted into the outfeed queue as an
9162XLA tuple.}]>:$inputs
9163  );
9164
9165  let results = (outs);
9166
9167  TF_DerivedOperandTypeListAttr dtypes = TF_DerivedOperandTypeListAttr<0>;
9168}
9169
9170def TF_PackOp : TF_Op<"Pack", [NoSideEffect]> {
9171  let summary = [{
9172Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
9173  }];
9174
9175  let description = [{
9176Packs the `N` tensors in `values` into a tensor with rank one higher than each
9177tensor in `values`, by packing them along the `axis` dimension.
9178Given a list of tensors of shape `(A, B, C)`;
9179
9180if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
9181if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
9182Etc.
9183
9184For example:
9185
9186```
9187# 'x' is [1, 4]
9188# 'y' is [2, 5]
9189# 'z' is [3, 6]
9190pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
9191pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
9192```
9193
9194This is the opposite of `unpack`.
9195  }];
9196
9197  let arguments = (ins
9198    Arg<Variadic<TF_Tensor>, [{Must be of same shape and type.}]>:$values,
9199
9200    DefaultValuedAttr<I64Attr, "0">:$axis
9201  );
9202
9203  let results = (outs
9204    Res<TF_Tensor, [{The packed tensor.}]>:$output
9205  );
9206
9207  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9208  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
9209
9210  let verifier = [{
9211    return Verify(*this);
9212  }];
9213}
9214
9215def TF_PadOp : TF_Op<"Pad", [NoSideEffect, TF_OperandHasRank<1, 2>]> {
9216  let summary = "Pads a tensor with zeros.";
9217
9218  let description = [{
This operation pads `input` with zeros according to the `paddings` you
9220specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
9221rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
9222how many zeros to add before the contents of `input` in that dimension, and
9223`paddings[D, 1]` indicates how many zeros to add after the contents of `input`
9224in that dimension.
9225
9226The padded size of each dimension D of the output is:
9227
9228`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
9229
9230For example:
9231
9232```
9233# 't' is [[1, 1], [2, 2]]
9234# 'paddings' is [[1, 1], [2, 2]]
9235# rank of 't' is 2
9236pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
9237                      [0, 0, 1, 1, 0, 0]
9238                      [0, 0, 2, 2, 0, 0]
9239                      [0, 0, 0, 0, 0, 0]]
9240```
9241  }];
9242
9243  let arguments = (ins
9244    TF_Tensor:$input,
9245    TF_I32OrI64Tensor:$paddings
9246  );
9247
9248  let results = (outs
9249    TF_Tensor:$output
9250  );
9251
9252  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9253  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<1>;
9254}
9255
9256def TF_PadV2Op : TF_Op<"PadV2", [NoSideEffect, TF_OperandHasRank<1, 2>]> {
9257  let summary = "Pads a tensor.";
9258
9259  let description = [{
9260This operation pads `input` according to the `paddings` and `constant_values`
9261you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
9262the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
9263how many padding values to add before the contents of `input` in that dimension,
9264and `paddings[D, 1]` indicates how many padding values to add after the contents
9265of `input` in that dimension. `constant_values` is a scalar tensor of the same
9266type as `input` that indicates the value to use for padding `input`.
9267
9268The padded size of each dimension D of the output is:
9269
9270`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
9271
9272For example:
9273
9274```
9275# 't' is [[1, 1], [2, 2]]
9276# 'paddings' is [[1, 1], [2, 2]]
9277# 'constant_values' is 0
9278# rank of 't' is 2
9279pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
9280                      [0, 0, 1, 1, 0, 0]
9281                      [0, 0, 2, 2, 0, 0]
9282                      [0, 0, 0, 0, 0, 0]]
9283```
9284  }];
9285
9286  let arguments = (ins
9287    TF_Tensor:$input,
9288    TF_I32OrI64Tensor:$paddings,
9289    TF_Tensor:$constant_values
9290  );
9291
9292  let results = (outs
9293    TF_Tensor:$output
9294  );
9295
9296  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9297  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<1>;
9298}
9299
9300def TF_ParallelDynamicStitchOp : TF_Op<"ParallelDynamicStitch", [NoSideEffect, SameVariadicOperandSize]> {
9301  let summary = [{
9302Interleave the values from the `data` tensors into a single tensor.
9303  }];
9304
9305  let description = [{
9306Builds a merged tensor such that
9307
9308```python
9309    merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
9310```
9311
9312For example, if each `indices[m]` is scalar or vector, we have
9313
9314```python
9315    # Scalar indices:
9316    merged[indices[m], ...] = data[m][...]
9317
9318    # Vector indices:
9319    merged[indices[m][i], ...] = data[m][i, ...]
9320```
9321
9322Each `data[i].shape` must start with the corresponding `indices[i].shape`,
9323and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
9324must have `data[i].shape = indices[i].shape + constant`.  In terms of this
9325`constant`, the output shape is
9326
9327    merged.shape = [max(indices)] + constant
9328
9329Values may be merged in parallel, so if an index appears in both `indices[m][i]`
and `indices[n][j]`, the result may be invalid. This differs from the normal
DynamicStitch operator, which defines the behavior in that case.
9332
9333For example:
9334
9335```python
9336    indices[0] = 6
9337    indices[1] = [4, 1]
9338    indices[2] = [[5, 2], [0, 3]]
9339    data[0] = [61, 62]
9340    data[1] = [[41, 42], [11, 12]]
9341    data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
9342    merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
9343              [51, 52], [61, 62]]
9344```
9345
9346This method can be used to merge partitions created by `dynamic_partition`
9347as illustrated on the following example:
9348
9349```python
    # Apply a function (increment x_i) to elements for which a certain
    # condition holds (x_i != -1 in this example).
    x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
    condition_mask = tf.not_equal(x, tf.constant(-1.))
    partitioned_data = tf.dynamic_partition(
        x, tf.cast(condition_mask, tf.int32), 2)
    partitioned_data[1] = partitioned_data[1] + 1.0
    condition_indices = tf.dynamic_partition(
        tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
9359    x = tf.dynamic_stitch(condition_indices, partitioned_data)
9360    # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
9361    # unchanged.
9362```
9363
9364<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
9365<img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
9366</div>
9367  }];
9368
9369  let arguments = (ins
9370    Variadic<TF_Int32Tensor>:$indices,
9371    Variadic<TF_Tensor>:$data
9372  );
9373
9374  let results = (outs
9375    TF_Tensor:$merged
9376  );
9377
9378  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
9379  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
9380}
9381
9382def TF_ParameterizedTruncatedNormalOp : TF_Op<"ParameterizedTruncatedNormal", [TF_CannotDuplicate]> {
9383  let summary = [{
9384Outputs random values from a normal distribution. The parameters may each be a
9385  }];
9386
9387  let description = [{
9388scalar which applies to the entire output, or a vector of length shape[0] which
9389stores the parameters for each batch.
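
A minimal sketch via `tf.raw_ops` (argument names mirror this op definition;
the values below are illustrative):

```python
import tensorflow as tf

# Two batches of five samples; each row uses its own (mean, stdev, min, max).
samples = tf.raw_ops.ParameterizedTruncatedNormal(
    shape=[2, 5],
    means=tf.constant([0.0, 10.0]),
    stdevs=tf.constant([1.0, 2.0]),
    minvals=tf.constant([-1.0, 5.0]),
    maxvals=tf.constant([1.0, 15.0]))
```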
9390  }];
9391
9392  let arguments = (ins
9393    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor. Batches are indexed by the 0th dimension.}]>:$shape,
9394    Arg<TF_FloatTensor, [{The mean parameter of each batch.}]>:$means,
9395    Arg<TF_FloatTensor, [{The standard deviation parameter of each batch. Must be greater than 0.}]>:$stdevs,
9396    Arg<TF_FloatTensor, [{The minimum cutoff. May be -infinity.}]>:$minvals,
9397    Arg<TF_FloatTensor, [{The maximum cutoff. May be +infinity, and must be more than the minval
9398for each batch.}]>:$maxvals,
9399
9400    DefaultValuedAttr<I64Attr, "0">:$seed,
9401    DefaultValuedAttr<I64Attr, "0">:$seed2
9402  );
9403
9404  let results = (outs
9405    Res<TF_FloatTensor, [{A matrix of shape num_batches x samples_per_batch, filled with random
9406truncated normal values using the parameters for each row.}]>:$output
9407  );
9408
9409  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9410  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<1>;
9411}
9412
9413def TF_PolygammaOp : TF_Op<"Polygamma", [NoSideEffect, ResultsBroadcastableShape]>,
9414                     WithBroadcastableBinOpBuilder {
9415  let summary = [{
9416Compute the polygamma function \\(\psi^{(n)}(x)\\).
9417  }];
9418
9419  let description = [{
9420The polygamma function is defined as:
9421
\\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\)
9424
9425where \\(\psi(x)\\) is the digamma function.
The polygamma function is defined only for non-negative integer orders \\(a\\).
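
For example, order `a = 0` recovers the digamma function itself (a sketch
using the `tf.math.polygamma` wrapper):

```python
import tensorflow as tf

a = tf.constant([0.0, 1.0])
x = tf.constant([1.0, 1.0])
# psi(1) = -(Euler's gamma) ~= -0.5772; psi'(1) = pi^2 / 6 ~= 1.6449.
tf.math.polygamma(a, x)  # ==> [-0.5772157, 1.6449341]
```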
9427  }];
9428
9429  let arguments = (ins
9430    TF_F32OrF64Tensor:$a,
9431    TF_F32OrF64Tensor:$x
9432  );
9433
9434  let results = (outs
9435    TF_F32OrF64Tensor:$z
9436  );
9437
9438  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9439}
9440
9441def TF_PopulationCountOp : TF_Op<"PopulationCount", [NoSideEffect, SameOperandsAndResultShape]> {
9442  let summary = [{
9443Computes element-wise population count (a.k.a. popcount, bitsum, bitcount).
9444  }];
9445
9446  let description = [{
9447For each entry in `x`, calculates the number of `1` (on) bits in the binary
9448representation of that entry.
9449
9450**NOTE**: It is more efficient to first `tf.bitcast` your tensors into
9451`int32` or `int64` and perform the bitcount on the result, than to feed in
94528- or 16-bit inputs and then aggregate the resulting counts.
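
A minimal sketch via `tf.raw_ops` (the result dtype is always `uint8`):

```python
import tensorflow as tf

x = tf.constant([0, 1, 2, 7, 255], dtype=tf.int32)
tf.raw_ops.PopulationCount(x=x)  # ==> [0, 1, 1, 3, 8]
```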
9453  }];
9454
9455  let arguments = (ins
9456    TF_IntTensor:$x
9457  );
9458
9459  let results = (outs
9460    TF_Uint8Tensor:$y
9461  );
9462
9463  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9464}
9465
9466def TF_PowOp : TF_Op<"Pow", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
9467               WithBroadcastableBinOpBuilder {
9468  let summary = "Computes the power of one value to another.";
9469
9470  let description = [{
9471Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
9472corresponding elements in `x` and `y`. For example:
9473
9474```
# tensor 'x' is [[2, 2], [3, 3]]
9476# tensor 'y' is [[8, 16], [2, 3]]
9477tf.pow(x, y) ==> [[256, 65536], [9, 27]]
9478```
9479  }];
9480
9481  let arguments = (ins
9482    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x,
9483    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
9484  );
9485
9486  let results = (outs
9487    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$z
9488  );
9489
9490  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9491}
9492
9493def TF_PreventGradientOp : TF_Op<"PreventGradient", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
9494  let summary = [{
9495An identity op that triggers an error if a gradient is requested.
9496  }];
9497
9498  let description = [{
9499When executed in a graph, this op outputs its input tensor as-is.
9500
9501When building ops to compute gradients, the TensorFlow gradient system
will return an error when trying to look up the gradient of this op,
9503because no gradient must ever be registered for this function.  This
9504op exists to prevent subtle bugs from silently returning unimplemented
9505gradients in some corner cases.
9506  }];
9507
9508  let arguments = (ins
9509    Arg<TF_Tensor, [{any tensor.}]>:$input,
9510
9511    StrAttr:$message
9512  );
9513
9514  let results = (outs
9515    Res<TF_Tensor, [{the same input tensor.}]>:$output
9516  );
9517
9518  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9519}
9520
9521def TF_PrintV2Op : TF_Op<"PrintV2", []> {
9522  let summary = "Prints a string scalar.";
9523
9524  let description = [{
9525Prints a string scalar to the desired output_stream.
9526  }];
9527
9528  let arguments = (ins
9529    Arg<TF_StrTensor, [{The string scalar to print.}]>:$input,
9530
9531    DefaultValuedAttr<StrAttr, "stderr">:$output_stream,
9532    DefaultValuedAttr<StrAttr, "\n">:$end
9533  );
9534
9535  let results = (outs);
9536}
9537
9538def TF_ProdOp : TF_Op<"Prod", [NoSideEffect]> {
9539  let summary = [{
9540Computes the product of elements across dimensions of a tensor.
9541  }];
9542
9543  let description = [{
9544Reduces `input` along the dimensions given in `axis`. Unless
9545`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
9546`axis`. If `keep_dims` is true, the reduced dimensions are
9547retained with length 1.
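
For example (a sketch using the `tf.reduce_prod` wrapper, which lowers to
this op):

```python
import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
tf.reduce_prod(x, axis=0)                 # ==> [3., 8.]
tf.reduce_prod(x, axis=1)                 # ==> [2., 12.]
tf.reduce_prod(x, axis=1, keepdims=True)  # ==> [[2.], [12.]]
```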
9548  }];
9549
9550  let arguments = (ins
9551    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
9552    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
9553`[-rank(input), rank(input))`.}]>:$reduction_indices,
9554
9555    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
9556  );
9557
9558  let results = (outs
9559    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
9560  );
9561
9562  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9563  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
9564}
9565
9566def TF_QrOp : TF_Op<"Qr", [NoSideEffect]> {
9567  let summary = "Computes the QR decompositions of one or more matrices.";
9568
9569  let description = [{
Computes the QR decomposition of each inner matrix in `tensor` such that
`tensor[..., :, :] = q[..., :, :] * r[..., :, :]`.
9572
9573Currently, the gradient for the QR decomposition is well-defined only when
9574the first `P` columns of the inner matrix are linearly independent, where
`P` is the minimum of `M` and `N`, the 2 inner-most dimensions of `tensor`.
9576
9577```python
9578# a is a tensor.
9579# q is a tensor of orthonormal matrices.
9580# r is a tensor of upper triangular matrices.
9581q, r = qr(a)
9582q_full, r_full = qr(a, full_matrices=True)
9583```
9584  }];
9585
9586  let arguments = (ins
9587    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
9588form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.}]>:$input,
9589
9590    DefaultValuedAttr<BoolAttr, "false">:$full_matrices
9591  );
9592
9593  let results = (outs
9594    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Orthonormal basis for range of `a`. If `full_matrices` is `False` then
9595shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
9596`[..., M, M]`.}]>:$q,
9597    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Triangular factor. If `full_matrices` is `False` then shape is
9598`[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.}]>:$r
9599  );
9600
9601  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9602
9603  let verifier = [{
9604    return Verify(*this);
9605  }];
9606}
9607
9608def TF_QuantizeAndDequantizeOp : TF_Op<"QuantizeAndDequantize", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
9609  let summary = "Use QuantizeAndDequantizeV2 instead.";
9610
9611  let arguments = (ins
9612    TF_FloatTensor:$input,
9613
9614    DefaultValuedAttr<BoolAttr, "true">:$signed_input,
9615    DefaultValuedAttr<I64Attr, "8">:$num_bits,
9616    DefaultValuedAttr<BoolAttr, "false">:$range_given,
9617    DefaultValuedAttr<F32Attr, "0.0f">:$input_min,
9618    DefaultValuedAttr<F32Attr, "0.0f">:$input_max
9619  );
9620
9621  let results = (outs
9622    TF_FloatTensor:$output
9623  );
9624
9625  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9626}
9627
9628def TF_QuantizeAndDequantizeV2Op : TF_Op<"QuantizeAndDequantizeV2", [NoSideEffect]> {
9629  let summary = "Quantizes then dequantizes a tensor.";
9630
9631  let description = [{
9632This op simulates the precision loss from the quantized forward pass by:
9633
96341. Quantizing the tensor to fixed point numbers, which should match the target
9635   quantization method when it is used in inference.
96362. Dequantizing it back to floating point numbers for the following ops, most
9637   likely matmul.
9638
9639There are different ways to quantize. This version uses only scaling, so 0.0
9640maps to 0.
9641
9642From the specified 'num_bits' in the quantized output type, it determines
9643minimum and maximum representable quantized values.
9644
9645e.g.
9646
9647*   [-128, 127] for signed, num_bits = 8, or
9648*   [0, 255] for unsigned, num_bits = 8.
9649
9650If range_given == False, the initial input_min, input_max will be determined
9651automatically as the minimum and maximum values in the input tensor, otherwise
9652the specified values of input_min, input_max are used.
9653
9654Note: If the input_min, input_max are specified, they do not need to equal the
9655actual minimum and maximum values in the tensor. e.g. in some cases it may be
9656beneficial to specify these values such that the low probability extremes of the
9657input distribution are clipped.
9658
9659This op determines the maximum scale_factor that would map the initial
9660[input_min, input_max] range to a range that lies within the representable
9661quantized range.
9662
9663It determines the scale from one of input_min and input_max, then updates the
9664other one to maximize the representable range.
9665
9666e.g.
9667
*   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
    5.0]: it would use a scale_factor of -128 / -10.0 = 12.8. In this case, it
    would update input_max to be 127 / 12.8 = 9.921875.
*   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
    10.0]: it would use a scale_factor of 127 / 10.0 = 12.7. In this case, it
    would update input_min to be -128.0 / 12.7 = -10.07874.
9674*   if the output is unsigned, input_min is forced to be 0, and only the
9675    specified input_max is used.
9676
9677After determining the scale_factor and updating the input range, it applies the
9678following to each value in the 'input' tensor.
9679
9680output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.
9681
9682The above round function rounds the value based on the given round_mode.
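
The scale selection can be sketched in plain Python (an illustration of the
math above for signed output with input_min < 0 < input_max, not the actual
kernel):

```python
min_quant, max_quant = -128, 127  # signed, num_bits = 8

def quantize_dequantize(value, input_min, input_max):
  # The bound that limits the range fixes scale_factor; the other bound is
  # then widened to cover the full representable range.
  scale_factor = min(min_quant / input_min, max_quant / input_max)
  input_min, input_max = min_quant / scale_factor, max_quant / scale_factor
  clamped = min(max(value, input_min), input_max)
  # Python's round() rounds half to even, matching HALF_TO_EVEN.
  return round(clamped * scale_factor) / scale_factor

quantize_dequantize(4.9, -10.0, 5.0)  # ==> 4.921875 (= 63 / 12.8)
```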
9683  }];
9684
9685  let arguments = (ins
9686    Arg<TF_FloatTensor, [{Tensor to quantize and then dequantize.}]>:$input,
9687    Arg<TF_FloatTensor, [{If `range_given == True`, this specifies the minimum input value that needs to
9688be represented, otherwise it is determined from the min value of the `input`
9689tensor.}]>:$input_min,
9690    Arg<TF_FloatTensor, [{If `range_given == True`, this specifies the maximum input value that needs to
9691be represented, otherwise it is determined from the max value of the `input`
9692tensor.}]>:$input_max,
9693
9694    DefaultValuedAttr<BoolAttr, "true">:$signed_input,
9695    DefaultValuedAttr<I64Attr, "8">:$num_bits,
9696    DefaultValuedAttr<BoolAttr, "false">:$range_given,
9697    DefaultValuedAttr<TF_AnyStrAttrOf<["HALF_TO_EVEN", "HALF_UP"]>, "HALF_TO_EVEN">:$round_mode,
9698    DefaultValuedAttr<BoolAttr, "false">:$narrow_range,
9699    DefaultValuedAttr<I64Attr, "-1">:$axis
9700  );
9701
9702  let results = (outs
9703    TF_FloatTensor:$output
9704  );
9705
9706  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9707}
9708
9709def TF_QuantizeAndDequantizeV3Op : TF_Op<"QuantizeAndDequantizeV3", [NoSideEffect]> {
9710  let summary = "Quantizes then dequantizes a tensor.";
9711
9712  let description = [{
9713This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
9714tensor, so its value can change during training.
9715  }];
9716
9717  let arguments = (ins
9718    TF_FloatTensor:$input,
9719    TF_FloatTensor:$input_min,
9720    TF_FloatTensor:$input_max,
9721    TF_Int32Tensor:$num_bits,
9722
9723    DefaultValuedAttr<BoolAttr, "true">:$signed_input,
9724    DefaultValuedAttr<BoolAttr, "true">:$range_given,
9725    DefaultValuedAttr<BoolAttr, "false">:$narrow_range,
9726    DefaultValuedAttr<I64Attr, "-1">:$axis
9727  );
9728
9729  let results = (outs
9730    TF_FloatTensor:$output
9731  );
9732
9733  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9734}
9735
9736def TF_QueueDequeueV2Op : TF_Op<"QueueDequeueV2", []> {
9737  let summary = "Dequeues a tuple of one or more tensors from the given queue.";
9738
9739  let description = [{
9740This operation has k outputs, where k is the number of components
9741in the tuples stored in the given queue, and output i is the ith
9742component of the dequeued tuple.
9743
9744N.B. If the queue is empty, this operation will block until an element
9745has been dequeued (or 'timeout_ms' elapses, if specified).
9746  }];
9747
9748  let arguments = (ins
9749    Arg<TF_ResourceTensor, [{The handle to a queue.}]>:$handle,
9750
9751    DefaultValuedAttr<I64Attr, "-1">:$timeout_ms
9752  );
9753
9754  let results = (outs
9755    Res<Variadic<TF_Tensor>, [{One or more tensors that were dequeued as a tuple.}]>:$components
9756  );
9757
9758  TF_DerivedResultTypeListAttr component_types = TF_DerivedResultTypeListAttr<0>;
9759}
9760
9761def TF_RFFTOp : TF_Op<"RFFT", [NoSideEffect]> {
9762  let summary = "Real-valued fast Fourier transform.";
9763
9764  let description = [{
9765Computes the 1-dimensional discrete Fourier transform of a real-valued signal
9766over the inner-most dimension of `input`.
9767
9768Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the
9769`fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,
9770followed by the `fft_length / 2` positive-frequency terms.
9771
9772Along the axis `RFFT` is computed on, if `fft_length` is smaller than the
9773corresponding dimension of `input`, the dimension is cropped. If it is larger,
9774the dimension is padded with zeros.
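
For example (a sketch using the `tf.signal.rfft` wrapper):

```python
import tensorflow as tf

x = tf.constant([0., 1., 0., 0.])
# fft_length = 4, so the output has 4 / 2 + 1 = 3 components.
tf.signal.rfft(x)  # ==> [1+0j, 0-1j, -1+0j]
```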
9775  }];
9776
9777  let arguments = (ins
9778    Arg<TF_F32OrF64Tensor, [{A float32 tensor.}]>:$input,
9779    Arg<TF_Int32Tensor, [{An int32 tensor of shape [1]. The FFT length.}]>:$fft_length
9780  );
9781
9782  let results = (outs
9783    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex64 tensor of the same rank as `input`. The inner-most
9784  dimension of `input` is replaced with the `fft_length / 2 + 1` unique
9785  frequency components of its 1D Fourier transform.
9786
9787@compatibility(numpy)
9788Equivalent to np.fft.rfft
9789@end_compatibility}]>:$output
9790  );
9791
9792  TF_DerivedOperandTypeAttr Treal = TF_DerivedOperandTypeAttr<0>;
9793  TF_DerivedResultTypeAttr Tcomplex = TF_DerivedResultTypeAttr<0>;
9794}
9795
9796def TF_RFFT2DOp : TF_Op<"RFFT2D", [NoSideEffect]> {
9797  let summary = "2D real-valued fast Fourier transform.";
9798
9799  let description = [{
9800Computes the 2-dimensional discrete Fourier transform of a real-valued signal
9801over the inner-most 2 dimensions of `input`.
9802
9803Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
9804`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
9805of `output`: the zero-frequency term, followed by the `fft_length / 2`
9806positive-frequency terms.
9807
9808Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
9809corresponding dimension of `input`, the dimension is cropped. If it is larger,
9810the dimension is padded with zeros.
9811  }];
9812
9813  let arguments = (ins
9814    Arg<TF_F32OrF64Tensor, [{A float32 tensor.}]>:$input,
9815    Arg<TF_Int32Tensor, [{An int32 tensor of shape [2]. The FFT length for each dimension.}]>:$fft_length
9816  );
9817
9818  let results = (outs
9819    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex64 tensor of the same rank as `input`. The inner-most 2
9820  dimensions of `input` are replaced with their 2D Fourier transform. The
9821  inner-most dimension contains `fft_length / 2 + 1` unique frequency
9822  components.
9823
9824@compatibility(numpy)
9825Equivalent to np.fft.rfft2
9826@end_compatibility}]>:$output
9827  );
9828
9829  TF_DerivedOperandTypeAttr Treal = TF_DerivedOperandTypeAttr<0>;
9830  TF_DerivedResultTypeAttr Tcomplex = TF_DerivedResultTypeAttr<0>;
9831}
9832
9833def TF_RFFT3DOp : TF_Op<"RFFT3D", [NoSideEffect]> {
9834  let summary = "3D real-valued fast Fourier transform.";
9835
9836  let description = [{
9837Computes the 3-dimensional discrete Fourier transform of a real-valued signal
9838over the inner-most 3 dimensions of `input`.
9839
9840Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
9841`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
9842of `output`: the zero-frequency term, followed by the `fft_length / 2`
9843positive-frequency terms.
9844
9845Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
9846corresponding dimension of `input`, the dimension is cropped. If it is larger,
9847the dimension is padded with zeros.
9848  }];
9849
9850  let arguments = (ins
9851    Arg<TF_F32OrF64Tensor, [{A float32 tensor.}]>:$input,
9852    Arg<TF_Int32Tensor, [{An int32 tensor of shape [3]. The FFT length for each dimension.}]>:$fft_length
9853  );
9854
9855  let results = (outs
9856    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex64 tensor of the same rank as `input`. The inner-most 3
  dimensions of `input` are replaced with their 3D Fourier transform. The
9858  inner-most dimension contains `fft_length / 2 + 1` unique frequency
9859  components.
9860
9861@compatibility(numpy)
9862Equivalent to np.fft.rfftn with 3 dimensions.
9863@end_compatibility}]>:$output
9864  );
9865
9866  TF_DerivedOperandTypeAttr Treal = TF_DerivedOperandTypeAttr<0>;
9867  TF_DerivedResultTypeAttr Tcomplex = TF_DerivedResultTypeAttr<0>;
9868}
9869
9870def TF_RGBToHSVOp : TF_Op<"RGBToHSV", [NoSideEffect]> {
9871  let summary = "Converts one or more images from RGB to HSV.";
9872
9873  let description = [{
9874Outputs a tensor of the same shape as the `images` tensor, containing the HSV
value of the pixels. The output is only well defined if the values in `images`
are in `[0,1]`.
9877
9878`output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
9879`output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
9880corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
9881
9882Usage Example:
9883
9884>>> blue_image = tf.stack([
9885...    tf.zeros([5,5]),
9886...    tf.zeros([5,5]),
9887...    tf.ones([5,5])],
9888...    axis=-1)
9889>>> blue_hsv_image = tf.image.rgb_to_hsv(blue_image)
9890>>> blue_hsv_image[0,0].numpy()
9891array([0.6666667, 1. , 1. ], dtype=float32)
9892  }];
9893
9894  let arguments = (ins
9895    Arg<TF_FloatTensor, [{1-D or higher rank. RGB data to convert. Last dimension must be size 3.}]>:$images
9896  );
9897
9898  let results = (outs
9899    Res<TF_FloatTensor, [{`images` converted to HSV.}]>:$output
9900  );
9901
9902  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9903}
9904
9905def TF_RaggedGatherOp : TF_Op<"RaggedGather", [NoSideEffect]> {
9906  let summary = [{
9907Gather ragged slices from `params` axis `0` according to `indices`.
9908  }];
9909
9910  let description = [{
9911Outputs a `RaggedTensor` output composed from `output_dense_values` and
9912`output_nested_splits`, such that:
9913
9914```python
9915output.shape = indices.shape + params.shape[1:]
9916output.ragged_rank = indices.shape.ndims + params.ragged_rank
9917output[i...j, d0...dn] = params[indices[i...j], d0...dn]
9918```
9919
9920where
9921
9922* `params =
9923   ragged.from_nested_row_splits(params_dense_values, params_nested_splits)`
9924   provides the values that should be gathered.
* `indices` is a dense tensor with dtype `int32` or `int64`, indicating which
9926   values should be gathered.
9927* `output =
9928   ragged.from_nested_row_splits(output_dense_values, output_nested_splits)`
9929   is the output tensor.
9930
(Note: This C++ op is used to implement the higher-level Python
`tf.ragged.gather` op, which also supports ragged indices.)
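
At the Python level this is reachable through `tf.gather`, which dispatches
for `tf.RaggedTensor` params (a sketch):

```python
import tensorflow as tf

params = tf.ragged.constant([[1, 2], [3], [], [4, 5, 6]])
tf.gather(params, [3, 1, 0])  # ==> [[4, 5, 6], [3], [1, 2]]
```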
9933  }];
9934
9935  let arguments = (ins
9936    Arg<Variadic<TF_I32OrI64Tensor>, [{The `nested_row_splits` tensors that define the row-partitioning for the
9937`params` RaggedTensor input.}]>:$params_nested_splits,
    Arg<TF_Tensor, [{The `flat_values` for the `params` RaggedTensor. There was a terminology change
at the Python level from dense_values to flat_values, so dense_values is the
9940deprecated name.}]>:$params_dense_values,
9941    Arg<TF_I32OrI64Tensor, [{Indices in the outermost dimension of `params` of the values that should be
9942gathered.}]>:$indices
9943  );
9944
9945  let results = (outs
9946    Res<Variadic<TF_I32OrI64Tensor>, [{The `nested_row_splits` tensors that define the row-partitioning for the
9947returned RaggedTensor.}]>:$output_nested_splits,
9948    Res<TF_Tensor, [{The `flat_values` for the returned RaggedTensor.}]>:$output_dense_values
9949  );
9950
9951  TF_DerivedOperandTypeAttr Tsplits = TF_DerivedOperandTypeAttr<0>;
9952  TF_DerivedOperandTypeAttr Tvalues = TF_DerivedOperandTypeAttr<1>;
9953  TF_DerivedResultSizeAttr OUTPUT_RAGGED_RANK = TF_DerivedResultSizeAttr<0>;
9954  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
9955  TF_DerivedOperandSizeAttr PARAMS_RAGGED_RANK = TF_DerivedOperandSizeAttr<0>;
9956}
9957
9958def TF_RaggedRangeOp : TF_Op<"RaggedRange", [NoSideEffect]> {
9959  let summary = [{
9960Returns a `RaggedTensor` containing the specified sequences of numbers.
9961  }];
9962
9963  let description = [{
9964Returns a `RaggedTensor` `result` composed from `rt_dense_values` and
9965`rt_nested_splits`, such that
9966`result[i] = range(starts[i], limits[i], deltas[i])`.
9967
9968```python
9969(rt_nested_splits, rt_dense_values) = ragged_range(
9970      starts=[2, 5, 8], limits=[3, 5, 12], deltas=1)
9971result = tf.ragged.from_row_splits(rt_dense_values, rt_nested_splits)
9972print(result)
<tf.RaggedTensor [[2], [], [8, 9, 10, 11]]>
9974```
9975
9976The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.
9977The vector inputs must all have the same size.  Scalar inputs are broadcast
9978to match the size of the vector inputs.
9979  }];
9980
9981  let arguments = (ins
9982    Arg<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The starts of each range.}]>:$starts,
9983    Arg<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The limits of each range.}]>:$limits,
9984    Arg<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The deltas of each range.}]>:$deltas
9985  );
9986
9987  let results = (outs
9988    Res<TF_I32OrI64Tensor, [{The `row_splits` for the returned `RaggedTensor`.}]>:$rt_nested_splits,
9989    Res<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The `flat_values` for the returned `RaggedTensor`.}]>:$rt_dense_values
9990  );
9991
9992  TF_DerivedResultTypeAttr Tsplits = TF_DerivedResultTypeAttr<0>;
9993  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9994}
9995
9996def TF_RandomGammaOp : TF_Op<"RandomGamma", [TF_CannotDuplicate]> {
9997  let summary = [{
9998Outputs random values from the Gamma distribution(s) described by alpha.
9999  }];
10000
10001  let description = [{
10002This op uses the algorithm by Marsaglia et al. to acquire samples via
10003transformation-rejection from pairs of uniform and normal random variables.
10004See http://dl.acm.org/citation.cfm?id=358414
10005  }];
10006
10007  let arguments = (ins
10008    Arg<TF_I32OrI64Tensor, [{1-D integer tensor. Shape of independent samples to draw from each
10009distribution described by the shape parameters given in alpha.}]>:$shape,
10010    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{A tensor in which each scalar is a "shape" parameter describing the
10011associated gamma distribution.}]>:$alpha,
10012
10013    DefaultValuedAttr<I64Attr, "0">:$seed,
10014    DefaultValuedAttr<I64Attr, "0">:$seed2
10015  );
10016
10017  let results = (outs
10018    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{A tensor with shape `shape + shape(alpha)`. Each slice
10019`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
10020`alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.}]>:$output
10021  );
10022
10023  TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
10024  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
10025}
10026
10027def TF_RandomGammaGradOp : TF_Op<"RandomGammaGrad", [NoSideEffect, ResultsBroadcastableShape]>,
10028                           WithBroadcastableBinOpBuilder {
10029  let summary = [{
10030Computes the derivative of a Gamma random sample w.r.t. `alpha`.
10031  }];
10032
10033  let arguments = (ins
10034    TF_F32OrF64Tensor:$alpha,
10035    TF_F32OrF64Tensor:$sample
10036  );
10037
10038  let results = (outs
10039    TF_F32OrF64Tensor:$output
10040  );
10041
10042  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10043}
10044
10045def TF_RandomPoissonOp : TF_Op<"RandomPoisson", [TF_CannotDuplicate]> {
10046  let summary = "Use RandomPoissonV2 instead.";
10047
10048  let arguments = (ins
10049    TF_I32OrI64Tensor:$shape,
10050    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$rate,
10051
10052    DefaultValuedAttr<I64Attr, "0">:$seed,
10053    DefaultValuedAttr<I64Attr, "0">:$seed2
10054  );
10055
10056  let results = (outs
10057    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$output
10058  );
10059
10060  TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
10061  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<1>;
10062}
10063
10064def TF_RandomPoissonV2Op : TF_Op<"RandomPoissonV2", [TF_CannotDuplicate]> {
10065  let summary = [{
10066Outputs random values from the Poisson distribution(s) described by rate.
10067  }];
10068
10069  let description = [{
10070This op uses two algorithms, depending on rate. If rate >= 10, then
10071the algorithm by Hormann is used to acquire samples via
10072transformation-rejection.
10073See http://www.sciencedirect.com/science/article/pii/0167668793909974.
10074
10075Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
10076random variables.
See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
Programming, Volume 2. Addison Wesley.
10079  }];
10080
10081  let arguments = (ins
10082    Arg<TF_I32OrI64Tensor, [{1-D integer tensor. Shape of independent samples to draw from each
10083distribution described by the shape parameters given in rate.}]>:$shape,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{A tensor in which each scalar is a "rate" parameter describing the
associated Poisson distribution.}]>:$rate,
10086
10087    DefaultValuedAttr<I64Attr, "0">:$seed,
10088    DefaultValuedAttr<I64Attr, "0">:$seed2
10089  );
10090
10091  let results = (outs
10092    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{A tensor with shape `shape + shape(rate)`. Each slice
10093`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
10094`rate[i0, i1, ...iN]`.}]>:$output
10095  );
10096
10097  TF_DerivedOperandTypeAttr R = TF_DerivedOperandTypeAttr<1>;
10098  TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
10099  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
10100}
10101
10102def TF_RandomShuffleOp : TF_Op<"RandomShuffle", [TF_CannotDuplicate, TF_SameOperandsAndResultTypeResolveRef]> {
10103  let summary = "Randomly shuffles a tensor along its first dimension.";
10104
10105  let description = [{
The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
to one and only one `output[i]`. For example, a mapping that might occur for a
3x2 tensor is:
10109
10110```
10111[[1, 2],       [[5, 6],
10112 [3, 4],  ==>   [1, 2],
10113 [5, 6]]        [3, 4]]
10114```
10115  }];
10116
10117  let arguments = (ins
10118    Arg<TF_Tensor, [{The tensor to be shuffled.}]>:$value,
10119
10120    DefaultValuedAttr<I64Attr, "0">:$seed,
10121    DefaultValuedAttr<I64Attr, "0">:$seed2
10122  );
10123
10124  let results = (outs
10125    Res<TF_Tensor, [{A tensor of same shape and type as `value`, shuffled along its first
10126dimension.}]>:$output
10127  );
10128
10129  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10130}
10131
10132def TF_RandomStandardNormalOp : TF_Op<"RandomStandardNormal", [TF_CannotDuplicate]> {
10133  let summary = "Outputs random values from a normal distribution.";
10134
10135  let description = [{
10136The generated values will have mean 0 and standard deviation 1.
10137  }];
10138
10139  let arguments = (ins
10140    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
10141
10142    DefaultValuedAttr<I64Attr, "0">:$seed,
10143    DefaultValuedAttr<I64Attr, "0">:$seed2
10144  );
10145
10146  let results = (outs
10147    Res<TF_FloatTensor, [{A tensor of the specified shape filled with random normal values.}]>:$output
10148  );
10149
10150  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10151  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
10152}
10153
10154def TF_RandomUniformOp : TF_Op<"RandomUniform", [TF_CannotDuplicate]> {
10155  let summary = "Outputs random values from a uniform distribution.";
10156
10157  let description = [{
10158The generated values follow a uniform distribution in the range `[0, 1)`. The
10159lower bound 0 is included in the range, while the upper bound 1 is excluded.
10160  }];
10161
10162  let arguments = (ins
10163    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
10164
10165    DefaultValuedAttr<I64Attr, "0">:$seed,
10166    DefaultValuedAttr<I64Attr, "0">:$seed2
10167  );
10168
10169  let results = (outs
10170    Res<TF_FloatTensor, [{A tensor of the specified shape filled with uniform random values.}]>:$output
10171  );
10172
10173  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10174  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
10175
10176  let verifier = [{
10177    return Verify(*this);
10178  }];
10179}
10180
10181def TF_RandomUniformIntOp : TF_Op<"RandomUniformInt", [TF_CannotDuplicate]> {
10182  let summary = "Outputs random integers from a uniform distribution.";
10183
10184  let description = [{
10185The generated values are uniform integers in the range `[minval, maxval)`.
10186The lower bound `minval` is included in the range, while the upper bound
10187`maxval` is excluded.
10188
10189The random integers are slightly biased unless `maxval - minval` is an exact
10190power of two.  The bias is small for values of `maxval - minval` significantly
10191smaller than the range of the output (either `2^32` or `2^64`).
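
For example (a sketch using the `tf.random.uniform` wrapper with an integer
dtype, which lowers to this op):

```python
import tensorflow as tf

# Integers in [0, 10); 10 is not a power of two, so the values carry the
# (tiny) bias described above.
tf.random.uniform([4], minval=0, maxval=10, dtype=tf.int32)
```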
10192  }];
10193
10194  let arguments = (ins
10195    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
10196    Arg<TF_I32OrI64Tensor, [{0-D.  Inclusive lower bound on the generated integers.}]>:$minval,
10197    Arg<TF_I32OrI64Tensor, [{0-D.  Exclusive upper bound on the generated integers.}]>:$maxval,
10198
10199    DefaultValuedAttr<I64Attr, "0">:$seed,
10200    DefaultValuedAttr<I64Attr, "0">:$seed2
10201  );
10202
10203  let results = (outs
10204    Res<TF_I32OrI64Tensor, [{A tensor of the specified shape filled with uniform random integers.}]>:$output
10205  );
10206
10207  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10208  TF_DerivedOperandTypeAttr Tout = TF_DerivedOperandTypeAttr<1>;
10209}
10210
10211def TF_RangeOp : TF_Op<"Range", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
10212  let summary = "Creates a sequence of numbers.";
10213
10214  let description = [{
10215This operation creates a sequence of numbers that begins at `start` and
10216extends by increments of `delta` up to but not including `limit`.
10217
10218For example:
10219
10220```
10221# 'start' is 3
10222# 'limit' is 18
10223# 'delta' is 3
10224tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
10225```
10226  }];
10227
10228  let arguments = (ins
10229    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>, [{0-D (scalar). First entry in the sequence.}]>:$start,
10230    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>, [{0-D (scalar). Upper limit of sequence, exclusive.}]>:$limit,
10231    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>, [{0-D (scalar). Optional. Default is 1. Number that increments `start`.}]>:$delta
10232  );
10233
10234  let results = (outs
10235    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>, [{1-D.}]>:$output
10236  );
10237
10238  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<0>;
10239
10240  let builders = [
10241    OpBuilder<(ins "Value":$start, "Value":$limit, "Value":$delta)>
10242  ];
10243}
10244
10245def TF_RangeDatasetOp : TF_Op<"RangeDataset", []> {
10246  let summary = [{
Creates a dataset with a range of values. Corresponds to Python's `xrange`.
10248  }];
10249
10250  let arguments = (ins
    Arg<TF_Int64Tensor, [{corresponds to start in Python's xrange().}]>:$start,
    Arg<TF_Int64Tensor, [{corresponds to stop in Python's xrange().}]>:$stop,
    Arg<TF_Int64Tensor, [{corresponds to step in Python's xrange().}]>:$step,
10254
10255    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
10256    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
10257  );
10258
10259  let results = (outs
10260    TF_VariantTensor:$handle
10261  );
10262}
10263
10264def TF_RankOp : TF_Op<"Rank", [NoSideEffect]> {
10265  let summary = "Returns the rank of a tensor.";
10266
10267  let description = [{
10268This operation returns an integer representing the rank of `input`.
10269
10270For example:
10271
10272```
10273# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
10274# shape of tensor 't' is [2, 2, 3]
10275rank(t) ==> 3
10276```
10277
10278**Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
10279of a tensor is the number of indices required to uniquely select each element
10280of the tensor. Rank is also known as "order", "degree", or "ndims."
10281  }];
10282
10283  let arguments = (ins
10284    TF_Tensor:$input
10285  );
10286
10287  let results = (outs
10288    TF_Int32Tensor:$output
10289  );
10290
10291  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10292
10293  let builders = [
10294    OpBuilder<(ins "Value":$input)>
10295  ];
10296}
10297
10298def TF_ReadVariableOp : TF_Op<"ReadVariableOp", []> {
10299  let summary = "Reads the value of a variable.";
10300
10301  let description = [{
10302The tensor returned by this operation is immutable.
10303
10304The value returned by this operation is guaranteed to be influenced by all the
10305writes on which this operation depends directly or indirectly, and to not be
10306influenced by any of the writes which depend directly or indirectly on this
10307operation.
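
A minimal eager-mode sketch of the ordering guarantee, using `tf.Variable`:

```python
import tensorflow as tf

v = tf.Variable(1.0)
v.assign(2.0)           # a write this read depends on
value = v.read_value()  # ==> 2.0; the returned tensor is immutable
```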
10308  }];
10309
10310  let arguments = (ins
10311    Arg<TF_ResourceTensor, [{handle to the resource in which to store the variable.}], [TF_VariableRead]>:$resource
10312  );
10313
10314  let results = (outs
10315    TF_Tensor:$value
10316  );
10317
10318  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
10319}
10320
10321def TF_RealOp : TF_Op<"Real", [NoSideEffect, SameOperandsAndResultShape]> {
10322  let summary = "Returns the real part of a complex number.";
10323
10324  let description = [{
10325Given a tensor `input` of complex numbers, this operation returns a tensor of
10326type `float` that is the real part of each element in `input`. All elements in
10327`input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
10328 part returned by this operation and *b* is the imaginary part.
10329
10330For example:
10331
10332```
10333# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
10334tf.real(input) ==> [-2.25, 3.25]
10335```
10336  }];
10337
10338  let arguments = (ins
10339    TensorOf<[TF_Complex128, TF_Complex64]>:$input
10340  );
10341
10342  let results = (outs
10343    TF_F32OrF64Tensor:$output
10344  );
10345
10346  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10347  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
10348}
10349
10350def TF_ReciprocalOp : TF_Op<"Reciprocal", [Involution, NoSideEffect, SameOperandsAndResultType]> {
10351  let summary = "Computes the reciprocal of x element-wise.";
10352
10353  let description = [{
10354I.e., \\(y = 1 / x\\).
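
For example, via the Python API (illustrative):

```python
tf.math.reciprocal([2., 0.5, 4.]).numpy()  # ==> array([0.5, 2., 0.25], dtype=float32)
```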
10355  }];
10356
10357  let arguments = (ins
10358    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
10359  );
10360
10361  let results = (outs
10362    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
10363  );
10364
10365  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10366}
10367
10368def TF_ReciprocalGradOp : TF_Op<"ReciprocalGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
10369  let summary = "Computes the gradient for the inverse of `x` wrt its input.";
10370
10371  let description = [{
Specifically, `grad = -dy * y * y`, where `y = 1/x` and `dy` is the
corresponding input gradient; this follows from `d(1/x)/dx = -1/x^2 = -y^2`.
10374  }];
10375
10376  let arguments = (ins
10377    TF_FpOrComplexTensor:$y,
10378    TF_FpOrComplexTensor:$dy
10379  );
10380
10381  let results = (outs
10382    TF_FpOrComplexTensor:$z
10383  );
10384
10385  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10386}
10387
10388def TF_RecvOp : TF_Op<"Recv", []> {
10389  let summary = "Receives the named tensor from send_device on recv_device.";
10390
10391  let arguments = (ins
10392    StrAttr:$tensor_name,
10393    StrAttr:$send_device,
10394    I64Attr:$send_device_incarnation,
10395    StrAttr:$recv_device,
10396    DefaultValuedAttr<BoolAttr, "false">:$client_terminated
10397  );
10398
10399  let results = (outs
10400    Res<TF_Tensor, [{The tensor to receive.}]>:$tensor
10401  );
10402
10403  TF_DerivedResultTypeAttr tensor_type = TF_DerivedResultTypeAttr<0>;
10404}
10405
10406def TF_RecvTPUEmbeddingActivationsOp : TF_Op<"RecvTPUEmbeddingActivations", [TF_TPUEmbeddingSideEffect]> {
10407  let summary = "An op that receives embedding activations on the TPU.";
10408
10409  let description = [{
10410The TPU system performs the embedding lookups and aggregations specified by
10411the arguments to TPUEmbeddingEnqueue(Integer/Sparse/SparseTensor)Batch. The
results of these aggregations are visible to the TensorFlow graph as the
10413outputs of a RecvTPUEmbeddingActivations op. This op returns a list containing
10414one Tensor of activations per table specified in the model. There can be at
10415most one RecvTPUEmbeddingActivations op in the TPU graph.
10416  }];
10417
10418  let arguments = (ins
10419    StrAttr:$config
10420  );
10421
10422  let results = (outs
10423    Res<Variadic<TF_Float32Tensor>, [{A TensorList of embedding activations containing one Tensor per
10424embedding table in the model.}]>:$outputs
10425  );
10426
10427  TF_DerivedResultSizeAttr num_outputs = TF_DerivedResultSizeAttr<0>;
10428}
10429
10430def TF_ReduceJoinOp : TF_Op<"ReduceJoin", [NoSideEffect]> {
10431  let summary = "Joins a string Tensor across the given dimensions.";
10432
10433  let description = [{
10434Computes the string join across dimensions in the given string Tensor of shape
10435`[\\(d_0, d_1, ..., d_{n-1}\\)]`.  Returns a new Tensor created by joining the input
10436strings with the given separator (default: empty string).  Negative indices are
10437counted backwards from the end, with `-1` being equivalent to `n - 1`.  If
10438indices are not specified, joins across all dimensions beginning from `n - 1`
10439through `0`.
10440
10441For example:
10442
10443```python
10444# tensor `a` is [["a", "b"], ["c", "d"]]
10445tf.reduce_join(a, 0) ==> ["ac", "bd"]
10446tf.reduce_join(a, 1) ==> ["ab", "cd"]
10447tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
10448tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
10449tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
10450tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
10451tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
10452tf.reduce_join(a, [0, 1]) ==> "acbd"
10453tf.reduce_join(a, [1, 0]) ==> "abcd"
10454tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]]
10455tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd"
10456```
10457  }];
10458
10459  let arguments = (ins
10460    Arg<TF_StrTensor, [{The input to be joined.  All reduced indices must have non-zero size.}]>:$inputs,
10461    Arg<TF_Int32Tensor, [{The dimensions to reduce over.  Dimensions are reduced in the
10462order specified.  Omitting `reduction_indices` is equivalent to passing
10463`[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported.}]>:$reduction_indices,
10464
10465    DefaultValuedAttr<BoolAttr, "false">:$keep_dims,
10466    StrAttr:$separator
10467  );
10468
10469  let results = (outs
10470    Res<TF_StrTensor, [{Has shape equal to that of the input with reduced dimensions removed or
10471set to `1` depending on `keep_dims`.}]>:$output
10472  );
10473}
10474
10475def TF_ReluOp : TF_Op<"Relu", [Idempotent, NoSideEffect, SameOperandsAndResultType, TF_LayoutAgnostic]> {
10476  let summary = "Computes rectified linear: `max(features, 0)`.";
10477
10478  let description = [{
See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)

Example usage:

>>> tf.nn.relu([-2., 0., -0., 3.]).numpy()
array([ 0.,  0., -0.,  3.], dtype=float32)
10483  }];
10484
10485  let arguments = (ins
10486    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$features
10487  );
10488
10489  let results = (outs
10490    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$activations
10491  );
10492
10493  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10494}
10495
10496def TF_Relu6Op : TF_Op<"Relu6", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
10497  let summary = "Computes rectified linear 6: `min(max(features, 0), 6)`.";
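
  let description = [{
For example, a short illustration via the Python API (`tf.nn.relu6`):

```python
tf.nn.relu6([-3., 1., 8.]).numpy()  # ==> array([0., 1., 6.], dtype=float32)
```
  }];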
10498
10499  let arguments = (ins
10500    TF_IntOrFpTensor:$features
10501  );
10502
10503  let results = (outs
10504    TF_IntOrFpTensor:$activations
10505  );
10506
10507  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10508}
10509
10510def TF_Relu6GradOp : TF_Op<"Relu6Grad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
10511  let summary = "Computes rectified linear 6 gradients for a Relu6 operation.";
10512
10513  let arguments = (ins
10514    Arg<TF_IntOrFpTensor, [{The backpropagated gradients to the corresponding Relu6 operation.}]>:$gradients,
10515    Arg<TF_IntOrFpTensor, [{The features passed as input to the corresponding Relu6 operation, or
10516its output; using either one produces the same result.}]>:$features
10517  );
10518
10519  let results = (outs
10520    Res<TF_IntOrFpTensor, [{The gradients:
10521`gradients * (features > 0) * (features < 6)`.}]>:$backprops
10522  );
10523
10524  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10525}
10526
10527def TF_ReluGradOp : TF_Op<"ReluGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
10528  let summary = "Computes rectified linear gradients for a Relu operation.";
10529
10530  let arguments = (ins
10531    Arg<TF_IntOrFpTensor, [{The backpropagated gradients to the corresponding Relu operation.}]>:$gradients,
10532    Arg<TF_IntOrFpTensor, [{The features passed as input to the corresponding Relu operation, OR
10533the outputs of that operation (both work equivalently).}]>:$features
10534  );
10535
10536  let results = (outs
10537    Res<TF_IntOrFpTensor, [{`gradients * (features > 0)`.}]>:$backprops
10538  );
10539
10540  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10541}
10542
10543def TF_RemoteCallOp : TF_Op<"RemoteCall", []> {
10544  let summary = "Runs function `f` on a remote device indicated by `target`.";
10545
10546  let arguments = (ins
10547    Arg<TF_StrTensor, [{A fully specified device name where we want to run the function.}]>:$target,
10548    Arg<Variadic<TF_Tensor>, [{A list of arguments for the function.}]>:$args,
10549
10550    SymbolRefAttr:$f
10551  );
10552
10553  let results = (outs
10554    Res<Variadic<TF_Tensor>, [{A list of return values.}]>:$output
10555  );
10556
10557  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<1>;
10558  TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr<0>;
10559}
10560
10561def TF_ReshapeOp : TF_Op<"Reshape", [NoSideEffect]> {
10562  let summary = "Reshapes a tensor.";
10563
10564  let description = [{
10565Given `tensor`, this operation returns a tensor that has the same values
10566as `tensor` with shape `shape`.
10567
10568If one component of 1-D tensor `shape` is the special value -1, the size of that
10569dimension is computed so that the total size remains constant.  In particular, a
10570`shape` of `[-1]` flattens into 1-D.  At most one component of `shape` may be
10571unknown.
10572
The `shape` must be 1-D; the operation returns a tensor with shape `shape`
filled with the values of `tensor`, and the number of elements implied by
`shape` must match the number of elements in `tensor`.
10578
10579For example:
10580
10581```
10582# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
10583# tensor 't' has shape [9]
10584reshape(t, [3, 3]) ==> [[1, 2, 3],
10585                        [4, 5, 6],
10586                        [7, 8, 9]]
10587
10588# tensor 't' is [[[1, 1], [2, 2]],
10589#                [[3, 3], [4, 4]]]
10590# tensor 't' has shape [2, 2, 2]
10591reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
10592                        [3, 3, 4, 4]]
10593
10594# tensor 't' is [[[1, 1, 1],
10595#                 [2, 2, 2]],
10596#                [[3, 3, 3],
10597#                 [4, 4, 4]],
10598#                [[5, 5, 5],
10599#                 [6, 6, 6]]]
10600# tensor 't' has shape [3, 2, 3]
10601# pass '[-1]' to flatten 't'
10602reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
10603
10604# -1 can also be used to infer the shape
10605
10606# -1 is inferred to be 9:
10607reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
10608                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
10609# -1 is inferred to be 2:
10610reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
10611                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
10612# -1 is inferred to be 3:
10613reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
10614                              [2, 2, 2],
10615                              [3, 3, 3]],
10616                             [[4, 4, 4],
10617                              [5, 5, 5],
10618                              [6, 6, 6]]]
10619
10620# tensor 't' is [7]
10621# shape `[]` reshapes to a scalar
10622reshape(t, []) ==> 7
10623```
10624  }];
10625
10626  let arguments = (ins
10627    TF_Tensor:$tensor,
10628    Arg<TF_I32OrI64Tensor, [{Defines the shape of the output tensor.}]>:$shape
10629  );
10630
10631  let results = (outs
10632    TF_Tensor:$output
10633  );
10634
10635  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10636  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<1>;
10637
10638  let builders = [
10639    OpBuilder<(ins "Value":$tensor, "Value":$shape)>
10640  ];
10641
10642  let verifier = [{
10643    return Verify(*this);
10644  }];
10645}
10646
10647def TF_ResizeBilinearOp : TF_Op<"ResizeBilinear", [NoSideEffect]> {
10648  let summary = "Resize `images` to `size` using bilinear interpolation.";
10649
10650  let description = [{
10651Input images can be of different types but output images are always float.
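
For example, a sketch through the higher-level Python API (assuming TF2's
`tf.image.resize`, which defaults to bilinear resizing with half-pixel
centers):

```python
img = tf.zeros([1, 4, 4, 3], dtype=tf.uint8)
out = tf.image.resize(img, [8, 8], method="bilinear")
print(out.shape, out.dtype)  # ==> (1, 8, 8, 3) <dtype: 'float32'>
```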
10652  }];
10653
10654  let arguments = (ins
10655    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$images,
    Arg<TF_Int32Tensor, [{A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
new size for the images.}]>:$size,
10658
10659    DefaultValuedAttr<BoolAttr, "false">:$align_corners,
10660    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
10661  );
10662
10663  let results = (outs
10664    Res<TF_Float32Tensor, [{4-D with shape
10665`[batch, new_height, new_width, channels]`.}]>:$resized_images
10666  );
10667
10668  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10669}
10670
10671def TF_ResizeBilinearGradOp : TF_Op<"ResizeBilinearGrad", [NoSideEffect]> {
10672  let summary = "Computes the gradient of bilinear interpolation.";
10673
10674  let arguments = (ins
10675    Arg<TF_Float32Tensor, [{4-D with shape `[batch, height, width, channels]`.}]>:$grads,
    Arg<TF_FloatTensor, [{4-D with shape `[batch, orig_height, orig_width, channels]`.
The image tensor that was resized.}]>:$original_image,
10678
10679    DefaultValuedAttr<BoolAttr, "false">:$align_corners,
10680    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
10681  );
10682
10683  let results = (outs
10684    Res<TF_FloatTensor, [{4-D with shape `[batch, orig_height, orig_width, channels]`.
10685Gradients with respect to the input image. Input image must have been
10686float or double.}]>:$output
10687  );
10688
10689  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
10690}
10691
10692def TF_ResizeNearestNeighborOp : TF_Op<"ResizeNearestNeighbor", [NoSideEffect]> {
10693  let summary = [{
10694Resize `images` to `size` using nearest neighbor interpolation.
10695  }];
10696
10697  let arguments = (ins
10698    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$images,
    Arg<TF_Int32Tensor, [{A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
new size for the images.}]>:$size,
10701
10702    DefaultValuedAttr<BoolAttr, "false">:$align_corners,
10703    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
10704  );
10705
10706  let results = (outs
10707    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{4-D with shape
10708`[batch, new_height, new_width, channels]`.}]>:$resized_images
10709  );
10710
10711  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10712}
10713
10714def TF_ResizeNearestNeighborGradOp : TF_Op<"ResizeNearestNeighborGrad", [NoSideEffect]> {
10715  let summary = "Computes the gradient of nearest neighbor interpolation.";
10716
10717  let arguments = (ins
10718    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int8, TF_Uint8]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$grads,
    Arg<TF_Int32Tensor, [{A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
original input size.}]>:$size,
10721
10722    DefaultValuedAttr<BoolAttr, "false">:$align_corners,
10723    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
10724  );
10725
10726  let results = (outs
10727    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int8, TF_Uint8]>, [{4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
10728with respect to the input image.}]>:$output
10729  );
10730
10731  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10732}
10733
10734def TF_ResourceApplyAdaMaxOp : TF_Op<"ResourceApplyAdaMax", []> {
10735  let summary = "Update '*var' according to the AdaMax algorithm.";
10736
10737  let description = [{
10738m_t <- beta1 * m_{t-1} + (1 - beta1) * g
10739v_t <- max(beta2 * v_{t-1}, abs(g))
10740variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
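
A direct NumPy transcription of the update above (an illustrative dense
sketch, not the TensorFlow kernel):

```python
import numpy as np

def ada_max_step(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad):
  # In-place dense update mirroring the formulas above.
  m[:] = beta1 * m + (1 - beta1) * grad
  v[:] = np.maximum(beta2 * v, np.abs(grad))
  var -= lr / (1 - beta1_power) * m / (v + epsilon)
```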
10741  }];
10742
10743  let arguments = (ins
10744    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
10745    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
10746    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$v,
10747    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta1_power,
10748    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
10749    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta1,
10750    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta2,
10751    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
10752    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
10753
10754    DefaultValuedAttr<BoolAttr, "false">:$use_locking
10755  );
10756
10757  let results = (outs);
10758
10759  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
10760}
10761
10762def TF_ResourceApplyAdadeltaOp : TF_Op<"ResourceApplyAdadelta", []> {
10763  let summary = "Update '*var' according to the adadelta scheme.";
10764
10765  let description = [{
accum = rho * accum + (1 - rho) * grad.square();
update = (update_accum + epsilon).sqrt() * (accum + epsilon).rsqrt() * grad;
update_accum = rho * update_accum + (1 - rho) * update.square();
var -= update;
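
A direct NumPy transcription (an illustrative dense sketch, not the
TensorFlow kernel):

```python
import numpy as np

def adadelta_step(var, accum, update_accum, rho, epsilon, grad):
  # Mirrors the formulas above; `update_accum` corresponds to the op's
  # `accum_update` operand. The scalar `lr` operand is elided, as it is
  # in the formulas above.
  accum[:] = rho * accum + (1 - rho) * grad**2
  update = np.sqrt(update_accum + epsilon) / np.sqrt(accum + epsilon) * grad
  update_accum[:] = rho * update_accum + (1 - rho) * update**2
  var -= update
```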
10770  }];
10771
10772  let arguments = (ins
10773    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
10774    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
10775    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum_update,
10776    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
10777    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Decay factor. Must be a scalar.}]>:$rho,
10778    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Constant factor. Must be a scalar.}]>:$epsilon,
10779    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
10780
10781    DefaultValuedAttr<BoolAttr, "false">:$use_locking
10782  );
10783
10784  let results = (outs);
10785
10786  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
10787}
10788
10789def TF_ResourceApplyAdagradOp : TF_Op<"ResourceApplyAdagrad", []> {
10790  let summary = "Update '*var' according to the adagrad scheme.";
10791
10792  let description = [{
10793accum += grad * grad
10794var -= lr * grad * (1 / sqrt(accum))
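
A direct NumPy transcription (an illustrative dense sketch, not the
TensorFlow kernel):

```python
import numpy as np

def adagrad_step(var, accum, lr, grad):
  # In-place dense update mirroring the formulas above.
  accum += grad * grad
  var -= lr * grad / np.sqrt(accum)
```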
10795  }];
10796
10797  let arguments = (ins
10798    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
10799    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
10800    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
10801    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
10802
10803    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
10804    DefaultValuedAttr<BoolAttr, "true">:$update_slots
10805  );
10806
10807  let results = (outs);
10808
10809  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
10810}
10811
10812def TF_ResourceApplyAdagradDAOp : TF_Op<"ResourceApplyAdagradDA", []> {
10813  let summary = "Update '*var' according to the proximal adagrad scheme.";
10814
10815  let arguments = (ins
10816    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
10817    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$gradient_accumulator,
10818    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$gradient_squared_accumulator,
10819    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
10820    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
10821    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
10822    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
10823    Arg<TF_Int64Tensor, [{Training step number. Must be a scalar.}]>:$global_step,
10824
10825    DefaultValuedAttr<BoolAttr, "false">:$use_locking
10826  );
10827
10828  let results = (outs);
10829
10830  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
10831}
10832
10833def TF_ResourceApplyAdagradV2Op : TF_Op<"ResourceApplyAdagradV2", []> {
10834  let summary = "Update '*var' according to the adagrad scheme.";
10835
10836  let description = [{
10837accum += grad * grad
10838var -= lr * grad * (1 / (sqrt(accum) + epsilon))
10839  }];
10840
10841  let arguments = (ins
10842    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
10843    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
10844    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
10845    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Constant factor. Must be a scalar.}]>:$epsilon,
10846    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
10847
10848    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
10849    DefaultValuedAttr<BoolAttr, "true">:$update_slots
10850  );
10851
10852  let results = (outs);
10853
10854  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
10855}
10856
10857def TF_ResourceApplyAdamOp : TF_Op<"ResourceApplyAdam", []> {
10858  let summary = "Update '*var' according to the Adam algorithm.";
10859
10860  let description = [{
$$\text{lr}_t := \mathrm{learning\_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
10862$$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
10863$$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$
10864$$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$
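
A direct NumPy transcription of one step (an illustrative dense sketch, not
the TensorFlow kernel):

```python
import numpy as np

def adam_step(var, m, v, beta1_power, beta2_power, lr, beta1, beta2,
              epsilon, grad):
  lr_t = lr * np.sqrt(1 - beta2_power) / (1 - beta1_power)
  m[:] = beta1 * m + (1 - beta1) * grad
  v[:] = beta2 * v + (1 - beta2) * grad * grad
  var -= lr_t * m / (np.sqrt(v) + epsilon)
```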
10865  }];
10866
10867  let arguments = (ins
10868    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
10869    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
10870    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$v,
10871    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta1_power,
10872    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta2_power,
10873    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
10874    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta1,
10875    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta2,
10876    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
10877    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
10878
10879    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
10880    DefaultValuedAttr<BoolAttr, "false">:$use_nesterov
10881  );
10882
10883  let results = (outs);
10884
10885  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
10886}
10887
10888def TF_ResourceApplyAddSignOp : TF_Op<"ResourceApplyAddSign", []> {
10889  let summary = "Update '*var' according to the AddSign update.";
10890
10891  let description = [{
10892m_t <- beta1 * m_{t-1} + (1 - beta1) * g
update <- (alpha + sign_decay * sign(g) * sign(m)) * g
10894variable <- variable - lr_t * update
10895  }];
10896
10897  let arguments = (ins
10898    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
10899    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
10900    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
10901    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$alpha,
10902    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$sign_decay,
10903    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta,
10904    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
10905
10906    DefaultValuedAttr<BoolAttr, "false">:$use_locking
10907  );
10908
10909  let results = (outs);
10910
10911  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
10912}
10913
10914def TF_ResourceApplyCenteredRMSPropOp : TF_Op<"ResourceApplyCenteredRMSProp", []> {
10915  let summary = "Update '*var' according to the centered RMSProp algorithm.";
10916
10917  let description = [{
10918The centered RMSProp algorithm uses an estimate of the centered second moment
10919(i.e., the variance) for normalization, as opposed to regular RMSProp, which
10920uses the (uncentered) second moment. This often helps with training, but is
10921slightly more expensive in terms of computation and memory.
10922
Note that in the dense implementation of this algorithm, `mg`, `ms`, and `mom`
will update even if `grad` is zero; in a sparse implementation, `mg`, `ms`,
and `mom` will not update in iterations during which `grad` is zero.
10926
10927mean_square = decay * mean_square + (1-decay) * gradient ** 2
10928mean_grad = decay * mean_grad + (1-decay) * gradient
10929
10930Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
10931
10932mg <- rho * mg_{t-1} + (1-rho) * grad
10933ms <- rho * ms_{t-1} + (1-rho) * grad * grad
10934mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
10935var <- var - mom
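
A direct NumPy transcription of the dense update (an illustrative sketch,
not the TensorFlow kernel):

```python
import numpy as np

def centered_rms_prop_step(var, mg, ms, mom, lr, rho, momentum, epsilon, grad):
  mg[:] = rho * mg + (1 - rho) * grad
  ms[:] = rho * ms + (1 - rho) * grad * grad
  mom[:] = momentum * mom + lr * grad / np.sqrt(ms - mg * mg + epsilon)
  var -= mom
```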
10936  }];
10937
10938  let arguments = (ins
10939    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
10940    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$mg,
10941    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$ms,
10942    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$mom,
10943    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
10944    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Decay rate. Must be a scalar.}]>:$rho,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum scale. Must be a scalar.}]>:$momentum,
10946    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
10947    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
10948
10949    DefaultValuedAttr<BoolAttr, "false">:$use_locking
10950  );
10951
10952  let results = (outs);
10953
10954  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<4>;
10955}
10956
10957def TF_ResourceApplyFtrlOp : TF_Op<"ResourceApplyFtrl", []> {
10958  let summary = "Update '*var' according to the Ftrl-proximal scheme.";
10959
10960  let description = [{
10961accum_new = accum + grad * grad
10962linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
10963quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
10964var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
10965accum = accum_new
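
A direct NumPy transcription (an illustrative dense sketch, assuming
`multiply_linear_by_lr` is false):

```python
import numpy as np

def ftrl_step(var, accum, linear, grad, lr, l1, l2, lr_power):
  accum_new = accum + grad * grad
  linear += grad - (accum_new**(-lr_power) - accum**(-lr_power)) / lr * var
  quadratic = 1.0 / (accum_new**lr_power * lr) + 2 * l2
  var[:] = np.where(np.abs(linear) > l1,
                    (np.sign(linear) * l1 - linear) / quadratic, 0.0)
  accum[:] = accum_new
```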
10966  }];
10967
10968  let arguments = (ins
10969    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
10970    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
10971    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$linear,
10972    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
10973    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
10974    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
10975    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
10976    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr_power,
10977
10978    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
10979    DefaultValuedAttr<BoolAttr, "false">:$multiply_linear_by_lr
10980  );
10981
10982  let results = (outs);
10983
10984  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
10985}
10986
10987def TF_ResourceApplyFtrlV2Op : TF_Op<"ResourceApplyFtrlV2", []> {
10988  let summary = "Update '*var' according to the Ftrl-proximal scheme.";
10989
10990  let description = [{
10991grad_with_shrinkage = grad + 2 * l2_shrinkage * var
10992accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
linear += grad_with_shrinkage -
    (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
10995quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
10996var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
10997accum = accum_new
10998  }];
10999
11000  let arguments = (ins
11001    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11002    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
11003    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$linear,
11004    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11005    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11006    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 shrinkage regularization. Must be a scalar.}]>:$l2_shrinkage,
11009    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr_power,
11010
11011    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
11012    DefaultValuedAttr<BoolAttr, "false">:$multiply_linear_by_lr
11013  );
11014
11015  let results = (outs);
11016
11017  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
11018}
11019
11020def TF_ResourceApplyGradientDescentOp : TF_Op<"ResourceApplyGradientDescent", []> {
11021  let summary = "Update '*var' by subtracting 'alpha' * 'delta' from it.";
11022
11023  let arguments = (ins
11024    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11025    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$alpha,
11026    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The change.}]>:$delta,
11027
11028    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11029  );
11030
11031  let results = (outs);
11032
11033  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
11034}
11035
11036def TF_ResourceApplyKerasMomentumOp : TF_Op<"ResourceApplyKerasMomentum", []> {
11037  let summary = "Update '*var' according to the momentum scheme.";
11038
11039  let description = [{
11040Set use_nesterov = True if you want to use Nesterov momentum.
11041
11042accum = accum * momentum - lr * grad
11043var += accum
11044  }];
11045
11046  let arguments = (ins
11047    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11048    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
11049    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11050    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11051    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum. Must be a scalar.}]>:$momentum,
11052
11053    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
11054    DefaultValuedAttr<BoolAttr, "false">:$use_nesterov
11055  );
11056
11057  let results = (outs);
11058
11059  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11060}
11061
11062def TF_ResourceApplyMomentumOp : TF_Op<"ResourceApplyMomentum", []> {
11063  let summary = "Update '*var' according to the momentum scheme.";
11064
11065  let description = [{
11066Set use_nesterov = True if you want to use Nesterov momentum.
11067
11068accum = accum * momentum + grad
11069var -= lr * accum
11070  }];
11071
11072  let arguments = (ins
11073    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11074    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
11075    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11076    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11077    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum. Must be a scalar.}]>:$momentum,
11078
11079    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
11080    DefaultValuedAttr<BoolAttr, "false">:$use_nesterov
11081  );
11082
11083  let results = (outs);
11084
11085  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11086}
11087
11088def TF_ResourceApplyPowerSignOp : TF_Op<"ResourceApplyPowerSign", []> {
  let summary = "Update '*var' according to the PowerSign update.";
11090
11091  let description = [{
11092m_t <- beta1 * m_{t-1} + (1 - beta1) * g
11093update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g
11094variable <- variable - lr_t * update
11095  }];
11096
11097  let arguments = (ins
11098    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11099    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
11100    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11101    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$logbase,
11102    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$sign_decay,
11103    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta,
11104    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11105
11106    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11107  );
11108
11109  let results = (outs);
11110
11111  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11112}
11113
11114def TF_ResourceApplyProximalAdagradOp : TF_Op<"ResourceApplyProximalAdagrad", []> {
11115  let summary = [{
11116Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
11117  }];
11118
11119  let description = [{
11120accum += grad * grad
11121prox_v = var - lr * grad * (1 / sqrt(accum))
var = sign(prox_v) / (1 + lr * l2) * max{|prox_v| - lr * l1, 0}
11123  }];
11124
11125  let arguments = (ins
11126    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11127    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
11128    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11129    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
11130    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
11131    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11132
11133    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11134  );
11135
11136  let results = (outs);
11137
11138  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11139}
11140
11141def TF_ResourceApplyProximalGradientDescentOp : TF_Op<"ResourceApplyProximalGradientDescent", []> {
11142  let summary = "Update '*var' as FOBOS algorithm with fixed learning rate.";
11143
11144  let description = [{
11145prox_v = var - alpha * delta
var = sign(prox_v) / (1 + alpha * l2) * max{|prox_v| - alpha * l1, 0}
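
A direct NumPy transcription (an illustrative sketch, not the TensorFlow
kernel):

```python
import numpy as np

def proximal_gd_step(var, alpha, l1, l2, delta):
  prox_v = var - alpha * delta
  var[:] = (np.sign(prox_v) / (1 + alpha * l2)
            * np.maximum(np.abs(prox_v) - alpha * l1, 0))
```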
11147  }];
11148
11149  let arguments = (ins
11150    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11151    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$alpha,
11152    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
11153    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
11154    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The change.}]>:$delta,
11155
11156    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11157  );
11158
11159  let results = (outs);
11160
11161  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
11162}
11163
11164def TF_ResourceApplyRMSPropOp : TF_Op<"ResourceApplyRMSProp", []> {
11165  let summary = "Update '*var' according to the RMSProp algorithm.";
11166
11167  let description = [{
Note that in the dense implementation of this algorithm, `ms` and `mom` will
update even if `grad` is zero; in a sparse implementation, `ms` and `mom`
will not update in iterations during which `grad` is zero.
11171
11172mean_square = decay * mean_square + (1-decay) * gradient ** 2
11173Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
11174
11175ms <- rho * ms_{t-1} + (1-rho) * grad * grad
11176mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
11177var <- var - mom
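
A direct NumPy transcription of the dense update (an illustrative sketch,
not the TensorFlow kernel):

```python
import numpy as np

def rms_prop_step(var, ms, mom, lr, rho, momentum, epsilon, grad):
  ms[:] = rho * ms + (1 - rho) * grad * grad
  mom[:] = momentum * mom + lr * grad / np.sqrt(ms + epsilon)
  var -= mom
```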
11178  }];
11179
11180  let arguments = (ins
11181    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11182    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$ms,
11183    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$mom,
11184    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11185    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Decay rate. Must be a scalar.}]>:$rho,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum scale. Must be a scalar.}]>:$momentum,
11187    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
11188    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11189
11190    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11191  );
11192
11193  let results = (outs);
11194
11195  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
11196}
11197
11198def TF_ResourceGatherOp : TF_Op<"ResourceGather", []> {
11199  let summary = [{
11200Gather slices from the variable pointed to by `resource` according to `indices`.
11201  }];
11202
11203  let description = [{
11204`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
11205Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
11206
11207```python
11208    # Scalar indices
    output[:, ..., :] = params[indices, :, ..., :]

    # Vector indices
    output[i, :, ..., :] = params[indices[i], :, ..., :]

    # Higher rank indices
    output[i, ..., j, :, ..., :] = params[indices[i, ..., j], :, ..., :]
11216```
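
A concrete illustration via the Python API (`tf.gather` on a `tf.Variable`
reads through this op; the mapping is noted here for illustration):

```python
v = tf.Variable([[1, 2], [3, 4], [5, 6]])
tf.gather(v, [2, 0]).numpy()  # ==> array([[5, 6], [1, 2]], dtype=int32)
```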
11217  }];
11218
11219  let arguments = (ins
11220    Arg<TF_ResourceTensor, "", [TF_VariableRead]>:$resource,
11221    TF_I32OrI64Tensor:$indices,
11222
11223    DefaultValuedAttr<I64Attr, "0">:$batch_dims,
11224    DefaultValuedAttr<BoolAttr, "true">:$validate_indices
11225  );
11226
11227  let results = (outs
11228    TF_Tensor:$output
11229  );
11230
11231  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11232  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
11233}
11234
11235def TF_ResourceScatterAddOp : TF_Op<"ResourceScatterAdd", []> {
11236  let summary = "Adds sparse updates to the variable referenced by `resource`.";
11237
11238  let description = [{
11239This operation computes
11240
11241    # Scalar indices
11242    ref[indices, ...] += updates[...]
11243
11244    # Vector indices (for each i)
11245    ref[indices[i], ...] += updates[i, ...]
11246
11247    # High rank indices (for each i, ..., j)
11248    ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
11249
11250Duplicate entries are handled correctly: if multiple `indices` reference
11251the same location, their contributions add.
11252
11253Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
11254
11255<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11256<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
11257</div>
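
A concrete illustration via the Python API (`tf.Variable.scatter_add` with a
`tf.IndexedSlices` argument lowers to this op; the mapping is noted here for
illustration):

```python
v = tf.Variable([1., 2., 3.])
v.scatter_add(tf.IndexedSlices(tf.constant([10., 20.]), tf.constant([0, 2])))
v.numpy()  # ==> array([11.,  2., 23.], dtype=float32)
```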
11258  }];
11259
11260  let arguments = (ins
11261    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
11262    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
11263    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to add to `ref`.}]>:$updates
11264  );
11265
11266  let results = (outs);
11267
11268  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11269  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
11270}
11271
11272def TF_ResourceScatterDivOp : TF_Op<"ResourceScatterDiv", []> {
11273  let summary = [{
11274Divides sparse updates into the variable referenced by `resource`.
11275  }];
11276
11277  let description = [{
11278This operation computes
11279
11280    # Scalar indices
11281    ref[indices, ...] /= updates[...]
11282
11283    # Vector indices (for each i)
11284    ref[indices[i], ...] /= updates[i, ...]
11285
11286    # High rank indices (for each i, ..., j)
11287    ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
11288
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions divide.
11291
11292Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
11293
11294<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11295<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
11296</div>
11297  }];
11298
11299  let arguments = (ins
11300    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
11301    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of values by which to divide `ref`.}]>:$updates
11303  );
11304
11305  let results = (outs);
11306
11307  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11308  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
11309}
11310
11311def TF_ResourceScatterMaxOp : TF_Op<"ResourceScatterMax", []> {
11312  let summary = [{
11313Reduces sparse updates into the variable referenced by `resource` using the `max` operation.
11314  }];
11315
11316  let description = [{
11317This operation computes
11318
11319    # Scalar indices
11320    ref[indices, ...] = max(ref[indices, ...], updates[...])
11321
11322    # Vector indices (for each i)
11323    ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])
11324
11325    # High rank indices (for each i, ..., j)
11326    ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
11327
11328Duplicate entries are handled correctly: if multiple `indices` reference
11329the same location, their contributions are combined.
11330
11331Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
11332
11333<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11334<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
11335</div>
11336  }];
11337
11338  let arguments = (ins
11339    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
11340    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to reduce into `ref` using the `max` operation.}]>:$updates
11342  );
11343
11344  let results = (outs);
11345
11346  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11347  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
11348}
11349
11350def TF_ResourceScatterMinOp : TF_Op<"ResourceScatterMin", []> {
11351  let summary = [{
11352Reduces sparse updates into the variable referenced by `resource` using the `min` operation.
11353  }];
11354
11355  let description = [{
11356This operation computes
11357
11358    # Scalar indices
11359    ref[indices, ...] = min(ref[indices, ...], updates[...])
11360
11361    # Vector indices (for each i)
11362    ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])
11363
11364    # High rank indices (for each i, ..., j)
11365    ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
11366
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions are combined using the `min` operation.
11369
11370Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
11371
11372<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11373<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
11374</div>
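
A minimal eager-mode sketch via `tf.raw_ops` (illustrative only, assuming TF 2.x):

```python
import tensorflow as tf

# Assumes TF 2.x eager execution.
v = tf.Variable([1.0, 5.0, 3.0])
tf.raw_ops.ResourceScatterMin(
    resource=v.handle,
    indices=tf.constant([1, 2]),
    updates=tf.constant([2.0, 7.0]))
print(v.numpy())  # [1. 2. 3.] -- position 2 keeps 3.0, which is below the update 7.0
```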
11375  }];
11376
11377  let arguments = (ins
11378    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
11379    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to reduce into `ref` using the `min` operation.}]>:$updates
11381  );
11382
11383  let results = (outs);
11384
11385  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11386  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
11387}
11388
11389def TF_ResourceScatterMulOp : TF_Op<"ResourceScatterMul", []> {
11390  let summary = [{
11391Multiplies sparse updates into the variable referenced by `resource`.
11392  }];
11393
11394  let description = [{
11395This operation computes
11396
11397    # Scalar indices
11398    ref[indices, ...] *= updates[...]
11399
11400    # Vector indices (for each i)
11401    ref[indices[i], ...] *= updates[i, ...]
11402
11403    # High rank indices (for each i, ..., j)
11404    ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
11405
11406Duplicate entries are handled correctly: if multiple `indices` reference
11407the same location, their contributions multiply.
11408
11409Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
11410
11411<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11412<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
11413</div>
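
A minimal eager-mode sketch via `tf.raw_ops` (illustrative only, assuming TF 2.x):

```python
import tensorflow as tf

# Assumes TF 2.x eager execution.
v = tf.Variable([1.0, 2.0, 3.0])
tf.raw_ops.ResourceScatterMul(
    resource=v.handle,
    indices=tf.constant([1, 1]),       # duplicate index:
    updates=tf.constant([2.0, 3.0]))   # contributions multiply, 2 * 2 * 3
print(v.numpy())  # [ 1. 12.  3.]
```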
11414  }];
11415
11416  let arguments = (ins
11417    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
11418    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to multiply `ref` by.}]>:$updates
11420  );
11421
11422  let results = (outs);
11423
11424  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11425  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
11426}
11427
11428def TF_ResourceScatterNdAddOp : TF_Op<"ResourceScatterNdAdd", []> {
11429  let summary = [{
11430Applies sparse addition to individual values or slices in a Variable.
11431  }];
11432
11433  let description = [{
11434`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
11435
`indices` must be an integer tensor containing indices into `ref`.
It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
11438
11439The innermost dimension of `indices` (with length `K`) corresponds to
11440indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
11441dimension of `ref`.
11442
`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
11444
11445```
11446[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
11447```
11448
For example, say we want to add 4 scattered elements to a rank-1 tensor with
8 elements. In Python, that addition would look like this:
11451
11452```python
11453ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
11454indices = tf.constant([[4], [3], [1], [7]])
11455updates = tf.constant([9, 10, 11, 12])
11456add = tf.scatter_nd_add(ref, indices, updates)
11457with tf.Session() as sess:
  print(sess.run(add))
11459```
11460
11461The resulting update to ref would look like this:
11462
11463    [1, 13, 3, 14, 14, 6, 7, 20]
11464
11465See `tf.scatter_nd` for more details about how to make updates to
11466slices.
11467  }];
11468
11469  let arguments = (ins
11470    Arg<TF_ResourceTensor, [{A resource handle. Must be from a VarHandleOp.}], [TF_VariableRead, TF_VariableWrite]>:$ref,
11471    Arg<TF_I32OrI64Tensor, [{A Tensor. Must be one of the following types: int32, int64.
11472A tensor of indices into ref.}]>:$indices,
11473    Arg<TF_Tensor, [{A Tensor. Must have the same type as ref. A tensor of
11474values to add to ref.}]>:$updates,
11475
11476    DefaultValuedAttr<BoolAttr, "true">:$use_locking
11477  );
11478
11479  let results = (outs);
11480
11481  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11482  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11483}
11484
11485def TF_ResourceScatterNdSubOp : TF_Op<"ResourceScatterNdSub", []> {
11486  let summary = [{
11487Applies sparse subtraction to individual values or slices in a Variable.
11488  }];
11489
11490  let description = [{
11491`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
11492
`indices` must be an integer tensor containing indices into `ref`.
It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
11495
11496The innermost dimension of `indices` (with length `K`) corresponds to
11497indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
11498dimension of `ref`.
11499
`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
11501
11502```
11503[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
11504```
11505
11506For example, say we want to subtract 4 scattered elements from a rank-1 tensor
11507with 8 elements. In Python, that subtraction would look like this:
11508
11509```python
11510ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
11511indices = tf.constant([[4], [3], [1], [7]])
11512updates = tf.constant([9, 10, 11, 12])
11513sub = tf.scatter_nd_sub(ref, indices, updates)
11514with tf.Session() as sess:
  print(sess.run(sub))
11516```
11517
11518The resulting update to ref would look like this:
11519
11520    [1, -9, 3, -6, -4, 6, 7, -4]
11521
11522See `tf.scatter_nd` for more details about how to make updates to
11523slices.
11524  }];
11525
11526  let arguments = (ins
11527    Arg<TF_ResourceTensor, [{A resource handle. Must be from a VarHandleOp.}], [TF_VariableRead, TF_VariableWrite]>:$ref,
11528    Arg<TF_I32OrI64Tensor, [{A Tensor. Must be one of the following types: int32, int64.
11529A tensor of indices into ref.}]>:$indices,
11530    Arg<TF_Tensor, [{A Tensor. Must have the same type as ref. A tensor of
values to subtract from ref.}]>:$updates,
11532
11533    DefaultValuedAttr<BoolAttr, "true">:$use_locking
11534  );
11535
11536  let results = (outs);
11537
11538  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11539  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11540}
11541
11542def TF_ResourceScatterNdUpdateOp : TF_Op<"ResourceScatterNdUpdate", []> {
  let summary = [{
Applies sparse `updates` to individual values or slices within a given
variable according to `indices`.
  }];

  let description = [{
11550`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
11551
`indices` must be an integer tensor containing indices into `ref`.
It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
11554
11555The innermost dimension of `indices` (with length `K`) corresponds to
11556indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
11557dimension of `ref`.
11558
`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
11560
11561```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
11563```
11564
For example, say we want to update 4 scattered elements in a rank-1 tensor with
8 elements. In Python, that update would look like this:
11567
11568```python
11569    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1], [7]])
11571    updates = tf.constant([9, 10, 11, 12])
11572    update = tf.scatter_nd_update(ref, indices, updates)
11573    with tf.Session() as sess:
      print(sess.run(update))
11575```
11576
11577The resulting update to ref would look like this:
11578
11579    [1, 11, 3, 10, 9, 6, 7, 12]
11580
11581See `tf.scatter_nd` for more details about how to make updates to
11582slices.
11583  }];
11584
11585  let arguments = (ins
11586    Arg<TF_ResourceTensor, [{A resource handle. Must be from a VarHandleOp.}], [TF_VariableRead, TF_VariableWrite]>:$ref,
11587    Arg<TF_I32OrI64Tensor, [{A Tensor. Must be one of the following types: int32, int64.
11588A tensor of indices into ref.}]>:$indices,
11589    Arg<TF_Tensor, [{A Tensor. Must have the same type as ref. A tensor of updated
values to assign to ref.}]>:$updates,
11591
11592    DefaultValuedAttr<BoolAttr, "true">:$use_locking
11593  );
11594
11595  let results = (outs);
11596
11597  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11598  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11599}
11600
11601def TF_ResourceScatterSubOp : TF_Op<"ResourceScatterSub", []> {
11602  let summary = [{
11603Subtracts sparse updates from the variable referenced by `resource`.
11604  }];
11605
11606  let description = [{
11607This operation computes
11608
11609    # Scalar indices
11610    ref[indices, ...] -= updates[...]
11611
11612    # Vector indices (for each i)
11613    ref[indices[i], ...] -= updates[i, ...]
11614
11615    # High rank indices (for each i, ..., j)
11616    ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
11617
11618Duplicate entries are handled correctly: if multiple `indices` reference
11619the same location, their contributions add.
11620
11621Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
11622
11623<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11624<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
11625</div>
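
A minimal eager-mode sketch via `tf.raw_ops` (illustrative only, assuming TF 2.x):

```python
import tensorflow as tf

# Assumes TF 2.x eager execution.
v = tf.Variable([10.0, 20.0, 30.0])
tf.raw_ops.ResourceScatterSub(
    resource=v.handle,
    indices=tf.constant([0, 0]),       # duplicate index:
    updates=tf.constant([1.0, 2.0]))   # contributions add, 10 - (1 + 2)
print(v.numpy())  # [ 7. 20. 30.]
```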
11626  }];
11627
11628  let arguments = (ins
11629    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
11630    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to subtract from `ref`.}]>:$updates
11632  );
11633
11634  let results = (outs);
11635
11636  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11637  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
11638}
11639
11640def TF_ResourceScatterUpdateOp : TF_Op<"ResourceScatterUpdate", []> {
11641  let summary = [{
11642Assigns sparse updates to the variable referenced by `resource`.
11643  }];
11644
11645  let description = [{
11646This operation computes
11647
11648    # Scalar indices
11649    ref[indices, ...] = updates[...]
11650
11651    # Vector indices (for each i)
11652    ref[indices[i], ...] = updates[i, ...]
11653
11654    # High rank indices (for each i, ..., j)
11655    ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
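
A minimal eager-mode sketch via `tf.raw_ops` (an illustration assuming TF 2.x,
not the kernel implementation):

```python
import tensorflow as tf

# Assumes TF 2.x eager execution.
v = tf.Variable([1.0, 2.0, 3.0, 4.0])
tf.raw_ops.ResourceScatterUpdate(
    resource=v.handle,
    indices=tf.constant([1, 3]),
    updates=tf.constant([20.0, 40.0]))
print(v.numpy())  # [ 1. 20.  3. 40.]
```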
11656  }];
11657
11658  let arguments = (ins
11659    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
11660    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
    Arg<TF_Tensor, [{A tensor of updated values to assign to `ref`.}]>:$updates
11662  );
11663
11664  let results = (outs);
11665
11666  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11667  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
11668}
11669
11670def TF_ResourceStridedSliceAssignOp : TF_Op<"ResourceStridedSliceAssign", []> {
11671  let summary = "Assign `value` to the sliced l-value reference of `ref`.";
11672
11673  let description = [{
11674The values of `value` are assigned to the positions in the variable
11675`ref` that are selected by the slice parameters. The slice parameters
`begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
11677
11678NOTE this op currently does not support broadcasting and so `value`'s
11679shape must be exactly the shape produced by the slice of `ref`.
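
A minimal sketch of the high-level sliced assignment that lowers to this op
(assuming TF 2.x eager mode):

```python
import tensorflow as tf

# Assumes TF 2.x eager execution.
v = tf.Variable([1.0, 2.0, 3.0, 4.0])
v[1:3].assign([20.0, 30.0])   # value shape must exactly match the slice shape
print(v.numpy())              # [ 1. 20. 30.  4.]
```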
11680  }];
11681
11682  let arguments = (ins
11683    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$ref,
11684    TF_I32OrI64Tensor:$begin,
11685    TF_I32OrI64Tensor:$end,
11686    TF_I32OrI64Tensor:$strides,
11687    TF_Tensor:$value,
11688
11689    DefaultValuedAttr<I64Attr, "0">:$begin_mask,
11690    DefaultValuedAttr<I64Attr, "0">:$end_mask,
11691    DefaultValuedAttr<I64Attr, "0">:$ellipsis_mask,
11692    DefaultValuedAttr<I64Attr, "0">:$new_axis_mask,
11693    DefaultValuedAttr<I64Attr, "0">:$shrink_axis_mask
11694  );
11695
11696  let results = (outs);
11697
11698  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<4>;
11699  TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr<1>;
11700}
11701
11702def TF_RestoreOp : TF_Op<"Restore", []> {
11703  let summary = "Restores a tensor from checkpoint files.";
11704
11705  let description = [{
11706Reads a tensor stored in one or several files. If there are several files (for
11707instance because a tensor was saved as slices), `file_pattern` may contain
11708wildcard symbols (`*` and `?`) in the filename portion only, not in the
11709directory portion.
11710
11711If a `file_pattern` matches several files, `preferred_shard` can be used to hint
11712in which file the requested tensor is likely to be found. This op will first
11713open the file at index `preferred_shard` in the list of matching files and try
to restore tensors from that file.  Only if some tensors or tensor slices are
not found in that first file does the Op open all the files. Setting
11716`preferred_shard` to match the value passed as the `shard` input
11717of a matching `Save` Op may speed up Restore.  This attribute only affects
11718performance, not correctness.  The default value -1 means files are processed in
11719order.
11720
11721See also `RestoreSlice`.
11722  }];
11723
11724  let arguments = (ins
11725    Arg<TF_StrTensor, [{Must have a single element. The pattern of the files from
11726which we read the tensor.}]>:$file_pattern,
11727    Arg<TF_StrTensor, [{Must have a single element. The name of the tensor to be
11728restored.}]>:$tensor_name,
11729
11730    DefaultValuedAttr<I64Attr, "-1">:$preferred_shard
11731  );
11732
11733  let results = (outs
11734    Res<TF_Tensor, [{The restored tensor.}]>:$tensor
11735  );
11736
11737  TF_DerivedResultTypeAttr dt = TF_DerivedResultTypeAttr<0>;
11738}
11739
11740def TF_RestoreV2Op : TF_Op<"RestoreV2", []> {
11741  let summary = "Restores tensors from a V2 checkpoint.";
11742
11743  let description = [{
11744For backward compatibility with the V1 format, this Op currently allows
11745restoring from a V1 checkpoint as well:
11746  - This Op first attempts to find the V2 index file pointed to by "prefix", and
    if found, proceeds to read it as a V2 checkpoint;
11748  - Otherwise the V1 read path is invoked.
11749Relying on this behavior is not recommended, as the ability to fall back to read
11750V1 might be deprecated and eventually removed.
11751
11752By default, restores the named tensors in full.  If the caller wishes to restore
11753specific slices of stored tensors, "shape_and_slices" should be non-empty
11754strings and correspondingly well-formed.
11755
11756Callers must ensure all the named tensors are indeed stored in the checkpoint.
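
A minimal SaveV2/RestoreV2 round trip as a sketch (assuming TF 2.x eager mode;
the prefix `/tmp/restore_v2_demo` is an arbitrary illustrative path):

```python
import tensorflow as tf

# Assumes TF 2.x eager execution; the prefix path is illustrative only.
prefix = "/tmp/restore_v2_demo"
tf.raw_ops.SaveV2(prefix=prefix,
                  tensor_names=["t0"],
                  shape_and_slices=[""],   # "" = full, non-partitioned tensor
                  tensors=[tf.constant([1.0, 2.0])])
restored = tf.raw_ops.RestoreV2(prefix=prefix,
                                tensor_names=["t0"],
                                shape_and_slices=[""],
                                dtypes=[tf.float32])
print(restored[0].numpy())  # [1. 2.]
```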
11757  }];
11758
11759  let arguments = (ins
11760    Arg<TF_StrTensor, [{Must have a single element.  The prefix of a V2 checkpoint.}]>:$prefix,
11761    Arg<TF_StrTensor, [{shape {N}.  The names of the tensors to be restored.}]>:$tensor_names,
11762    Arg<TF_StrTensor, [{shape {N}.  The slice specs of the tensors to be restored.
11763Empty strings indicate that they are non-partitioned tensors.}]>:$shape_and_slices
11764  );
11765
11766  let results = (outs
11767    Res<Variadic<TF_Tensor>, [{shape {N}.  The restored tensors, whose shapes are read from the
11768checkpoint directly.}]>:$tensors
11769  );
11770
11771  TF_DerivedResultTypeListAttr dtypes = TF_DerivedResultTypeListAttr<0>;
11772}
11773
11774def TF_RetrieveTPUEmbeddingADAMParametersOp : TF_Op<"RetrieveTPUEmbeddingADAMParameters", [TF_TPUEmbeddingSideEffect]> {
11775  let summary = "Retrieve ADAM embedding parameters.";
11776
11777  let description = [{
11778An op that retrieves optimization parameters from embedding to host
11779memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
11780the correct embedding table configuration. For example, this op is
11781used to retrieve updated parameters before saving a checkpoint.
11782  }];
11783
11784  let arguments = (ins
11785    DefaultValuedAttr<I64Attr, "-1">:$table_id,
11786    StrAttr:$table_name,
11787    I64Attr:$num_shards,
11788    I64Attr:$shard_id,
11789    StrAttr:$config
11790  );
11791
11792  let results = (outs
11793    Res<TF_Float32Tensor, [{Parameter parameters updated by the ADAM optimization algorithm.}]>:$parameters,
11794    Res<TF_Float32Tensor, [{Parameter momenta updated by the ADAM optimization algorithm.}]>:$momenta,
11795    Res<TF_Float32Tensor, [{Parameter velocities updated by the ADAM optimization algorithm.}]>:$velocities
11796  );
11797}
11798
11799def TF_RetrieveTPUEmbeddingADAMParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingADAMParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
11800  let summary = "Retrieve ADAM embedding parameters with debug support.";
11801
11802  let description = [{
11803An op that retrieves optimization parameters from embedding to host
11804memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
11805the correct embedding table configuration. For example, this op is
11806used to retrieve updated parameters before saving a checkpoint.
11807  }];
11808
11809  let arguments = (ins
11810    DefaultValuedAttr<I64Attr, "-1">:$table_id,
11811    StrAttr:$table_name,
11812    I64Attr:$num_shards,
11813    I64Attr:$shard_id,
11814    StrAttr:$config
11815  );
11816
11817  let results = (outs
11818    Res<TF_Float32Tensor, [{Parameter parameters updated by the ADAM optimization algorithm.}]>:$parameters,
11819    Res<TF_Float32Tensor, [{Parameter momenta updated by the ADAM optimization algorithm.}]>:$momenta,
11820    Res<TF_Float32Tensor, [{Parameter velocities updated by the ADAM optimization algorithm.}]>:$velocities,
11821    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the ADAM optimization algorithm.}]>:$gradient_accumulators
11822  );
11823}
11824
11825def TF_RetrieveTPUEmbeddingAdadeltaParametersOp : TF_Op<"RetrieveTPUEmbeddingAdadeltaParameters", [TF_TPUEmbeddingSideEffect]> {
11826  let summary = "Retrieve Adadelta embedding parameters.";
11827
11828  let description = [{
11829An op that retrieves optimization parameters from embedding to host
11830memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
11831the correct embedding table configuration. For example, this op is
11832used to retrieve updated parameters before saving a checkpoint.
11833  }];
11834
11835  let arguments = (ins
11836    DefaultValuedAttr<I64Attr, "-1">:$table_id,
11837    StrAttr:$table_name,
11838    I64Attr:$num_shards,
11839    I64Attr:$shard_id,
11840    StrAttr:$config
11841  );
11842
11843  let results = (outs
11844    Res<TF_Float32Tensor, [{Parameter parameters updated by the Adadelta optimization algorithm.}]>:$parameters,
11845    Res<TF_Float32Tensor, [{Parameter accumulators updated by the Adadelta optimization algorithm.}]>:$accumulators,
11846    Res<TF_Float32Tensor, [{Parameter updates updated by the Adadelta optimization algorithm.}]>:$updates
11847  );
11848}
11849
11850def TF_RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
11851  let summary = "Retrieve Adadelta embedding parameters with debug support.";
11852
11853  let description = [{
11854An op that retrieves optimization parameters from embedding to host
11855memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
11856the correct embedding table configuration. For example, this op is
11857used to retrieve updated parameters before saving a checkpoint.
11858  }];
11859
11860  let arguments = (ins
11861    DefaultValuedAttr<I64Attr, "-1">:$table_id,
11862    StrAttr:$table_name,
11863    I64Attr:$num_shards,
11864    I64Attr:$shard_id,
11865    StrAttr:$config
11866  );
11867
11868  let results = (outs
11869    Res<TF_Float32Tensor, [{Parameter parameters updated by the Adadelta optimization algorithm.}]>:$parameters,
11870    Res<TF_Float32Tensor, [{Parameter accumulators updated by the Adadelta optimization algorithm.}]>:$accumulators,
11871    Res<TF_Float32Tensor, [{Parameter updates updated by the Adadelta optimization algorithm.}]>:$updates,
11872    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the Adadelta optimization algorithm.}]>:$gradient_accumulators
11873  );
11874}
11875
11876def TF_RetrieveTPUEmbeddingAdagradParametersOp : TF_Op<"RetrieveTPUEmbeddingAdagradParameters", [TF_TPUEmbeddingSideEffect]> {
11877  let summary = "Retrieve Adagrad embedding parameters.";
11878
11879  let description = [{
11880An op that retrieves optimization parameters from embedding to host
11881memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
11882the correct embedding table configuration. For example, this op is
11883used to retrieve updated parameters before saving a checkpoint.
11884  }];
11885
11886  let arguments = (ins
11887    DefaultValuedAttr<I64Attr, "-1">:$table_id,
11888    StrAttr:$table_name,
11889    I64Attr:$num_shards,
11890    I64Attr:$shard_id,
11891    StrAttr:$config
11892  );
11893
11894  let results = (outs
11895    Res<TF_Float32Tensor, [{Parameter parameters updated by the Adagrad optimization algorithm.}]>:$parameters,
11896    Res<TF_Float32Tensor, [{Parameter accumulators updated by the Adagrad optimization algorithm.}]>:$accumulators
11897  );
11898}
11899
11900def TF_RetrieveTPUEmbeddingAdagradParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingAdagradParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
11901  let summary = "Retrieve Adagrad embedding parameters with debug support.";
11902
11903  let description = [{
11904An op that retrieves optimization parameters from embedding to host
11905memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
11906the correct embedding table configuration. For example, this op is
11907used to retrieve updated parameters before saving a checkpoint.
11908  }];
11909
11910  let arguments = (ins
11911    DefaultValuedAttr<I64Attr, "-1">:$table_id,
11912    StrAttr:$table_name,
11913    I64Attr:$num_shards,
11914    I64Attr:$shard_id,
11915    StrAttr:$config
11916  );
11917
11918  let results = (outs
11919    Res<TF_Float32Tensor, [{Parameter parameters updated by the Adagrad optimization algorithm.}]>:$parameters,
11920    Res<TF_Float32Tensor, [{Parameter accumulators updated by the Adagrad optimization algorithm.}]>:$accumulators,
11921    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the Adagrad optimization algorithm.}]>:$gradient_accumulators
11922  );
11923}
11924
11925def TF_RetrieveTPUEmbeddingCenteredRMSPropParametersOp : TF_Op<"RetrieveTPUEmbeddingCenteredRMSPropParameters", [TF_TPUEmbeddingSideEffect]> {
11926  let summary = "Retrieve centered RMSProp embedding parameters.";
11927
11928  let description = [{
11929An op that retrieves optimization parameters from embedding to host
11930memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
11931the correct embedding table configuration. For example, this op is
11932used to retrieve updated parameters before saving a checkpoint.
11933  }];
11934
11935  let arguments = (ins
11936    DefaultValuedAttr<I64Attr, "-1">:$table_id,
11937    StrAttr:$table_name,
11938    I64Attr:$num_shards,
11939    I64Attr:$shard_id,
11940    StrAttr:$config
11941  );
11942
11943  let results = (outs
11944    Res<TF_Float32Tensor, [{Parameter parameters updated by the centered RMSProp optimization algorithm.}]>:$parameters,
11945    Res<TF_Float32Tensor, [{Parameter ms updated by the centered RMSProp optimization algorithm.}]>:$ms,
11946    Res<TF_Float32Tensor, [{Parameter mom updated by the centered RMSProp optimization algorithm.}]>:$mom,
11947    Res<TF_Float32Tensor, [{Parameter mg updated by the centered RMSProp optimization algorithm.}]>:$mg
11948  );
11949}
11950
11951def TF_RetrieveTPUEmbeddingFTRLParametersOp : TF_Op<"RetrieveTPUEmbeddingFTRLParameters", [TF_TPUEmbeddingSideEffect]> {
11952  let summary = "Retrieve FTRL embedding parameters.";
11953
11954  let description = [{
11955An op that retrieves optimization parameters from embedding to host
11956memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
11957the correct embedding table configuration. For example, this op is
11958used to retrieve updated parameters before saving a checkpoint.
11959  }];
11960
11961  let arguments = (ins
11962    DefaultValuedAttr<I64Attr, "-1">:$table_id,
11963    StrAttr:$table_name,
11964    I64Attr:$num_shards,
11965    I64Attr:$shard_id,
11966    StrAttr:$config
11967  );
11968
11969  let results = (outs
11970    Res<TF_Float32Tensor, [{Parameter parameters updated by the FTRL optimization algorithm.}]>:$parameters,
11971    Res<TF_Float32Tensor, [{Parameter accumulators updated by the FTRL optimization algorithm.}]>:$accumulators,
11972    Res<TF_Float32Tensor, [{Parameter linears updated by the FTRL optimization algorithm.}]>:$linears
11973  );
11974}
11975
11976def TF_RetrieveTPUEmbeddingFTRLParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingFTRLParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
11977  let summary = "Retrieve FTRL embedding parameters with debug support.";
11978
11979  let description = [{
11980An op that retrieves optimization parameters from embedding to host
11981memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
11982the correct embedding table configuration. For example, this op is
11983used to retrieve updated parameters before saving a checkpoint.
11984  }];
11985
11986  let arguments = (ins
11987    DefaultValuedAttr<I64Attr, "-1">:$table_id,
11988    StrAttr:$table_name,
11989    I64Attr:$num_shards,
11990    I64Attr:$shard_id,
11991    StrAttr:$config
11992  );
11993
11994  let results = (outs
11995    Res<TF_Float32Tensor, [{Parameter parameters updated by the FTRL optimization algorithm.}]>:$parameters,
11996    Res<TF_Float32Tensor, [{Parameter accumulators updated by the FTRL optimization algorithm.}]>:$accumulators,
11997    Res<TF_Float32Tensor, [{Parameter linears updated by the FTRL optimization algorithm.}]>:$linears,
11998    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the FTRL optimization algorithm.}]>:$gradient_accumulators
11999  );
12000}
12001
12002def TF_RetrieveTPUEmbeddingMDLAdagradLightParametersOp : TF_Op<"RetrieveTPUEmbeddingMDLAdagradLightParameters", [TF_TPUEmbeddingSideEffect]> {
12003  let summary = "Retrieve MDL Adagrad Light embedding parameters.";
12004
12005  let description = [{
12006An op that retrieves optimization parameters from embedding to host
12007memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12008the correct embedding table configuration. For example, this op is
12009used to retrieve updated parameters before saving a checkpoint.
12010  }];
12011
12012  let arguments = (ins
12013    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12014    StrAttr:$table_name,
12015    I64Attr:$num_shards,
12016    I64Attr:$shard_id,
12017    StrAttr:$config
12018  );
12019
12020  let results = (outs
12021    Res<TF_Float32Tensor, [{Parameter parameters updated by the MDL Adagrad Light optimization algorithm.}]>:$parameters,
12022    Res<TF_Float32Tensor, [{Parameter accumulators updated by the MDL Adagrad Light optimization algorithm.}]>:$accumulators,
12023    Res<TF_Float32Tensor, [{Parameter weights updated by the MDL Adagrad Light optimization algorithm.}]>:$weights,
12024    Res<TF_Float32Tensor, [{Parameter benefits updated by the MDL Adagrad Light optimization algorithm.}]>:$benefits
12025  );
12026}
12027
12028def TF_RetrieveTPUEmbeddingMomentumParametersOp : TF_Op<"RetrieveTPUEmbeddingMomentumParameters", [TF_TPUEmbeddingSideEffect]> {
12029  let summary = "Retrieve Momentum embedding parameters.";
12030
12031  let description = [{
12032An op that retrieves optimization parameters from embedding to host
12033memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12034the correct embedding table configuration. For example, this op is
12035used to retrieve updated parameters before saving a checkpoint.
12036  }];
12037
12038  let arguments = (ins
12039    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12040    StrAttr:$table_name,
12041    I64Attr:$num_shards,
12042    I64Attr:$shard_id,
12043    StrAttr:$config
12044  );
12045
12046  let results = (outs
12047    Res<TF_Float32Tensor, [{Parameter parameters updated by the Momentum optimization algorithm.}]>:$parameters,
12048    Res<TF_Float32Tensor, [{Parameter momenta updated by the Momentum optimization algorithm.}]>:$momenta
12049  );
12050}
12051
12052def TF_RetrieveTPUEmbeddingMomentumParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingMomentumParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
12053  let summary = "Retrieve Momentum embedding parameters with debug support.";
12054
12055  let description = [{
12056An op that retrieves optimization parameters from embedding to host
12057memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12058the correct embedding table configuration. For example, this op is
12059used to retrieve updated parameters before saving a checkpoint.
12060  }];
12061
12062  let arguments = (ins
12063    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12064    StrAttr:$table_name,
12065    I64Attr:$num_shards,
12066    I64Attr:$shard_id,
12067    StrAttr:$config
12068  );
12069
12070  let results = (outs
12071    Res<TF_Float32Tensor, [{Parameter parameters updated by the Momentum optimization algorithm.}]>:$parameters,
12072    Res<TF_Float32Tensor, [{Parameter momenta updated by the Momentum optimization algorithm.}]>:$momenta,
12073    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the Momentum optimization algorithm.}]>:$gradient_accumulators
12074  );
12075}
12076
12077def TF_RetrieveTPUEmbeddingProximalAdagradParametersOp : TF_Op<"RetrieveTPUEmbeddingProximalAdagradParameters", [TF_TPUEmbeddingSideEffect]> {
12078  let summary = "Retrieve proximal Adagrad embedding parameters.";
12079
12080  let description = [{
12081An op that retrieves optimization parameters from embedding to host
12082memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12083the correct embedding table configuration. For example, this op is
12084used to retrieve updated parameters before saving a checkpoint.
12085  }];
12086
12087  let arguments = (ins
12088    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12089    StrAttr:$table_name,
12090    I64Attr:$num_shards,
12091    I64Attr:$shard_id,
12092    StrAttr:$config
12093  );
12094
12095  let results = (outs
12096    Res<TF_Float32Tensor, [{Parameter parameters updated by the proximal Adagrad optimization algorithm.}]>:$parameters,
12097    Res<TF_Float32Tensor, [{Parameter accumulators updated by the proximal Adagrad optimization algorithm.}]>:$accumulators
12098  );
12099}
12100
12101def TF_RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
12102  let summary = [{
12103Retrieve proximal Adagrad embedding parameters with debug support.
12104  }];
12105
12106  let description = [{
12107An op that retrieves optimization parameters from embedding to host
12108memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12109the correct embedding table configuration. For example, this op is
12110used to retrieve updated parameters before saving a checkpoint.
12111  }];
12112
12113  let arguments = (ins
12114    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12115    StrAttr:$table_name,
12116    I64Attr:$num_shards,
12117    I64Attr:$shard_id,
12118    StrAttr:$config
12119  );
12120
12121  let results = (outs
12122    Res<TF_Float32Tensor, [{Parameter parameters updated by the proximal Adagrad optimization algorithm.}]>:$parameters,
12123    Res<TF_Float32Tensor, [{Parameter accumulators updated by the proximal Adagrad optimization algorithm.}]>:$accumulators,
12124    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the proximal Adagrad optimization algorithm.}]>:$gradient_accumulators
12125  );
12126}
12127
12128def TF_RetrieveTPUEmbeddingProximalYogiParametersOp : TF_Op<"RetrieveTPUEmbeddingProximalYogiParameters", [TF_TPUEmbeddingSideEffect]> {
12129  let summary = "";
12130
12131  let arguments = (ins
12132    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12133    StrAttr:$table_name,
12134    I64Attr:$num_shards,
12135    I64Attr:$shard_id,
12136    StrAttr:$config
12137  );
12138
12139  let results = (outs
12140    TF_Float32Tensor:$parameters,
12141    TF_Float32Tensor:$v,
12142    TF_Float32Tensor:$m
12143  );
12144}
12145
12146def TF_RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
12147  let summary = "";
12148
12149  let arguments = (ins
12150    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12151    StrAttr:$table_name,
12152    I64Attr:$num_shards,
12153    I64Attr:$shard_id,
12154    StrAttr:$config
12155  );
12156
12157  let results = (outs
12158    TF_Float32Tensor:$parameters,
12159    TF_Float32Tensor:$v,
12160    TF_Float32Tensor:$m,
12161    TF_Float32Tensor:$gradient_accumulators
12162  );
12163}
12164
12165def TF_RetrieveTPUEmbeddingRMSPropParametersOp : TF_Op<"RetrieveTPUEmbeddingRMSPropParameters", [TF_TPUEmbeddingSideEffect]> {
12166  let summary = "Retrieve RMSProp embedding parameters.";
12167
12168  let description = [{
12169An op that retrieves optimization parameters from embedding to host
12170memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12171the correct embedding table configuration. For example, this op is
12172used to retrieve updated parameters before saving a checkpoint.
12173  }];
12174
12175  let arguments = (ins
12176    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12177    StrAttr:$table_name,
12178    I64Attr:$num_shards,
12179    I64Attr:$shard_id,
12180    StrAttr:$config
12181  );
12182
12183  let results = (outs
12184    Res<TF_Float32Tensor, [{Parameter parameters updated by the RMSProp optimization algorithm.}]>:$parameters,
12185    Res<TF_Float32Tensor, [{Parameter ms updated by the RMSProp optimization algorithm.}]>:$ms,
12186    Res<TF_Float32Tensor, [{Parameter mom updated by the RMSProp optimization algorithm.}]>:$mom
12187  );
12188}
12189
12190def TF_RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
12191  let summary = "Retrieve RMSProp embedding parameters with debug support.";
12192
12193  let description = [{
12194An op that retrieves optimization parameters from embedding to host
12195memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12196the correct embedding table configuration. For example, this op is
12197used to retrieve updated parameters before saving a checkpoint.
12198  }];
12199
12200  let arguments = (ins
12201    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12202    StrAttr:$table_name,
12203    I64Attr:$num_shards,
12204    I64Attr:$shard_id,
12205    StrAttr:$config
12206  );
12207
12208  let results = (outs
12209    Res<TF_Float32Tensor, [{Parameter parameters updated by the RMSProp optimization algorithm.}]>:$parameters,
12210    Res<TF_Float32Tensor, [{Parameter ms updated by the RMSProp optimization algorithm.}]>:$ms,
12211    Res<TF_Float32Tensor, [{Parameter mom updated by the RMSProp optimization algorithm.}]>:$mom,
12212    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the RMSProp optimization algorithm.}]>:$gradient_accumulators
12213  );
12214}
12215
12216def TF_RetrieveTPUEmbeddingStochasticGradientDescentParametersOp : TF_Op<"RetrieveTPUEmbeddingStochasticGradientDescentParameters", [TF_TPUEmbeddingSideEffect]> {
12217  let summary = "Retrieve SGD embedding parameters.";
12218
12219  let description = [{
12220An op that retrieves optimization parameters from embedding to host
12221memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12222the correct embedding table configuration. For example, this op is
12223used to retrieve updated parameters before saving a checkpoint.
12224  }];
12225
12226  let arguments = (ins
12227    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12228    StrAttr:$table_name,
12229    I64Attr:$num_shards,
12230    I64Attr:$shard_id,
12231    StrAttr:$config
12232  );
12233
12234  let results = (outs
12235    Res<TF_Float32Tensor, [{Parameter parameters updated by the stochastic gradient descent optimization algorithm.}]>:$parameters
12236  );
12237}
12238
12239def TF_RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
12240  let summary = "Retrieve SGD embedding parameters with debug support.";
12241
12242  let description = [{
12243An op that retrieves optimization parameters from embedding to host
12244memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12245the correct embedding table configuration. For example, this op is
12246used to retrieve updated parameters before saving a checkpoint.
12247  }];
12248
12249  let arguments = (ins
12250    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12251    StrAttr:$table_name,
12252    I64Attr:$num_shards,
12253    I64Attr:$shard_id,
12254    StrAttr:$config
12255  );
12256
12257  let results = (outs
12258    Res<TF_Float32Tensor, [{Parameter parameters updated by the stochastic gradient descent optimization algorithm.}]>:$parameters,
    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the stochastic gradient descent optimization algorithm.}]>:$gradient_accumulators
12260  );
12261}
12262
12263def TF_ReverseSequenceOp : TF_Op<"ReverseSequence", [NoSideEffect]> {
12264  let summary = "Reverses variable length slices.";
12265
12266  let description = [{
12267This op first slices `input` along the dimension `batch_dim`, and for each
12268slice `i`, reverses the first `seq_lengths[i]` elements along
12269the dimension `seq_dim`.
12270
12271The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
12272and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
12273
12274The output slice `i` along dimension `batch_dim` is then given by input
12275slice `i`, with the first `seq_lengths[i]` slices along dimension
12276`seq_dim` reversed.
12277
12278For example:
12279
12280```
12281# Given this:
12282batch_dim = 0
12283seq_dim = 1
12284input.dims = (4, 8, ...)
12285seq_lengths = [7, 2, 3, 5]
12286
12287# then slices of input are reversed on seq_dim, but only up to seq_lengths:
12288output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
12289output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
12290output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
12291output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
12292
12293# while entries past seq_lens are copied through:
12294output[0, 7:, :, ...] = input[0, 7:, :, ...]
12295output[1, 2:, :, ...] = input[1, 2:, :, ...]
12296output[2, 3:, :, ...] = input[2, 3:, :, ...]
output[3, 5:, :, ...] = input[3, 5:, :, ...]
12298```
12299
12300In contrast, if:
12301
12302```
12303# Given this:
12304batch_dim = 2
12305seq_dim = 0
12306input.dims = (8, ?, 4, ...)
12307seq_lengths = [7, 2, 3, 5]
12308
12309# then slices of input are reversed on seq_dim, but only up to seq_lengths:
12310output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
12311output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
12312output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
12313output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
12314
12315# while entries past seq_lens are copied through:
12316output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
12317output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
12318output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
12320```
12321  }];
12322
12323  let arguments = (ins
12324    Arg<TF_Tensor, [{The input to reverse.}]>:$input,
12325    Arg<TF_I32OrI64Tensor, [{1-D with length `input.dims(batch_dim)` and
12326`max(seq_lengths) <= input.dims(seq_dim)`}]>:$seq_lengths,
12327
12328    I64Attr:$seq_dim,
12329    DefaultValuedAttr<I64Attr, "0">:$batch_dim
12330  );
12331
12332  let results = (outs
12333    Res<TF_Tensor, [{The partially reversed input. It has the same shape as `input`.}]>:$output
12334  );
12335
12336  TF_DerivedOperandTypeAttr Tlen = TF_DerivedOperandTypeAttr<1>;
12337  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12338}
12339
12340def TF_ReverseV2Op : TF_Op<"ReverseV2", [NoSideEffect]> {
12341  let summary = "Reverses specific dimensions of a tensor.";
12342
12343  let description = [{
12344NOTE `tf.reverse` has now changed behavior in preparation for 1.0.
12345`tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.
12346
Given a `tensor` and an `int32` tensor `axis` representing the set of
dimensions of `tensor` to reverse, this operation reverses each dimension
`i` for which there exists `j` such that `axis[j] == i`.

`tensor` can have up to 8 dimensions. `axis` may contain 0 or more entries.
If an index is specified more than once, an InvalidArgument error is raised.
12354
12355For example:
12356
12357```
12358# tensor 't' is [[[[ 0,  1,  2,  3],
12359#                  [ 4,  5,  6,  7],
12360#                  [ 8,  9, 10, 11]],
12361#                 [[12, 13, 14, 15],
12362#                  [16, 17, 18, 19],
12363#                  [20, 21, 22, 23]]]]
12364# tensor 't' shape is [1, 2, 3, 4]
12365
12366# 'dims' is [3] or 'dims' is [-1]
12367reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
12368                        [ 7,  6,  5,  4],
12369                        [ 11, 10, 9, 8]],
12370                       [[15, 14, 13, 12],
12371                        [19, 18, 17, 16],
12372                        [23, 22, 21, 20]]]]
12373
12374# 'dims' is '[1]' (or 'dims' is '[-3]')
12375reverse(t, dims) ==> [[[[12, 13, 14, 15],
12376                        [16, 17, 18, 19],
12377                        [20, 21, 22, 23]
12378                       [[ 0,  1,  2,  3],
12379                        [ 4,  5,  6,  7],
12380                        [ 8,  9, 10, 11]]]]
12381
12382# 'dims' is '[2]' (or 'dims' is '[-2]')
12383reverse(t, dims) ==> [[[[8, 9, 10, 11],
12384                        [4, 5, 6, 7],
12385                        [0, 1, 2, 3]]
12386                       [[20, 21, 22, 23],
12387                        [16, 17, 18, 19],
12388                        [12, 13, 14, 15]]]]
12389```
12390  }];
12391
12392  let arguments = (ins
12393    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Str, TF_Uint16, TF_Uint8]>, [{Up to 8-D.}]>:$tensor,
12394    Arg<TF_I32OrI64Tensor, [{1-D. The indices of the dimensions to reverse. Must be in the range
12395`[-rank(tensor), rank(tensor))`.}]>:$axis
12396  );
12397
12398  let results = (outs
12399    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Str, TF_Uint16, TF_Uint8]>, [{The same shape as `tensor`.}]>:$output
12400  );
12401
12402  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12403  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
12404}
12405
12406def TF_RightShiftOp : TF_Op<"RightShift", [NoSideEffect, ResultsBroadcastableShape]>,
12407                      WithBroadcastableBinOpBuilder {
12408  let summary = "Elementwise computes the bitwise right-shift of `x` and `y`.";
12409
12410  let description = [{
12411Performs a logical shift for unsigned integer types, and an arithmetic shift
12412for signed integer types.
12413
If `y` is negative, or greater than or equal to the width of `x` in bits,
the result is implementation defined.
12416
12417Example:
12418
12419```python
12420import tensorflow as tf
12421from tensorflow.python.ops import bitwise_ops
12422import numpy as np
12423dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]
12424
12425for dtype in dtype_list:
12426  lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
12427  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
12428
12429  right_shift_result = bitwise_ops.right_shift(lhs, rhs)
12430
12431  print(right_shift_result)
12432
12433# This will print:
12434# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8)
12435# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16)
12436# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32)
12437# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64)
12438
12439lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
12440rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
12441bitwise_ops.right_shift(lhs, rhs)
12442# <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>
12443```
12444  }];
12445
12446  let arguments = (ins
12447    TF_IntTensor:$x,
12448    TF_IntTensor:$y
12449  );
12450
12451  let results = (outs
12452    TF_IntTensor:$z
12453  );
12454
12455  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12456}
12457
12458def TF_RintOp : TF_Op<"Rint", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
12459  let summary = "Returns element-wise integer closest to x.";
12460
12461  let description = [{
12462If the result is midway between two representable values,
the even representable value is chosen.
12464For example:
12465
12466```
12467rint(-1.5) ==> -2.0
12468rint(0.5000001) ==> 1.0
12469rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
12470```
12471  }];
12472
12473  let arguments = (ins
12474    TF_FloatTensor:$x
12475  );
12476
12477  let results = (outs
12478    TF_FloatTensor:$y
12479  );
12480
12481  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12482}
12483
12484def TF_RiscAddOp : TF_Op<"RiscAdd", [Commutative, NoSideEffect]> {
12485  let summary = "Returns x + y element-wise.";
12486
12487  let description = [{
*NOTE*: `RiscAdd` does not support broadcasting.
12489
12490Given two input tensors, the `tf.risc_add` operation computes the sum for every element in the tensor.
12491
12492Both input and output have a range `(-inf, inf)`.
12493  }];
12494
12495  let arguments = (ins
12496    TF_FloatTensor:$x,
12497    TF_FloatTensor:$y
12498  );
12499
12500  let results = (outs
12501    TF_FloatTensor:$z
12502  );
12503
12504  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12505}
12506
12507def TF_RiscDotOp : TF_Op<"RiscDot", [NoSideEffect]> {
12508  let summary = "";
12509
12510  let arguments = (ins
12511    TF_FloatTensor:$a,
12512    TF_FloatTensor:$b,
12513
12514    DefaultValuedAttr<BoolAttr, "false">:$transpose_a,
12515    DefaultValuedAttr<BoolAttr, "false">:$transpose_b
12516  );
12517
12518  let results = (outs
12519    TF_FloatTensor:$product
12520  );
12521
12522  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12523}
12524
12525def TF_RngReadAndSkipOp : TF_Op<"RngReadAndSkip", []> {
12526  let summary = "Advance the counter of a counter-based RNG.";
12527
12528  let description = [{
12529The state of the RNG after
12530`rng_read_and_skip(n)` will be the same as that after `uniform([n])`
12531(or any other distribution). The actual increment added to the
12532counter is an unspecified implementation choice.
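
A sketch using the high-level `tf.random.Generator` API, which wraps this op
(assuming TF 2.x):

```python
import tensorflow as tf

# Assumes TF 2.x eager execution.
g1 = tf.random.Generator.from_seed(42)
g2 = tf.random.Generator.from_seed(42)
_ = g1.uniform([3])  # consume 3 values from g1's stream
g2.skip(3)           # per the contract above, g2's state now matches g1's
print(g1.uniform([2]).numpy() == g2.uniform([2]).numpy())  # [ True  True]
```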
12533  }];
12534
12535  let arguments = (ins
12536    TF_ResourceTensor:$resource,
12537    TF_Int32Tensor:$alg,
12538    TF_Uint64Tensor:$delta
12539  );
12540
12541  let results = (outs
12542    TF_Int64Tensor:$value
12543  );
12544}
12545
12546def TF_RollOp : TF_Op<"Roll", [NoSideEffect]> {
12547  let summary = "Rolls the elements of a tensor along an axis.";
12548
12549  let description = [{
12550The elements are shifted positively (towards larger indices) by the offset of
12551`shift` along the dimension of `axis`. Negative `shift` values will shift
elements in the opposite direction. Elements that roll past the last position
12553will wrap around to the first and vice versa. Multiple shifts along multiple
12554axes may be specified.
12555
12556For example:
12557
12558```
12559# 't' is [0, 1, 2, 3, 4]
12560roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]
12561
12562# shifting along multiple dimensions
12563# 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
12564roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]
12565
12566# shifting along the same axis multiple times
12567# 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
12568roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
12569```
12570  }];
12571
12572  let arguments = (ins
12573    TF_Tensor:$input,
12574    Arg<TF_I32OrI64Tensor, [{Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which
12575elements are shifted positively (towards larger indices) along the dimension
12576specified by `axis[i]`. Negative shifts will roll the elements in the opposite
12577direction.}]>:$shift,
12578    Arg<TF_I32OrI64Tensor, [{Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension that the shift
12579`shift[i]` should occur. If the same axis is referenced more than once, the
12580total shift for that axis will be the sum of all the shifts that belong to that
12581axis.}]>:$axis
12582  );
12583
12584  let results = (outs
12585    Res<TF_Tensor, [{Has the same shape and size as the input. The elements are shifted
12586positively (towards larger indices) by the offsets of `shift` along the
12587dimensions of `axis`.}]>:$output
12588  );
12589
12590  TF_DerivedOperandTypeAttr Tshift = TF_DerivedOperandTypeAttr<1>;
12591  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12592  TF_DerivedOperandTypeAttr Taxis = TF_DerivedOperandTypeAttr<2>;
12593}
12594
12595def TF_RoundOp : TF_Op<"Round", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
12596  let summary = [{
12597Rounds the values of a tensor to the nearest integer, element-wise.
12598  }];
12599
12600  let description = [{
Rounds half to even, also known as banker's rounding. If you want to round
according to the current system rounding mode, use `std::rint`.
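
For instance (a sketch assuming TF 2.x):

```python
import tensorflow as tf

# Assumes TF 2.x eager execution; halves go to the even neighbor.
print(tf.round([0.5, 1.5, 2.5, 3.5]).numpy())  # [0. 2. 2. 4.]
```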
12603  }];
12604
12605  let arguments = (ins
12606    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
12607  );
12608
12609  let results = (outs
12610    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
12611  );
12612
12613  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12614}
12615
12616def TF_RsqrtOp : TF_Op<"Rsqrt", [NoSideEffect, SameOperandsAndResultType]> {
12617  let summary = "Computes reciprocal of square root of x element-wise.";
12618
12619  let description = [{
12620I.e., \\(y = 1 / \sqrt{x}\\).
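
For example (a sketch assuming TF 2.x):

```python
import tensorflow as tf

# Assumes TF 2.x eager execution.
print(tf.math.rsqrt([4.0, 16.0]).numpy())  # [0.5  0.25]
```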
12621  }];
12622
12623  let arguments = (ins
12624    TF_FpOrComplexTensor:$x
12625  );
12626
12627  let results = (outs
12628    TF_FpOrComplexTensor:$y
12629  );
12630
12631  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12632}
12633
12634def TF_RsqrtGradOp : TF_Op<"RsqrtGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
12635  let summary = "Computes the gradient for the rsqrt of `x` wrt its input.";
12636
12637  let description = [{
12638Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
12639is the corresponding input gradient.
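
A numeric check of this formula as a sketch (assuming TF 2.x; `RsqrtGrad`
itself is emitted by the gradient machinery rather than called directly):

```python
import tensorflow as tf

# Assumes TF 2.x eager execution.
x = tf.constant(4.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.math.rsqrt(x)             # y = 0.5
print(tape.gradient(y, x).numpy())   # -0.0625
print((-0.5 * y ** 3).numpy())       # -0.0625, matching grad with dy = 1
```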
12640  }];
12641
12642  let arguments = (ins
12643    TF_FpOrComplexTensor:$y,
12644    TF_FpOrComplexTensor:$dy
12645  );
12646
12647  let results = (outs
12648    TF_FpOrComplexTensor:$z
12649  );
12650
12651  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12652}
12653
12654def TF_SaveOp : TF_Op<"Save", []> {
12655  let summary = "Saves the input tensors to disk.";
12656
12657  let description = [{
12658The size of `tensor_names` must match the number of tensors in `data`. `data[i]`
12659is written to `filename` with name `tensor_names[i]`.
12660
12661See also `SaveSlices`.
12662  }];
12663
12664  let arguments = (ins
12665    Arg<TF_StrTensor, [{Must have a single element. The name of the file to which we write
12666the tensor.}]>:$filename,
12667    Arg<TF_StrTensor, [{Shape `[N]`. The names of the tensors to be saved.}]>:$tensor_names,
12668    Arg<Variadic<TF_Tensor>, [{`N` tensors to save.}]>:$data
12669  );
12670
12671  let results = (outs);
12672
12673  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<2>;
12674}
12675
12676def TF_SaveSlicesOp : TF_Op<"SaveSlices", []> {
12677  let summary = "Saves input tensors slices to disk.";
12678
12679  let description = [{
12680This is like `Save` except that tensors can be listed in the saved file as being
12681a slice of a larger tensor.  `shapes_and_slices` specifies the shape of the
12682larger tensor and the slice that this tensor covers. `shapes_and_slices` must
12683have as many elements as `tensor_names`.
12684
12685Elements of the `shapes_and_slices` input must either be:
12686
12687*  The empty string, in which case the corresponding tensor is
12688   saved normally.
12689*  A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
12690   `dimI` are the dimensions of the larger tensor and `slice-spec`
12691   specifies what part is covered by the tensor to save.
12692
12693`slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
12694where each `sliceI` is either:
12695
12696*  The string `-` meaning that the slice covers all indices of this dimension
12697*  `start,length` where `start` and `length` are integers.  In that
12698   case the slice covers `length` indices starting at `start`.
12699
12700See also `Save`.
12701  }];
12702
12703  let arguments = (ins
12704    Arg<TF_StrTensor, [{Must have a single element. The name of the file to which we write the
12705tensor.}]>:$filename,
12706    Arg<TF_StrTensor, [{Shape `[N]`. The names of the tensors to be saved.}]>:$tensor_names,
12707    Arg<TF_StrTensor, [{Shape `[N]`.  The shapes and slice specifications to use when
12708saving the tensors.}]>:$shapes_and_slices,
12709    Arg<Variadic<TF_Tensor>, [{`N` tensors to save.}]>:$data
12710  );
12711
12712  let results = (outs);
12713
12714  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<3>;
12715}
12716
12717def TF_SaveV2Op : TF_Op<"SaveV2", []> {
12718  let summary = "Saves tensors in V2 checkpoint format.";
12719
12720  let description = [{
12721By default, saves the named tensors in full.  If the caller wishes to save
12722specific slices of full tensors, "shape_and_slices" should be non-empty strings
12723and correspondingly well-formed.
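
A minimal sketch, assuming the op is reachable as `tf.raw_ops.SaveV2` and
using a hypothetical checkpoint prefix:

```python
import tensorflow as tf

tf.raw_ops.SaveV2(
    prefix="/tmp/ckpt/model",   # hypothetical prefix, for illustration
    tensor_names=["w", "b"],
    shape_and_slices=["", ""],  # empty strings: save the full tensors
    tensors=[tf.ones([2, 2]), tf.zeros([2])])
```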
12724  }];
12725
12726  let arguments = (ins
12727    Arg<TF_StrTensor, [{Must have a single element. The prefix of the V2 checkpoint to which we
12728write the tensors.}]>:$prefix,
    Arg<TF_StrTensor, [{Shape `[N]`. The names of the tensors to be saved.}]>:$tensor_names,
    Arg<TF_StrTensor, [{Shape `[N]`.  The slice specs of the tensors to be saved.
Empty strings indicate that they are non-partitioned tensors.}]>:$shape_and_slices,
12732    Arg<Variadic<TF_Tensor>, [{`N` tensors to save.}]>:$tensors
12733  );
12734
12735  let results = (outs);
12736
12737  TF_DerivedOperandTypeListAttr dtypes = TF_DerivedOperandTypeListAttr<3>;
12738}
12739
12740def TF_ScatterNdOp : TF_Op<"ScatterNd", [NoSideEffect]> {
12741  let summary = "Scatter `updates` into a new tensor according to `indices`.";
12742
12743  let description = [{
12744Creates a new tensor by applying sparse `updates` to individual values or
12745slices within a tensor (initially zero for numeric, empty for string) of
12746the given `shape` according to indices.  This operator is the inverse of the
12747`tf.gather_nd` operator which extracts values or slices from a given tensor.
12748
This operation is similar to `tensor_scatter_add`, except that the tensor is
zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical
to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)`.
12752
12753If `indices` contains duplicates, then their updates are accumulated (summed).
12754
12755**WARNING**: The order in which updates are applied is nondeterministic, so the
12756output will be nondeterministic if `indices` contains duplicates -- because
12757of some numerical approximation issues, numbers summed in different order
12758may yield different results.
12759
12760`indices` is an integer tensor containing indices into a new tensor of shape
12761`shape`.  The last dimension of `indices` can be at most the rank of `shape`:
12762
12763    indices.shape[-1] <= shape.rank
12764
The last dimension of `indices` corresponds to indices into elements
(if `indices.shape[-1] = shape.rank`) or slices
(if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
`shape`.  `updates` is a tensor with shape

    indices.shape[:-1] + shape[indices.shape[-1]:]

For example, with `shape = [4, 4, 4]` and `indices.shape = [2, 1]`, `updates`
must have shape `indices.shape[:-1] + shape[1:] = [2, 4, 4]`.
12771
12772The simplest form of scatter is to insert individual elements in a tensor by
12773index. For example, say we want to insert 4 scattered elements in a rank-1
12774tensor with 8 elements.
12775
12776<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12777<img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
12778</div>
12779
12780In Python, this scatter operation would look like this:
12781
12782```python
12783    indices = tf.constant([[4], [3], [1], [7]])
12784    updates = tf.constant([9, 10, 11, 12])
12785    shape = tf.constant([8])
12786    scatter = tf.scatter_nd(indices, updates, shape)
12787    print(scatter)
12788```
12789
12790The resulting tensor would look like this:
12791
12792    [0, 11, 0, 10, 9, 0, 0, 12]
12793
We can also insert entire slices of a higher rank tensor all at once. For
example, we can insert two slices in the first dimension of a rank-3 tensor
with two matrices of new values.
12797
12798<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12799<img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
12800</div>
12801
12802In Python, this scatter operation would look like this:
12803
12804```python
12805    indices = tf.constant([[0], [2]])
12806    updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
12807                            [7, 7, 7, 7], [8, 8, 8, 8]],
12808                           [[5, 5, 5, 5], [6, 6, 6, 6],
12809                            [7, 7, 7, 7], [8, 8, 8, 8]]])
12810    shape = tf.constant([4, 4, 4])
12811    scatter = tf.scatter_nd(indices, updates, shape)
12812    print(scatter)
12813```
12814
12815The resulting tensor would look like this:
12816
12817    [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
12818     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
12819     [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
12820     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
12821
12822Note that on CPU, if an out of bound index is found, an error is returned.
12823On GPU, if an out of bound index is found, the index is ignored.
12824  }];
12825
12826  let arguments = (ins
12827    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
12828    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates,
12829    Arg<TF_I32OrI64Tensor, [{1-D. The shape of the resulting tensor.}]>:$shape
12830  );
12831
12832  let results = (outs
12833    Res<TF_Tensor, [{A new tensor with the given shape and updates applied according
12834to the indices.}]>:$output
12835  );
12836
12837  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<0>;
12838  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
12839}
12840
12841def TF_SegmentMaxOp : TF_Op<"SegmentMax", [NoSideEffect]> {
12842  let summary = "Computes the maximum along segments of a tensor.";
12843
12844  let description = [{
12845Read
12846[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
12847for an explanation of segments.
12848
12849Computes a tensor such that
12850\\(output_i = \max_j(data_j)\\) where `max` is over `j` such
12851that `segment_ids[j] == i`.
12852
12853If the max is empty for a given segment ID `i`, `output[i] = 0`.
12854
12855<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12856<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
12857</div>
12858
12859For example:
12860
12861```
12862c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
12863tf.segment_max(c, tf.constant([0, 0, 1]))
12864# ==> [[4, 3, 3, 4],
12865#      [5, 6, 7, 8]]
12866```
12867  }];
12868
12869  let arguments = (ins
12870    TF_IntOrFpTensor:$data,
12871    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
12872first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
12873  );
12874
12875  let results = (outs
12876    Res<TF_IntOrFpTensor, [{Has same shape as data, except for dimension 0 which
12877has size `k`, the number of segments.}]>:$output
12878  );
12879
12880  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
12881  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12882}
12883
12884def TF_SegmentMeanOp : TF_Op<"SegmentMean", [NoSideEffect]> {
12885  let summary = "Computes the mean along segments of a tensor.";
12886
12887  let description = [{
12888Read
12889[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
12890for an explanation of segments.
12891
Computes a tensor such that
\\(output_i = \frac{\sum_j data_j}{N}\\), where the sum is
over `j` such that `segment_ids[j] == i` and `N` is the total number of
values summed.
12896
12897If the mean is empty for a given segment ID `i`, `output[i] = 0`.
12898
12899<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12900<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
12901</div>
12902
12903For example:
12904
12905```
12906c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
12907tf.segment_mean(c, tf.constant([0, 0, 1]))
12908# ==> [[2.5, 2.5, 2.5, 2.5],
12909#      [5, 6, 7, 8]]
12910```
12911  }];
12912
12913  let arguments = (ins
12914    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
12915    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
12916first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
12917  );
12918
12919  let results = (outs
12920    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for dimension 0 which
12921has size `k`, the number of segments.}]>:$output
12922  );
12923
12924  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
12925  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12926}
12927
12928def TF_SegmentMinOp : TF_Op<"SegmentMin", [NoSideEffect]> {
12929  let summary = "Computes the minimum along segments of a tensor.";
12930
12931  let description = [{
12932Read
12933[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
12934for an explanation of segments.
12935
12936Computes a tensor such that
12937\\(output_i = \min_j(data_j)\\) where `min` is over `j` such
12938that `segment_ids[j] == i`.
12939
12940If the min is empty for a given segment ID `i`, `output[i] = 0`.
12941
12942<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12943<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
12944</div>
12945
12946For example:
12947
12948```
12949c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
12950tf.segment_min(c, tf.constant([0, 0, 1]))
12951# ==> [[1, 2, 2, 1],
12952#      [5, 6, 7, 8]]
12953```
12954  }];
12955
12956  let arguments = (ins
12957    TF_IntOrFpTensor:$data,
12958    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
12959first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
12960  );
12961
12962  let results = (outs
12963    Res<TF_IntOrFpTensor, [{Has same shape as data, except for dimension 0 which
12964has size `k`, the number of segments.}]>:$output
12965  );
12966
12967  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
12968  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12969}
12970
12971def TF_SegmentProdOp : TF_Op<"SegmentProd", [NoSideEffect]> {
12972  let summary = "Computes the product along segments of a tensor.";
12973
12974  let description = [{
12975Read
12976[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
12977for an explanation of segments.
12978
12979Computes a tensor such that
12980\\(output_i = \prod_j data_j\\) where the product is over `j` such
12981that `segment_ids[j] == i`.
12982
12983If the product is empty for a given segment ID `i`, `output[i] = 1`.
12984
12985<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12986<img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
12987</div>
12988
12989For example:
12990
12991```
12992c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
12993tf.segment_prod(c, tf.constant([0, 0, 1]))
12994# ==> [[4, 6, 6, 4],
12995#      [5, 6, 7, 8]]
12996```
12997  }];
12998
12999  let arguments = (ins
13000    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
13001    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
13002first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
13003  );
13004
13005  let results = (outs
13006    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for dimension 0 which
13007has size `k`, the number of segments.}]>:$output
13008  );
13009
13010  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
13011  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13012}
13013
13014def TF_SegmentSumOp : TF_Op<"SegmentSum", [NoSideEffect]> {
13015  let summary = "Computes the sum along segments of a tensor.";
13016
13017  let description = [{
13018Read
13019[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
13020for an explanation of segments.
13021
13022Computes a tensor such that
13023\\(output_i = \sum_j data_j\\) where sum is over `j` such
13024that `segment_ids[j] == i`.
13025
13026If the sum is empty for a given segment ID `i`, `output[i] = 0`.
13027
13028<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
13029<img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
13030</div>
13031
13032For example:
13033
13034```
13035c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
13036tf.segment_sum(c, tf.constant([0, 0, 1]))
13037# ==> [[5, 5, 5, 5],
13038#      [5, 6, 7, 8]]
13039```
13040  }];
13041
13042  let arguments = (ins
13043    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
13044    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
13045first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
13046  );
13047
13048  let results = (outs
13049    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for dimension 0 which
13050has size `k`, the number of segments.}]>:$output
13051  );
13052
13053  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
13054  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13055}
13056
13057def TF_SelectOp : TF_Op<"Select", [NoSideEffect]> {
13058  let summary = "Selects elements from `x` or `y`, depending on `condition`.";
13059
13060  let description = [{
The `x` and `y` tensors must have the same shape, and the
output will also have that shape.
13063
13064The `condition` tensor must be a scalar if `x` and `y` are scalars.
13065If `x` and `y` are vectors or higher rank, then `condition` must be either a
13066scalar, a vector with size matching the first dimension of `x`, or must have
13067the same shape as `x`.
13068
13069The `condition` tensor acts as a mask that chooses, based on the value at each
13070element, whether the corresponding element / row in the output should be
13071taken from `x` (if true) or `y` (if false).
13072
13073If `condition` is a vector and `x` and `y` are higher rank matrices, then
13074it chooses which row (outer dimension) to copy from `x` and `y`.
13075If `condition` has the same shape as `x` and `y`, then it chooses which
13076element to copy from `x` and `y`.
13077
13078For example:
13079
13080```python
13081# 'condition' tensor is [[True,  False]
13082#                        [False, True]]
13083# 't' is [[1, 2],
13084#         [3, 4]]
13085# 'e' is [[5, 6],
13086#         [7, 8]]
13087select(condition, t, e)  # => [[1, 6], [7, 4]]
13088
13089
13090# 'condition' tensor is [True, False]
13091# 't' is [[1, 2],
13092#         [3, 4]]
13093# 'e' is [[5, 6],
13094#         [7, 8]]
13095select(condition, t, e) ==> [[1, 2],
13096                             [7, 8]]
13097
13098```
13099  }];
13100
13101  let arguments = (ins
13102    TF_BoolTensor:$condition,
    Arg<TF_Tensor, [{A `Tensor` which may have the same shape as `condition`.
If `condition` is rank 1, `x` may have higher rank,
but its first dimension must match the size of `condition`.}]>:$t,
    Arg<TF_Tensor, [{A `Tensor` with the same type and shape as `x`.}]>:$e
13107  );
13108
13109  let results = (outs
    Res<TF_Tensor, [{A `Tensor` with the same type and shape as `x` and `y`.}]>:$output
13111  );
13112
13113  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
13114
13115  let verifier = [{
13116    return Verify(*this);
13117  }];
13118}
13119
13120def TF_SelectV2Op : TF_Op<"SelectV2", [NoSideEffect, ResultsBroadcastableShape]> {
13121  let summary = "";
13122
13123  let arguments = (ins
13124    TF_BoolTensor:$condition,
13125    TF_Tensor:$t,
13126    TF_Tensor:$e
13127  );
13128
13129  let results = (outs
13130    TF_Tensor:$output
13131  );
13132
13133  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
13134
13135  let builders = [
13136    OpBuilder<(ins "Value":$condition, "Value":$e, "Value":$t)>
13137  ];
13138}
13139
13140def TF_SelfAdjointEigV2Op : TF_Op<"SelfAdjointEigV2", [NoSideEffect]> {
13141  let summary = [{
13142Computes the eigen decomposition of one or more square self-adjoint matrices.
13143  }];
13144
13145  let description = [{
13146Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
13147`input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues
13148are sorted in non-decreasing order.
13149
13150```python
13151# a is a tensor.
13152# e is a tensor of eigenvalues.
13153# v is a tensor of eigenvectors.
13154e, v = self_adjoint_eig(a)
13155e = self_adjoint_eig(a, compute_v=False)
13156```
13157  }];
13158
13159  let arguments = (ins
13160    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{`Tensor` input of shape `[N, N]`.}]>:$input,
13161
13162    DefaultValuedAttr<BoolAttr, "true">:$compute_v
13163  );
13164
13165  let results = (outs
13166    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Eigenvalues. Shape is `[N]`.}]>:$e,
13167    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Eigenvectors. Shape is `[N, N]`.}]>:$v
13168  );
13169
13170  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13171}
13172
13173def TF_SeluOp : TF_Op<"Selu", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13174  let summary = [{
13175Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`
13176  }];
13177
13178  let description = [{
if `features < 0`, and `scale * features` otherwise.
13180
13181To be used together with
13182`initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`.
13183For correct dropout, use `tf.contrib.nn.alpha_dropout`.
13184
13185See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
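
As a small illustration via `tf.nn.selu` (values are approximate):

```python
import tensorflow as tf

x = tf.constant([-1.0, 0.0, 1.0])
tf.nn.selu(x)  # => approximately [-1.1113, 0.0, 1.0507]
```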
13186  }];
13187
13188  let arguments = (ins
13189    TF_FloatTensor:$features
13190  );
13191
13192  let results = (outs
13193    TF_FloatTensor:$activations
13194  );
13195
13196  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13197}
13198
13199def TF_SeluGradOp : TF_Op<"SeluGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13200  let summary = [{
13201Computes gradients for the scaled exponential linear (Selu) operation.
13202  }];
13203
13204  let arguments = (ins
13205    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding Selu operation.}]>:$gradients,
13206    Arg<TF_FloatTensor, [{The outputs of the corresponding Selu operation.}]>:$outputs
13207  );
13208
13209  let results = (outs
13210    Res<TF_FloatTensor, [{The gradients: `gradients * (outputs + scale * alpha)`
13211if outputs < 0, `scale * gradients` otherwise.}]>:$backprops
13212  );
13213
13214  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13215}
13216
13217def TF_SendOp : TF_Op<"Send", []> {
13218  let summary = "Sends the named tensor from send_device to recv_device.";
13219
13220  let arguments = (ins
13221    Arg<TF_Tensor, [{The tensor to send.}]>:$tensor,
13222
13223    StrAttr:$tensor_name,
13224    StrAttr:$send_device,
13225    I64Attr:$send_device_incarnation,
13226    StrAttr:$recv_device,
13227    DefaultValuedAttr<BoolAttr, "false">:$client_terminated
13228  );
13229
13230  let results = (outs);
13231
13232  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13233}
13234
13235def TF_SerializeIteratorOp : TF_Op<"SerializeIterator", []> {
13236  let summary = [{
13237Converts the given `resource_handle` representing an iterator to a variant tensor.
13238  }];
13239
13240  let arguments = (ins
13241    Arg<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorRead]>:$resource_handle,
13242
13243    DefaultValuedAttr<I64Attr, "0">:$external_state_policy
13244  );
13245
13246  let results = (outs
13247    Res<TF_VariantTensor, [{A variant tensor storing the state of the iterator contained in the
13248resource.}]>:$serialized
13249  );
13250}
13251
13252def TF_SerializeSparseOp : TF_Op<"SerializeSparse", [NoSideEffect]> {
13253  let summary = "Serialize a `SparseTensor` into a `[3]` `Tensor` object.";
13254
13255  let arguments = (ins
13256    Arg<TF_Int64Tensor, [{2-D.  The `indices` of the `SparseTensor`.}]>:$sparse_indices,
13257    Arg<TF_Tensor, [{1-D.  The `values` of the `SparseTensor`.}]>:$sparse_values,
13258    Arg<TF_Int64Tensor, [{1-D.  The `shape` of the `SparseTensor`.}]>:$sparse_shape
13259  );
13260
13261  let results = (outs
13262    TensorOf<[TF_Str, TF_Variant]>:$serialized_sparse
13263  );
13264
13265  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
13266  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
13267}
13268
13269def TF_ShapeOp : TF_Op<"Shape", [NoSideEffect]> {
13270  let summary = "Returns the shape of a tensor.";
13271
13272  let description = [{
13273This operation returns a 1-D integer tensor representing the shape of `input`.
13274
13275For example:
13276
13277```
13278# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
13279shape(t) ==> [2, 2, 3]
13280```
13281  }];
13282
13283  let arguments = (ins
13284    TF_Tensor:$input
13285  );
13286
13287  let results = (outs
13288    TF_I32OrI64Tensor:$output
13289  );
13290
13291  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13292  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
13293
13294  let verifier = [{
13295    return Verify(*this);
13296  }];
13297
13298  let builders = [
13299    OpBuilder<(ins "Value":$input, "BoolAttr":$use32Bit)>
13300  ];
13301}
13302
13303def TF_ShapeNOp : TF_Op<"ShapeN", [NoSideEffect]> {
13304  let summary = "Returns shape of tensors.";
13305
13306  let description = [{
This operation returns `N` 1-D integer tensors representing the shapes of the `input[i]`s.
13308  }];
13309
13310  let arguments = (ins
13311    Variadic<TF_Tensor>:$input
13312  );
13313
13314  let results = (outs
13315    Variadic<TF_I32OrI64Tensor>:$output
13316  );
13317
13318  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13319  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
13320  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
13321
13322  let verifier = [{
13323    return Verify(*this);
13324  }];
13325}
13326
13327def TF_ShardedFilenameOp : TF_Op<"ShardedFilename", [NoSideEffect]> {
13328  let summary = [{
13329Generate a sharded filename. The filename is printf formatted as
13330  }];
13331
13332  let description = [{
`%s-%05d-of-%05d` with arguments `basename`, `shard`, `num_shards`.
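
Equivalently, in plain Python string formatting (a sketch of the expected
result, with example values):

```python
basename, shard, num_shards = "checkpoint", 3, 10   # example values
"%s-%05d-of-%05d" % (basename, shard, num_shards)   # => 'checkpoint-00003-of-00010'
```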
13334  }];
13335
13336  let arguments = (ins
13337    TF_StrTensor:$basename,
13338    TF_Int32Tensor:$shard,
13339    TF_Int32Tensor:$num_shards
13340  );
13341
13342  let results = (outs
13343    TF_StrTensor:$filename
13344  );
13345}
13346
13347def TF_ShuffleAndRepeatDatasetV2Op : TF_Op<"ShuffleAndRepeatDatasetV2", []> {
13348  let summary = "";
13349
13350  let arguments = (ins
13351    TF_VariantTensor:$input_dataset,
13352    TF_Int64Tensor:$buffer_size,
13353    TF_Int64Tensor:$seed,
13354    TF_Int64Tensor:$seed2,
13355    TF_Int64Tensor:$count,
13356    Arg<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorRead, TF_DatasetSeedGeneratorWrite]>:$seed_generator,
13357
13358    DefaultValuedAttr<BoolAttr, "true">:$reshuffle_each_iteration,
13359    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
13360    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
13361  );
13362
13363  let results = (outs
13364    TF_VariantTensor:$handle
13365  );
13366}
13367
13368def TF_ShuffleDatasetV2Op : TF_Op<"ShuffleDatasetV2", []> {
13369  let summary = "";
13370
13371  let arguments = (ins
13372    TF_VariantTensor:$input_dataset,
13373    TF_Int64Tensor:$buffer_size,
13374    Arg<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorRead, TF_DatasetSeedGeneratorWrite]>:$seed_generator,
13375
13376    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
13377    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
13378  );
13379
13380  let results = (outs
13381    TF_VariantTensor:$handle
13382  );
13383}
13384
13385def TF_ShuffleDatasetV3Op : TF_Op<"ShuffleDatasetV3", []> {
13386  let summary = "";
13387
13388  let arguments = (ins
13389    TF_VariantTensor:$input_dataset,
13390    TF_Int64Tensor:$buffer_size,
13391    TF_Int64Tensor:$seed,
13392    TF_Int64Tensor:$seed2,
13393    Arg<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorRead, TF_DatasetSeedGeneratorWrite]>:$seed_generator,
13394
13395    DefaultValuedAttr<BoolAttr, "true">:$reshuffle_each_iteration,
13396    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
13397    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
13398  );
13399
13400  let results = (outs
13401    TF_VariantTensor:$handle
13402  );
13403}
13404
13405def TF_ShutdownDistributedTPUOp : TF_Op<"ShutdownDistributedTPU", []> {
13406  let summary = "Shuts down a running distributed TPU system.";
13407
13408  let description = [{
13409The op returns an error if no system is running.
13410  }];
13411
13412  let arguments = (ins);
13413
13414  let results = (outs);
13415}
13416
13417def TF_SigmoidOp : TF_Op<"Sigmoid", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13418  let summary = "Computes sigmoid of `x` element-wise.";
13419
13420  let description = [{
13421Specifically, `y = 1 / (1 + exp(-x))`.
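
For example, via `tf.math.sigmoid` (values are approximate):

```python
import tensorflow as tf

tf.math.sigmoid(tf.constant([-1.0, 0.0, 1.0]))
# => approximately [0.2689, 0.5, 0.7311]
```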
13422  }];
13423
13424  let arguments = (ins
13425    TF_FpOrComplexTensor:$x
13426  );
13427
13428  let results = (outs
13429    TF_FpOrComplexTensor:$y
13430  );
13431
13432  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13433}
13434
13435def TF_SigmoidGradOp : TF_Op<"SigmoidGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13436  let summary = "Computes the gradient of the sigmoid of `x` wrt its input.";
13437
13438  let description = [{
13439Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
13440`dy` is the corresponding input gradient.
13441  }];
13442
13443  let arguments = (ins
13444    TF_FpOrComplexTensor:$y,
13445    TF_FpOrComplexTensor:$dy
13446  );
13447
13448  let results = (outs
13449    TF_FpOrComplexTensor:$z
13450  );
13451
13452  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13453}
13454
13455def TF_SignOp : TF_Op<"Sign", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
13456  let summary = "Returns an element-wise indication of the sign of a number.";
13457
13458  let description = [{
13459`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
13460
13461For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
13462
13463Example usage:
13464>>> tf.math.sign([0., 2., -3.])
13465<tf.Tensor: shape=(3,), dtype=float32, numpy=array([ 0.,  1., -1.], dtype=float32)>
13466  }];
13467
13468  let arguments = (ins
13469    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$x
13470  );
13471
13472  let results = (outs
13473    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$y
13474  );
13475
13476  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13477}
13478
13479def TF_SinOp : TF_Op<"Sin", [NoSideEffect, SameOperandsAndResultType]> {
13480  let summary = "Computes sine of x element-wise.";
13481
13482  let description = [{
13483Given an input tensor, this function computes sine of every
13484  element in the tensor. Input range is `(-inf, inf)` and
13485  output range is `[-1,1]`.
13486
13487  ```python
13488  x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")])
13489  tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan]
13490  ```
13491  }];
13492
13493  let arguments = (ins
13494    TF_FpOrComplexTensor:$x
13495  );
13496
13497  let results = (outs
13498    TF_FpOrComplexTensor:$y
13499  );
13500
13501  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13502}
13503
13504def TF_SinhOp : TF_Op<"Sinh", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13505  let summary = "Computes hyperbolic sine of x element-wise.";
13506
13507  let description = [{
13508Given an input tensor, this function computes hyperbolic sine of every
13509  element in the tensor. Input range is `[-inf,inf]` and output range
13510  is `[-inf,inf]`.
13511
13512  ```python
13513  x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
13514  tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf]
13515  ```
13516  }];
13517
13518  let arguments = (ins
13519    TF_FpOrComplexTensor:$x
13520  );
13521
13522  let results = (outs
13523    TF_FpOrComplexTensor:$y
13524  );
13525
13526  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13527}
13528
13529def TF_SizeOp : TF_Op<"Size", [NoSideEffect]> {
13530  let summary = "Returns the size of a tensor.";
13531
13532  let description = [{
13533This operation returns an integer representing the number of elements in
13534`input`.
13535
13536For example:
13537
13538```
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
13540size(t) ==> 12
13541```
13542  }];
13543
13544  let arguments = (ins
13545    TF_Tensor:$input
13546  );
13547
13548  let results = (outs
13549    TF_I32OrI64Tensor:$output
13550  );
13551
13552  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13553  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
13554
13555  let verifier = [{
13556    return Verify(*this);
13557  }];
13558}
13559
13560def TF_SliceOp : TF_Op<"Slice", [NoSideEffect]> {
13561  let summary = "Return a slice from 'input'.";
13562
13563  let description = [{
13564The output tensor is a tensor with dimensions described by 'size'
13565whose values are extracted from 'input' starting at the offsets in
13566'begin'.
13567
13568*Requirements*:
13569  0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n)
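
For example, a sketch using the `tf.slice` Python binding:

```python
import tensorflow as tf

t = tf.constant([[[1, 1, 1], [2, 2, 2]],
                 [[3, 3, 3], [4, 4, 4]],
                 [[5, 5, 5], [6, 6, 6]]])
tf.slice(t, begin=[1, 0, 0], size=[1, 1, 3])  # => [[[3, 3, 3]]]
tf.slice(t, begin=[1, 0, 0], size=[1, 2, 3])  # => [[[3, 3, 3], [4, 4, 4]]]
tf.slice(t, begin=[1, 0, 0], size=[2, 1, 3])  # => [[[3, 3, 3]], [[5, 5, 5]]]
```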
13570  }];
13571
13572  let arguments = (ins
13573    TF_Tensor:$input,
13574    Arg<TF_I32OrI64Tensor, [{begin[i] specifies the offset into the 'i'th dimension of
13575'input' to slice from.}]>:$begin,
13576    Arg<TF_I32OrI64Tensor, [{size[i] specifies the number of elements of the 'i'th dimension
13577of 'input' to slice. If size[i] is -1, all remaining elements in dimension
13578i are included in the slice (i.e. this is equivalent to setting
13579size[i] = input.dim_size(i) - begin[i]).}]>:$size
13580  );
13581
13582  let results = (outs
13583    TF_Tensor:$output
13584  );
13585
13586  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13587  TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr<1>;
13588
13589  let verifier = [{
13590    return Verify(*this);
13591  }];
13592}
13593
13594def TF_SnapshotOp : TF_Op<"Snapshot", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13595  let summary = "Returns a copy of the input tensor.";
13596
13597  let arguments = (ins
13598    TF_Tensor:$input
13599  );
13600
13601  let results = (outs
13602    TF_Tensor:$output
13603  );
13604
13605  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13606}
13607
13608def TF_SoftmaxOp : TF_Op<"Softmax", [NoSideEffect, SameOperandsAndResultType]> {
13609  let summary = "Computes softmax activations.";
13610
13611  let description = [{
13612For each batch `i` and class `j` we have
13613
13614    $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
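
For example, via `tf.nn.softmax` (values are approximate):

```python
import tensorflow as tf

logits = tf.constant([[1.0, 2.0, 3.0]])
tf.nn.softmax(logits)  # => approximately [[0.0900, 0.2447, 0.6652]]
```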
13615  }];
13616
13617  let arguments = (ins
13618    Arg<TF_FloatTensor, [{2-D with shape `[batch_size, num_classes]`.}]>:$logits
13619  );
13620
13621  let results = (outs
13622    Res<TF_FloatTensor, [{Same shape as `logits`.}]>:$softmax
13623  );
13624
13625  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13626
13627  let verifier = [{
13628    return Verify(*this);
13629  }];
13630}
13631
13632def TF_SoftmaxCrossEntropyWithLogitsOp : TF_Op<"SoftmaxCrossEntropyWithLogits", [NoSideEffect]> {
13633  let summary = [{
13634Computes softmax cross entropy cost and gradients to backpropagate.
13635  }];
13636
13637  let description = [{
13638Inputs are the logits, not probabilities.
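
A minimal sketch through the `tf.nn.softmax_cross_entropy_with_logits`
binding, with a one-hot label row:

```python
import tensorflow as tf

logits = tf.constant([[2.0, 0.5, -1.0]])
labels = tf.constant([[1.0, 0.0, 0.0]])  # a valid probability distribution per row
loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
# loss has shape [batch_size]
```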
13639  }];
13640
13641  let arguments = (ins
13642    Arg<TF_FloatTensor, [{batch_size x num_classes matrix}]>:$features,
13643    Arg<TF_FloatTensor, [{batch_size x num_classes matrix
13644The caller must ensure that each batch of labels represents a valid
13645probability distribution.}]>:$labels
13646  );
13647
13648  let results = (outs
13649    Res<TF_FloatTensor, [{Per example loss (batch_size vector).}]>:$loss,
13650    Res<TF_FloatTensor, [{backpropagated gradients (batch_size x num_classes matrix).}]>:$backprop
13651  );
13652
13653  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13654
13655  let verifier = [{
13656    return Verify(*this);
13657  }];
13658}
13659
13660def TF_SoftplusOp : TF_Op<"Softplus", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13661  let summary = "Computes softplus: `log(exp(features) + 1)`.";
13662
13663  let arguments = (ins
13664    TF_FloatTensor:$features
13665  );
13666
13667  let results = (outs
13668    TF_FloatTensor:$activations
13669  );
13670
13671  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13672}
13673
13674def TF_SoftplusGradOp : TF_Op<"SoftplusGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13675  let summary = "Computes softplus gradients for a softplus operation.";
13676
13677  let arguments = (ins
13678    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding softplus operation.}]>:$gradients,
13679    Arg<TF_FloatTensor, [{The features passed as input to the corresponding softplus operation.}]>:$features
13680  );
13681
13682  let results = (outs
13683    Res<TF_FloatTensor, [{The gradients: `gradients / (1 + exp(-features))`.}]>:$backprops
13684  );
13685
13686  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13687}
13688
13689def TF_SoftsignOp : TF_Op<"Softsign", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13690  let summary = "Computes softsign: `features / (abs(features) + 1)`.";
13691
13692  let arguments = (ins
13693    TF_FloatTensor:$features
13694  );
13695
13696  let results = (outs
13697    TF_FloatTensor:$activations
13698  );
13699
13700  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13701}
13702
13703def TF_SoftsignGradOp : TF_Op<"SoftsignGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13704  let summary = "Computes softsign gradients for a softsign operation.";
13705
13706  let arguments = (ins
13707    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding softsign operation.}]>:$gradients,
13708    Arg<TF_FloatTensor, [{The features passed as input to the corresponding softsign operation.}]>:$features
13709  );
13710
13711  let results = (outs
13712    Res<TF_FloatTensor, [{The gradients: `gradients / (1 + abs(features)) ** 2`.}]>:$backprops
13713  );
13714
13715  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13716}
13717
13718def TF_SpaceToBatchOp : TF_Op<"SpaceToBatch", [NoSideEffect]> {
13719  let summary = "SpaceToBatch for 4-D tensors of type T.";
13720
13721  let description = [{
13722This is a legacy version of the more general SpaceToBatchND.
13723
13724Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
13725More specifically, this op outputs a copy of the input tensor where values from
13726the `height` and `width` dimensions are moved to the `batch` dimension. After
13727the zero-padding, both `height` and `width` of the input must be divisible by the
13728block size.
13729  }];
13730
13731  let arguments = (ins
13732    Arg<TF_Tensor, [{4-D with shape `[batch, height, width, depth]`.}]>:$input,
13733    Arg<TF_I32OrI64Tensor, [{2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
13734  the padding of the input with zeros across the spatial dimensions as follows:
13735
13736      paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
13737
13738  The effective spatial dimensions of the zero-padded input tensor will be:
13739
13740      height_pad = pad_top + height + pad_bottom
13741      width_pad = pad_left + width + pad_right
13742
13743The attr `block_size` must be greater than one. It indicates the block size.
13744
  * Non-overlapping blocks of size `block_size x block_size` in the height and
13746    width dimensions are rearranged into the batch dimension at each location.
13747  * The batch of the output tensor is `batch * block_size * block_size`.
13748  * Both height_pad and width_pad must be divisible by block_size.
13749
13750The shape of the output will be:
13751
13752    [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
13753     depth]
13754
13755Some examples:
13756
13757(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
13758
13759```
13760x = [[[[1], [2]], [[3], [4]]]]
13761```
13762
13763The output tensor has shape `[4, 1, 1, 1]` and value:
13764
13765```
13766[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
13767```
13768
13769(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
13770
13771```
13772x = [[[[1, 2, 3], [4, 5, 6]],
13773      [[7, 8, 9], [10, 11, 12]]]]
13774```
13775
13776The output tensor has shape `[4, 1, 1, 3]` and value:
13777
13778```
13779[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
13780```
13781
13782(3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
13783
13784```
13785x = [[[[1],   [2],  [3],  [4]],
13786      [[5],   [6],  [7],  [8]],
13787      [[9],  [10], [11],  [12]],
13788      [[13], [14], [15],  [16]]]]
13789```
13790
13791The output tensor has shape `[4, 2, 2, 1]` and value:
13792
13793```
13794x = [[[[1], [3]], [[9], [11]]],
13795     [[[2], [4]], [[10], [12]]],
13796     [[[5], [7]], [[13], [15]]],
13797     [[[6], [8]], [[14], [16]]]]
13798```
13799
13800(4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
13801
13802```
13803x = [[[[1],   [2],  [3],  [4]],
13804      [[5],   [6],  [7],  [8]]],
13805     [[[9],  [10], [11],  [12]],
13806      [[13], [14], [15],  [16]]]]
13807```
13808
13809The output tensor has shape `[8, 1, 2, 1]` and value:
13810
13811```
13812x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
13813     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
13814```
13815
13816Among others, this operation is useful for reducing atrous convolution into
13817regular convolution.}]>:$paddings,
13818
13819    Confined<I64Attr, [IntMinValue<2>]>:$block_size
13820  );
13821
13822  let results = (outs
13823    TF_Tensor:$output
13824  );
13825
13826  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13827  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<1>;
13828}
13829
13830def TF_SpaceToBatchNDOp : TF_Op<"SpaceToBatchND", [NoSideEffect]> {
13831  let summary = "SpaceToBatch for N-D tensors of type T.";
13832
13833  let description = [{
13834This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
13835grid of blocks of shape `block_shape`, and interleaves these blocks with the
13836"batch" dimension (0) such that in the output, the spatial dimensions
13837`[1, ..., M]` correspond to the position within the grid, and the batch
13838dimension combines both the position within a spatial block and the original
13839batch position.  Prior to division into blocks, the spatial dimensions of the
13840input are optionally zero padded according to `paddings`.  See below for a
13841precise description.
13842  }];
13843
13844  let arguments = (ins
13845    Arg<TF_Tensor, [{N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
13846where spatial_shape has `M` dimensions.}]>:$input,
13847    Arg<TF_I32OrI64Tensor, [{1-D with shape `[M]`, all values must be >= 1.}]>:$block_shape,
13848    Arg<TF_I32OrI64Tensor, [{2-D with shape `[M, 2]`, all values must be >= 0.
13849  `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
13850  `i + 1`, which corresponds to spatial dimension `i`.  It is required that
13851  `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
13852
13853This operation is equivalent to the following steps:
13854
138551. Zero-pad the start and end of dimensions `[1, ..., M]` of the
13856   input according to `paddings` to produce `padded` of shape `padded_shape`.
13857
138582. Reshape `padded` to `reshaped_padded` of shape:
13859
13860     [batch] +
13861     [padded_shape[1] / block_shape[0],
13862       block_shape[0],
13863      ...,
13864      padded_shape[M] / block_shape[M-1],
13865      block_shape[M-1]] +
13866     remaining_shape
13867
138683. Permute dimensions of `reshaped_padded` to produce
13869   `permuted_reshaped_padded` of shape:
13870
13871     block_shape +
13872     [batch] +
13873     [padded_shape[1] / block_shape[0],
13874      ...,
13875      padded_shape[M] / block_shape[M-1]] +
13876     remaining_shape
13877
138784. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
13879   dimension, producing an output tensor of shape:
13880
13881     [batch * prod(block_shape)] +
13882     [padded_shape[1] / block_shape[0],
13883      ...,
13884      padded_shape[M] / block_shape[M-1]] +
13885     remaining_shape
13886
13887Some examples:
13888
13889(1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
13890    `paddings = [[0, 0], [0, 0]]`:
13891
13892```
13893x = [[[[1], [2]], [[3], [4]]]]
13894```
13895
13896The output tensor has shape `[4, 1, 1, 1]` and value:
13897
13898```
13899[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
13900```
13901
13902(2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
13903    `paddings = [[0, 0], [0, 0]]`:
13904
13905```
13906x = [[[[1, 2, 3], [4, 5, 6]],
13907      [[7, 8, 9], [10, 11, 12]]]]
13908```
13909
13910The output tensor has shape `[4, 1, 1, 3]` and value:
13911
13912```
13913[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
13914```
13915
13916(3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
13917    `paddings = [[0, 0], [0, 0]]`:
13918
13919```
13920x = [[[[1],   [2],  [3],  [4]],
13921      [[5],   [6],  [7],  [8]],
13922      [[9],  [10], [11],  [12]],
13923      [[13], [14], [15],  [16]]]]
13924```
13925
13926The output tensor has shape `[4, 2, 2, 1]` and value:
13927
13928```
13929x = [[[[1], [3]], [[9], [11]]],
13930     [[[2], [4]], [[10], [12]]],
13931     [[[5], [7]], [[13], [15]]],
13932     [[[6], [8]], [[14], [16]]]]
13933```
13934
13935(4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and
13936    paddings = `[[0, 0], [2, 0]]`:
13937
13938```
13939x = [[[[1],   [2],  [3],  [4]],
13940      [[5],   [6],  [7],  [8]]],
13941     [[[9],  [10], [11],  [12]],
13942      [[13], [14], [15],  [16]]]]
13943```
13944
13945The output tensor has shape `[8, 1, 3, 1]` and value:
13946
13947```
13948x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
13949     [[[0], [2], [4]]], [[[0], [10], [12]]],
13950     [[[0], [5], [7]]], [[[0], [13], [15]]],
13951     [[[0], [6], [8]]], [[[0], [14], [16]]]]
13952```
13953
13954Among others, this operation is useful for reducing atrous convolution into
13955regular convolution.}]>:$paddings
13956  );
13957
13958  let results = (outs
13959    TF_Tensor:$output
13960  );
13961
13962  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13963  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<2>;
13964  TF_DerivedOperandTypeAttr Tblock_shape = TF_DerivedOperandTypeAttr<1>;
13965
13966  let verifier = [{ return Verify(*this); }];
13967}
13968
13969def TF_SpaceToDepthOp : TF_Op<"SpaceToDepth", [NoSideEffect]> {
13970  let summary = "SpaceToDepth for tensors of type T.";
13971
13972  let description = [{
Rearranges blocks of spatial data into depth. More specifically,
13974this op outputs a copy of the input tensor where values from the `height`
13975and `width` dimensions are moved to the `depth` dimension.
13976The attr `block_size` indicates the input block size.
13977
  * Non-overlapping blocks of size `block_size x block_size` are rearranged
13979    into depth at each location.
13980  * The depth of the output tensor is `block_size * block_size * input_depth`.
13981  * The Y, X coordinates within each block of the input become the high order
13982    component of the output channel index.
13983  * The input tensor's height and width must be divisible by block_size.
13984
13985The `data_format` attr specifies the layout of the input and output tensors
13986with the following options:
13987  "NHWC": `[ batch, height, width, channels ]`
13988  "NCHW": `[ batch, channels, height, width ]`
13989  "NCHW_VECT_C":
13990      `qint8 [ batch, channels / 4, height, width, 4 ]`
13991
13992It is useful to consider the operation as transforming a 6-D Tensor.
13993e.g. for data_format = NHWC,
13994     Each element in the input tensor can be specified via 6 coordinates,
13995     ordered by decreasing memory layout significance as:
13996     n,oY,bY,oX,bX,iC  (where n=batch index, oX, oY means X or Y coordinates
13997                        within the output image, bX, bY means coordinates
13998                        within the input block, iC means input channels).
13999     The output would be a transpose to the following layout:
14000     n,oY,oX,bY,bX,iC
14001
14002This operation is useful for resizing the activations between convolutions
14003(but keeping all data), e.g. instead of pooling. It is also useful for training
14004purely convolutional models.
14005
14006For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and
14007block_size = 2:
14008
14009```
14010x = [[[[1], [2]],
14011      [[3], [4]]]]
14012```
14013
14014This operation will output a tensor of shape `[1, 1, 1, 4]`:
14015
14016```
14017[[[[1, 2, 3, 4]]]]
14018```
14019
14020Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,
14021the corresponding output will have a single element (i.e. width and height are
14022both 1) and will have a depth of 4 channels (1 * block_size * block_size).
14023The output element shape is `[1, 1, 4]`.
14024
14025For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
14026
14027```
14028x = [[[[1, 2, 3], [4, 5, 6]],
14029      [[7, 8, 9], [10, 11, 12]]]]
14030```
14031
14032This operation, for block_size of 2, will return the following tensor of shape
14033`[1, 1, 1, 12]`
14034
14035```
14036[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
14037```
14038
Similarly, for the following input of shape `[1, 4, 4, 1]` and a block size of 2:
14040
14041```
14042x = [[[[1],   [2],  [5],  [6]],
14043      [[3],   [4],  [7],  [8]],
14044      [[9],  [10], [13],  [14]],
14045      [[11], [12], [15],  [16]]]]
14046```
14047
the operator will return the following tensor of shape `[1, 2, 2, 4]`:
14049
14050```
14051x = [[[[1, 2, 3, 4],
14052       [5, 6, 7, 8]],
14053      [[9, 10, 11, 12],
14054       [13, 14, 15, 16]]]]
14055```
14056  }];
14057
14058  let arguments = (ins
14059    TF_Tensor:$input,
14060
14061    Confined<I64Attr, [IntMinValue<2>]>:$block_size,
14062    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NCHW_VECT_C"]>, "NHWC">:$data_format
14063  );
14064
14065  let results = (outs
14066    TF_Tensor:$output
14067  );
14068
14069  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14070}
14071
14072def TF_SparseFillEmptyRowsOp : TF_Op<"SparseFillEmptyRows", [NoSideEffect]> {
14073  let summary = [{
14074Fills empty rows in the input 2-D `SparseTensor` with a default value.
14075  }];
14076
14077  let description = [{
14078The input `SparseTensor` is represented via the tuple of inputs
14079(`indices`, `values`, `dense_shape`).  The output `SparseTensor` has the
14080same `dense_shape` but with indices `output_indices` and values
14081`output_values`.
14082
14083This op inserts a single entry for every row that doesn't have any values.
14084The index is created as `[row, 0, ..., 0]` and the inserted value
14085is `default_value`.
14086
14087For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
14088
14089    [0, 1]: a
14090    [0, 3]: b
14091    [2, 0]: c
14092    [3, 1]: d
14093
14094Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
14095
14096    [0, 1]: a
14097    [0, 3]: b
14098    [1, 0]: default_value
14099    [2, 0]: c
14100    [3, 1]: d
14101    [4, 0]: default_value
14102
14103The output `SparseTensor` will be in row-major order and will have the
14104same shape as the input.
14105
14106This op also returns an indicator vector shaped `[dense_shape[0]]` such that
14107
14108    empty_row_indicator[i] = True iff row i was an empty row.
14109
14110And a reverse index map vector shaped `[indices.shape[0]]` that is used during
14111backpropagation,
14112
14113    reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
14114  }];
14115
14116  let arguments = (ins
14117    Arg<TF_Int64Tensor, [{2-D. the indices of the sparse tensor.}]>:$indices,
14118    Arg<TF_Tensor, [{1-D. the values of the sparse tensor.}]>:$values,
14119    Arg<TF_Int64Tensor, [{1-D. the shape of the sparse tensor.}]>:$dense_shape,
    Arg<TF_Tensor, [{0-D. default value to insert into location `[row, 0, ..., 0]`
  for rows missing from the input sparse tensor.}]>:$default_value
14123  );
14124
14125  let results = (outs
    Res<TF_Int64Tensor, [{2-D. the indices of the filled sparse tensor.}]>:$output_indices,
14127    Res<TF_Tensor, [{1-D. the values of the filled sparse tensor.}]>:$output_values,
14128    Res<TF_BoolTensor, [{1-D. whether the dense row was missing in the
14129input sparse tensor.}]>:$empty_row_indicator,
14130    Res<TF_Int64Tensor, [{1-D. a map from the input indices to the output indices.}]>:$reverse_index_map
14131  );
14132
14133  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
14134}
14135
14136def TF_SparseMatMulOp : TF_Op<"SparseMatMul", [NoSideEffect]> {
14137  let summary = [{
14138Multiply matrix "a" by matrix "b".
14139  }];
14140
14141  let description = [{
14142The inputs must be two-dimensional matrices and the inner dimension of "a" must
14143match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not
14144`SparseTensor`s.  This op is optimized for the case where at least one of "a" or
14145"b" is sparse, in the sense that they have a large proportion of zero values.
The break-even point for using this versus a dense matrix multiply on one
platform was 30% zero values in the sparse matrix.
14148
14149The gradient computation of this operation will only take advantage of sparsity
14150in the input gradient when that gradient comes from a Relu.
14151  }];
14152
14153  let arguments = (ins
14154    TensorOf<[TF_Bfloat16, TF_Float32]>:$a,
14155    TensorOf<[TF_Bfloat16, TF_Float32]>:$b,
14156
14157    DefaultValuedAttr<BoolAttr, "false">:$transpose_a,
14158    DefaultValuedAttr<BoolAttr, "false">:$transpose_b,
14159    DefaultValuedAttr<BoolAttr, "false">:$a_is_sparse,
14160    DefaultValuedAttr<BoolAttr, "false">:$b_is_sparse
14161  );
14162
14163  let results = (outs
14164    TF_Float32Tensor:$product
14165  );
14166
14167  TF_DerivedOperandTypeAttr Ta = TF_DerivedOperandTypeAttr<0>;
14168  TF_DerivedOperandTypeAttr Tb = TF_DerivedOperandTypeAttr<1>;
14169}
14170
14171def TF_SparseReshapeOp : TF_Op<"SparseReshape", [NoSideEffect]> {
14172  let summary = [{
14173Reshapes a SparseTensor to represent values in a new dense shape.
14174  }];
14175
14176  let description = [{
14177This operation has the same semantics as reshape on the represented dense
14178tensor.  The `input_indices` are recomputed based on the requested `new_shape`.
14179
14180If one component of `new_shape` is the special value -1, the size of that
14181dimension is computed so that the total dense size remains constant.  At
14182most one component of `new_shape` can be -1.  The number of dense elements
14183implied by `new_shape` must be the same as the number of dense elements
14184originally implied by `input_shape`.
14185
14186Reshaping does not affect the order of values in the SparseTensor.
14187
14188If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
14189has length `R_out`, then `input_indices` has shape `[N, R_in]`,
14190`input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
14191`output_shape` has length `R_out`.
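
For instance, a sketch via `tf.sparse.reshape`: the value at dense position
`(1, 2)` of a `[2, 3]` tensor (row-major linear offset 5) moves to position
`(2, 1)` of the `[3, 2]` result:

```python
import tensorflow as tf

sp = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2],
                            dense_shape=[2, 3])
reshaped = tf.sparse.reshape(sp, [3, 2])
# reshaped.indices     => [[0, 0], [2, 1]]
# reshaped.dense_shape => [3, 2]
```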
14192  }];
14193
14194  let arguments = (ins
14195    Arg<TF_Int64Tensor, [{2-D.  `N x R_in` matrix with the indices of non-empty values in a
14196SparseTensor.}]>:$input_indices,
14197    Arg<TF_Int64Tensor, [{1-D.  `R_in` vector with the input SparseTensor's dense shape.}]>:$input_shape,
14198    Arg<TF_Int64Tensor, [{1-D.  `R_out` vector with the requested new dense shape.}]>:$new_shape
14199  );
14200
14201  let results = (outs
14202    Res<TF_Int64Tensor, [{2-D.  `N x R_out` matrix with the updated indices of non-empty
14203values in the output SparseTensor.}]>:$output_indices,
14204    Res<TF_Int64Tensor, [{1-D.  `R_out` vector with the full dense shape of the output
14205SparseTensor.  This is the same as `new_shape` but with any -1 dimensions
14206filled in.}]>:$output_shape
14207  );
14208}
14209
14210def TF_SparseSegmentMeanOp : TF_Op<"SparseSegmentMean", [NoSideEffect]> {
14211  let summary = "Computes the mean along sparse segments of a tensor.";
14212
14213  let description = [{
14214See `tf.sparse.segment_sum` for usage examples.
14215
14216Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
14217dimension, selecting a subset of dimension 0, specified by `indices`.
14218  }];
14219
14220  let arguments = (ins
14221    TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64]>:$data,
14222    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Has same rank as `segment_ids`.}]>:$indices,
14223    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Values should be sorted and can be repeated.}]>:$segment_ids
14224  );
14225
14226  let results = (outs
14227    Res<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64]>, [{Has same shape as data, except for dimension 0 which
14228has size `k`, the number of segments.}]>:$output
14229  );
14230
14231  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14232  TF_DerivedOperandTypeAttr Tsegmentids = TF_DerivedOperandTypeAttr<2>;
14233  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
14234}
14235
14236def TF_SparseSegmentSqrtNOp : TF_Op<"SparseSegmentSqrtN", [NoSideEffect]> {
14237  let summary = [{
14238Computes the sum along sparse segments of a tensor divided by the sqrt of N.
14239  }];
14240
14241  let description = [{
14242N is the size of the segment being reduced.
14243
14244See `tf.sparse.segment_sum` for usage examples.
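
For example, via `tf.sparse.segment_sqrt_n` (values are approximate): both
selected rows fall in one segment, so `N = 2` and the row sum is divided by
`sqrt(2)`:

```python
import tensorflow as tf

c = tf.constant([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]])
tf.sparse.segment_sqrt_n(c, tf.constant([0, 1]), tf.constant([0, 0]))
# => approximately [[4.2426, 5.6569, 7.0711, 8.4853]]
```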
14245  }];
14246
14247  let arguments = (ins
14248    TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64]>:$data,
14249    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Has same rank as `segment_ids`.}]>:$indices,
14250    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Values should be sorted and can be repeated.}]>:$segment_ids
14251  );
14252
14253  let results = (outs
14254    Res<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64]>, [{Has same shape as data, except for dimension 0 which
14255has size `k`, the number of segments.}]>:$output
14256  );
14257
14258  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14259  TF_DerivedOperandTypeAttr Tsegmentids = TF_DerivedOperandTypeAttr<2>;
14260  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
14261}
14262
14263def TF_SparseSegmentSumOp : TF_Op<"SparseSegmentSum", [NoSideEffect]> {
14264  let summary = "Computes the sum along sparse segments of a tensor.";
14265
14266  let description = [{
14267Read
14268[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
14269for an explanation of segments.
14270
14271Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
14272dimension, selecting a subset of dimension 0, specified by `indices`.
14273
14274For example:
14275
14276```python
14277c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
14278
14279# Select two rows, one segment.
14280tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
14281# => [[0 0 0 0]]
14282
# Select two rows, two segments.
14284tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
14285# => [[ 1  2  3  4]
14286#     [-1 -2 -3 -4]]
14287
14288# Select all rows, two segments.
14289tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
14290# => [[0 0 0 0]
14291#     [5 6 7 8]]
14292
14293# Which is equivalent to:
14294tf.segment_sum(c, tf.constant([0, 0, 1]))
14295```
14296  }];
14297
14298  let arguments = (ins
14299    TF_IntOrFpTensor:$data,
14300    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Has same rank as `segment_ids`.}]>:$indices,
14301    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Values should be sorted and can be repeated.}]>:$segment_ids
14302  );
14303
14304  let results = (outs
14305    Res<TF_IntOrFpTensor, [{Has same shape as data, except for dimension 0 which
14306has size `k`, the number of segments.}]>:$output
14307  );
14308
14309  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14310  TF_DerivedOperandTypeAttr Tsegmentids = TF_DerivedOperandTypeAttr<2>;
14311  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
14312}
14313
14314def TF_SparseSoftmaxCrossEntropyWithLogitsOp : TF_Op<"SparseSoftmaxCrossEntropyWithLogits", [NoSideEffect]> {
14315  let summary = [{
14316Computes softmax cross entropy cost and gradients to backpropagate.
14317  }];
14318
14319  let description = [{
14320Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
14321a matrix of label probabilities, but rather a single label per row
14322of features.  This label is considered to have probability 1.0 for the
14323given row.
14324
14325Inputs are the logits, not probabilities.
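
As a sketch, the public wrapper
`tf.nn.sparse_softmax_cross_entropy_with_logits` exposes this computation
(the `backprop` output is consumed by the gradient machinery):

```python
logits = tf.constant([[2.0, 0.5, 1.0]])  # batch_size x num_classes
labels = tf.constant([0])                # one class index per row
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=labels, logits=logits)        # shape [batch_size]
```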
14326  }];
14327
14328  let arguments = (ins
14329    Arg<TF_FloatTensor, [{batch_size x num_classes matrix}]>:$features,
14330    Arg<TF_I32OrI64Tensor, [{batch_size vector with values in [0, num_classes).
14331This is the label for the given minibatch entry.}]>:$labels
14332  );
14333
14334  let results = (outs
14335    Res<TF_FloatTensor, [{Per example loss (batch_size vector).}]>:$loss,
14336    Res<TF_FloatTensor, [{backpropagated gradients (batch_size x num_classes matrix).}]>:$backprop
14337  );
14338
14339  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14340  TF_DerivedOperandTypeAttr Tlabels = TF_DerivedOperandTypeAttr<1>;
14341
14342  let verifier = [{ return Verify(*this); }];
14343}
14344
14345def TF_SparseToDenseOp : TF_Op<"SparseToDense", [NoSideEffect]> {
14346  let summary = "Converts a sparse representation into a dense tensor.";
14347
14348  let description = [{
14349Builds an array `dense` with shape `output_shape` such that
14350
14351```
14352# If sparse_indices is scalar
14353dense[i] = (i == sparse_indices ? sparse_values : default_value)
14354
14355# If sparse_indices is a vector, then for each i
14356dense[sparse_indices[i]] = sparse_values[i]
14357
14358# If sparse_indices is an n by d matrix, then for each i in [0, n)
14359dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
14360```
14361
14362All other values in `dense` are set to `default_value`.  If `sparse_values` is a
14363scalar, all sparse indices are set to this single value.
14364
14365Indices should be sorted in lexicographic order, and indices must not
14366contain any repeats. If `validate_indices` is true, these properties
14367are checked during execution.
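
For example, assuming the `tf.raw_ops` binding for this op:

```python
tf.raw_ops.SparseToDense(
    sparse_indices=[[0, 0], [1, 2]],
    output_shape=[3, 4],
    sparse_values=[1, 2],
    default_value=0)
# => [[1 0 0 0]
#     [0 0 2 0]
#     [0 0 0 0]]
```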
14368  }];
14369
14370  let arguments = (ins
14371    Arg<TF_I32OrI64Tensor, [{0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete
14372index where `sparse_values[i]` will be placed.}]>:$sparse_indices,
14373    Arg<TF_I32OrI64Tensor, [{1-D.  Shape of the dense output tensor.}]>:$output_shape,
14374    Arg<TF_Tensor, [{1-D.  Values corresponding to each row of `sparse_indices`,
14375or a scalar value to be used for all sparse indices.}]>:$sparse_values,
14376    Arg<TF_Tensor, [{Scalar value to set for indices not specified in
14377`sparse_indices`.}]>:$default_value,
14378
14379    DefaultValuedAttr<BoolAttr, "true">:$validate_indices
14380  );
14381
14382  let results = (outs
14383    Res<TF_Tensor, [{Dense output tensor of shape `output_shape`.}]>:$dense
14384  );
14385
14386  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<0>;
14387  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
14388}
14389
14390def TF_SplitOp : TF_Op<"Split", [NoSideEffect]> {
14391  let summary = "Splits a tensor into `num_split` tensors along one dimension.";
14392
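  let description = [{
For illustration, a sketch using the Python wrapper `tf.split`, which lowers
to this op when given an integer number of splits (shapes are hypothetical):

```python
# 'value' is a tensor with shape [5, 30].
s0, s1, s2 = tf.split(value, num_or_size_splits=3, axis=1)
# Each of s0, s1, s2 has shape [5, 10].
```
  }];
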
14393  let arguments = (ins
14394    Arg<TF_Int32Tensor, [{0-D.  The dimension along which to split.  Must be in the range
14395`[-rank(value), rank(value))`.}]>:$split_dim,
14396    Arg<TF_Tensor, [{The tensor to split.}]>:$value
14397  );
14398
14399  let results = (outs
    Res<Variadic<TF_Tensor>, [{Identically shaped tensors whose shape matches that of `value`
except along `split_dim`, where their sizes are
`value.shape[split_dim] / num_split`.}]>:$output
14403  );
14404
14405  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
14406  TF_DerivedResultSizeAttr num_split = TF_DerivedResultSizeAttr<0>;
14407
14408  let verifier = [{ return Verify(*this); }];
14409}
14410
14411def TF_SplitVOp : TF_Op<"SplitV", [NoSideEffect]> {
14412  let summary = "Splits a tensor into `num_split` tensors along one dimension.";
14413
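  let description = [{
For illustration, a sketch using the Python wrapper `tf.split` with an
explicit list of sizes, which lowers to this op (shapes are hypothetical):

```python
# 'value' is a tensor with shape [5, 30].
s0, s1, s2 = tf.split(value, num_or_size_splits=[4, 15, 11], axis=1)
# s0.shape == [5, 4]; s1.shape == [5, 15]; s2.shape == [5, 11]
```
  }];
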
14414  let arguments = (ins
14415    Arg<TF_Tensor, [{The tensor to split.}]>:$value,
14416    Arg<TF_I32OrI64Tensor, [{list containing the sizes of each output tensor along the split
14417dimension. Must sum to the dimension of value along split_dim.
14418Can contain one -1 indicating that dimension is to be inferred.}]>:$size_splits,
14419    Arg<TF_Int32Tensor, [{0-D.  The dimension along which to split.  Must be in the range
14420`[-rank(value), rank(value))`.}]>:$split_dim
14421  );
14422
14423  let results = (outs
14424    Res<Variadic<TF_Tensor>, [{Tensors whose shape matches that of `value`
except along `split_dim`, where their sizes are
14426`size_splits[i]`.}]>:$output
14427  );
14428
14429  TF_DerivedOperandTypeAttr Tlen = TF_DerivedOperandTypeAttr<1>;
14430  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14431  TF_DerivedResultSizeAttr num_split = TF_DerivedResultSizeAttr<0>;
14432
14433  let verifier = [{ return Verify(*this); }];
14434}
14435
14436def TF_SqrtOp : TF_Op<"Sqrt", [NoSideEffect, SameOperandsAndResultType]> {
14437  let summary = "Computes square root of x element-wise.";
14438
14439  let description = [{
14440I.e., \\(y = \sqrt{x} = x^{1/2}\\).
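
For example, via the Python wrapper `tf.math.sqrt`:

```python
x = tf.constant([4.0, 9.0, 16.0])
tf.math.sqrt(x)  # => [2. 3. 4.]
```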
14441  }];
14442
14443  let arguments = (ins
14444    TF_FpOrComplexTensor:$x
14445  );
14446
14447  let results = (outs
14448    TF_FpOrComplexTensor:$y
14449  );
14450
14451  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14452}
14453
14454def TF_SqrtGradOp : TF_Op<"SqrtGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
14455  let summary = "Computes the gradient for the sqrt of `x` wrt its input.";
14456
14457  let description = [{
14458Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
14459is the corresponding input gradient.
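
For instance, with `x = 4.0` (so `y = 2.0`) and `dy = 1.0`, the result is
`grad = 1.0 * 0.5 / 2.0 = 0.25`.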
14460  }];
14461
14462  let arguments = (ins
14463    TF_FpOrComplexTensor:$y,
14464    TF_FpOrComplexTensor:$dy
14465  );
14466
14467  let results = (outs
14468    TF_FpOrComplexTensor:$z
14469  );
14470
14471  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14472}
14473
14474def TF_SquareOp : TF_Op<"Square", [NoSideEffect, SameOperandsAndResultType]> {
14475  let summary = "Computes square of x element-wise.";
14476
14477  let description = [{
14478I.e., \\(y = x * x = x^2\\).
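
For example, via the Python wrapper `tf.math.square`:

```python
x = tf.constant([-2, 3])
tf.math.square(x)  # => [4, 9]
```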
14479  }];
14480
14481  let arguments = (ins
14482    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
14483  );
14484
14485  let results = (outs
14486    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
14487  );
14488
14489  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14490}
14491
14492def TF_SquaredDifferenceOp : TF_Op<"SquaredDifference", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
14493                             WithBroadcastableBinOpBuilder {
14494  let summary = "Returns conj(x - y)(x - y) element-wise.";
14495
14496  let description = [{
14497*NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
14498[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
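
For example, via the Python wrapper `tf.math.squared_difference`:

```python
x = tf.constant([1.0, 5.0])
y = tf.constant([3.0, 1.0])
tf.math.squared_difference(x, y)  # => [4. 16.]
```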
14499  }];
14500
14501  let arguments = (ins
14502    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$x,
14503    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$y
14504  );
14505
14506  let results = (outs
14507    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$z
14508  );
14509
14510  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14511}
14512
14513def TF_SqueezeOp : TF_Op<"Squeeze", [NoSideEffect]> {
14514  let summary = "Removes dimensions of size 1 from the shape of a tensor.";
14515
14516  let description = [{
14517Given a tensor `input`, this operation returns a tensor of the same type with
14518all dimensions of size 1 removed. If you don't want to remove all size 1
14519dimensions, you can remove specific size 1 dimensions by specifying
14520`axis`.
14521
14522For example:
14523
14524```
14525# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
14526shape(squeeze(t)) ==> [2, 3]
14527```
14528
14529Or, to remove specific size 1 dimensions:
14530
14531```
14532# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
14533shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
14534```
14535  }];
14536
14537  let arguments = (ins
14538    Arg<TF_Tensor, [{The `input` to squeeze.}]>:$input,
14539
14540    DefaultValuedAttr<I64ArrayAttr, "{}">:$squeeze_dims
14541  );
14542
14543  let results = (outs
14544    Res<TF_Tensor, [{Contains the same data as `input`, but has one or more dimensions of
14545size 1 removed.}]>:$output
14546  );
14547
14548  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14549}
14550
14551def TF_StackCloseV2Op : TF_Op<"StackCloseV2", []> {
14552  let summary = "Delete the stack from its resource container.";
14553
14554  let arguments = (ins
14555    Arg<TF_ResourceTensor, [{The handle to a stack.}], [TF_StackFree]>:$handle
14556  );
14557
14558  let results = (outs);
14559}
14560
14561def TF_StackPopV2Op : TF_Op<"StackPopV2", []> {
14562  let summary = "Pop the element at the top of the stack.";
14563
14564  let arguments = (ins
14565    Arg<TF_ResourceTensor, [{The handle to a stack.}], [TF_StackRead, TF_StackWrite]>:$handle
14566  );
14567
14568  let results = (outs
14569    Res<TF_Tensor, [{The tensor that is popped from the top of the stack.}]>:$elem
14570  );
14571
14572  TF_DerivedResultTypeAttr elem_type = TF_DerivedResultTypeAttr<0>;
14573}
14574
14575def TF_StackPushV2Op : TF_Op<"StackPushV2", []> {
14576  let summary = "Push an element onto the stack.";
14577
14578  let arguments = (ins
14579    Arg<TF_ResourceTensor, [{The handle to a stack.}], [TF_StackRead, TF_StackWrite]>:$handle,
14580    Arg<TF_Tensor, [{The tensor to be pushed onto the stack.}]>:$elem,
14581
14582    DefaultValuedAttr<BoolAttr, "false">:$swap_memory
14583  );
14584
14585  let results = (outs
14586    Res<TF_Tensor, [{The same tensor as the input 'elem'.}]>:$output
14587  );
14588
14589  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
14590}
14591
14592def TF_StackV2Op : TF_Op<"StackV2", []> {
14593  let summary = "A stack that produces elements in first-in last-out order.";
14594
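  let description = [{
A sketch of the push/pop lifecycle, assuming the `tf.raw_ops` bindings for
this op family:

```python
handle = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32)
tf.raw_ops.StackPushV2(handle=handle, elem=tf.constant(1.0))
top = tf.raw_ops.StackPopV2(handle=handle, elem_type=tf.float32)  # => 1.0
```
  }];
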
14595  let arguments = (ins
14596    Arg<TF_Int32Tensor, [{The maximum size of the stack if non-negative. If negative, the stack
14597size is unlimited.}]>:$max_size,
14598
14599    TypeAttr:$elem_type,
14600    StrAttr:$stack_name
14601  );
14602
14603  let results = (outs
14604    Res<TF_ResourceTensor, [{The handle to the stack.}], [TF_StackAlloc]>:$handle
14605  );
14606}
14607
14608def TF_StatelessMultinomialOp : TF_Op<"StatelessMultinomial", [NoSideEffect, TF_NoConstantFold]> {
14609  let summary = "Draws samples from a multinomial distribution.";
14610
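  let description = [{
A sketch using the Python wrapper `tf.random.stateless_categorical`, which
lowers to this op:

```python
logits = tf.math.log([[0.5, 0.5]])  # 1 x num_classes
samples = tf.random.stateless_categorical(
    logits, num_samples=4, seed=[3, 4])  # shape [1, 4]
```
  }];
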
14611  let arguments = (ins
14612    Arg<TF_IntOrFpTensor, [{2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
14613represents the unnormalized log probabilities for all classes.}]>:$logits,
14614    Arg<TF_Int32Tensor, [{0-D.  Number of independent samples to draw for each row slice.}]>:$num_samples,
14615    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
14616  );
14617
14618  let results = (outs
14619    Res<TF_I32OrI64Tensor, [{2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
14620contains the drawn class labels with range `[0, num_classes)`.}]>:$output
14621  );
14622
14623  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14624  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<2>;
14625  TF_DerivedResultTypeAttr output_dtype = TF_DerivedResultTypeAttr<0>;
14626}
14627
14628def TF_StatelessParameterizedTruncatedNormalOp : TF_Op<"StatelessParameterizedTruncatedNormal", [NoSideEffect, TF_NoConstantFold]> {
  let summary = "Outputs deterministic pseudorandom values from a parameterized truncated normal distribution.";
14630
14631  let arguments = (ins
14632    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
14633    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
14634    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The mean parameter of each batch.}]>:$means,
14635    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The standard deviation parameter of each batch. Must be greater than 0.}]>:$stddevs,
14636    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The minimum cutoff. May be -infinity.}]>:$minvals,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The maximum cutoff. May be +infinity, and must be greater than the minval
14638for each batch.}]>:$maxvals
14639  );
14640
14641  let results = (outs
14642    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The outputs are truncated normal samples and are a deterministic function of
14643`shape`, `seed`, `minvals`, `maxvals`, `means` and `stddevs`.}]>:$output
14644  );
14645
14646  TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
14647  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
14648  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
14649}
14650
14651def TF_StatelessRandomBinomialOp : TF_Op<"StatelessRandomBinomial", [NoSideEffect, TF_NoConstantFold]> {
14652  let summary = [{
Outputs deterministic pseudorandom numbers from a binomial distribution.
14654  }];
14655
14656  let description = [{
14657Outputs random values from a binomial distribution.
14658
14659The outputs are a deterministic function of `shape`, `seed`, `counts`, and `probs`.
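
For example, via the Python wrapper `tf.random.stateless_binomial` (assuming
the TF 2.x API):

```python
tf.random.stateless_binomial(
    shape=[2], seed=[1, 2], counts=10., probs=0.5)
```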
14660  }];
14661
14662  let arguments = (ins
14663    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
14664    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
14665    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The counts of the binomial distribution. Must be broadcastable with `probs`,
14666and broadcastable with the rightmost dimensions of `shape`.}]>:$counts,
14667    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The probability of success for the binomial distribution. Must be broadcastable
14668with `counts` and broadcastable with the rightmost dimensions of `shape`.}]>:$probs
14669  );
14670
14671  let results = (outs
14672    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Random values with specified shape.}]>:$output
14673  );
14674
14675  TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
14676  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
14677  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
14678  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
14679}
14680
14681def TF_StatelessRandomGammaV2Op : TF_Op<"StatelessRandomGammaV2", [NoSideEffect, TF_NoConstantFold]> {
14682  let summary = [{
Outputs deterministic pseudorandom numbers from a gamma distribution.
14684  }];
14685
14686  let description = [{
14687Outputs random values from a gamma distribution.
14688
14689The outputs are a deterministic function of `shape`, `seed`, and `alpha`.
14690  }];
14691
14692  let arguments = (ins
14693    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
14694    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
14695    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The concentration of the gamma distribution. Shape must match the rightmost
14696dimensions of `shape`.}]>:$alpha
14697  );
14698
14699  let results = (outs
14700    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{Random values with specified shape.}]>:$output
14701  );
14702
14703  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14704  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
14705  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
14706}
14707
14708def TF_StatelessRandomGetAlgOp : TF_Op<"StatelessRandomGetAlg", []> {
14709  let summary = "Picks the best counter-based RNG algorithm based on device.";
14710
14711  let description = [{
14712This op picks the best counter-based RNG algorithm based on device.
14713  }];
14714
14715  let arguments = (ins);
14716
14717  let results = (outs
14718    Res<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
14719  );
14720}
14721
14722def TF_StatelessRandomGetKeyCounterOp : TF_Op<"StatelessRandomGetKeyCounter", []> {
14723  let summary = [{
14724Scrambles seed into key and counter, using the best algorithm based on device.
14725  }];
14726
14727  let description = [{
This op scrambles a shape-[2] seed into a key and a counter, both needed by counter-based RNG algorithms. The scrambling uses the best algorithm based on device. The scrambling is opaque but approximately satisfies the property that different seeds result in different key/counter pairs (which will in turn result in different random numbers).
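
For example, assuming the `tf.raw_ops` binding for this op:

```python
key, counter = tf.raw_ops.StatelessRandomGetKeyCounter(seed=[1, 2])
```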
14729  }];
14730
14731  let arguments = (ins
14732    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
14733  );
14734
14735  let results = (outs
14736    Res<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
14737    Res<TF_Uint64Tensor, [{Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).}]>:$counter
14738  );
14739
14740  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<0>;
14741}
14742
14743def TF_StatelessRandomGetKeyCounterAlgOp : TF_Op<"StatelessRandomGetKeyCounterAlg", [NoSideEffect, TF_NoConstantFold]> {
14744  let summary = [{
14745Picks the best algorithm based on device, and scrambles seed into key and counter.
14746  }];
14747
14748  let description = [{
This op picks the best counter-based RNG algorithm based on device, and scrambles a shape-[2] seed into a key and a counter, both needed by the counter-based algorithm. The scrambling is opaque but approximately satisfies the property that different seeds result in different key/counter pairs (which will in turn result in different random numbers).
14750  }];
14751
14752  let arguments = (ins
14753    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
14754  );
14755
14756  let results = (outs
14757    Res<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
14758    Res<TF_Uint64Tensor, [{Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).}]>:$counter,
14759    Res<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
14760  );
14761
14762  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<0>;
14763}
14764
14765def TF_StatelessRandomNormalOp : TF_Op<"StatelessRandomNormal", [NoSideEffect, TF_NoConstantFold]> {
14766  let summary = [{
14767Outputs deterministic pseudorandom values from a normal distribution.
14768  }];
14769
14770  let description = [{
14771The generated values will have mean 0 and standard deviation 1.
14772
14773The outputs are a deterministic function of `shape` and `seed`.
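
For example, via the Python wrapper `tf.random.stateless_normal` (which may
lower to this op or to its V2 variant, depending on the runtime):

```python
tf.random.stateless_normal(shape=[2, 3], seed=[7, 17])
# Re-running with the same shape and seed yields identical values.
```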
14774  }];
14775
14776  let arguments = (ins
14777    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
14778    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
14779  );
14780
14781  let results = (outs
14782    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
14783  );
14784
14785  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14786  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
14787  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
14788}
14789
14790def TF_StatelessRandomNormalV2Op : TF_Op<"StatelessRandomNormalV2", [NoSideEffect]> {
14791  let summary = [{
14792Outputs deterministic pseudorandom values from a normal distribution.
14793  }];
14794
14795  let description = [{
14796The generated values will have mean 0 and standard deviation 1.
14797
14798The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
14799  }];
14800
14801  let arguments = (ins
14802    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
14803    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
14804    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
14805    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
14806  );
14807
14808  let results = (outs
14809    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
14810  );
14811
14812  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
14813  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
14814}
14815
14816def TF_StatelessRandomPoissonOp : TF_Op<"StatelessRandomPoisson", [NoSideEffect, TF_NoConstantFold]> {
14817  let summary = [{
Outputs deterministic pseudorandom numbers from a Poisson distribution.
14819  }];
14820
14821  let description = [{
14822Outputs random values from a Poisson distribution.
14823
14824The outputs are a deterministic function of `shape`, `seed`, and `lam`.
14825  }];
14826
14827  let arguments = (ins
14828    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
14829    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
14830    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The rate of the Poisson distribution. Shape must match the rightmost dimensions
14831of `shape`.}]>:$lam
14832  );
14833
14834  let results = (outs
14835    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Random values with specified shape.}]>:$output
14836  );
14837
14838  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14839  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
14840  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
14841  TF_DerivedOperandTypeAttr Rtype = TF_DerivedOperandTypeAttr<2>;
14842}
14843
14844def TF_StatelessRandomUniformOp : TF_Op<"StatelessRandomUniform", [NoSideEffect, TF_NoConstantFold]> {
14845  let summary = [{
Outputs deterministic pseudorandom values from a uniform distribution.
14847  }];
14848
14849  let description = [{
14850The generated values follow a uniform distribution in the range `[0, 1)`. The
14851lower bound 0 is included in the range, while the upper bound 1 is excluded.
14852
14853The outputs are a deterministic function of `shape` and `seed`.
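
For example, via the Python wrapper `tf.random.stateless_uniform` (which may
lower to this op or to its V2 variant, depending on the runtime):

```python
tf.random.stateless_uniform(shape=[2], seed=[1, 2])
# Re-running with the same shape and seed yields identical values in [0, 1).
```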
14854  }];
14855
14856  let arguments = (ins
14857    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
14858    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
14859  );
14860
14861  let results = (outs
14862    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
14863  );
14864
14865  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14866  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
14867  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
14868}
14869
14870def TF_StatelessRandomUniformFullIntOp : TF_Op<"StatelessRandomUniformFullInt", [NoSideEffect, TF_NoConstantFold]> {
14871  let summary = [{
Outputs deterministic pseudorandom integers from a uniform distribution.
14873  }];
14874
14875  let description = [{
14876The generated values are uniform integers covering the whole range of `dtype`.
14877
14878The outputs are a deterministic function of `shape` and `seed`.
14879  }];
14880
14881  let arguments = (ins
14882    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
14883    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{2 seeds (shape [2]).}]>:$seed
14884  );
14885
14886  let results = (outs
14887    Res<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Random values with specified shape.}]>:$output
14888  );
14889
14890  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14891  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
14892  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
14893}
14894
14895def TF_StatelessRandomUniformFullIntV2Op : TF_Op<"StatelessRandomUniformFullIntV2", [NoSideEffect]> {
14896  let summary = [{
Outputs deterministic pseudorandom integers from a uniform distribution.
14898  }];
14899
14900  let description = [{
14901The generated values are uniform integers covering the whole range of `dtype`.
14902
14903The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
14904  }];
14905
14906  let arguments = (ins
14907    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
14908    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
14909    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
14910    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
14911  );
14912
14913  let results = (outs
14914    Res<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Random values with specified shape.}]>:$output
14915  );
14916
14917  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
14918  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
14919}
14920
14921def TF_StatelessRandomUniformIntOp : TF_Op<"StatelessRandomUniformInt", [NoSideEffect, TF_NoConstantFold]> {
14922  let summary = [{
Outputs deterministic pseudorandom integers from a uniform distribution.
14924  }];
14925
14926  let description = [{
14927The generated values follow a uniform distribution in the range `[minval, maxval)`.
14928
14929The outputs are a deterministic function of `shape`, `seed`, `minval`, and `maxval`.
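
For example, via the Python wrapper `tf.random.stateless_uniform` with an
integer `dtype` (which requires explicit `minval` and `maxval`):

```python
tf.random.stateless_uniform(
    shape=[4], seed=[1, 2], minval=0, maxval=10, dtype=tf.int32)
```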
14930  }];
14931
14932  let arguments = (ins
14933    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
14934    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
14935    Arg<TF_I32OrI64Tensor, [{Minimum value (inclusive, scalar).}]>:$minval,
14936    Arg<TF_I32OrI64Tensor, [{Maximum value (exclusive, scalar).}]>:$maxval
14937  );
14938
14939  let results = (outs
14940    Res<TF_I32OrI64Tensor, [{Random values with specified shape.}]>:$output
14941  );
14942
14943  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14944  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
14945  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
14946}
14947
14948def TF_StatelessRandomUniformIntV2Op : TF_Op<"StatelessRandomUniformIntV2", [NoSideEffect]> {
14949  let summary = [{
Outputs deterministic pseudorandom integers from a uniform distribution.
14951  }];
14952
14953  let description = [{
14954The generated values follow a uniform distribution in the range `[minval, maxval)`.
14955
14956The outputs are a deterministic function of `shape`, `key`, `counter`, `alg`, `minval` and `maxval`.
14957  }];
14958
14959  let arguments = (ins
14960    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
14961    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
14962    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
14963    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg,
14964    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Minimum value (inclusive, scalar).}]>:$minval,
14965    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Maximum value (exclusive, scalar).}]>:$maxval
14966  );
14967
14968  let results = (outs
14969    Res<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Random values with specified shape.}]>:$output
14970  );
14971
14972  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
14973  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<4>;
14974}
14975
14976def TF_StatelessRandomUniformV2Op : TF_Op<"StatelessRandomUniformV2", [NoSideEffect]> {
14977  let summary = [{
Outputs deterministic pseudorandom values from a uniform distribution.
14979  }];
14980
14981  let description = [{
14982The generated values follow a uniform distribution in the range `[0, 1)`. The
14983lower bound 0 is included in the range, while the upper bound 1 is excluded.
14984
14985The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
14986  }];
14987
14988  let arguments = (ins
14989    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
14990    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
14991    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
14992    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
14993  );
14994
14995  let results = (outs
14996    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
14997  );
14998
14999  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
15000  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
15001}
15002
15003def TF_StatelessTruncatedNormalOp : TF_Op<"StatelessTruncatedNormal", [NoSideEffect, TF_NoConstantFold]> {
15004  let summary = [{
15005Outputs deterministic pseudorandom values from a truncated normal distribution.
15006  }];
15007
15008  let description = [{
15009The generated values follow a normal distribution with mean 0 and standard
15010deviation 1, except that values whose magnitude is more than 2 standard
15011deviations from the mean are dropped and re-picked.
15012
15013The outputs are a deterministic function of `shape` and `seed`.
15014  }];
15015
15016  let arguments = (ins
15017    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
15018    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
15019  );
15020
15021  let results = (outs
15022    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
15023  );
15024
15025  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
15026  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
15027  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
15028}
15029
15030def TF_StatelessTruncatedNormalV2Op : TF_Op<"StatelessTruncatedNormalV2", [NoSideEffect]> {
15031  let summary = [{
15032Outputs deterministic pseudorandom values from a truncated normal distribution.
15033  }];
15034
15035  let description = [{
15036The generated values follow a normal distribution with mean 0 and standard
15037deviation 1, except that values whose magnitude is more than 2 standard
15038deviations from the mean are dropped and re-picked.
15039
15040The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
15041  }];
15042
15043  let arguments = (ins
15044    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
15045    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
15046    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
15047    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
15048  );
15049
15050  let results = (outs
15051    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
15052  );
15053
15054  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
15055  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
15056}
15057
15058def TF_StopGradientOp : TF_Op<"StopGradient", [NoSideEffect, TF_AllTypesMatch<["input", "output"]>]> {
15059  let summary = "Stops gradient computation.";
15060
15061  let description = [{
15062When executed in a graph, this op outputs its input tensor as-is.
15063
When building ops to compute gradients, this op prevents the contribution of
its inputs from being taken into account.  Normally, the gradient generator adds
ops to a graph to compute the derivatives of a specified 'loss' by recursively
finding the inputs that contributed to its computation.  If you insert this op
in the graph, its inputs are masked from the gradient generator.  They are not
15069taken into account for computing gradients.
15070
15071This is useful any time you want to compute a value with TensorFlow but need
15072to pretend that the value was a constant. For example, the softmax function
15073for a vector x can be written as
15074
15075```python
15076
15077  def softmax(x):
15078    numerator = tf.exp(x)
15079    denominator = tf.reduce_sum(numerator)
15080    return numerator / denominator
15081```
15082
This, however, is susceptible to overflow if the values in x are large. A
more stable alternative is to subtract the maximum of x from each of the
values.
15086
15087```python
15088
15089  def stable_softmax(x):
15090    z = x - tf.reduce_max(x)
15091    numerator = tf.exp(z)
15092    denominator = tf.reduce_sum(numerator)
15093    return numerator / denominator
15094```
15095
However, when we backprop through the softmax to x, we don't want to backprop
through the `tf.reduce_max(x)` calculation (if the max values are not unique,
the gradient could flow to the wrong input); we want to treat it as a
constant. Therefore, we should write this out as
15100
15101```python
15102
15103  def stable_softmax(x):
15104    z = x - tf.stop_gradient(tf.reduce_max(x))
15105    numerator = tf.exp(z)
15106    denominator = tf.reduce_sum(numerator)
15107    return numerator / denominator
15108```
15109
15110Some other examples include:
15111
15112*  The *EM* algorithm where the *M-step* should not involve backpropagation
15113   through the output of the *E-step*.
15114*  Contrastive divergence training of Boltzmann machines where, when
15115   differentiating the energy function, the training must not backpropagate
15116   through the graph that generated the samples from the model.
15117*  Adversarial training, where no backprop should happen through the adversarial
15118   example generation process.
15119  }];
15120
15121  let arguments = (ins
15122    TF_Tensor:$input
15123  );
15124
15125  let results = (outs
15126    TF_Tensor:$output
15127  );
15128
15129  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
15130}
15131
15132def TF_StridedSliceOp : TF_Op<"StridedSlice", [NoSideEffect]> {
15133  let summary = "Return a strided slice from `input`.";
15134
15135  let description = [{
Note: most Python users will want to use the Python `Tensor.__getitem__`
15137or `Variable.__getitem__` rather than this op directly.
15138
15139The goal of this op is to produce a new tensor with a subset of
15140the elements from the `n` dimensional `input` tensor. The subset is chosen using
15141a sequence of `m` sparse range specifications encoded into the arguments
15142of this function. Note, in some cases
15143`m` could be equal to `n`, but this need not be the case. Each
15144range specification entry can be one of the following:
15145
15146- An ellipsis (...). Ellipses are used to imply zero or more
15147  dimensions of full-dimension selection and are produced using
15148  `ellipsis_mask`. For example, `foo[...]` is the identity slice.
15149
15150- A new axis. This is used to insert a new shape=1 dimension and is
15151  produced using `new_axis_mask`. For example, `foo[:, ...]` where
15152  `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
15153
15154
15155- A range `begin:end:stride`. This is used to specify how much to choose from
15156  a given dimension. `stride` can be any integer but 0.  `begin` is an integer
15157  which represents the index of the first value to select while `end` represents
15158  the index of the last value to select. The number of values selected in each
15159  dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
15160  `begin` and `end` can be negative where `-1` is the last element, `-2` is
15161  the second to last. `begin_mask` controls whether to replace the explicitly
15162  given `begin` with an implicit effective value of `0` if `stride > 0` and
15163  `-1` if `stride < 0`. `end_mask` is analogous but produces the number
15164  required to create the largest open interval. For example, given a shape
15165  `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
15166  not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
  and `end` of `0` and `2`. Another example is `foo[-2::-1]`, which reverses a
  tensor's first dimension while dropping its last element (in the original
  order). For example, `foo = [1,2,3,4]; foo[-2::-1]` is `[3,2,1]`.
15170
15171- A single index. This is used to keep only elements that have a given
  index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a
15173  shape `(6,)` tensor. This is encoded in `begin` and `end` and
15174  `shrink_axis_mask`.
15175
Each conceptual range specification is encoded in the op's arguments. This
encoding is best understood by considering a non-trivial example. In
15178particular,
15179`foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
15180
15181```
15182begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
15183end = [2, 4, x, x, -3, x]
15184strides = [1, 1, x, x, -1, 1]
15185begin_mask = 1<<4 | 1<<5 = 48
15186end_mask = 1<<5 = 32
15187ellipsis_mask = 1<<3 = 8
15188new_axis_mask = 1<<2 = 4
15189shrink_axis_mask = 1<<0 = 1
15190```
15191
15192In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
15193the slice becomes (2, 1, 5, 5, 2, 5).
15194Let us walk step by step through each argument specification.
15195
151961.  The first argument in the example slice is turned into `begin = 1` and
15197`end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
15198also set the appropriate bit in `shrink_axis_mask`.
15199
2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks contribute
zero bits.
15202
3. None is a synonym for `tf.newaxis`. This means inserting a size-1
dimension in the final shape. Dummy values are contributed to begin,
15205end and stride, while the new_axis_mask bit is set.
15206
4. `...` grabs the full ranges from as many dimensions as needed to
15208fully specify a slice for every dimension of the input shape.
15209
152105. `:-3:-1` shows the use of negative indices. A negative index `i` associated
15211with a dimension that has shape `s` is converted to a positive index
15212`s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
15213is done internally so begin, end and strides receive x, -3, and -1.
15214The appropriate begin_mask bit is set to indicate the start range is the
15215full range (ignoring the x).
15216
152176. `:` indicates that the entire contents of the corresponding dimension
are selected. This is equivalent to `::` or `0::1`. begin, end, and strides
15219receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
15220`end_mask` are also set.
15221
15222*Requirements*:
15223  `0 != strides[i] for i in [0, m)`
15224  `ellipsis_mask must be a power of two (only one ellipsis)`
15225  }];
15226
15227  let arguments = (ins
15228    TF_Tensor:$input,
15229    Arg<TF_I32OrI64Tensor, [{`begin[k]` specifies the offset into the `k`th range specification.
15230The exact dimension this corresponds to will be determined by context.
15231Out-of-bounds values will be silently clamped. If the `k`th bit of
`begin_mask` is set, then `begin[k]` is ignored and the full range of the
appropriate dimension is used instead. Negative values cause indexing
to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.}]>:$begin,
15235    Arg<TF_I32OrI64Tensor, [{`end[i]` is like `begin` with the exception that `end_mask` is
15236used to determine full ranges.}]>:$end,
15237    Arg<TF_I32OrI64Tensor, [{`strides[i]` specifies the increment in the `i`th specification
15238after extracting a given element. Negative indices will reverse
the original order. Out-of-range values are
15240clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`}]>:$strides,
15241
15242    DefaultValuedAttr<I64Attr, "0">:$begin_mask,
15243    DefaultValuedAttr<I64Attr, "0">:$end_mask,
15244    DefaultValuedAttr<I64Attr, "0">:$ellipsis_mask,
15245    DefaultValuedAttr<I64Attr, "0">:$new_axis_mask,
15246    DefaultValuedAttr<I64Attr, "0">:$shrink_axis_mask
15247  );
15248
15249  let results = (outs
15250    TF_Tensor:$output
15251  );
15252
15253  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
15254  TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr<1>;
15255
15256  let verifier = [{ return VerifyStridedSliceBase(*this); }];
15257
15258  let extraClassDeclaration = [{
    // If the sliced shape can be deduced, returns true and updates
    // `slice_begin`, `slice_end`, and `slice_stride` with their canonical
    // values, respectively.
15262    bool GetSlicedBoundRanges(
15263      ::llvm::SmallVectorImpl<int64_t> *slice_begin,
15264      ::llvm::SmallVectorImpl<int64_t> *slice_end,
15265      ::llvm::SmallVectorImpl<int64_t> *slice_stride);
15266  }];
15267}
15268
15269def TF_StridedSliceGradOp : TF_Op<"StridedSliceGrad", [NoSideEffect]> {
15270  let summary = "Returns the gradient of `StridedSlice`.";
15271
15272  let description = [{
Since `StridedSlice` cuts out pieces of its `input`, which has size
15274`shape`, its gradient will have the same shape (which is passed here
15275as `shape`). The gradient will be zero in any element that the slice
15276does not select.
15277
Arguments are the same as for `StridedSlice`, with the exception that
15279`dy` is the input gradient to be propagated and `shape` is the
15280shape of `StridedSlice`'s `input`.
15281  }];
15282
15283  let arguments = (ins
15284    TF_I32OrI64Tensor:$shape,
15285    TF_I32OrI64Tensor:$begin,
15286    TF_I32OrI64Tensor:$end,
15287    TF_I32OrI64Tensor:$strides,
15288    TF_Tensor:$dy,
15289
15290    DefaultValuedAttr<I64Attr, "0">:$begin_mask,
15291    DefaultValuedAttr<I64Attr, "0">:$end_mask,
15292    DefaultValuedAttr<I64Attr, "0">:$ellipsis_mask,
15293    DefaultValuedAttr<I64Attr, "0">:$new_axis_mask,
15294    DefaultValuedAttr<I64Attr, "0">:$shrink_axis_mask
15295  );
15296
15297  let results = (outs
15298    TF_Tensor:$output
15299  );
15300
15301  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<4>;
15302  TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr<0>;
15303
15304  let verifier = [{ return Verify(*this); }];
15305
15306  let extraClassDeclaration = [{
    // If the sliced shape can be deduced, returns true, updates `input_shape`
    // with the final shape after performing StridedSlice, and updates
    // `slice_begin`, `slice_end`, and `slice_stride` with their canonical
    // values, respectively.
15311    bool GetSlicedShapeAndBoundRanges(
15312      ::llvm::SmallVectorImpl<int64_t> *input_shape,
15313      ::llvm::SmallVectorImpl<int64_t> *slice_begin,
15314      ::llvm::SmallVectorImpl<int64_t> *slice_end,
15315      ::llvm::SmallVectorImpl<int64_t> *slice_stride);
15316  }];
15317}
15318
15319def TF_StringJoinOp : TF_Op<"StringJoin", [NoSideEffect]> {
15320  let summary = [{
Joins the strings in the given list of string tensors into one tensor.
15322  }];
15323
15324  let description = [{
The strings are joined with the given separator (default: an empty separator).
15326
15327Examples:
15328
15329>>> s = ["hello", "world", "tensorflow"]
15330>>> tf.strings.join(s, " ")
15331<tf.Tensor: shape=(), dtype=string, numpy=b'hello world tensorflow'>
15332  }];
15333
15334  let arguments = (ins
15335    Arg<Variadic<TF_StrTensor>, [{A list of string tensors.  The tensors must all have the same shape,
15336or be scalars.  Scalars may be mixed in; these will be broadcast to the shape
15337of non-scalar inputs.}]>:$inputs,
15338
15339    StrAttr:$separator
15340  );
15341
15342  let results = (outs
15343    TF_StrTensor:$output
15344  );
15345
15346  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
15347}
15348
15349def TF_StringToHashBucketFastOp : TF_Op<"StringToHashBucketFast", [NoSideEffect]> {
15350  let summary = [{
Converts each string in the input Tensor to its hash modulo a number of buckets.
15352  }];
15353
15354  let description = [{
15355The hash function is deterministic on the content of the string within the
15356process and will never change. However, it is not suitable for cryptography.
15357This function may be used when CPU time is scarce and inputs are trusted or
15358unimportant. There is a risk of adversaries constructing inputs that all hash
15359to the same bucket. To prevent this problem, use a strong hash function with
15360`tf.string_to_hash_bucket_strong`.
15361
15362Examples:
15363
15364>>> tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], 3).numpy()
15365array([0, 2, 2])
15366  }];
15367
15368  let arguments = (ins
15369    Arg<TF_StrTensor, [{The strings to assign a hash bucket.}]>:$input,
15370
15371    Confined<I64Attr, [IntMinValue<1>]>:$num_buckets
15372  );
15373
15374  let results = (outs
15375    Res<TF_Int64Tensor, [{A Tensor of the same shape as the input `string_tensor`.}]>:$output
15376  );
15377}
15378
15379def TF_SubOp : TF_Op<"Sub", [NoSideEffect, ResultsBroadcastableShape, TF_CwiseBinary, TF_SameOperandsAndResultElementTypeResolveRef]>,
15380               WithBroadcastableBinOpBuilder {
15381  let summary = "Returns x - y element-wise.";
15382
15383  let description = [{
15384*NOTE*: `Subtract` supports broadcasting. More about broadcasting
15385[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
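
For example, via the Python wrapper `tf.subtract`:

```python
tf.subtract(tf.constant([10, 20]), tf.constant([1, 2]))  # => [9, 18]
tf.subtract(tf.constant([10, 20]), 5)  # broadcasting => [5, 15]
```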
15386  }];
15387
15388  let arguments = (ins
15389    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
15390    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
15391  );
15392
15393  let results = (outs
15394    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
15395  );
15396
15397  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
15398}
15399
15400def TF_SumOp : TF_Op<"Sum", [NoSideEffect]> {
15401  let summary = "Computes the sum of elements across dimensions of a tensor.";
15402
15403  let description = [{
15404Reduces `input` along the dimensions given in `axis`. Unless
15405`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
15406`axis`. If `keep_dims` is true, the reduced dimensions are
15407retained with length 1.
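
For example, via the Python wrapper `tf.reduce_sum`:

```python
x = tf.constant([[1, 1, 1], [1, 1, 1]])
tf.reduce_sum(x)                    # => 6
tf.reduce_sum(x, 0)                 # => [2, 2, 2]
tf.reduce_sum(x, 1, keepdims=True)  # => [[3], [3]]
```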
15408  }];
15409
15410  let arguments = (ins
15411    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
15412    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
15413`[-rank(input), rank(input))`.}]>:$reduction_indices,
15414
15415    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
15416  );
15417
15418  let results = (outs
15419    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
15420  );
15421
15422  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
15423  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
15424
15425  let builders = [
15426    OpBuilder<(ins "Value":$input, "Value":$reduction_indices,
15427      "BoolAttr":$keep_dims)>
15428  ];
15429}
15430
15431def TF_SvdOp : TF_Op<"Svd", [NoSideEffect]> {
15432  let summary = [{
15433Computes the singular value decompositions of one or more matrices.
15434  }];
15435
15436  let description = [{
15437Computes the SVD of each inner matrix in `input` such that
15438`input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
15439
15440```python
15441# a is a tensor containing a batch of matrices.
15442# s is a tensor of singular values for each matrix.
15443# u is the tensor containing the left singular vectors for each matrix.
15444# v is the tensor containing the right singular vectors for each matrix.
15445s, u, v = svd(a)
15446s, _, _ = svd(a, compute_uv=False)
15447```
15448  }];
15449
15450  let arguments = (ins
15451    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
15452form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.}]>:$input,
15453
15454    DefaultValuedAttr<BoolAttr, "true">:$compute_uv,
15455    DefaultValuedAttr<BoolAttr, "false">:$full_matrices
15456  );
15457
15458  let results = (outs
15459    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Singular values. Shape is `[..., P]`.}]>:$s,
15460    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Left singular vectors. If `full_matrices` is `False` then shape is
15461`[..., M, P]`; if `full_matrices` is `True` then shape is
15462`[..., M, M]`. Undefined if `compute_uv` is `False`.}]>:$u,
    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Right singular vectors. If `full_matrices` is `False` then shape is
15464`[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
15465Undefined if `compute_uv` is false.}]>:$v
15466  );
15467
15468  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
15469}
15470
15471def TF_SymbolicGradientOp : TF_Op<"SymbolicGradient", [NoSideEffect]> {
15472  let summary = [{
15473Computes the gradient function for function f via backpropagation.
15474  }];
15475
15476  let arguments = (ins
15477    Arg<Variadic<TF_Tensor>, [{a list of input tensors of size N + M;}]>:$input,
15478
15479    SymbolRefAttr:$f
15480  );
15481
15482  let results = (outs
15483    Res<Variadic<TF_Tensor>, [{a list of output tensors of size N;}]>:$output
15484  );
15485
15486  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>;
15487  TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr<0>;
15488}
15489
15490def TF_TPUCompilationResultOp : TF_Op<"TPUCompilationResult", []> {
15491  let summary = "Returns the result of a TPU compilation.";
15492
15493  let description = [{
15494This operation returns the result of a TPU compilation as a serialized
15495CompilationResultProto, which holds a status and an error message if an error
15496occurred during compilation.
15497  }];
15498
15499  let arguments = (ins);
15500
15501  let results = (outs
15502    TF_StrTensor:$output
15503  );
15504}
15505
15506def TF_TPUCompileSucceededAssertOp : TF_Op<"TPUCompileSucceededAssert", []> {
15507  let summary = [{
Asserts that compilation succeeded.
15509  }];
15510
15511  let description = [{
This op produces no output; it closes the device on failure to ensure all pending device interactions fail.
15513
15514'compilation_status' is a serialized CompilationResultProto.
15515  }];
15516
15517  let arguments = (ins
15518    TF_StrTensor:$compilation_status
15519  );
15520
15521  let results = (outs);
15522}
15523
15524def TF_TPUCopyWithLayoutOp : TF_Op<"TPUCopyWithLayout", [NoSideEffect]> {
15525  let summary = "Op that copies host tensor to device with specified layout.";
15526
15527  let description = [{
15528For internal use only.
15529  }];
15530
15531  let arguments = (ins
15532    TF_Tensor:$input,
15533    TF_Int64Tensor:$layout
15534  );
15535
15536  let results = (outs
15537    TF_Tensor:$output
15538  );
15539
15540  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
15541}
15542
15543def TF_TPUEmbeddingActivationsOp : TF_Op<"TPUEmbeddingActivations", [NoSideEffect]> {
15544  let summary = "An op enabling differentiation of TPU Embeddings.";
15545
15546  let description = [{
15547This op simply returns its first input, which is assumed to have been sliced
15548from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of
15549this op, and its first argument being a trainable Variable, enables automatic
15550differentiation of graphs containing embeddings via the TPU Embedding Python
15551libraries.
15552  }];
15553
15554  let arguments = (ins
15555    Arg<TF_Float32Tensor, [{A trainable variable, enabling optimizers to find this op.}]>:$embedding_variable,
15556    Arg<TF_Float32Tensor, [{The embedding activations Tensor to return.}]>:$sliced_activations,
15557
15558    Confined<I64Attr, [IntMinValue<0>]>:$table_id,
15559    Confined<I64Attr, [IntMinValue<0>]>:$lookup_id
15560  );
15561
15562  let results = (outs
15563    TF_Float32Tensor:$output
15564  );
15565}
15566
15567def TF_TPUExecuteOp : TF_Op<"TPUExecute", []> {
15568  let summary = "Op that loads and executes a TPU program on a TPU device.";
15569
15570  let description = [{
15571For the internal use of the distributed TPU compiler.
15572  }];
15573
15574  let arguments = (ins
15575    Variadic<TF_Tensor>:$args,
15576    TF_StrTensor:$key
15577  );
15578
15579  let results = (outs
15580    Variadic<TF_Tensor>:$results
15581  );
15582
15583  TF_DerivedOperandTypeListAttr Targs = TF_DerivedOperandTypeListAttr<0>;
15584  TF_DerivedResultTypeListAttr Tresults = TF_DerivedResultTypeListAttr<0>;
15585}
15586
15587def TF_TPUExecuteAndUpdateVariablesOp : TF_Op<"TPUExecuteAndUpdateVariables", []> {
15588  let summary = [{
15589Op that executes a program with optional in-place variable updates.
15590  }];
15591
15592  let description = [{
15593It (optionally) reads device variables, loads and executes a TPU program on a
15594TPU device, and then (optionally) in-place updates variables using the program
15595outputs, as specified in attributes device_var_reads_indices (program input
15596indices from directly reading variables) and device_var_updates_indices (program
output indices used to update variables; -1 means no-update/read-only).
Program outputs consumed by these variable updates will not appear in the op
output. For the internal use of the distributed TPU compiler.
15600  }];
15601
15602  let arguments = (ins
15603    Variadic<TF_Tensor>:$args,
15604    TF_StrTensor:$key,
15605
15606    I64ArrayAttr:$device_var_reads_indices,
15607    I64ArrayAttr:$device_var_updates_indices
15608  );
15609
15610  let results = (outs
15611    Variadic<TF_Tensor>:$results
15612  );
15613
15614  TF_DerivedOperandTypeListAttr Targs = TF_DerivedOperandTypeListAttr<0>;
15615  TF_DerivedResultTypeListAttr Tresults = TF_DerivedResultTypeListAttr<0>;
15616
15617  let verifier = [{ return Verify(*this); }];
15618}
15619
15620def TF_TPUGetLayoutOp : TF_Op<"TPUGetLayoutOp", [NoSideEffect]> {
15621  let summary = [{
15622Op that retrieves the layout of an input or output determined by TPUCompile.
15623  }];
15624
15625  let description = [{
15626For internal use only.
15627  }];
15628
15629  let arguments = (ins
15630    TF_StrTensor:$cache_key,
15631
15632    I64Attr:$index,
15633    BoolAttr:$is_output
15634  );
15635
15636  let results = (outs
15637    TF_Int64Tensor:$layout
15638  );
15639}
15640
15641def TF_TPUOrdinalSelectorOp : TF_Op<"TPUOrdinalSelector", []> {
15642  let summary = "A TPU core selector Op.";
15643
15644  let description = [{
15645This Op produces a set of TPU cores (for warm-up) or a single TPU core
15646(for regular inference) to execute the TPU program on. The output is
15647consumed by TPUPartitionedCall.
15648  }];
15649
15650  let arguments = (ins);
15651
15652  let results = (outs
    Res<TF_Int32Tensor, [{A vector of 1 or more TPU cores.}]>:$device_ordinals
15654  );
15655}
15656
15657def TF_TPUReplicatedInputOp : TF_Op<"TPUReplicatedInput", [NoSideEffect]> {
15658  let summary = "Connects N inputs to an N-way replicated TPU computation.";
15659
15660  let description = [{
15661This operation holds a replicated input to a `tpu.replicate()` computation subgraph.
Each replicated input has the same shape and type as the output.
15663
15664For example:
15665```
15666%a = "tf.opA"()
15667%b = "tf.opB"()
15668%replicated_input = "tf.TPUReplicatedInput"(%a, %b)
15669%computation = "tf.Computation"(%replicated_input)
15670```
15671The above computation has a replicated input of two replicas.
15672  }];
15673
15674  let arguments = (ins
15675    Variadic<TF_Tensor>:$inputs,
15676
15677    DefaultValuedAttr<BoolAttr, "false">:$is_mirrored_variable,
15678    DefaultValuedAttr<I64Attr, "-1">:$index,
15679    DefaultValuedAttr<BoolAttr, "false">:$is_packed
15680  );
15681
15682  let results = (outs
15683    TF_Tensor:$output
15684  );
15685
15686  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
15687  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
15688}
15689
15690def TF_TPUReplicatedOutputOp : TF_Op<"TPUReplicatedOutput", [NoSideEffect]> {
15691  let summary = "Connects N outputs from an N-way replicated TPU computation.";
15692
15693  let description = [{
15694This operation holds a replicated output from a `tpu.replicate()` computation subgraph.
Each replicated output has the same shape and type as the input.
15696
15697For example:
15698```
15699%computation = "tf.Computation"()
15700%replicated_output:2 = "tf.TPUReplicatedOutput"(%computation)
15701```
15702The above computation has a replicated output of two replicas.
15703  }];
15704
15705  let arguments = (ins
15706    TF_Tensor:$input
15707  );
15708
15709  let results = (outs
15710    Variadic<TF_Tensor>:$outputs
15711  );
15712
15713  TF_DerivedResultSizeAttr num_replicas = TF_DerivedResultSizeAttr<0>;
15714  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
15715}
15716
15717def TF_TPUReshardVariablesOp : TF_Op<"TPUReshardVariables", []> {
15718  let summary = [{
15719Op that reshards on-device TPU variables to specified state. Internal use only.
15720  }];
15721
15722  let description = [{
15723The sharding state is represented as the key of the compilation that generated
15724the sharding/unsharding programs along with the main program. new_format_key
15725specifies the desired state, and format_state_var is the current state of the
15726variables.
15727  }];
15728
15729  let arguments = (ins
15730    Arg<Variadic<TF_ResourceTensor>, "", [TF_VariableRead, TF_VariableWrite]>:$vars,
15731    TF_StrTensor:$new_format_key,
15732    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$format_state_var
15733  );
15734
15735  let results = (outs);
15736
15737  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
15738}
15739
15740def TF_TanOp : TF_Op<"Tan", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
15741  let summary = "Computes tan of x element-wise.";
15742
15743  let description = [{
Given an input tensor, this function computes the tangent of every
  element in the tensor. The input range is `(-inf, inf)` and the
  output range is `(-inf, inf)`. For non-finite inputs such as `inf`,
  `nan` is returned.
15748
15749  ```python
15750  x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
15751  tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan]
15752  ```
15753  }];
15754
15755  let arguments = (ins
15756    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
15757  );
15758
15759  let results = (outs
15760    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
15761  );
15762
15763  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
15764}
15765
15766def TF_TanhOp : TF_Op<"Tanh", [NoSideEffect, TF_LayoutAgnostic, TF_SameOperandsAndResultTypeResolveRef]> {
15767  let summary = "Computes hyperbolic tangent of `x` element-wise.";
15768
15769  let description = [{
15770Given an input tensor, this function computes hyperbolic tangent of every
15771  element in the tensor. Input range is `[-inf, inf]` and
15772  output range is `[-1,1]`.
15773
15774  >>> x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")])
15775  >>> tf.math.tanh(x)
15776  <tf.Tensor: shape=(8,), dtype=float32, numpy=
15777  array([-1.        , -0.99990916, -0.46211717,  0.7615942 ,  0.8336547 ,
15778          0.9640276 ,  0.9950547 ,  1.        ], dtype=float32)>
15779  }];
15780
15781  let arguments = (ins
15782    TF_FpOrComplexTensor:$x
15783  );
15784
15785  let results = (outs
15786    TF_FpOrComplexTensor:$y
15787  );
15788
15789  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
15790}
15791
15792def TF_TanhGradOp : TF_Op<"TanhGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
15793  let summary = "Computes the gradient for the tanh of `x` wrt its input.";
15794
15795  let description = [{
15796Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
15797is the corresponding input gradient.
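
As an informal sketch (not part of the generated definition), the same
formula written against the public Python API, with `dy` standing in for the
incoming gradient:

```python
import tensorflow as tf

x = tf.constant([-1.0, 0.0, 2.0])
y = tf.tanh(x)
dy = tf.ones_like(y)        # incoming gradient
grad = dy * (1.0 - y * y)   # TanhGrad: dy * (1 - y*y)
```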
15798  }];
15799
15800  let arguments = (ins
15801    TF_FpOrComplexTensor:$y,
15802    TF_FpOrComplexTensor:$dy
15803  );
15804
15805  let results = (outs
15806    TF_FpOrComplexTensor:$z
15807  );
15808
15809  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
15810}
15811
15812def TF_TensorArrayCloseV3Op : TF_Op<"TensorArrayCloseV3", []> {
15813  let summary = "Delete the TensorArray from its resource container.";
15814
15815  let description = [{
15816This enables the user to close and release the resource in the middle
15817of a step/run.
15818  }];
15819
15820  let arguments = (ins
15821    Arg<TF_ResourceTensor, [{The handle to a TensorArray (output of TensorArray or TensorArrayGrad).}], [TF_TensorArrayFree]>:$handle
15822  );
15823
15824  let results = (outs);
15825}
15826
15827def TF_TensorArrayConcatV3Op : TF_Op<"TensorArrayConcatV3", []> {
15828  let summary = "Concat the elements from the TensorArray into value `value`.";
15829
15830  let description = [{
15831Takes `T` elements of shapes
15832
15833  ```
15834  (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
15835  ```
15836
15837and concatenates them into a Tensor of shape:
15838
15839  ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
15840
All elements must have the same shape (except for the first dimension).
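
For illustration, a minimal sketch via the high-level `tf.TensorArray`
wrapper, which is assumed here to lower to the V3 ops:

```python
import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=2, infer_shape=False)
ta = ta.write(0, tf.ones([2, 3]))  # n0 = 2
ta = ta.write(1, tf.ones([1, 3]))  # n1 = 1
value = ta.concat()                # shape (3, 3) == (n0 + n1) x d0
```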
15842  }];
15843
15844  let arguments = (ins
15845    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead]>:$handle,
15846    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in,
15847
15848    DefaultValuedAttr<TF_ShapeAttr, "llvm::None">:$element_shape_except0
15849  );
15850
15851  let results = (outs
15852    Res<TF_Tensor, [{All of the elements in the TensorArray, concatenated along the first
15853axis.}]>:$value,
15854    Res<TF_Int64Tensor, [{A vector of the row sizes of the original T elements in the
15855value output.  In the example above, this would be the values:
`(n0, n1, ..., n(T-1))`.}]>:$lengths
15857  );
15858
15859  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
15860}
15861
15862def TF_TensorArrayGatherV3Op : TF_Op<"TensorArrayGatherV3", []> {
15863  let summary = [{
15864Gather specific elements from the TensorArray into output `value`.
15865  }];
15866
15867  let description = [{
15868All elements selected by `indices` must have the same shape.
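
A minimal sketch via the high-level `tf.TensorArray` wrapper (assumed here
to lower to this op):

```python
import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=3)
ta = ta.write(0, [1.0]).write(1, [2.0]).write(2, [3.0])
ta.gather([2, 0])  # => [[3.0], [1.0]]; all gathered elements share shape (1,)
```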
15869  }];
15870
15871  let arguments = (ins
15872    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead]>:$handle,
15873    Arg<TF_Int32Tensor, [{The locations in the TensorArray from which to read tensor elements.}]>:$indices,
15874    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in,
15875
15876    DefaultValuedAttr<TF_ShapeAttr, "llvm::None">:$element_shape
15877  );
15878
15879  let results = (outs
15880    Res<TF_Tensor, [{All of the elements in the TensorArray, concatenated along a new
15881axis (the new dimension 0).}]>:$value
15882  );
15883
15884  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
15885}
15886
15887def TF_TensorArrayGradV3Op : TF_Op<"TensorArrayGradV3", []> {
15888  let summary = [{
15889Creates a TensorArray for storing the gradients of values in the given handle.
15890  }];
15891
15892  let description = [{
15893If the given TensorArray gradient already exists, returns a reference to it.
15894
15895Locks the size of the original TensorArray by disabling its dynamic size flag.
15896
15897**A note about the input flow_in:**
15898
15899The handle flow_in forces the execution of the gradient lookup to occur
15900only after certain other operations have occurred.  For example, when
15901the forward TensorArray is dynamically sized, writes to this TensorArray
15902may resize the object.  The gradient TensorArray is statically sized based
15903on the size of the forward TensorArray when this operation executes.
15904Furthermore, the size of the forward TensorArray is frozen by this call.
15905As a result, the flow is used to ensure that the call to generate the gradient
15906TensorArray only happens after all writes are executed.
15907
15908In the case of dynamically sized TensorArrays, gradient computation should
15909only be performed on read operations that have themselves been chained via
15910flow to occur only after all writes have executed. That way the final size
15911of the forward TensorArray is known when this operation is called.
15912
15913**A note about the source attribute:**
15914
15915TensorArray gradient calls use an accumulator TensorArray object.  If
15916multiple gradients are calculated and run in the same session, the multiple
15917gradient nodes may accidentally flow through the same accumulator TensorArray.
This double-counts gradients and generally breaks the TensorArray gradient flow.
15919
15920The solution is to identify which gradient call this particular
15921TensorArray gradient is being called in.  This is performed by identifying
15922a unique string (e.g. "gradients", "gradients_1", ...) from the input
15923gradient Tensor's name.  This string is used as a suffix when creating
15924the TensorArray gradient object here (the attribute `source`).
15925
15926The attribute `source` is added as a suffix to the forward TensorArray's
15927name when performing the creation / lookup, so that each separate gradient
15928calculation gets its own TensorArray accumulator.
15929  }];
15930
15931  let arguments = (ins
15932    Arg<TF_ResourceTensor, [{The handle to the forward TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
15933    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in,
15934
15935    StrAttr:$source
15936  );
15937
15938  let results = (outs
15939    Res<TF_ResourceTensor, "", [TF_TensorArrayAlloc]>:$grad_handle,
15940    TF_Float32Tensor:$flow_out
15941  );
15942}
15943
15944def TF_TensorArrayReadV3Op : TF_Op<"TensorArrayReadV3", []> {
15945  let summary = "Read an element from the TensorArray into output `value`.";
15946
15947  let arguments = (ins
15948    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead]>:$handle,
15949    TF_Int32Tensor:$index,
15950    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
15951  );
15952
15953  let results = (outs
15954    Res<TF_Tensor, [{The tensor that is read from the TensorArray.}]>:$value
15955  );
15956
15957  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
15958}
15959
15960def TF_TensorArrayScatterV3Op : TF_Op<"TensorArrayScatterV3", []> {
15961  let summary = [{
15962Scatter the data from the input value into specific TensorArray elements.
15963  }];
15964
15965  let description = [{
`indices` must be a vector; its length must match the first dim of `value`.
15967  }];
15968
15969  let arguments = (ins
15970    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
15971    Arg<TF_Int32Tensor, [{The locations at which to write the tensor elements.}]>:$indices,
15972    Arg<TF_Tensor, [{The concatenated tensor to write to the TensorArray.}]>:$value,
15973    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
15974  );
15975
15976  let results = (outs
15977    Res<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_out
15978  );
15979
15980  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
15981}
15982
15983def TF_TensorArraySizeV3Op : TF_Op<"TensorArraySizeV3", []> {
15984  let summary = "Get the current size of the TensorArray.";
15985
15986  let arguments = (ins
15987    Arg<TF_ResourceTensor, [{The handle to a TensorArray (output of TensorArray or TensorArrayGrad).}], [TF_TensorArrayRead]>:$handle,
15988    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
15989  );
15990
15991  let results = (outs
15992    Res<TF_Int32Tensor, [{The current size of the TensorArray.}]>:$size
15993  );
15994}
15995
15996def TF_TensorArraySplitV3Op : TF_Op<"TensorArraySplitV3", []> {
15997  let summary = [{
15998Split the data from the input value into TensorArray elements.
15999  }];
16000
16001  let description = [{
16002Assuming that `lengths` takes on values
16003
16004  ```(n0, n1, ..., n(T-1))```
16005
16006and that `value` has shape
16007
16008  ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,
16009
this splits `value` into a TensorArray with T tensors.

TensorArray index t will be the subtensor of `value` with starting position
16013
16014  ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
16015
16016and having size
16017
16018  ```nt x d0 x d1 x ...```
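
A minimal sketch via the high-level `tf.TensorArray` wrapper (assumed here
to lower to this op):

```python
import tensorflow as tf

value = tf.reshape(tf.range(12.0), [6, 2])  # (n0 + n1 + n2) x d0
ta = tf.TensorArray(tf.float32, size=3, infer_shape=False)
ta = ta.split(value, lengths=[1, 2, 3])     # T = 3, (n0, n1, n2) = (1, 2, 3)
ta.read(2)                                  # shape (3, 2): the last 3 rows
```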
16019  }];
16020
16021  let arguments = (ins
16022    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
16023    Arg<TF_Tensor, [{The concatenated tensor to write to the TensorArray.}]>:$value,
16024    Arg<TF_Int64Tensor, [{The vector of lengths, how to split the rows of value into the
16025TensorArray.}]>:$lengths,
16026    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
16027  );
16028
16029  let results = (outs
16030    Res<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_out
16031  );
16032
16033  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
16034}
16035
16036def TF_TensorArrayV3Op : TF_Op<"TensorArrayV3", []> {
16037  let summary = "An array of Tensors of given size.";
16038
16039  let description = [{
16040Write data via Write and read via Read or Pack.
16041  }];
16042
16043  let arguments = (ins
16044    Arg<TF_Int32Tensor, [{The size of the array.}]>:$size,
16045
16046    TypeAttr:$dtype,
16047    DefaultValuedAttr<TF_ShapeAttr, "llvm::None">:$element_shape,
16048    DefaultValuedAttr<BoolAttr, "false">:$dynamic_size,
16049    DefaultValuedAttr<BoolAttr, "true">:$clear_after_read,
16050    DefaultValuedAttr<BoolAttr, "false">:$identical_element_shapes,
16051    StrAttr:$tensor_array_name
16052  );
16053
16054  let results = (outs
16055    Res<TF_ResourceTensor, [{The handle to the TensorArray.}], [TF_TensorArrayAlloc]>:$handle,
16056    Res<TF_Float32Tensor, [{A scalar used to control gradient flow.}]>:$flow
16057  );
16058}
16059
16060def TF_TensorArrayWriteV3Op : TF_Op<"TensorArrayWriteV3", []> {
16061  let summary = "Push an element onto the tensor_array.";
16062
16063  let arguments = (ins
16064    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
16065    Arg<TF_Int32Tensor, [{The position to write to inside the TensorArray.}]>:$index,
16066    Arg<TF_Tensor, [{The tensor to write to the TensorArray.}]>:$value,
16067    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
16068  );
16069
16070  let results = (outs
16071    Res<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_out
16072  );
16073
16074  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
16075}
16076
16077def TF_TensorListConcatV2Op : TF_Op<"TensorListConcatV2", [NoSideEffect]> {
16078  let summary = "Concats all tensors in the list along the 0th dimension.";
16079
16080  let description = [{
16081Requires that all tensors have the same shape except the first dimension.
16082
16083input_handle: The input list.
16084element_shape: The shape of the uninitialized elements in the list. If the first
16085  dimension is not -1, it is assumed that all list elements have the same
16086  leading dim.
16087leading_dims: The list of leading dims of uninitialized list elements. Used if
16088  the leading dim of input_handle.element_shape or the element_shape input arg
16089  is not already set.
tensor: The concatenated result.
16091lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
16092  }];
16093
16094  let arguments = (ins
16095    TF_VariantTensor:$input_handle,
16096    TF_I32OrI64Tensor:$element_shape,
16097    TF_Int64Tensor:$leading_dims
16098  );
16099
16100  let results = (outs
16101    TF_Tensor:$tensor,
16102    TF_Int64Tensor:$lengths
16103  );
16104
16105  TF_DerivedOperandTypeAttr shape_type = TF_DerivedOperandTypeAttr<1>;
16106  TF_DerivedResultTypeAttr element_dtype = TF_DerivedResultTypeAttr<0>;
16107}
16108
16109def TF_TensorListElementShapeOp : TF_Op<"TensorListElementShape", [NoSideEffect]> {
16110  let summary = "The shape of the elements of the given list, as a tensor.";
16111
16112  let description = [{
input_handle: the list
element_shape: the shape of elements of the list
16115  }];
16116
16117  let arguments = (ins
16118    TF_VariantTensor:$input_handle
16119  );
16120
16121  let results = (outs
16122    TF_I32OrI64Tensor:$element_shape
16123  );
16124
16125  TF_DerivedResultTypeAttr shape_type = TF_DerivedResultTypeAttr<0>;
16126}
16127
16128def TF_TensorListFromTensorOp : TF_Op<"TensorListFromTensor", [NoSideEffect]> {
16129  let summary = [{
16130Creates a TensorList which, when stacked, has the value of `tensor`.
16131  }];
16132
16133  let description = [{
16134Each tensor in the result list corresponds to one row of the input tensor.
16135
16136tensor: The input tensor.
16137output_handle: The list.
16138  }];
16139
16140  let arguments = (ins
16141    TF_Tensor:$tensor,
16142    TF_I32OrI64Tensor:$element_shape
16143  );
16144
16145  let results = (outs
16146    TF_VariantTensor:$output_handle
16147  );
16148
16149  TF_DerivedOperandTypeAttr shape_type = TF_DerivedOperandTypeAttr<1>;
16150  TF_DerivedOperandTypeAttr element_dtype = TF_DerivedOperandTypeAttr<0>;
16151}
16152
16153def TF_TensorListGatherOp : TF_Op<"TensorListGather", [NoSideEffect]> {
16154  let summary = "Creates a Tensor by indexing into the TensorList.";
16155
16156  let description = [{
16157Each row in the produced Tensor corresponds to the element in the TensorList
16158specified by the given index (see `tf.gather`).
16159
16160input_handle: The input tensor list.
16161indices: The indices used to index into the list.
16162values: The tensor.
16163  }];
16164
16165  let arguments = (ins
16166    TF_VariantTensor:$input_handle,
16167    TF_Int32Tensor:$indices,
16168    TF_Int32Tensor:$element_shape
16169  );
16170
16171  let results = (outs
16172    TF_Tensor:$values
16173  );
16174
16175  TF_DerivedResultTypeAttr element_dtype = TF_DerivedResultTypeAttr<0>;
16176}
16177
16178def TF_TensorListGetItemOp : TF_Op<"TensorListGetItem", [NoSideEffect]> {
16179  let summary = "";
16180
16181  let arguments = (ins
16182    TF_VariantTensor:$input_handle,
16183    TF_Int32Tensor:$index,
16184    TF_Int32Tensor:$element_shape
16185  );
16186
16187  let results = (outs
16188    TF_Tensor:$item
16189  );
16190
16191  TF_DerivedResultTypeAttr element_dtype = TF_DerivedResultTypeAttr<0>;
16192}
16193
16194def TF_TensorListLengthOp : TF_Op<"TensorListLength", [NoSideEffect]> {
16195  let summary = "Returns the number of tensors in the input tensor list.";
16196
16197  let description = [{
16198input_handle: the input list
16199length: the number of tensors in the list
16200  }];
16201
16202  let arguments = (ins
16203    TF_VariantTensor:$input_handle
16204  );
16205
16206  let results = (outs
16207    TF_Int32Tensor:$length
16208  );
16209}
16210
16211def TF_TensorListPopBackOp : TF_Op<"TensorListPopBack", [NoSideEffect]> {
16212  let summary = [{
16213Returns the last element of the input list as well as a list with all but that element.
16214  }];
16215
16216  let description = [{
16217Fails if the list is empty.
16218
16219input_handle: the input list
16220tensor: the withdrawn last element of the list
16221element_dtype: the type of elements in the list
16222element_shape: the shape of the output tensor
16223  }];
16224
16225  let arguments = (ins
16226    TF_VariantTensor:$input_handle,
16227    TF_Int32Tensor:$element_shape
16228  );
16229
16230  let results = (outs
16231    TF_VariantTensor:$output_handle,
16232    TF_Tensor:$tensor
16233  );
16234
16235  TF_DerivedResultTypeAttr element_dtype = TF_DerivedResultTypeAttr<1>;
16236}
16237
16238def TF_TensorListPushBackOp : TF_Op<"TensorListPushBack", [NoSideEffect]> {
16239  let summary = [{
Returns a list which has the passed-in `Tensor` as its last element, preceded by the elements of the given list `input_handle`.
16241  }];
16242
16243  let description = [{
16244tensor: The tensor to put on the list.
16245input_handle: The old list.
16246output_handle: A list with the elements of the old list followed by tensor.
16247element_dtype: the type of elements in the list.
16248element_shape: a shape compatible with that of elements in the list.
16249  }];
16250
16251  let arguments = (ins
16252    TF_VariantTensor:$input_handle,
16253    TF_Tensor:$tensor
16254  );
16255
16256  let results = (outs
16257    TF_VariantTensor:$output_handle
16258  );
16259
16260  TF_DerivedOperandTypeAttr element_dtype = TF_DerivedOperandTypeAttr<1>;
16261}
16262
16263def TF_TensorListResizeOp : TF_Op<"TensorListResize", [NoSideEffect]> {
16264  let summary = "Resizes the list.";
16265
16266  let description = [{
16267input_handle: the input list
16268size: size of the output list
16269  }];
16270
16271  let arguments = (ins
16272    TF_VariantTensor:$input_handle,
16273    TF_Int32Tensor:$size
16274  );
16275
16276  let results = (outs
16277    TF_VariantTensor:$output_handle
16278  );
16279}
16280
16281def TF_TensorListScatterIntoExistingListOp : TF_Op<"TensorListScatterIntoExistingList", [NoSideEffect]> {
16282  let summary = "Scatters tensor at indices in an input list.";
16283
16284  let description = [{
16285Each member of the TensorList corresponds to one row of the input tensor,
16286specified by the given index (see `tf.gather`).
16287
16288input_handle: The list to scatter into.
16289tensor: The input tensor.
16290indices: The indices used to index into the list.
16291output_handle: The TensorList.
16292  }];
16293
16294  let arguments = (ins
16295    TF_VariantTensor:$input_handle,
16296    TF_Tensor:$tensor,
16297    TF_Int32Tensor:$indices
16298  );
16299
16300  let results = (outs
16301    TF_VariantTensor:$output_handle
16302  );
16303
16304  TF_DerivedOperandTypeAttr element_dtype = TF_DerivedOperandTypeAttr<1>;
16305}
16306
16307def TF_TensorListSetItemOp : TF_Op<"TensorListSetItem", [NoSideEffect]> {
16308  let summary = "";
16309
16310  let arguments = (ins
16311    TF_VariantTensor:$input_handle,
16312    TF_Int32Tensor:$index,
16313    TF_Tensor:$item
16314  );
16315
16316  let results = (outs
16317    TF_VariantTensor:$output_handle
16318  );
16319
16320  TF_DerivedOperandTypeAttr element_dtype = TF_DerivedOperandTypeAttr<2>;
16321}
16322
16323def TF_TensorListStackOp : TF_Op<"TensorListStack", [NoSideEffect]> {
16324  let summary = "Stacks all tensors in the list.";
16325
16326  let description = [{
16327Requires that all tensors have the same shape.
16328
16329input_handle: the input list
16330tensor: the gathered result
16331num_elements: optional. If not -1, the number of elements in the list.
16332  }];
16333
16334  let arguments = (ins
16335    TF_VariantTensor:$input_handle,
16336    TF_Int32Tensor:$element_shape,
16337
16338    DefaultValuedAttr<I64Attr, "-1">:$num_elements
16339  );
16340
16341  let results = (outs
16342    TF_Tensor:$tensor
16343  );
16344
16345  TF_DerivedResultTypeAttr element_dtype = TF_DerivedResultTypeAttr<0>;
16346
16347  let verifier = [{
16348    return Verify(*this);
16349  }];
16350}
16351
16352def TF_TensorScatterAddOp : TF_Op<"TensorScatterAdd", [NoSideEffect]> {
16353  let summary = [{
16354Adds sparse `updates` to an existing tensor according to `indices`.
16355  }];
16356
16357  let description = [{
16358This operation creates a new tensor by adding sparse `updates` to the passed
16359in `tensor`.
16360This operation is very similar to `tf.scatter_nd_add`, except that the updates
16361are added onto an existing tensor (as opposed to a variable). If the memory
16362for the existing tensor cannot be re-used, a copy is made and updated.
16363
16364`indices` is an integer tensor containing indices into a new tensor of shape
16365`tensor.shape`.  The last dimension of `indices` can be at most the rank of
16366`tensor.shape`:
16367
16368    indices.shape[-1] <= tensor.shape.rank
16369
16370The last dimension of `indices` corresponds to indices into elements
16371(if `indices.shape[-1] = tensor.shape.rank`) or slices
16372(if `indices.shape[-1] < tensor.shape.rank`) along dimension
16373`indices.shape[-1]` of `tensor.shape`.  `updates` is a tensor with shape
16374
16375    indices.shape[:-1] + tensor.shape[indices.shape[-1]:]
16376
16377The simplest form of tensor_scatter_add is to add individual elements to a
16378tensor by index. For example, say we want to add 4 elements in a rank-1
16379tensor with 8 elements.
16380
16381In Python, this scatter add operation would look like this:
16382
16383```python
16384    indices = tf.constant([[4], [3], [1], [7]])
16385    updates = tf.constant([9, 10, 11, 12])
16386    tensor = tf.ones([8], dtype=tf.int32)
16387    updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
16388    print(updated)
16389```
16390
16391The resulting tensor would look like this:
16392
16393    [1, 12, 1, 11, 10, 1, 1, 13]
16394
We can also insert entire slices of a higher-rank tensor all at once. For
example, we can insert two slices along the first dimension of a rank-3
tensor using two matrices of new values.
16398
16399In Python, this scatter add operation would look like this:
16400
16401```python
16402    indices = tf.constant([[0], [2]])
16403    updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
16404                            [7, 7, 7, 7], [8, 8, 8, 8]],
16405                           [[5, 5, 5, 5], [6, 6, 6, 6],
16406                            [7, 7, 7, 7], [8, 8, 8, 8]]])
16407    tensor = tf.ones([4, 4, 4],dtype=tf.int32)
16408    updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
16409    print(updated)
16410```
16411
16412The resulting tensor would look like this:
16413
16414    [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
16415     [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
16416     [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
16417     [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
16418
16419Note that on CPU, if an out of bound index is found, an error is returned.
16420On GPU, if an out of bound index is found, the index is ignored.
16421  }];
16422
16423  let arguments = (ins
16424    Arg<TF_Tensor, [{Tensor to copy/update.}]>:$tensor,
16425    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
16426    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
16427  );
16428
16429  let results = (outs
16430    Res<TF_Tensor, [{A new tensor copied from tensor and updates added according to the indices.}]>:$output
16431  );
16432
16433  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
16434  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
16435}
16436
16437def TF_TensorScatterMaxOp : TF_Op<"TensorScatterMax", [NoSideEffect]> {
16438  let summary = "";
16439
16440  let arguments = (ins
16441    Arg<TF_Tensor, [{Tensor to update.}]>:$tensor,
16442    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
16443    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
16444  );
16445
16446  let results = (outs
16447    Res<TF_Tensor, [{A new tensor copied from tensor whose values are element-wise maximum between tensor and updates according to the indices.}]>:$output
16448  );
16449
16450  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
16451  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
16452}
16453
16454def TF_TensorScatterMinOp : TF_Op<"TensorScatterMin", [NoSideEffect]> {
16455  let summary = "";
16456
16457  let arguments = (ins
16458    Arg<TF_Tensor, [{Tensor to update.}]>:$tensor,
16459    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
16460    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
16461  );
16462
16463  let results = (outs
16464    Res<TF_Tensor, [{A new tensor copied from tensor whose values are element-wise minimum between tensor and updates according to the indices.}]>:$output
16465  );
16466
16467  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
16468  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
16469}
16470
16471def TF_TensorScatterSubOp : TF_Op<"TensorScatterSub", [NoSideEffect]> {
16472  let summary = [{
16473Subtracts sparse `updates` from an existing tensor according to `indices`.
16474  }];
16475
16476  let description = [{
16477This operation creates a new tensor by subtracting sparse `updates` from the
16478passed in `tensor`.
16479This operation is very similar to `tf.scatter_nd_sub`, except that the updates
16480are subtracted from an existing tensor (as opposed to a variable). If the memory
16481for the existing tensor cannot be re-used, a copy is made and updated.
16482
16483`indices` is an integer tensor containing indices into a new tensor of shape
16484`shape`.  The last dimension of `indices` can be at most the rank of `shape`:
16485
16486    indices.shape[-1] <= shape.rank
16487
16488The last dimension of `indices` corresponds to indices into elements
16489(if `indices.shape[-1] = shape.rank`) or slices
16490(if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
16491`shape`.  `updates` is a tensor with shape
16492
16493    indices.shape[:-1] + shape[indices.shape[-1]:]
16494
16495The simplest form of tensor_scatter_sub is to subtract individual elements
from a tensor by index. For example, say we want to subtract 4 scattered
elements from a rank-1 tensor with 8 elements.
16498
16499In Python, this scatter subtract operation would look like this:
16500
16501```python
16502    indices = tf.constant([[4], [3], [1], [7]])
16503    updates = tf.constant([9, 10, 11, 12])
16504    tensor = tf.ones([8], dtype=tf.int32)
16505    updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
16506    print(updated)
16507```
16508
16509The resulting tensor would look like this:
16510
16511    [1, -10, 1, -9, -8, 1, 1, -11]
16512
We can also subtract entire slices of a higher-rank tensor all at once. For
example, we can subtract two slices along the first dimension of a rank-3
tensor using two matrices of new values.

In Python, this scatter subtract operation would look like this:
16518
16519```python
16520    indices = tf.constant([[0], [2]])
16521    updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
16522                            [7, 7, 7, 7], [8, 8, 8, 8]],
16523                           [[5, 5, 5, 5], [6, 6, 6, 6],
16524                            [7, 7, 7, 7], [8, 8, 8, 8]]])
16525    tensor = tf.ones([4, 4, 4],dtype=tf.int32)
16526    updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
16527    print(updated)
16528```
16529
16530The resulting tensor would look like this:
16531
16532    [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
16533     [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
16534     [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
16535     [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
16536
16537Note that on CPU, if an out of bound index is found, an error is returned.
16538On GPU, if an out of bound index is found, the index is ignored.
16539  }];
16540
16541  let arguments = (ins
16542    Arg<TF_Tensor, [{Tensor to copy/update.}]>:$tensor,
16543    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
16544    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
16545  );
16546
16547  let results = (outs
16548    Res<TF_Tensor, [{A new tensor copied from tensor and updates subtracted according to the indices.}]>:$output
16549  );
16550
16551  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
16552  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
16553}
16554
16555def TF_TensorScatterUpdateOp : TF_Op<"TensorScatterUpdate", [NoSideEffect]> {
16556  let summary = [{
16557Scatter `updates` into an existing tensor according to `indices`.
16558  }];
16559
16560  let description = [{
16561This operation creates a new tensor by applying sparse `updates` to the passed
16562in `tensor`.
16563This operation is very similar to `tf.scatter_nd`, except that the updates are
16564scattered onto an existing tensor (as opposed to a zero-tensor). If the memory
16565for the existing tensor cannot be re-used, a copy is made and updated.
16566
16567If `indices` contains duplicates, then we pick the last update for the index.
16568
16569If an out of bound index is found on CPU, an error is returned.
16570
16571**WARNING**: There are some GPU specific semantics for this operation.
16572- If an out of bound index is found, the index is ignored.
16573- The order in which updates are applied is nondeterministic, so the output
16574will be nondeterministic if `indices` contains duplicates.
16575
16576`indices` is an integer tensor containing indices into a new tensor of shape
16577`shape`.
16578
* `indices` must have at least 2 axes: `(num_updates, index_depth)`.
* The last axis of `indices` is how deep to index into `tensor`, so this index
  depth must be at most the rank of `tensor`: `indices.shape[-1] <= tensor.ndim`

If `indices.shape[-1] = tensor.ndim`, this Op indexes and updates scalar elements.
If `indices.shape[-1] < tensor.ndim`, it indexes and updates slices of the input
`tensor`.
16586
16587Each `update` has a rank of `tensor.rank - indices.shape[-1]`.
16588The overall shape of `updates` is:
16589
16590```
16591indices.shape[:-1] + tensor.shape[indices.shape[-1]:]
16592```
16593
16594For usage examples see the python [tf.tensor_scatter_nd_update](
https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update) function.
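
For instance, a short example through that wrapper:

```python
import tensorflow as tf

tensor = tf.zeros([8], dtype=tf.int32)
indices = tf.constant([[1], [3], [4], [7]])
updates = tf.constant([9, 10, 11, 12])
tf.tensor_scatter_nd_update(tensor, indices, updates)
# => [0, 9, 0, 10, 11, 0, 0, 12]
```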
16596  }];
16597
16598  let arguments = (ins
16599    Arg<TF_Tensor, [{Tensor to copy/update.}]>:$tensor,
16600    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
16601    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
16602  );
16603
16604  let results = (outs
16605    Res<TF_Tensor, [{A new tensor with the given shape and updates applied according
16606to the indices.}]>:$output
16607  );
16608
16609  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
16610  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
16611
16612  let verifier = [{ return Verify(*this); }];
16613
16614  let builders = [
16615    OpBuilder<(ins "Value":$tensor, "Value":$indices, "Value":$updates),
16616    [{build($_builder, $_state, tensor.getType(), tensor, indices, updates);}]>
16617  ];
16618}
16619
16620def TF_TensorStridedSliceUpdateOp : TF_Op<"TensorStridedSliceUpdate", [NoSideEffect]> {
16621  let summary = "Assign `value` to the sliced l-value reference of `input`.";
16622
16623  let description = [{
16624The values of `value` are assigned to the positions in the tensor `input` that
16625are selected by the slice parameters. The slice parameters `begin` `end`
16626`strides` etc. work exactly as in `StridedSlice`.
16627
NOTE: this op currently does not support broadcasting, so `value`'s shape
must exactly match the shape produced by the slice of `input`.
16630  }];
16631
16632  let arguments = (ins
16633    TF_Tensor:$input,
16634    TF_I32OrI64Tensor:$begin,
16635    TF_I32OrI64Tensor:$end,
16636    TF_I32OrI64Tensor:$strides,
16637    TF_Tensor:$value,
16638
16639    DefaultValuedAttr<I64Attr, "0">:$begin_mask,
16640    DefaultValuedAttr<I64Attr, "0">:$end_mask,
16641    DefaultValuedAttr<I64Attr, "0">:$ellipsis_mask,
16642    DefaultValuedAttr<I64Attr, "0">:$new_axis_mask,
16643    DefaultValuedAttr<I64Attr, "0">:$shrink_axis_mask
16644  );
16645
16646  let results = (outs
16647    TF_Tensor:$output
16648  );
16649
16650  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
16651  TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr<1>;
16652}
16653
16654def TF_TileOp : TF_Op<"Tile", [NoSideEffect]> {
16655  let summary = "Constructs a tensor by tiling a given tensor.";
16656
16657  let description = [{
16658This operation creates a new tensor by replicating `input` `multiples` times.
16659The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
16660and the values of `input` are replicated `multiples[i]` times along the 'i'th
16661dimension. For example, tiling `[a b c d]` by `[2]` produces
16662`[a b c d a b c d]`.
16663
16664>>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32)
16665>>> b = tf.constant([1,2], tf.int32)
16666>>> tf.tile(a, b)
16667<tf.Tensor: shape=(2, 6), dtype=int32, numpy=
16668array([[1, 2, 3, 1, 2, 3],
16669       [4, 5, 6, 4, 5, 6]], dtype=int32)>
16670>>> c = tf.constant([2,1], tf.int32)
16671>>> tf.tile(a, c)
16672<tf.Tensor: shape=(4, 3), dtype=int32, numpy=
16673array([[1, 2, 3],
16674       [4, 5, 6],
16675       [1, 2, 3],
16676       [4, 5, 6]], dtype=int32)>
16677>>> d = tf.constant([2,2], tf.int32)
16678>>> tf.tile(a, d)
16679<tf.Tensor: shape=(4, 6), dtype=int32, numpy=
16680array([[1, 2, 3, 1, 2, 3],
16681       [4, 5, 6, 4, 5, 6],
16682       [1, 2, 3, 1, 2, 3],
16683       [4, 5, 6, 4, 5, 6]], dtype=int32)>
16684  }];
16685
16686  let arguments = (ins
16687    Arg<TF_Tensor, [{1-D or higher.}]>:$input,
16688    Arg<TF_I32OrI64Tensor, [{1-D. Length must be the same as the number of dimensions in `input`}]>:$multiples
16689  );
16690
16691  let results = (outs
16692    TF_Tensor:$output
16693  );
16694
16695  TF_DerivedOperandTypeAttr Tmultiples = TF_DerivedOperandTypeAttr<1>;
16696  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
16697
16698  let verifier = [{ return Verify(*this); }];
16699}
16700
16701def TF_TopKUniqueOp : TF_Op<"TopKUnique", [NoSideEffect]> {
16702  let summary = "Returns the TopK unique values in the array in sorted order.";
16703
16704  let description = [{
16705The running time is proportional to the product of K and the input
16706size. Sorting the whole array is more efficient for sufficiently large
16707values of K. The median-of-medians algorithm is probably faster, but
16708difficult to implement efficiently in XLA. If there are fewer than K
unique numbers (not NaNs), the results are padded with negative
16710infinity. NaNs are never returned. Subnormal numbers are flushed to
16711zero. If an element appears at multiple indices, the highest index is
16712returned. If a TopK element never appears in the input due to padding
16713values, the indices are padded with negative one. If a padding value
16714appears in the input and padding is needed, the highest index of the
16715padding value will be returned. The semantics are not the same as
16716kth_order_statistic.
16717  }];
16718
16719  let arguments = (ins
16720    TF_Float32Tensor:$input,
16721
16722    I64Attr:$k
16723  );
16724
16725  let results = (outs
16726    TF_Float32Tensor:$topk,
16727    TF_Int32Tensor:$topk_indices
16728  );
16729}
16730
16731def TF_TopKV2Op : TF_Op<"TopKV2", [NoSideEffect]> {
16732  let summary = [{
16733Finds values and indices of the `k` largest elements for the last dimension.
16734  }];
16735
16736  let description = [{
16737If the input is a vector (rank-1), finds the `k` largest entries in the vector
16738and outputs their values and indices as vectors.  Thus `values[j]` is the
16739`j`-th largest entry in `input`, and its index is `indices[j]`.
16740
16741For matrices (resp. higher rank input), computes the top `k` entries in each
16742row (resp. vector along the last dimension).  Thus,
16743
16744    values.shape = indices.shape = input.shape[:-1] + [k]
16745
16746If two elements are equal, the lower-index element appears first.
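
For example, via the `tf.math.top_k` wrapper:

```python
import tensorflow as tf

values, indices = tf.math.top_k(tf.constant([1.0, 5.0, 3.0, 5.0]), k=2)
# values  => [5.0, 5.0]
# indices => [1, 3]  (equal values keep the lower index first)
```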
16747  }];
16748
16749  let arguments = (ins
16750    Arg<TF_IntOrFpTensor, [{1-D or higher with last dimension at least `k`.}]>:$input,
16751    Arg<TF_Int32Tensor, [{0-D.  Number of top elements to look for along the last dimension (along each
16752row for matrices).}]>:$k,
16753
16754    DefaultValuedAttr<BoolAttr, "true">:$sorted
16755  );
16756
16757  let results = (outs
16758    Res<TF_IntOrFpTensor, [{The `k` largest elements along each last dimensional slice.}]>:$values,
16759    Res<TF_Int32Tensor, [{The indices of `values` within the last dimension of `input`.}]>:$indices
16760  );
16761
16762  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
16763
16764  let verifier = [{ return Verify(*this); }];
16765}
16766
16767def TF_TopKWithUniqueOp : TF_Op<"TopKWithUnique", [NoSideEffect]> {
16768  let summary = "Returns the TopK values in the array in sorted order.";
16769
16770  let description = [{
16771This is a combination of MakeUnique and TopKUnique. The returned top-K will
16772have its lower bits replaced by iota, thus it will be close to the original
16773value but not exactly the same. The running time is proportional to the product
16774of K and the input size. NaNs are never returned. Subnormal numbers are flushed
16775to zero.
16776  }];
16777
16778  let arguments = (ins
16779    TF_Float32Tensor:$input,
16780
16781    I64Attr:$k
16782  );
16783
16784  let results = (outs
16785    TF_Float32Tensor:$topk,
16786    TF_Int32Tensor:$topk_indices
16787  );
16788}
16789
16790def TF_TransposeOp : TF_Op<"Transpose", [NoSideEffect]> {
16791  let summary = "Shuffle dimensions of x according to a permutation.";
16792
16793  let description = [{
16794The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
16795  `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
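
For example, via the `tf.transpose` wrapper:

```python
import tensorflow as tf

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])      # shape (2, 3)
y = tf.transpose(x, perm=[1, 0])  # shape (3, 2); y.shape[i] == x.shape[perm[i]]
```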
16796  }];
16797
16798  let arguments = (ins
16799    TF_Tensor:$x,
16800    TF_I32OrI64Tensor:$perm
16801  );
16802
16803  let results = (outs
16804    TF_Tensor:$y
16805  );
16806
16807  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
16808  TF_DerivedOperandTypeAttr Tperm = TF_DerivedOperandTypeAttr<1>;
16809
16810  let builders = [
16811    OpBuilder<(ins "Value":$x, "Value":$perm)>
16812  ];
16813
16814  let verifier = [{
16815    return Verify(*this);
16816  }];
16817}
16818
16819def TF_TridiagonalSolveOp : TF_Op<"TridiagonalSolve", [NoSideEffect]> {
16820  let summary = "Solves tridiagonal systems of equations.";
16821
16822  let description = [{
16823Solves tridiagonal systems of equations.
  Supports batch dimensions and multiple right-hand sides for each left-hand
  side.
16826  On CPU, solution is computed via Gaussian elimination with or without partial
16827  pivoting, depending on `partial_pivoting` attribute. On GPU, Nvidia's cuSPARSE
16828  library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv
16829  Partial pivoting is not yet supported by XLA backends.
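
A sketch via the `tf.linalg.tridiagonal_solve` wrapper using the compact
`[3, M]` layout described above (padding conventions per the row ordering
given for `diagonals`):

```python
import tensorflow as tf

diagonals = tf.constant([[1.0, 1.0, 0.0],   # superdiagonal (last element ignored)
                         [3.0, 3.0, 3.0],   # main diagonal
                         [0.0, 1.0, 1.0]])  # subdiagonal (first element ignored)
rhs = tf.constant([[1.0], [1.0], [1.0]])    # shape [M, K] with M = 3, K = 1
x = tf.linalg.tridiagonal_solve(diagonals, rhs, diagonals_format='compact')
```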
16830  }];
16831
16832  let arguments = (ins
16833    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float32, TF_Float64]>, [{Tensor of shape `[..., 3, M]` whose innermost 2 dimensions represent the
tridiagonal matrices, the three rows being the superdiagonal, the diagonal,
and the subdiagonal, in order. The last element of the superdiagonal and the
first element of the subdiagonal are ignored.}]>:$diagonals,
    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float32, TF_Float64]>, [{Tensor of shape `[..., M, K]`, representing K right-hand sides for each
left-hand side.}]>:$rhs,
16839
16840    DefaultValuedAttr<BoolAttr, "true">:$partial_pivoting
16841  );
16842
16843  let results = (outs
16844    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float32, TF_Float64]>, [{Tensor of shape `[..., M, K]` containing the solutions}]>:$output
16845  );
16846
16847  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
16848}
16849
16850def TF_TruncateDivOp : TF_Op<"TruncateDiv", [NoSideEffect, ResultsBroadcastableShape]>,
16851                       WithBroadcastableBinOpBuilder {
16852  let summary = "Returns x / y element-wise for integer types.";
16853
16854  let description = [{
Truncation designates that negative numbers will round fractional quantities
toward zero, i.e. `-7 / 5 = -1`. This matches C semantics but differs from
Python semantics. See `FloorDiv` for a division function that matches
Python semantics.
16859
16860*NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
16861[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
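
For example:

```python
import tensorflow as tf

x = tf.constant([-7, 7])
y = tf.constant([5, 5])
tf.math.truncatediv(x, y)  # => [-1, 1]  (rounds toward zero, C-style)
tf.math.floordiv(x, y)     # => [-2, 1]  (rounds toward -inf, Python-style)
```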
16862  }];
16863
16864  let arguments = (ins
16865    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
16866    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
16867  );
16868
16869  let results = (outs
16870    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
16871  );
16872
16873  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
16874}
16875
16876def TF_TruncateModOp : TF_Op<"TruncateMod", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
16877                       WithBroadcastableBinOpBuilder {
16878  let summary = [{
Returns element-wise remainder of division, emulating C semantics.
16880  }];
16881
16882  let description = [{
The result is consistent with a truncating divide, i.e.
`truncate(x / y) * y + truncate_mod(x, y) = x`.
16885
16886*NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
16887[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
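
For example, the identity above checked via the `tf.math` wrappers:

```python
import tensorflow as tf

x = tf.constant([-7, 7])
y = tf.constant([5, 5])
q = tf.math.truncatediv(x, y)  # => [-1, 1]
r = tf.math.truncatemod(x, y)  # => [-2, 2]
q * y + r                      # => [-7, 7] == x
```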
16888  }];
16889
16890  let arguments = (ins
16891    TF_FpOrI32OrI64Tensor:$x,
16892    TF_FpOrI32OrI64Tensor:$y
16893  );
16894
16895  let results = (outs
16896    TF_FpOrI32OrI64Tensor:$z
16897  );
16898
16899  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
16900}
16901
16902def TF_TruncatedNormalOp : TF_Op<"TruncatedNormal", [TF_CannotDuplicate]> {
16903  let summary = "Outputs random values from a truncated normal distribution.";
16904
16905  let description = [{
16906The generated values follow a normal distribution with mean 0 and standard
16907deviation 1, except that values whose magnitude is more than 2 standard
16908deviations from the mean are dropped and re-picked.
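
A sketch via the high-level `tf.random.truncated_normal` wrapper (mean 0 and
stddev 1 by default):

```python
import tensorflow as tf

samples = tf.random.truncated_normal([2, 3], seed=42)
tf.reduce_max(tf.abs(samples))  # always < 2.0, since outliers are re-picked
```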
16909  }];
16910
16911  let arguments = (ins
16912    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
16913
16914    DefaultValuedAttr<I64Attr, "0">:$seed,
16915    DefaultValuedAttr<I64Attr, "0">:$seed2
16916  );
16917
16918  let results = (outs
16919    Res<TF_FloatTensor, [{A tensor of the specified shape filled with random truncated normal
16920values.}]>:$output
16921  );
16922
16923  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
16924  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
16925}
16926
16927def TF_UncompressElementOp : TF_Op<"UncompressElement", [NoSideEffect]> {
16928  let summary = "Uncompresses a compressed dataset element.";
16929
16930  let arguments = (ins
16931    TF_VariantTensor:$compressed
16932  );
16933
16934  let results = (outs
16935    Variadic<TF_Tensor>:$components
16936  );
16937
16938  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
16939  TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
16940}
16941
16942def TF_UniqueOp : TF_Op<"Unique", [NoSideEffect]> {
16943  let summary = "Finds unique elements in a 1-D tensor.";
16944
16945  let description = [{
16946This operation returns a tensor `y` containing all of the unique elements of `x`
16947sorted in the same order that they occur in `x`; `x` does not need to be sorted.
16948This operation also returns a tensor `idx` the same size as `x` that contains
16949the index of each value of `x` in the unique output `y`. In other words:
16950
`y[idx[i]] = x[i] for i in [0, 1, ..., len(x) - 1]`
16952
16953Examples:
16954
16955```
16956# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
16957y, idx = unique(x)
16958y ==> [1, 2, 4, 7, 8]
16959idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
16960```
16961
16962```
16963# tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5]
16964y, idx = unique(x)
16965y ==> [4, 5, 1, 2, 3]
16966idx ==> [0, 1, 2, 3, 4, 4, 0, 1]
16967```
16968  }];
16969
16970  let arguments = (ins
16971    Arg<TF_Tensor, [{1-D.}]>:$x
16972  );
16973
16974  let results = (outs
16975    Res<TF_Tensor, [{1-D.}]>:$y,
16976    Res<TF_I32OrI64Tensor, [{1-D.}]>:$idx
16977  );
16978
16979  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
16980  TF_DerivedResultTypeAttr out_idx = TF_DerivedResultTypeAttr<1>;
16981}
16982
16983def TF_UnpackOp : TF_Op<"Unpack", [NoSideEffect]> {
16984  let summary = [{
16985Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
16986  }];
16987
16988  let description = [{
16989Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
For example, given a tensor of shape `(A, B, C, D)`:
16991
16992If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
16993  and each tensor in `output` will have shape `(B, C, D)`. (Note that the
16994  dimension unpacked along is gone, unlike `split`).
16995
16996If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
16997  and each tensor in `output` will have shape `(A, C, D)`.
16998Etc.
16999
17000This is the opposite of `pack`.
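
For example, via the `tf.unstack` wrapper:

```python
import tensorflow as tf

value = tf.zeros([3, 4, 5])        # (A, B, C) with A = 3
parts = tf.unstack(value, axis=0)  # 3 tensors, each of shape (4, 5)
```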
17001  }];
17002
17003  let arguments = (ins
17004    Arg<TF_Tensor, [{1-D or higher, with `axis` dimension size equal to `num`.}]>:$value,
17005
17006    DefaultValuedAttr<I64Attr, "0">:$axis
17007  );
17008
17009  let results = (outs
17010    Res<Variadic<TF_Tensor>, [{The list of tensors unpacked from `value`.}]>:$output
17011  );
17012
17013  TF_DerivedResultSizeAttr num = TF_DerivedResultSizeAttr<0>;
17014  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17015
17016  let verifier = [{ return Verify(*this); }];
17017}
17018
17019def TF_UnsortedSegmentMaxOp : TF_Op<"UnsortedSegmentMax", [NoSideEffect]> {
17020  let summary = "Computes the maximum along segments of a tensor.";
17021
17022  let description = [{
17023Read
17024[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
17025for an explanation of segments.
17026
17027This operator is similar to the unsorted segment sum operator found
17028[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
17029Instead of computing the sum over segments, it computes the maximum such that:
17030
17031\\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such
17032that `segment_ids[j...] == i`.
17033
17034If the maximum is empty for a given segment ID `i`, it outputs the smallest
17035possible value for the specific numeric type,
17036`output[i] = numeric_limits<T>::lowest()`.
17037
17038If the given segment ID `i` is negative, then the corresponding value is
17039dropped, and will not be included in the result.
17040
17041<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
17042<img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
17043</div>
17044
17045For example:
17046
17047``` python
17048c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
17049tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2)
17050# ==> [[ 4,  3, 3, 4],
17051#       [5,  6, 7, 8]]
17052```
17053  }];
17054
17055  let arguments = (ins
17056    TF_IntOrFpTensor:$data,
17057    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
17058    TF_I32OrI64Tensor:$num_segments
17059  );
17060
17061  let results = (outs
17062    Res<TF_IntOrFpTensor, [{Has same shape as data, except for the first `segment_ids.rank`
17063dimensions, which are replaced with a single dimension which has size
17064`num_segments`.}]>:$output
17065  );
17066
17067  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
17068  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17069  TF_DerivedOperandTypeAttr Tnumsegments = TF_DerivedOperandTypeAttr<2>;
17070
17071  let verifier = [{ return VerifyUnsortedSegmentReduction(*this); }];
17072}
17073
17074def TF_UnsortedSegmentMinOp : TF_Op<"UnsortedSegmentMin", [NoSideEffect]> {
17075  let summary = "Computes the minimum along segments of a tensor.";
17076
17077  let description = [{
17078Read
17079[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
17080for an explanation of segments.
17081
17082This operator is similar to the unsorted segment sum operator found
17083[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
17084Instead of computing the sum over segments, it computes the minimum such that:
17085
\\(output_i = \min_{j...} data[j...]\\) where min is over tuples `j...` such
17087that `segment_ids[j...] == i`.
17088
17089If the minimum is empty for a given segment ID `i`, it outputs the largest
17090possible value for the specific numeric type,
17091`output[i] = numeric_limits<T>::max()`.
17092
17093For example:
17094
17095``` python
17096c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
17097tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2)
17098# ==> [[ 1,  2, 2, 1],
17099#       [5,  6, 7, 8]]
17100```
17101
17102If the given segment ID `i` is negative, then the corresponding value is
17103dropped, and will not be included in the result.
17104  }];
17105
17106  let arguments = (ins
17107    TF_IntOrFpTensor:$data,
17108    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
17109    TF_I32OrI64Tensor:$num_segments
17110  );
17111
17112  let results = (outs
17113    Res<TF_IntOrFpTensor, [{Has same shape as data, except for the first `segment_ids.rank`
17114dimensions, which are replaced with a single dimension which has size
17115`num_segments`.}]>:$output
17116  );
17117
17118  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
17119  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17120  TF_DerivedOperandTypeAttr Tnumsegments = TF_DerivedOperandTypeAttr<2>;
17121
17122  let verifier = [{ return VerifyUnsortedSegmentReduction(*this); }];
17123}
17124
17125def TF_UnsortedSegmentProdOp : TF_Op<"UnsortedSegmentProd", [NoSideEffect]> {
17126  let summary = "Computes the product along segments of a tensor.";
17127
17128  let description = [{
17129Read
17130[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
17131for an explanation of segments.
17132
This operator is similar to the unsorted segment sum operator found
[here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
17135Instead of computing the sum over segments, it computes the product of all
17136entries belonging to a segment such that:
17137
17138\\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples
17139`j...` such that `segment_ids[j...] == i`.
17140
17141For example:
17142
17143``` python
17144c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
17145tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2)
17146# ==> [[ 4,  6, 6, 4],
17147#       [5,  6, 7, 8]]
17148```
17149
17150If there is no entry for a given segment ID `i`, it outputs 1.
17151
17152If the given segment ID `i` is negative, then the corresponding value is
17153dropped, and will not be included in the result.
17154  }];
17155
17156  let arguments = (ins
17157    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
17158    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
17159    TF_I32OrI64Tensor:$num_segments
17160  );
17161
17162  let results = (outs
17163    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for the first `segment_ids.rank`
17164dimensions, which are replaced with a single dimension which has size
17165`num_segments`.}]>:$output
17166  );
17167
17168  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
17169  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17170  TF_DerivedOperandTypeAttr Tnumsegments = TF_DerivedOperandTypeAttr<2>;
17171
17172  let verifier = [{ return VerifyUnsortedSegmentReduction(*this); }];
17173}
17174
17175def TF_UnsortedSegmentSumOp : TF_Op<"UnsortedSegmentSum", [NoSideEffect]> {
17176  let summary = "Computes the sum along segments of a tensor.";
17177
17178  let description = [{
17179Read
17180[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
17181for an explanation of segments.
17182
17183Computes a tensor such that
17184\\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such
17185that `segment_ids[j...] == i`.  Unlike `SegmentSum`, `segment_ids`
17186need not be sorted and need not cover all values in the full
17187range of valid values.
17188
17189If the sum is empty for a given segment ID `i`, `output[i] = 0`.
17190If the given segment ID `i` is negative, the value is dropped and will not be
17191added to the sum of the segment.
17192
17193`num_segments` should equal the number of distinct segment IDs.
17194
17195<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
17196<img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
17197</div>
17198
17199``` python
17200c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
17201tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
17202# ==> [[ 5,  5, 5, 5],
17203#       [5,  6, 7, 8]]
17204```
17205  }];
17206
17207  let arguments = (ins
17208    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
17209    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
17210    TF_I32OrI64Tensor:$num_segments
17211  );
17212
17213  let results = (outs
17214    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for the first `segment_ids.rank`
17215dimensions, which are replaced with a single dimension which has size
17216`num_segments`.}]>:$output
17217  );
17218
17219  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
17220  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17221  TF_DerivedOperandTypeAttr Tnumsegments = TF_DerivedOperandTypeAttr<2>;
17222
17223  let verifier = [{ return VerifyUnsortedSegmentReduction(*this); }];
17224}
17225
17226def TF_UpperBoundOp : TF_Op<"UpperBound", [NoSideEffect]> {
17227  let summary = [{
17228Applies upper_bound(sorted_search_values, values) along each row.
17229  }];
17230
17231  let description = [{
17232Each set of rows with the same index in (sorted_inputs, values) is treated
17233independently.  The resulting row is the equivalent of calling
17234`np.searchsorted(sorted_inputs, values, side='right')`.
17235
The result is not a global index into the entire
`Tensor`, but rather just the index in the last dimension.
17238
17239A 2-D example:
17240  sorted_sequence = [[0, 3, 9, 9, 10],
17241                     [1, 2, 3, 4, 5]]
17242  values = [[2, 4, 9],
17243            [0, 2, 6]]
17244
17245  result = UpperBound(sorted_sequence, values)
17246
17247  result == [[1, 2, 4],
17248             [0, 2, 5]]
17249  }];
17250
17251  let arguments = (ins
17252    Arg<TF_Tensor, [{2-D Tensor where each row is ordered.}]>:$sorted_inputs,
    Arg<TF_Tensor, [{2-D Tensor with the same number of rows as `sorted_inputs`. Contains
the values that will be searched for in `sorted_inputs`.}]>:$values
17255  );
17256
17257  let results = (outs
17258    Res<TF_I32OrI64Tensor, [{A `Tensor` with the same shape as `values`.  It contains the last scalar index
17259into the last dimension where values can be inserted without changing the
17260ordered property.}]>:$output
17261  );
17262
17263  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17264  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
17265}
17266
17267def TF_VarIsInitializedOp : TF_Op<"VarIsInitializedOp", []> {
17268  let summary = [{
17269Checks whether a resource handle-based variable has been initialized.
17270  }];
17271
17272  let arguments = (ins
17273    Arg<TF_ResourceTensor, [{the input resource handle.}], [TF_VariableRead]>:$resource
17274  );
17275
17276  let results = (outs
17277    Res<TF_BoolTensor, [{a scalar boolean which is true if the variable has been
17278initialized.}]>:$is_initialized
17279  );
17280}
17281
17282def TF_VariableOp : TF_Op<"Variable", []> {
17283  let summary = "Use VariableV2 instead.";
17284
17285  let arguments = (ins
17286    TF_ShapeAttr:$shape,
17287    StrAttr:$container,
17288    StrAttr:$shared_name
17289  );
17290
17291  let results = (outs
17292    TF_Tensor:$ref
17293  );
17294
17295  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
17296}
17297
17298def TF_VariableShapeOp : TF_Op<"VariableShape", []> {
17299  let summary = "Returns the shape of the variable pointed to by `resource`.";
17300
17301  let description = [{
17302This operation returns a 1-D integer tensor representing the shape of `input`.
17303
17304For example:
17305
17306```
17307# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
17308shape(t) ==> [2, 2, 3]
17309```
17310  }];
17311
17312  let arguments = (ins
17313    Arg<TF_ResourceTensor, "", [TF_VariableRead]>:$input
17314  );
17315
17316  let results = (outs
17317    TF_I32OrI64Tensor:$output
17318  );
17319
17320  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
17321
17322  let verifier = [{
17323    return Verify(*this);
17324  }];
17325}
17326
17327def TF_VariableV2Op : TF_Op<"VariableV2", []> {
17328  let summary = [{
17329Holds state in the form of a tensor that persists across steps.
17330  }];
17331
17332  let description = [{
17333Outputs a ref to the tensor state so it may be read or modified.
TODO(zhifengc/mrry): Add a pointer to a more detailed document
about sharing state in TensorFlow.
17336  }];
17337
17338  let arguments = (ins
17339    TF_ShapeAttr:$shape,
17340    StrAttr:$container,
17341    StrAttr:$shared_name
17342  );
17343
17344  let results = (outs
17345    Res<TF_Tensor, [{A reference to the variable tensor.}]>:$ref
17346  );
17347
17348  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
17349}
17350
17351def TF_WhereOp : TF_Op<"Where", [NoSideEffect]> {
17352  let summary = "Returns locations of nonzero / true values in a tensor.";
17353
17354  let description = [{
17355This operation returns the coordinates of true elements in `condition`. The
17356coordinates are returned in a 2-D tensor where the first dimension (rows)
17357represents the number of true elements, and the second dimension (columns)
17358represents the coordinates of the true elements. Keep in mind, the shape of
17359the output tensor can vary depending on how many true values there are in
17360`condition`. Indices are output in row-major order.
17361
17362For example:
17363
17364```
# 'input' tensor is [[True, False]
#                    [True, False]]
# 'input' has two true values, so output has two coordinates.
# 'input' has a rank of 2, so coordinates have two indices.
where(input) ==> [[0, 0],
                  [1, 0]]

# 'input' tensor is [[[True, False]
#                     [True, False]]
#                    [[False, True]
#                     [False, True]]
#                    [[False, False]
#                     [False, True]]]
# 'input' has 5 true values, so output has 5 coordinates.
# 'input' has a rank of 3, so coordinates have three indices.
where(input) ==> [[0, 0, 0],
                  [0, 1, 0],
                  [1, 0, 1],
                  [1, 1, 1],
                  [2, 1, 1]]

# 'input' tensor is [[[1.5,  0.0]
#                     [-0.5, 0.0]]
#                    [[0.0,  0.25]
#                     [0.0,  0.75]]
#                    [[0.0,  0.0]
#                     [0.0,  0.01]]]
# 'input' has 5 nonzero values, so output has 5 coordinates.
# 'input' has a rank of 3, so coordinates have three indices.
where(input) ==> [[0, 0, 0],
                  [0, 1, 0],
                  [1, 0, 1],
                  [1, 1, 1],
                  [2, 1, 1]]

# 'input' tensor is [[[1.5 + 0.0j, 0.0  + 0.0j]
#                     [0.0 + 0.5j, 0.0  + 0.0j]]
#                    [[0.0 + 0.0j, 0.25 + 1.5j]
#                     [0.0 + 0.0j, 0.75 + 0.0j]]
#                    [[0.0 + 0.0j, 0.0  + 0.0j]
#                     [0.0 + 0.0j, 0.01 + 0.0j]]]
# 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
# 'input' has a rank of 3, so coordinates have three indices.
where(input) ==> [[0, 0, 0],
                  [0, 1, 0],
                  [1, 0, 1],
                  [1, 1, 1],
                  [2, 1, 1]]
17413```
17414  }];
17415
17416  let arguments = (ins
17417    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input
17418  );
17419
17420  let results = (outs
17421    TF_Int64Tensor:$index
17422  );
17423
17424  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17425}
17426
17427def TF_XdivyOp : TF_Op<"Xdivy", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
17428                 WithBroadcastableBinOpBuilder {
17429  let summary = "Returns 0 if x == 0, and x / y otherwise, elementwise.";
17430
17431  let arguments = (ins
17432    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$x,
17433    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$y
17434  );
17435
17436  let results = (outs
17437    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$z
17438  );
17439
17440  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17441}
17442
17443def TF_XlaBroadcastHelperOp : TF_Op<"XlaBroadcastHelper", [NoSideEffect]> {
17444  let summary = "Helper operator for performing XLA-style broadcasts";
17445
17446  let description = [{
17447Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to
17448whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules
17449for binary operators.
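
A NumPy sketch of the implied reshaping, under the stated broadcasting rules
(an illustration, not this op's invocation):

``` python
import numpy as np

lhs = np.ones([2, 3, 4])   # rank 3
rhs = np.ones([3])         # rank 1
broadcast_dims = [1]       # rhs's single dimension maps to lhs dimension 1

# rhs receives size-1 dimensions everywhere except the mapped dimension,
# giving shape [1, 3, 1]; lhs is already at the common rank.
rhs_output = rhs.reshape([1, 3, 1])
lhs_output = lhs
```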
17450  }];
17451
17452  let arguments = (ins
17453    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the LHS input tensor}]>:$lhs,
17454    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the RHS input tensor}]>:$rhs,
17455    Arg<TF_I32OrI64Tensor, [{an XLA-style broadcast dimension specification}]>:$broadcast_dims
17456  );
17457
17458  let results = (outs
17459    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the broadcasted LHS tensor}]>:$lhs_output,
17460    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the broadcasted RHS tensor}]>:$rhs_output
17461  );
17462
17463  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
17464  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17465}
17466
17467def TF_XlaClusterOutputOp : TF_Op<"XlaClusterOutput", [NoSideEffect]> {
17468  let summary = [{
17469Operator that connects the output of an XLA computation to other consumer graph nodes.
17470  }];
17471
17472  let arguments = (ins
17473    TF_Tensor:$input
17474  );
17475
17476  let results = (outs
17477    TF_Tensor:$outputs
17478  );
17479
17480  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17481}
17482
17483def TF_XlaConvOp : TF_Op<"XlaConv", [NoSideEffect]> {
17484  let summary = "Wraps the XLA ConvGeneralDilated operator, documented at";
17485
17486  let description = [{
17487https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
17488.
17489  }];
17490
17491  let arguments = (ins
17492    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor}]>:$lhs,
17493    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the kernel tensor}]>:$rhs,
17494    Arg<TF_I32OrI64Tensor, [{the inter-window strides}]>:$window_strides,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start and end of each input dimension}]>:$padding,
17496    Arg<TF_I32OrI64Tensor, [{dilation to apply between input elements}]>:$lhs_dilation,
17497    Arg<TF_I32OrI64Tensor, [{dilation to apply between kernel elements}]>:$rhs_dilation,
17498    Arg<TF_I32OrI64Tensor, [{number of feature groups for grouped convolution.}]>:$feature_group_count,
17499
17500    StrAttr:$dimension_numbers,
17501    StrAttr:$precision_config
17502  );
17503
17504  let results = (outs
17505    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
17506  );
17507
17508  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
17509  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17510}
17511
17512def TF_XlaConvV2Op : TF_Op<"XlaConvV2", [NoSideEffect]> {
17513  let summary = "Wraps the XLA ConvGeneralDilated operator, documented at";
17514
17515  let description = [{
17516https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
17517.
17518  }];
17519
17520  let arguments = (ins
17521    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor}]>:$lhs,
17522    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the kernel tensor}]>:$rhs,
17523    Arg<TF_I32OrI64Tensor, [{the inter-window strides}]>:$window_strides,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start and end of each input dimension}]>:$padding,
17525    Arg<TF_I32OrI64Tensor, [{dilation to apply between input elements}]>:$lhs_dilation,
17526    Arg<TF_I32OrI64Tensor, [{dilation to apply between kernel elements}]>:$rhs_dilation,
17527    Arg<TF_I32OrI64Tensor, [{number of feature groups for grouped convolution.}]>:$feature_group_count,
17528
17529    StrAttr:$dimension_numbers,
17530    StrAttr:$precision_config
17531  );
17532
17533  let results = (outs
17534    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
17535  );
17536
17537  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
17538  TF_DerivedOperandTypeAttr LhsT = TF_DerivedOperandTypeAttr<0>;
17539  TF_DerivedOperandTypeAttr RhsT = TF_DerivedOperandTypeAttr<1>;
17540  TF_DerivedResultTypeAttr preferred_element_type = TF_DerivedResultTypeAttr<0>;
17541}
17542
17543def TF_XlaDotOp : TF_Op<"XlaDot", [NoSideEffect]> {
17544  let summary = "Wraps the XLA DotGeneral operator, documented at";
17545
17546  let description = [{
17547https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral
17548.
17549  }];
17550
17551  let arguments = (ins
17552    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the LHS tensor}]>:$lhs,
17553    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the RHS tensor}]>:$rhs,
17554
17555    StrAttr:$dimension_numbers,
17556    StrAttr:$precision_config
17557  );
17558
17559  let results = (outs
17560    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
17561  );
17562
17563  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17564}
17565
17566def TF_XlaDotV2Op : TF_Op<"XlaDotV2", [NoSideEffect]> {
17567  let summary = "Wraps the XLA DotGeneral operator, documented at";
17568
17569  let description = [{
17570https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral
17571.
17572  }];
17573
17574  let arguments = (ins
17575    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the LHS tensor}]>:$lhs,
17576    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the RHS tensor}]>:$rhs,
17577
17578    StrAttr:$dimension_numbers,
17579    StrAttr:$precision_config
17580  );
17581
17582  let results = (outs
17583    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
17584  );
17585
17586  TF_DerivedOperandTypeAttr LhsT = TF_DerivedOperandTypeAttr<0>;
17587  TF_DerivedOperandTypeAttr RhsT = TF_DerivedOperandTypeAttr<1>;
17588  TF_DerivedResultTypeAttr preferred_element_type = TF_DerivedResultTypeAttr<0>;
17589}
17590
17591def TF_XlaDynamicSliceOp : TF_Op<"XlaDynamicSlice", [NoSideEffect]> {
17592  let summary = "Wraps the XLA DynamicSlice operator, documented at";
17593
17594  let description = [{
17595https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice
17596.
17597
DynamicSlice extracts a sub-array from the input array at dynamic
start_indices. The size of the slice in each dimension is passed in
size_indices, which specifies the end point of the exclusive slice interval in
each dimension: [start, start + size). start_indices must be a rank-1 tensor
whose dimension size equals the rank of operand.
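
In NumPy terms (a sketch of the slicing semantics only):

``` python
import numpy as np

operand = np.arange(12).reshape(3, 4)
start_indices = np.array([1, 1])
size_indices = np.array([2, 2])

# Each dimension is sliced over [start, start + size).
output = operand[start_indices[0]:start_indices[0] + size_indices[0],
                 start_indices[1]:start_indices[1] + size_indices[1]]
# output ==> [[5, 6],
#             [9, 10]]
```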
17603  }];
17604
17605  let arguments = (ins
    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{Rank-1 tensor of N integers containing the starting indices of the
slice, one per dimension of `input`.}]>:$start_indices,
    Arg<TF_I32OrI64Tensor, [{List of N integers containing the slice size for each
dimension. Each value must be strictly greater than zero, and start + size
must be less than or equal to the size of the dimension to avoid
implementation defined behavior.}]>:$size_indices
17612  );
17613
17614  let results = (outs
17615    TF_Tensor:$output
17616  );
17617
17618  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
17619  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17620}
17621
17622def TF_XlaDynamicUpdateSliceOp : TF_Op<"XlaDynamicUpdateSlice", [NoSideEffect]> {
17623  let summary = "Wraps the XLA DynamicUpdateSlice operator, documented at";
17624
17625  let description = [{
17626https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice
17627.
17628
17629XlaDynamicUpdateSlice generates a result which is the value of the `input`
17630operand, with a slice update overwritten at `indices`. The shape of `update`
determines the shape of the sub-array of the result which is updated. `indices`
must be a rank-1 tensor with dimension size equal to the rank of `input`.
17633
17634Handling of out-of-bounds slice indices is implementation-defined.
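
In NumPy terms (an illustrative sketch of the in-bounds semantics):

``` python
import numpy as np

input_ = np.zeros((3, 4), dtype=np.int32)
update = np.array([[1, 2],
                   [3, 4]], dtype=np.int32)
indices = np.array([1, 1])

# The result equals `input_` with `update` overwritten starting at `indices`.
result = input_.copy()
result[indices[0]:indices[0] + update.shape[0],
       indices[1]:indices[1] + update.shape[1]] = update
```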
17635  }];
17636
17637  let arguments = (ins
17638    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$input,
17639    Arg<TF_Tensor, [{A `Tensor` of type T. Same rank as `input`.}]>:$update,
17640    Arg<TF_I32OrI64Tensor, [{A vector of indices into `input`. Must have length equal to the rank of
17641`input`.}]>:$indices
17642  );
17643
17644  let results = (outs
17645    Res<TF_Tensor, [{A `Tensor` of type T.}]>:$output
17646  );
17647
17648  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
17649  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17650}
17651
17652def TF_XlaEinsumOp : TF_Op<"XlaEinsum", [NoSideEffect]> {
17653  let summary = [{
An op which supports a basic einsum operation with 2 inputs and 1 output.
17655  }];
17656
17657  let description = [{
This op has better TPU performance than tf.einsum, since it does not insert
explicit reshape and transpose operations.
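
The `equation` attribute uses standard einsum notation; for instance, the
public `tf.einsum` (shown only to illustrate the notation, not this op)
expresses a matrix multiply as:

``` python
import tensorflow as tf

a = tf.random.normal([2, 3])
b = tf.random.normal([3, 4])
product = tf.einsum('ij,jk->ik', a, b)  # shape [2, 4]
```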
17660  }];
17661
17662  let arguments = (ins
17663    TensorOf<[TF_Bfloat16, TF_Complex64, TF_Float32]>:$a,
17664    TensorOf<[TF_Bfloat16, TF_Complex64, TF_Float32]>:$b,
17665
17666    StrAttr:$equation
17667  );
17668
17669  let results = (outs
17670    TensorOf<[TF_Bfloat16, TF_Complex64, TF_Float32]>:$product
17671  );
17672
17673  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17674}
17675
17676def TF_XlaGatherOp : TF_Op<"XlaGather", [NoSideEffect]> {
17677  let summary = "Wraps the XLA Gather operator documented at";
17678
17679  let description = [{
17680https://www.tensorflow.org/xla/operation_semantics#gather
17681  }];
17682
17683  let arguments = (ins
17684    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The array we're gathering from.}]>:$operand,
17685    Arg<TF_I32OrI64Tensor, [{Array containing the starting indices of the slices we gather.}]>:$start_indices,
17686    Arg<TF_I32OrI64Tensor, [{slice_sizes[i] is the bounds for the slice on dimension i.}]>:$slice_sizes,
17687
17688    StrAttr:$dimension_numbers,
17689    BoolAttr:$indices_are_sorted
17690  );
17691
17692  let results = (outs
17693    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
17694  );
17695
17696  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
17697  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17698}
17699
17700def TF_XlaHostComputeOp : TF_Op<"XlaHostCompute", []> {
17701  let summary = [{
17702A pseudo-op to represent host-side computation in an XLA program.
17703  }];
17704
17705  let arguments = (ins
17706    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be sent to the host.}]>:$inputs,
17707
17708    StrArrayAttr:$ancestors,
17709    TF_ShapeAttrArray:$shapes,
17710    SymbolRefAttr:$shape_inference_graph,
17711    StrAttr:$key,
17712    DefaultValuedAttr<I64Attr, "1000000">:$cost_estimate_ns,
17713    DefaultValuedAttr<I64Attr, "0">:$tpu_core
17714  );
17715
17716  let results = (outs
17717    Res<Variadic<TF_Tensor>, [{A list of tensors that will be returned to the device.}]>:$outputs
17718  );
17719
17720  TF_DerivedOperandTypeListAttr Tinputs = TF_DerivedOperandTypeListAttr<0>;
17721  TF_DerivedResultTypeListAttr Toutputs = TF_DerivedResultTypeListAttr<0>;
17722}
17723
17724def TF_XlaKeyValueSortOp : TF_Op<"XlaKeyValueSort", [NoSideEffect]> {
17725  let summary = "Wraps the XLA Sort operator, documented at";
17726
17727  let description = [{
17728https://www.tensorflow.org/performance/xla/operation_semantics#sort
17729.
17730
Sorts a tensor. Currently, only sorting in ascending order is supported.
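
A NumPy analogue of the key/value sort (a sketch, not this op's invocation):

``` python
import numpy as np

keys = np.array([3, 1, 2])
values = np.array([30, 10, 20])

order = np.argsort(keys, kind='stable')  # ascending order
sorted_keys, sorted_values = keys[order], values[order]
# sorted_keys ==> [1, 2, 3]; sorted_values ==> [10, 20, 30]
```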
17732  }];
17733
17734  let arguments = (ins
17735    Arg<TF_IntOrFpTensor, [{A `Tensor` of type K.}]>:$keys,
17736    Arg<TF_Tensor, [{A `Tensor` of type V.}]>:$values
17737  );
17738
17739  let results = (outs
17740    Res<TF_IntOrFpTensor, [{A `Tensor` of type K.}]>:$sorted_keys,
17741    Res<TF_Tensor, [{A `Tensor` of type V.}]>:$sorted_values
17742  );
17743
17744  TF_DerivedOperandTypeAttr V = TF_DerivedOperandTypeAttr<1>;
17745  TF_DerivedOperandTypeAttr K = TF_DerivedOperandTypeAttr<0>;
17746}
17747
17748def TF_XlaPadOp : TF_Op<"XlaPad", [NoSideEffect]> {
17749  let summary = "Wraps the XLA Pad operator, documented at";
17750
17751  let description = [{
17752https://www.tensorflow.org/performance/xla/operation_semantics#pad
17753.
17754  }];
17755
17756  let arguments = (ins
17757    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$input,
17758    Arg<TF_Tensor, [{A scalar `Tensor` of type T.}]>:$padding_value,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start of each input dimension. Must
17760be a compile-time constant 1D tensor of length equal to rank of input.}]>:$padding_low,
17761    Arg<TF_I32OrI64Tensor, [{the padding to apply at the end of each input dimension. Must
17762be a compile-time constant 1D tensor of length equal to rank of input.}]>:$padding_high,
17763    Arg<TF_I32OrI64Tensor, [{the padding to apply between each input element. Must
17764be a compile-time constant 1D tensor of length equal to rank of input,
17765containing only non-negative values.}]>:$padding_interior
17766  );
17767
17768  let results = (outs
17769    Res<TF_Tensor, [{A `Tensor` of type T.}]>:$output
17770  );
17771
17772  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
17773  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17774}
17775
17776def TF_XlaRecvFromHostOp : TF_Op<"XlaRecvFromHost", []> {
17777  let summary = "An op to receive a tensor from the host.";
17778
17779  let description = [{
17780output: the tensor that will be received from the host.
17781Toutput: element type for output.
17782shape: shape for output.
17783key: A unique identifier for this region used to match up host transfers.
17784  }];
17785
17786  let arguments = (ins
17787    TF_ShapeAttr:$shape,
17788    StrAttr:$key
17789  );
17790
17791  let results = (outs
17792    TF_Tensor:$output
17793  );
17794
17795  TF_DerivedResultTypeAttr Toutput = TF_DerivedResultTypeAttr<0>;
17796}
17797
17798def TF_XlaReduceOp : TF_Op<"XlaReduce", [NoSideEffect]> {
17799  let summary = "Wraps the XLA Reduce operator, documented at";
17800
17801  let description = [{
17802https://www.tensorflow.org/performance/xla/operation_semantics#reduce .
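
With an add reducer, the semantics match a plain NumPy reduction (a sketch
under that assumption; the real op takes `reducer` as a function attribute):

``` python
import numpy as np

input_ = np.arange(6).reshape(2, 3)
init_value = 0

# reducer = add, dimensions_to_reduce = [1]:
output = np.add.reduce(input_, axis=1) + init_value
# output ==> [3, 12]
```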
17803  }];
17804
17805  let arguments = (ins
17806    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor}]>:$input,
17807    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{a scalar representing the initial value for the reduction}]>:$init_value,
17808
17809    I64ArrayAttr:$dimensions_to_reduce,
17810    SymbolRefAttr:$reducer
17811  );
17812
17813  let results = (outs
17814    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
17815  );
17816
17817  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17818}
17819
17820def TF_XlaReduceWindowOp : TF_Op<"XlaReduceWindow", [NoSideEffect]> {
17821  let summary = "Wraps the XLA ReduceWindow operator, documented at";
17822
17823  let description = [{
17824https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow .
17825  }];
17826
17827  let arguments = (ins
17828    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor}]>:$input,
17829    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{a scalar representing the initial value for the reduction}]>:$init_value,
17830    Arg<TF_I32OrI64Tensor, [{the shape of the window}]>:$window_dimensions,
17831    Arg<TF_I32OrI64Tensor, [{the inter-window strides}]>:$window_strides,
17832    TF_I32OrI64Tensor:$base_dilations,
17833    TF_I32OrI64Tensor:$window_dilations,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start and end of each input dimension}]>:$padding,
17835
17836    SymbolRefAttr:$computation
17837  );
17838
17839  let results = (outs
17840    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
17841  );
17842
17843  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
17844  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17845}
17846
17847def TF_XlaReplicaIdOp : TF_Op<"XlaReplicaId", [NoSideEffect, TF_NoConstantFold]> {
17848  let summary = "Replica ID.";
17849
17850  let arguments = (ins);
17851
17852  let results = (outs
17853    TF_Int32Tensor:$id
17854  );
17855
  // Constant folding is disabled for this op as it is a runtime op and can't
  // be constant folded at compile time.
17858}
17859
17860def TF_XlaScatterOp : TF_Op<"XlaScatter", [NoSideEffect]> {
17861  let summary = "Wraps the XLA Scatter operator documented at";
17862
17863  let description = [{
17864https://www.tensorflow.org/xla/operation_semantics#scatter.
17865  }];
17866
17867  let arguments = (ins
17868    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Array to be scattered into.}]>:$operand,
17869    Arg<TF_I32OrI64Tensor, [{Array containing the starting indices of the slices that must
17870be scattered to.}]>:$scatter_indices,
17871    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Array containing the values that must be used for scattering.}]>:$updates,
17872
17873    SymbolRefAttr:$update_computation,
17874    StrAttr:$dimension_numbers,
17875    BoolAttr:$indices_are_sorted
17876  );
17877
17878  let results = (outs
17879    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
17880  );
17881
17882  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
17883  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17884}
17885
17886def TF_XlaSelectAndScatterOp : TF_Op<"XlaSelectAndScatter", [NoSideEffect]> {
17887  let summary = "Wraps the XLA SelectAndScatter operator, documented at";
17888
17889  let description = [{
17890https://www.tensorflow.org/performance/xla/operation_semantics#selectandscatter
17891.
17892  }];
17893
17894  let arguments = (ins
17895    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor}]>:$operand,
17896    Arg<TF_I32OrI64Tensor, [{the shape of the window}]>:$window_dimensions,
17897    Arg<TF_I32OrI64Tensor, [{the inter-window strides}]>:$window_strides,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start and end of each input dimension}]>:$padding,
17899    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{a tensor of values to scatter}]>:$source,
17900    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{a scalar representing the initial value for the output tensor}]>:$init_value,
17901
17902    SymbolRefAttr:$select,
17903    SymbolRefAttr:$scatter
17904  );
17905
17906  let results = (outs
17907    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
17908  );
17909
17910  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
17911  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17912}
17913
17914def TF_XlaSelfAdjointEigOp : TF_Op<"XlaSelfAdjointEig", [NoSideEffect]> {
17915  let summary = [{
17916Computes the eigen decomposition of a batch of self-adjoint matrices
17917  }];
17918
17919  let description = [{
17920(Note: Only real inputs are supported).
17921
Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in
tensor such that tensor[..., :, :] * v[..., :, i] = w[..., i] * v[..., :, i], for
i=0...N-1.
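
The stated relation can be checked against NumPy's `eigh` (an illustration
only; this op is not invoked this way):

``` python
import numpy as np

a = np.array([[2.0, 1.0],
              [1.0, 2.0]])   # real self-adjoint matrix
w, v = np.linalg.eigh(a)     # eigenvalues in ascending order

# a @ v[:, i] == w[i] * v[:, i] for every i.
assert np.allclose(a @ v, v * w)
```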
17925  }];
17926
17927  let arguments = (ins
17928    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor.}]>:$a,
17929
17930    BoolAttr:$lower,
17931    I64Attr:$max_iter,
17932    F32Attr:$epsilon
17933  );
17934
17935  let results = (outs
17936    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The eigenvalues in ascending order, each repeated according to its
17937multiplicity.}]>:$w,
17938    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The column v[..., :, i] is the normalized eigenvector corresponding to the
17939eigenvalue w[..., i].}]>:$v
17940  );
17941
17942  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17943}
17944
17945def TF_XlaSendToHostOp : TF_Op<"XlaSendToHost", []> {
17946  let summary = "An op to send a tensor to the host.";
17947
17948  let description = [{
17949input: the tensor that will be sent to the host.
17950Tinput: element type for input.
17951key: A unique identifier for this region used to match up host transfers.
17952  }];
17953
17954  let arguments = (ins
17955    TF_Tensor:$input,
17956
17957    StrAttr:$key
17958  );
17959
17960  let results = (outs);
17961
17962  TF_DerivedOperandTypeAttr Tinput = TF_DerivedOperandTypeAttr<0>;
17963}
17964
17965def TF_XlaSetDynamicDimensionSizeOp : TF_Op<"XlaSetDynamicDimensionSize", [NoSideEffect, TF_NoConstantFold]> {
17966  let summary = "Make a static dimension into a xla bounded dynamic dimension.";
17967
17968  let description = [{
The current static dimension size will become the bound and the second
operand becomes the dynamic size of the dimension.
17971  }];
17972
17973  let arguments = (ins
17974    TF_Tensor:$input,
17975    TF_Int32Tensor:$dim_index,
17976    TF_Int32Tensor:$size
17977  );
17978
17979  let results = (outs
17980    TF_Tensor:$output
17981  );
17982
17983  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
17984}
17985
17986def TF_XlaSortOp : TF_Op<"XlaSort", [NoSideEffect]> {
17987  let summary = "Wraps the XLA Sort operator, documented at";
17988
17989  let description = [{
17990https://www.tensorflow.org/performance/xla/operation_semantics#sort
17991.
17992
Sorts a tensor. Currently, only sorting in ascending order is supported.
17994  }];
17995
17996  let arguments = (ins
17997    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$input
17998  );
17999
18000  let results = (outs
18001    Res<TF_Tensor, [{A `Tensor` of type T.}]>:$output
18002  );
18003
18004  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
18005}
18006
18007def TF_XlaSvdOp : TF_Op<"XlaSvd", [NoSideEffect]> {
18008  let summary = [{
Computes the singular value decomposition of a batch of matrices
18010  }];
18011
18012  let description = [{
18013(Note: Only real inputs are supported).
18014
Computes the singular values and singular vectors of the innermost M-by-N
matrices in tensor such that tensor[..., :, :] = u[..., :, :] * Diag(s[..., :]) * Transpose(v[..., :, :]).
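
The reconstruction contract mirrors NumPy's batched SVD (an illustration
only):

``` python
import numpy as np

a = np.random.randn(2, 5, 3)                      # batch of M-by-N matrices
u, s, vt = np.linalg.svd(a, full_matrices=False)  # NumPy returns v transposed

# a == u @ Diag(s) @ Transpose(v) for each batch element.
assert np.allclose(a, u @ (s[..., None] * vt))
```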
18017  }];
18018
18019  let arguments = (ins
18020    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor.}]>:$a,
18021
18022    I64Attr:$max_iter,
18023    F32Attr:$epsilon,
18024    StrAttr:$precision_config
18025  );
18026
18027  let results = (outs
18028    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Singular values. The values are sorted in reverse order of magnitude, so
18029s[..., 0] is the largest value, s[..., 1] is the second largest, etc.}]>:$s,
18030    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Left singular vectors.}]>:$u,
18031    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Right singular vectors.}]>:$v
18032  );
18033
18034  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
18035}
18036
18037def TF_XlaVariadicReduceOp : TF_Op<"XlaVariadicReduce", [NoSideEffect, SameVariadicOperandSize]> {
18038  let summary = "Wraps the variadic XLA Reduce operator.";
18039
18040  let description = [{
18041Semantics are documented at
18042 https://www.tensorflow.org/performance/xla/operation_semantics#variadic_reduce.
18043  }];
18044
18045  let arguments = (ins
18046    Arg<Variadic<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>>, [{the input tensor(s)}]>:$input,
18047    Arg<Variadic<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>>, [{scalar initial value(s) for the reduction}]>:$init_value,
18048
18049    I64ArrayAttr:$dimensions_to_reduce,
18050    SymbolRefAttr:$reducer
18051  );
18052
18053  let results = (outs
18054    Variadic<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>>:$output
18055  );
18056
18057  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
18058  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
18059}
18060
18061def TF_XlaVariadicSortOp : TF_Op<"XlaVariadicSort", [NoSideEffect]> {
18062  let summary = "Wraps the XLA Sort operator, documented at";
18063
18064  let description = [{
18065https://www.tensorflow.org/performance/xla/operation_semantics#sort
18066.
18067
Sorts one or more tensors, with support for a custom comparator, a sort
dimension, and an `is_stable` attribute.
18070  }];
18071
18072  let arguments = (ins
18073    Arg<Variadic<TF_Tensor>, [{A list of `Tensor` of identical shape but possibly different types.}]>:$inputs,
18074    Arg<TF_Int32Tensor, [{The dimension along which to sort. Must be a compile-time constant.}]>:$dimension,
18075
18076    SymbolRefAttr:$comparator,
18077    BoolAttr:$is_stable
18078  );
18079
18080  let results = (outs
18081    Res<Variadic<TF_Tensor>, [{A list of `Tensor` of same shape and types as the `input`.}]>:$outputs
18082  );
18083
18084  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<0>;
18085}
18086
18087def TF_Xlog1pyOp : TF_Op<"Xlog1py", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
18088  let summary = "Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise.";
18089
18090  let arguments = (ins
18091    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$x,
18092    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$y
18093  );
18094
18095  let results = (outs
18096    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$z
18097  );
18098
18099  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
18100}
18101
18102def TF_XlogyOp : TF_Op<"Xlogy", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
18103                 WithBroadcastableBinOpBuilder {
18104  let summary = "Returns 0 if x == 0, and x * log(y) otherwise, elementwise.";
18105
18106  let arguments = (ins
18107    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$x,
18108    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$y
18109  );
18110
18111  let results = (outs
18112    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$z
18113  );
18114
18115  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
18116}
18117
18118def TF_ZerosLikeOp : TF_Op<"ZerosLike", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
18119  let summary = "Returns a tensor of zeros with the same shape and type as x.";
18120
18121  let arguments = (ins
18122    Arg<TF_Tensor, [{a tensor of type T.}]>:$x
18123  );
18124
18125  let results = (outs
18126    Res<TF_Tensor, [{a tensor of the same shape and type as x but filled with zeros.}]>:$y
18127  );
18128
18129  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
18130}
18131
18132def TF_ZetaOp : TF_Op<"Zeta", [NoSideEffect, ResultsBroadcastableShape]>,
18133                WithBroadcastableBinOpBuilder {
18134  let summary = [{
18135Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
18136  }];
18137
18138  let description = [{
The Hurwitz zeta function is defined as:

\\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
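
As a quick numerical check (assuming the public eager API `tf.math.zeta`
follows this definition), `zeta(2, 1)` reduces to the Riemann value
\\(\pi^2 / 6 \approx 1.6449\\):

``` python
import tensorflow as tf

tf.math.zeta(tf.constant([2.0]), tf.constant([1.0]))  # ==> [~1.6449341]
```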
18143  }];
18144
18145  let arguments = (ins
18146    TF_F32OrF64Tensor:$x,
18147    TF_F32OrF64Tensor:$q
18148  );
18149
18150  let results = (outs
18151    TF_F32OrF64Tensor:$z
18152  );
18153
18154  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
18155}
18156
18157def TF__ArrayToListOp : TF_Op<"_ArrayToList", [NoSideEffect]> {
18158  let summary = "Converts an array of tensors to a list of tensors.";
18159
18160  let arguments = (ins
18161    Variadic<TF_Tensor>:$input
18162  );
18163
18164  let results = (outs
18165    Variadic<TF_Tensor>:$output
18166  );
18167
18168  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
18169  TF_DerivedResultTypeListAttr out_types = TF_DerivedResultTypeListAttr<0>;
18170  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
18171}
18172
18173def TF__FusedBatchNormExOp : TF_Op<"_FusedBatchNormEx", [NoSideEffect]> {
18174  let summary = "Internal FusedBatchNorm operation: reserved for internal use.";
18175
18176  let description = [{
18177Do not invoke this operator directly in Python. A fusion optimization is
18178expected to create these operators.
18179  }];
18180
18181  let arguments = (ins
18182    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$x,
18183    TF_Float32Tensor:$scale,
18184    TF_Float32Tensor:$offset,
18185    TF_Float32Tensor:$mean,
18186    TF_Float32Tensor:$variance,
18187    Variadic<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>>:$side_input,
18188
18189    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
18190    DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
18191    DefaultValuedAttr<StrAttr, "Identity">:$activation_mode,
18192    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
18193    DefaultValuedAttr<BoolAttr, "true">:$is_training
18194  );
18195
18196  let results = (outs
18197    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$y,
18198    TF_Float32Tensor:$batch_mean,
18199    TF_Float32Tensor:$batch_variance,
18200    TF_Float32Tensor:$reserve_space_1,
18201    TF_Float32Tensor:$reserve_space_2,
18202    TF_Float32Tensor:$reserve_space_3
18203  );
18204
18205  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
18206  TF_DerivedOperandTypeAttr U = TF_DerivedOperandTypeAttr<1>;
18207  TF_DerivedOperandSizeAttr num_side_inputs = TF_DerivedOperandSizeAttr<5>;
18208}
18209
18210def TF__FusedConv2DOp : TF_Op<"_FusedConv2D", [NoSideEffect]> {
18211  let summary = [{
18212Performs a convolution followed by a specified series of operations.
18213  }];
18214
18215  let description = [{
18216The inputs to the convolution are `input` and `filter`. The series of operations
18217that follows is specified by the `fused_ops` attribute, which is a list of TF op
18218names specified as strings (e.g. "Relu"). They are performed in order, where the
18219(first) input to each op is the output of the preceding op. The first input and
18220the output of each fused_op must be of type T.
18221
18222Currently supported fused_op combinations are: [X] and [X,A], where X is one of
18223{"BiasAdd","FusedBatchNorm"} and A is one of {"Elu","Relu","Relu6"}.
18224
18225* The first input to op X is the Conv2D result, and the additional input(s) to X
18226are specified by `args`.
18227* If there is an op A specified, the output of op X is the input to op A, and op
18228A produces the _FusedConv2D output. Otherwise, op X produces the _FusedConv2D
18229output.
18230
18231*NOTE*: Do not invoke this operator directly in Python. Grappler is expected to
18232create these operators.
18233  }];
18234
18235  let arguments = (ins
18236    TF_F32OrF64Tensor:$input,
18237    TF_F32OrF64Tensor:$filter,
18238    Variadic<TF_F32OrF64Tensor>:$args,
18239
18240    I64ArrayAttr:$strides,
18241    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
18242    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
18243    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
18244    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations,
18245    DefaultValuedAttr<BoolAttr, "true">:$use_cudnn_on_gpu,
18246    DefaultValuedAttr<StrArrayAttr, "{}">:$fused_ops,
18247    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
18248    DefaultValuedAttr<F32Attr, "0.2f">:$leakyrelu_alpha
18249  );
18250
18251  let results = (outs
18252    TF_F32OrF64Tensor:$output
18253  );
18254
18255  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
18256  TF_DerivedOperandSizeAttr num_args = TF_DerivedOperandSizeAttr<2>;
18257}
18258
18259def TF__FusedMatMulOp : TF_Op<"_FusedMatMul", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
18260  let summary = [{
18261Performs a MatMul followed by a specified series of operations.
18262  }];
18263
18264  let description = [{
18265The inputs to the MatMul are specified by `a` and `b`. The series of operations
18266that follows is specified by the `fused_ops` attribute, which is a list of TF op
18267names specified as strings (e.g. "Relu"). They are performed in order, where the
18268(first) input to each op is the output of the preceding op. The first input and
18269the output of each fused_op must be of type T.
18270
18271Currently supported fused_op combinations are: ["BiasAdd"] and ["BiasAdd",A],
18272where A is one of {"Elu","Relu","Relu6"}.
18273
* The first input to BiasAdd is the MatMul result, and the additional BiasAdd
input is specified by `args`.
* If there is an op A specified, the output of the BiasAdd is the input to op A,
and op A produces the _FusedMatMul output. Otherwise, the BiasAdd produces the
_FusedMatMul output.
18279
18280*NOTE*: Do not invoke this operator directly in Python. Grappler is
18281expected to create these operators.
18282  }];
18283
18284  let arguments = (ins
18285    TensorOf<[TF_Bfloat16, TF_Float32]>:$a,
18286    TensorOf<[TF_Bfloat16, TF_Float32]>:$b,
18287    Variadic<TensorOf<[TF_Bfloat16, TF_Float32]>>:$args,
18288
18289    DefaultValuedAttr<BoolAttr, "false">:$transpose_a,
18290    DefaultValuedAttr<BoolAttr, "false">:$transpose_b,
18291    DefaultValuedAttr<StrArrayAttr, "{}">:$fused_ops,
18292    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
18293    DefaultValuedAttr<F32Attr, "0.2f">:$leakyrelu_alpha
18294  );
18295
18296  let results = (outs
18297    TensorOf<[TF_Bfloat16, TF_Float32]>:$product
18298  );
18299
18300  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
18301  TF_DerivedOperandSizeAttr num_args = TF_DerivedOperandSizeAttr<2>;
18302}
18303
18304def TF__HostRecvOp : TF_Op<"_HostRecv", []> {
18305  let summary = "Receives the named tensor from send_device on recv_device.";
18306
18307  let description = [{
18308_HostRecv produces its output on host memory whereas _Recv produces its
18309output on device memory.
18310  }];
18311
18312  let arguments = (ins
18313    StrAttr:$tensor_name,
18314    StrAttr:$send_device,
18315    I64Attr:$send_device_incarnation,
18316    StrAttr:$recv_device,
18317    DefaultValuedAttr<BoolAttr, "false">:$client_terminated
18318  );
18319
18320  let results = (outs
18321    Res<TF_Tensor, [{The tensor to receive.}]>:$tensor
18322  );
18323
18324  TF_DerivedResultTypeAttr tensor_type = TF_DerivedResultTypeAttr<0>;
18325}
18326
18327def TF__HostSendOp : TF_Op<"_HostSend", []> {
18328  let summary = "Sends the named tensor from send_device to recv_device.";
18329
18330  let description = [{
18331_HostSend requires its input on host memory whereas _Send requires its
18332input on device memory.
18333  }];
18334
18335  let arguments = (ins
18336    Arg<TF_Tensor, [{The tensor to send.}]>:$tensor,
18337
18338    StrAttr:$tensor_name,
18339    StrAttr:$send_device,
18340    I64Attr:$send_device_incarnation,
18341    StrAttr:$recv_device,
18342    DefaultValuedAttr<BoolAttr, "false">:$client_terminated
18343  );
18344
18345  let results = (outs);
18346
18347  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
18348}
18349
18350def TF__ListToArrayOp : TF_Op<"_ListToArray", [NoSideEffect]> {
18351  let summary = "Converts a list of tensors to an array of tensors.";
18352
18353  let arguments = (ins
18354    Variadic<TF_Tensor>:$input
18355  );
18356
18357  let results = (outs
18358    Variadic<TF_Tensor>:$output
18359  );
18360
18361  TF_DerivedResultTypeAttr T = TF_DerivedResultTypeAttr<0>;
18362  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>;
18363  TF_DerivedResultSizeAttr N = TF_DerivedResultSizeAttr<0>;
18364}
18365
18366def TF__RecvTPUEmbeddingActivationsOp : TF_Op<"_RecvTPUEmbeddingActivations", [TF_TPUEmbeddingSideEffect]> {
18367  let summary = "An op that receives embeddng activations on the TPU.";
18368
18369  let description = [{
18370The TPU system performs the embedding lookups and aggregations. The results of
these aggregations are visible to the TensorFlow graph as the outputs of a
_RecvTPUEmbeddingActivations op. This op returns a list containing one
18373Tensor of activations per table specified in the model.
18374  }];
18375
18376  let arguments = (ins
18377    Arg<TF_VariantTensor, [{A Tensor with type=DT_VARIANT containing the deduplication
data. The tensor is an XLA nested tuple containing N elements (where N is
the ratio of embedding cores to TensorCores per TPU chip). Each
18380element of the nested tuple is a tuple of rank 1 tensors. Each tensor either
18381contains indices (DT_UINT32) for embedding lookup on the TensorCore or
18382weights (DT_FLOAT) to apply to the output of the embedding lookup operation.}]>:$deduplication_data,
18383
18384    StrAttr:$config
18385  );
18386
18387  let results = (outs
18388    Res<Variadic<TF_Float32Tensor>, [{A TensorList of embedding activations containing one Tensor per
18389embedding table in the model.}]>:$outputs
18390  );
18391
18392  TF_DerivedResultSizeAttr num_tables = TF_DerivedResultSizeAttr<0>;
18393}
18394
18395def TF__TPUCompileMlirOp : TF_Op<"_TPUCompileMlir", []> {
18396  let summary = [{
Compiles a computation for execution on one or more TPU devices.
18398  }];
18399
18400  let description = [{
18401For the internal use of the distributed TPU compiler.
18402
'mlir_module' is a serialized MLIR module with a `main` function that contains
the target computation.
18405'dynamic_shapes' contains dynamic shapes of arguments whose shapes were not
18406known statically at TPUReplication rewrite time.
18407'metadata' is a serialized TPUCompileMetadataProto describing the shapes and
18408types of the inputs to the computation, as well as a mapping onto the TPU pod
18409topology.
18410'program' output is a string key that is passed to the TPUExecute op and used to
18411look up the program in the compilation cache.
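
For illustration, a hedged sketch in textual MLIR with no dynamic shapes (the
string payloads are elided placeholders, and the `3x` program-key shape is an
assumption, not part of this definition):

```mlir
%status, %program = "tf._TPUCompileMlir"() {mlir_module = "...", metadata = "..."}
  : () -> (tensor<!tf_type.string>, tensor<3x!tf_type.string>)
```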
18412  }];
18413
18414  let arguments = (ins
18415    Variadic<TF_Int64Tensor>:$dynamic_shapes,
18416
18417    StrAttr:$mlir_module,
18418    StrAttr:$metadata
18419  );
18420
18421  let results = (outs
18422    TF_StrTensor:$compilation_status,
18423    Variadic<TF_StrTensor>:$program
18424  );
18425
18426  TF_DerivedResultSizeAttr num_computations = TF_DerivedResultSizeAttr<1>;
18427  TF_DerivedOperandSizeAttr NumDynamicShapes = TF_DerivedOperandSizeAttr<0>;
18428}
18429
18430def TF__TPUCompileMlirPlaceholderProgramKeyOp : TF_Op<"_TPUCompileMlirPlaceholderProgramKey", []> {
18431  let summary = [{
18432Placeholder program key (compilation cache key) of a _TPUCompileMlir `program`.
18433  }];
18434
18435  let description = [{
18436This op can be used when certain rewrite passes materialize ops that require a
program key but the _TPUCompileMlir op has not been added yet. Subsequent
rewrite passes must replace this op with the `program` output of a
_TPUCompileMlir op.
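
For illustration (the result shape mirrors the hedged _TPUCompileMlir sketch
above and is likewise an assumption):

```mlir
%program = "tf._TPUCompileMlirPlaceholderProgramKey"() : () -> tensor<3x!tf_type.string>
```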
18439  }];
18440
18441  let arguments = (ins);
18442
18443  let results = (outs
18444    TF_StrTensor:$program
18445  );
18446}
18447
18448def TF__UnaryOpsCompositionOp : TF_Op<"_UnaryOpsComposition", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
18449  let summary = [{
*NOTE*: Do not invoke this operator directly in Python.
18451  }];
18452
18453  let description = [{
A graph rewrite pass is expected to create these operators by fusing a chain
of unary ops; `op_names` lists the ops applied to `x`, in order.
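
For illustration, a hedged sketch in textual MLIR (op names and shape are
hypothetical):

```mlir
// Roughly equivalent to y = Tanh(Sqrt(x)), computed as one fused kernel.
%y = "tf._UnaryOpsComposition"(%x) {op_names = ["Sqrt", "Tanh"]}
  : (tensor<4xf32>) -> tensor<4xf32>
```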
18455  }];
18456
18457  let arguments = (ins
18458    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$x,
18459
18460    StrArrayAttr:$op_names
18461  );
18462
18463  let results = (outs
18464    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$y
18465  );
18466
18467  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
18468}
18469
18470def TF__XlaHostComputeMlirOp : TF_Op<"_XlaHostComputeMlir", []> {
18471  let summary = [{
18472A pseudo-op to represent host-side computation in an XLA program.
18473  }];
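
  let description = [{
For illustration, a hedged sketch in textual MLIR; the channel keys and the
tensor types are hypothetical and must match what the compiler sets up:

```mlir
%out = "tf._XlaHostComputeMlir"(%in) {
  send_key = "host_compute_channel_0_args",
  recv_key = "host_compute_channel_0_retvals",
  tpu_core = 0 : i64
} : (tensor<f32>) -> tensor<f32>
```
  }];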
18474
18475  let arguments = (ins
18476    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be sent to the host.}]>:$inputs,
18477
18478    StrAttr:$send_key,
18479    StrAttr:$recv_key,
18480    DefaultValuedAttr<I64Attr, "0">:$tpu_core
18481  );
18482
18483  let results = (outs
18484    Res<Variadic<TF_Tensor>, [{A list of tensors that will be returned to the device.}]>:$outputs
18485  );
18486
18487  TF_DerivedOperandTypeListAttr Tinputs = TF_DerivedOperandTypeListAttr<0>;
18488  TF_DerivedResultTypeListAttr Toutputs = TF_DerivedResultTypeListAttr<0>;
18489}
18490
18491def TF__XlaRecvAtHostOp : TF_Op<"_XlaRecvAtHost", []> {
18492  let summary = [{
18493A placeholder op to receive values from a running XLA computation.
18494  }];
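
  let description = [{
For illustration, a hedged sketch in textual MLIR; the key, ordinal, and
result types are hypothetical, and `dynamic_key` is typically the `program`
output of a _TPUCompileMlir op:

```mlir
%outputs:2 = "tf._XlaRecvAtHost"(%dynamic_key) {key = "host_compute_channel_0_args", device_ordinal = 0 : i64}
  : (tensor<3x!tf_type.string>) -> (tensor<f32>, tensor<i32>)
```
  }];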
18495
18496  let arguments = (ins
18497    Arg<TF_StrTensor, [{The key sent at runtime by the compile node to identify which
18498execution the transfer corresponds to.}]>:$dynamic_key,
18499
18500    StrAttr:$key,
18501    I64Attr:$device_ordinal
18502  );
18503
18504  let results = (outs
18505    Res<Variadic<TF_Tensor>, [{A list of tensors that will be received from the XLA computation.}]>:$outputs
18506  );
18507
18508  TF_DerivedResultTypeListAttr Toutputs = TF_DerivedResultTypeListAttr<0>;
18509}
18510
18511def TF__XlaRecvAtHostV2Op : TF_Op<"_XlaRecvAtHostV2", []> {
18512  let summary = [{
18513A placeholder op to receive values from a running XLA computation with support for a runtime device ordinal.
18514  }];
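
  let description = [{
Unlike _XlaRecvAtHost, the device ordinal arrives as a runtime operand rather
than an attribute. A hedged sketch (key and types hypothetical):

```mlir
%outputs = "tf._XlaRecvAtHostV2"(%dynamic_key, %device_ordinal) {key = "host_compute_channel_0_args"}
  : (tensor<3x!tf_type.string>, tensor<i64>) -> tensor<f32>
```
  }];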
18515
18516  let arguments = (ins
18517    Arg<TF_StrTensor, [{The key sent at runtime by the compile node to identify which
18518execution the transfer corresponds to.}]>:$dynamic_key,
18519    Arg<TF_Int64Tensor, [{The device id relative to the associated host device.}]>:$device_ordinal,
18520
18521    StrAttr:$key
18522  );
18523
18524  let results = (outs
18525    Res<Variadic<TF_Tensor>, [{A list of tensors that will be received from the XLA computation.}]>:$outputs
18526  );
18527
18528  TF_DerivedResultTypeListAttr Toutputs = TF_DerivedResultTypeListAttr<0>;
18529}
18530
18531def TF__XlaSendFromHostOp : TF_Op<"_XlaSendFromHost", []> {
18532  let summary = "A placeholder op to send values to a running XLA computation.";
18533
18534  let arguments = (ins
18535    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be sent to the XLA computation.}]>:$inputs,
18536    Arg<TF_StrTensor, [{The key sent at runtime by the compile node to identify which
18537execution the transfer corresponds to.}]>:$dynamic_key,
18538
18539    StrAttr:$key,
18540    I64Attr:$device_ordinal
18541  );
18542
18543  let results = (outs);
18544
18545  TF_DerivedOperandTypeListAttr Tinputs = TF_DerivedOperandTypeListAttr<0>;
18546}
18547
18548def TF__XlaSendFromHostV2Op : TF_Op<"_XlaSendFromHostV2", []> {
18549  let summary = [{
18550A placeholder op to send values to a running XLA computation with support for a runtime device ordinal.
18551  }];
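
  let description = [{
As with _XlaRecvAtHostV2, the device ordinal is a runtime operand. A hedged
sketch (key and types hypothetical):

```mlir
"tf._XlaSendFromHostV2"(%result, %dynamic_key, %device_ordinal) {key = "host_compute_channel_0_retvals"}
  : (tensor<f32>, tensor<3x!tf_type.string>, tensor<i64>) -> ()
```
  }];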
18552
18553  let arguments = (ins
18554    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be sent to the XLA computation.}]>:$inputs,
18555    Arg<TF_StrTensor, [{The key sent at runtime by the compile node to identify which
18556execution the transfer corresponds to.}]>:$dynamic_key,
18557    Arg<TF_Int64Tensor, [{The device id relative to the associated host device.}]>:$device_ordinal,
18558
18559    StrAttr:$key
18560  );
18561
18562  let results = (outs);
18563
18564  TF_DerivedOperandTypeListAttr Tinputs = TF_DerivedOperandTypeListAttr<0>;
18565}
18566