1 /*
2  * SPDX-License-Identifier: Apache-2.0
3  */
4 
5 
6 #include "onnx/defs/function.h"
7 #include "onnx/defs/schema.h"
8 
9 namespace ONNX_NAMESPACE {
10 
// Markdown doc attached to the opset-10 QuantizeLinear schema below via
// SetDoc(). The string content is part of the published operator
// specification; do not reword it without introducing a new opset version.
static const char* QuantizeLinear_ver10_doc = R"DOC(
The linear per-tensor/layer quantization operator. It consumes a high precision tensor, a scale, a zero point to compute the low precision / quantized tensor.
The quantization formula is y = saturate ((x / y_scale) + y_zero_point). For saturation, it saturates to [0, 255] if it's uint8, or [-128, 127] if it's int8.
For (x / y_scale), it's rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and 'y' must have same type.
)DOC";
16 
17 ONNX_OPERATOR_SET_SCHEMA(
18     QuantizeLinear,
19     10,
20     OpSchema()
21         .Input(0, "x", "N-D full precision Input tensor to be quantized.", "T1")
22         .Input(
23             1,
24             "y_scale",
25             "Scale for doing quantization to get 'y'. It's a scalar, which means a per-tensor/layer quantization.",
26             "tensor(float)")
27         .Input(
28             2,
29             "y_zero_point",
30             "Zero point for doing quantization to get 'y'. It's a scalar, which means a per-tensor/layer quantization. "
31             "Default value is uint8 typed 0 if it's not specified.",
32             "T2",
33             OpSchema::Optional)
34         .Output(
35             0,
36             "y",
37             "N-D quantized output tensor. It has same shape as input 'x'.",
38             "T2")
39         .TypeConstraint(
40             "T1",
41             {"tensor(float)", "tensor(int32)"},
42             "Constrain 'x' to float or int32 tensor.")
43         .TypeConstraint(
44             "T2",
45             {"tensor(int8)", "tensor(uint8)"},
46             "Constrain 'y_zero_point' and 'y' to 8-bit integer tensor.")
47         .SetDoc(QuantizeLinear_ver10_doc)
48         .TypeAndShapeInferenceFunction(
__anond9af5f780102(ONNX_NAMESPACE::InferenceContext& ctx) 49             [](ONNX_NAMESPACE::InferenceContext& ctx) {
50               propagateElemTypeFromInputToOutput(ctx, 2, 0);
51 
52               if (!hasInputShape(ctx, 0))
53                 return;
54 
55               auto& input_shape = getInputShape(ctx, 0);
56               updateOutputShape(ctx, 0, input_shape);
57         }));
58 
// Markdown doc attached to the opset-10 DequantizeLinear schema below via
// SetDoc(). The string content is part of the published operator
// specification; do not reword it without introducing a new opset version.
static const char* DequantizeLinear_ver10_doc = R"DOC(
The linear dequantization operator. It consumes a quantized tensor, a scale, a zero point to compute the full precision tensor.
The dequantization formula is y = (x - x_zero_point) * x_scale. 'x_scale' and 'x_zero_point' must have same shape.
'x_zero_point' and 'x' must have same type. 'x' and 'y' must have same shape. In the case of dequantizing int32,
there's no zero point (zero point is supposed to be 0).
)DOC";
65 
66 ONNX_OPERATOR_SET_SCHEMA(
67     DequantizeLinear,
68     10,
69     OpSchema()
70         .Input(0, "x", "N-D quantized input tensor to be de-quantized.", "T")
71         .Input(
72             1,
73             "x_scale",
74             "Scale for input 'x'. It's a scalar, which means a per-tensor/layer quantization.",
75             "tensor(float)")
76         .Input(
77             2,
78             "x_zero_point",
79             "Zero point for input 'x'. It's a scalar, which means a per-tensor/layer quantization. "
80             "It's optional. 0 is the default value when it's not specified.",
81             "T",
82             OpSchema::Optional)
83         .Output(
84             0,
85             "y",
86             "N-D full precision output tensor. It has same shape as input 'x'.",
87             "tensor(float)")
88         .TypeConstraint(
89             "T",
90             {"tensor(int8)", "tensor(uint8)", "tensor(int32)"},
91             "Constrain 'x_zero_point' and 'x' to 8-bit/32-bit integer tensor.")
92         .SetDoc(DequantizeLinear_ver10_doc)
93         .TypeAndShapeInferenceFunction(
__anond9af5f780202(ONNX_NAMESPACE::InferenceContext& ctx) 94             [](ONNX_NAMESPACE::InferenceContext& ctx) {
95               auto y_type = ctx.getOutputType(0);
96               // only float is supported
97               y_type->mutable_tensor_type()->set_elem_type(
98                   ONNX_NAMESPACE::TensorProto::FLOAT);
99 
100               if (!hasInputShape(ctx, 0))
101                 return;
102 
103               auto& input_shape = getInputShape(ctx, 0);
104               updateOutputShape(ctx, 0, input_shape);
105             }));
106 
107 } // namespace ONNX_NAMESPACE
108