/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/qnn/op/add.cc
 * \brief QNN add operator.
 */
#include <tvm/relay/analysis.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/relay/qnn/attrs.h>
#include "../../pass/pattern_util.h"
#include "../util.h"
#include "op_common.h"

namespace tvm {
namespace relay {
namespace qnn {
/*
 * \brief Canonicalizes the QNN add op.
 * \param attrs The QNN add attrs.
 * \param new_args The new mutated args to the call node.
 * \param arg_types The types of input and output.
 * \return The sequence of Relay ops for add op.
 */
Expr QnnAddCanonicalize(const Attrs& attrs, const Array<Expr>& new_args,
                        const Array<tvm::relay::Type>& arg_types) {
  // Get the attrs.
  CHECK_EQ(new_args.size(), 2);
  auto& lhs = new_args[0];
  auto& rhs = new_args[1];
  const auto* binary_op_attrs = attrs.as<QnnBinaryOpAttrs>();
  CHECK(binary_op_attrs != nullptr);
  auto lhs_scale = binary_op_attrs->lhs_scale;
  auto lhs_zero_point = binary_op_attrs->lhs_zero_point;
  auto rhs_scale = binary_op_attrs->rhs_scale;
  auto rhs_zero_point = binary_op_attrs->rhs_zero_point;
  auto output_scale = binary_op_attrs->output_scale;
  auto output_zero_point = binary_op_attrs->output_zero_point;

  // Get the input dtype and shape.
  CHECK_EQ(arg_types.size(), 3);
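  // arg_types holds the two input types followed by the output type.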
  auto tensor_type = arg_types[0].as<TensorTypeNode>();
  auto input_dtype = tensor_type->dtype;
  auto input_shape = tensor_type->shape;

  // FIXME (anijain2305) - The lowering can be further optimized. Instead of inserting requantize at
  // the start, we can insert requantize at the end if both input tensors have the same qnn params.
  // In that case, we can first add the tensors, subtract the zero point, and requantize at the end.
  // This can be done in the future.

  // Since the input qnn params can be different from the output qnn params, we first requantize
  // the input tensors to the output qnn params. Then we call relay.add on the requantized inputs.
  // This addition results in an extra addition of the output zero point. We further subtract the
  // zero point. The whole process can be represented using the following equations
  //
  //          scale_c * (Q_c - zp_c) = scale_a * (Q_a - zp_a) + scale_b * (Q_b - zp_b)
  //
  // After requantizing Q_a and Q_b, the equation becomes
  //          scale_c * (Q_c - zp_c) = scale_c * (Q_a' - zp_c) + scale_c * (Q_b' - zp_c)
  //          scale_c * (Q_c - zp_c) = scale_c * (Q_a' + Q_b' - zp_c - zp_c)
  //
  // Comparing the LHS and RHS, this results in
  //          Q_c = Q_a' + Q_b' - zp_c
  // The add op is done in int32 precision.
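  //
  // As an illustrative sanity check (values not from the original source):
  // take scale_c = 0.25 and zp_c = 2, with requantized inputs Q_a' = 10 and
  // Q_b' = 14. The real values are 0.25 * (10 - 2) = 2.0 and
  // 0.25 * (14 - 2) = 3.0, so the real sum is 5.0. The formula gives
  // Q_c = 10 + 14 - 2 = 22, and indeed 0.25 * (22 - 2) = 5.0.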

  // Requantize LHS if necessary.
  auto requantized_lhs = lhs;
  if (lhs_scale != output_scale || lhs_zero_point != output_zero_point) {
    requantized_lhs = Requantize(lhs, input_shape, lhs_scale, lhs_zero_point, output_scale,
                                 output_zero_point, Int(32));
  } else {
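    // Qnn params already match the output; just widen to int32 so the
    // addition below has headroom.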
    requantized_lhs = Cast(requantized_lhs, Int(32));
  }

  // Requantize RHS if necessary.
  auto requantized_rhs = rhs;
  if (rhs_scale != output_scale || rhs_zero_point != output_zero_point) {
    requantized_rhs = Requantize(rhs, input_shape, rhs_scale, rhs_zero_point, output_scale,
                                 output_zero_point, Int(32));
  } else {
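    // As with the LHS, just widen to int32.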
    requantized_rhs = Cast(requantized_rhs, Int(32));
  }

  auto output = Add(requantized_lhs, requantized_rhs);

  // Subtract zero point.
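  // Per the derivation above, Q_c = Q_a' + Q_b' - zp_c.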
  if (output_zero_point != 0) {
    auto output_zp = MakeConstantScalar(Int(32), output_zero_point);
    output = Subtract(output, output_zp);
  }

  // Go back to lower precision.
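  // Clamp to the representable range of the input dtype (e.g. [0, 255] for
  // uint8) before the narrowing cast.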
  auto q_min = GetQmin(input_dtype);
  auto q_max = GetQmax(input_dtype);
  output = Clip(output, q_min, q_max);
  return Cast(output, input_dtype);
}

// QNN Addition operator.
QNN_REGISTER_BINARY_OP("add")
.describe("Elementwise add with broadcasting for quantized tensors.")
.set_support_level(11)
.set_attr<FTVMLegalize>("FTVMQnnCanonicalize", QnnAddCanonicalize);

}  // namespace qnn
}  // namespace relay
}  // namespace tvm