/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file src/relay/qnn/op/convolution.cc
 * \brief Property def of qnn convolution operator.
 */
#include <tvm/data_layout.h>
#include <tvm/relay/analysis.h>
#include <tvm/relay/base.h>
#include <tvm/relay/op.h>
#include <tvm/relay/qnn/attrs.h>
#include <tvm/relay/transform.h>
#include "../../op/nn/convolution.h"
#include "../../pass/pattern_util.h"
#include "../util.h"

namespace tvm {
namespace relay {
namespace qnn {

// relay.op.qnn.conv2d
TVM_REGISTER_NODE_TYPE(QnnConv2DAttrs);

bool QnnConv2DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                  const TypeReporter& reporter) {
  CHECK_EQ(types.size(), 3);
  const auto* data = types[0].as<TensorTypeNode>();
  const auto* weight = types[1].as<TensorTypeNode>();
  if (data == nullptr || weight == nullptr) return false;
  const auto* param = attrs.as<QnnConv2DAttrs>();
  CHECK(param != nullptr) << "QnnConv2DAttrs cannot be nullptr.";
  CHECK(data->dtype == Int(8) || data->dtype == UInt(8))
      << "Expected qnn conv2d type(int8, uint8) for input but was " << data->dtype;
  CHECK(weight->dtype == Int(8) || weight->dtype == UInt(8))
      << "Expected qnn conv2d type(int8, uint8) for weight but was " << weight->dtype;
  CHECK(param->out_dtype == Int(16) || param->out_dtype == Int(32))
      << "Expected qnn conv2d type(int16, int32) for output but was " << param->out_dtype;
  CHECK(param->out_dtype.bits() > 0) << "Output dtype bits should be greater than 0.";
  return Conv2DRel<QnnConv2DAttrs>(types, num_inputs, attrs, reporter);
}

bool is_depthwise(const QnnConv2DAttrs* param) {
  return param->channels.defined() && tvm::ir::Equal(param->channels, param->groups) &&
         param->groups != 1;
}
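
// A hedged usage sketch (attribute values assumed, for illustration only):
//   channels = 32, groups = 32  -> is_depthwise(param) is true (e.g., a
//                                  MobileNet-style depthwise layer)
//   groups   = 1                -> false (an ordinary dense conv2d)
//   channels = 64, groups = 2   -> false (a generic grouped conv2d)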

// Workload - batch_size, in_channels, out_channels, kernel_h, kernel_w, channel_multiplier
using WorkloadType = std::tuple<int, int, int, int, int, int>;

/*
 * \brief Get the conv parameters like batch_size, kernel_height etc.
 * \param arg_types The types of the call arguments.
 * \param param The qnn conv2d attributes.
 * \return A tuple of workload.
 */
WorkloadType GetWorkload(const Array<tvm::relay::Type>& arg_types, const QnnConv2DAttrs* param) {
  // Get conv parameters.
  const auto in_shape = get_shape(arg_types[0]);
  int batch_size, in_channels;
  if (param->data_layout == "NCHW") {
    batch_size = get_const_int(in_shape[0]);
    in_channels = get_const_int(in_shape[1]);
  } else if (param->data_layout == "NHWC") {
    batch_size = get_const_int(in_shape[0]);
    in_channels = get_const_int(in_shape[3]);
  } else {
    LOG(FATAL) << "qnn.conv2d does not support " << param->data_layout << " layout";
  }

  const auto kernel_shape = get_shape(arg_types[1]);
  int out_channels, kernel_h, kernel_w;
  int channel_multiplier = -1;
  bool depthwise = is_depthwise(param);
  if (param->kernel_layout == "OIHW") {
    out_channels = get_const_int(kernel_shape[0]);
    kernel_h = get_const_int(kernel_shape[2]);
    kernel_w = get_const_int(kernel_shape[3]);
    if (depthwise) {
      channel_multiplier = get_const_int(kernel_shape[1]);
    }
  } else if (param->kernel_layout == "HWIO") {
    kernel_h = get_const_int(kernel_shape[0]);
    kernel_w = get_const_int(kernel_shape[1]);
    out_channels = get_const_int(kernel_shape[3]);
    if (depthwise) {
      channel_multiplier = get_const_int(kernel_shape[2]);
    }
  } else if (param->kernel_layout == "HWOI") {
    kernel_h = get_const_int(kernel_shape[0]);
    kernel_w = get_const_int(kernel_shape[1]);
    out_channels = get_const_int(kernel_shape[2]);
    if (depthwise) {
      channel_multiplier = get_const_int(kernel_shape[3]);
    }
  } else {
    LOG(FATAL) << "qnn.conv2d does not support " << param->kernel_layout << " layout";
  }

  return std::make_tuple(batch_size, in_channels, out_channels, kernel_h, kernel_w,
                         channel_multiplier);
}
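
// Worked example (shapes assumed for illustration): with data_layout = "NCHW",
// an input of shape (1, 32, 56, 56), kernel_layout = "OIHW" and a kernel of
// shape (64, 32, 3, 3), this returns
//   (batch_size=1, in_channels=32, out_channels=64, kernel_h=3, kernel_w=3,
//    channel_multiplier=-1)  // -1 because this conv is not depthwise.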

/*
 * \brief Fallback to simpler lowering for dilation or grouped conv.
 * \param data The input expr.
 * \param weight The weight expr.
 * \param param The qnn conv2d attributes.
 * \return The fallback lowered sequence of Relay expr.
 * \note In case of dilation, normal lowering would require a dilated pool.
 *       Since we don't have a dilated pool, we fall back to a simpler sequence
 *       of Relay operations. This can degrade performance because the
 *       convolution is called on int16 tensors instead of int8 tensors.
 */
Expr Conv2DFallBack(const Expr& data, const Expr& weight, const QnnConv2DAttrs* param) {
  // Upcast the zero point to Int16.
  auto zp_data = MakeConstantScalar(Int(16), param->input_zero_point);
  auto zp_kernel = MakeConstantScalar(Int(16), param->kernel_zero_point);

  auto shifted_data = Cast(data, Int(16));
  if (param->input_zero_point != 0) {
    shifted_data = Subtract(Cast(data, Int(16)), zp_data);
  }

  auto shifted_kernel = Cast(weight, Int(16));
  if (param->kernel_zero_point != 0) {
    shifted_kernel = Subtract(Cast(weight, Int(16)), zp_kernel);
  }

  return Conv2D(shifted_data, shifted_kernel, param->strides, param->padding, param->dilation,
                param->groups, param->channels, param->kernel_size, param->data_layout,
                param->kernel_layout, param->out_layout, param->out_dtype);
}
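
// A sketch of the emitted Relay sequence (zero points assumed non-zero):
//   %d = subtract(cast(%data,   int16), zp_data)
//   %w = subtract(cast(%weight, int16), zp_kernel)
//   conv2d(%d, %w, ...)  // an ordinary conv2d on the shifted int16 tensors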

/*
 * \brief Pad the input data.
 * \param data The input expr.
 * \return The padded input expr.
 * \note For quantized convolution, the input has to be padded with the zero
 *       point instead of zero. This might lead to performance degradation as
 *       pad cannot be fused with conv in Relay. In case we see performance
 *       degradation, we can change the conv2d API to accept a pad_const value.
 */
Expr Conv2DPadInput(const Expr& data, const QnnConv2DAttrs* param) {
  // 1) Pad the input data
  auto padded_data = data;
  auto pad_h_value = get_const_int(param->padding[0]);
  auto pad_w_value = get_const_int(param->padding[1]);
  if (pad_h_value != 0 || pad_w_value != 0) {
    Array<IndexExpr> pad_n({0, 0});
    Array<IndexExpr> pad_c({0, 0});
    Array<IndexExpr> pad_h({param->padding[0], param->padding[0]});
    Array<IndexExpr> pad_w({param->padding[1], param->padding[1]});

    Array<Array<IndexExpr>> pad_width;
    if (param->data_layout == "NCHW") {
      pad_width = {pad_n, pad_c, pad_h, pad_w};
    } else if (param->data_layout == "NHWC") {
      pad_width = {pad_n, pad_h, pad_w, pad_c};
    } else {
      LOG(FATAL) << "qnn.conv2d does not support " << param->data_layout << " layout";
    }
    padded_data = Pad(data, pad_width, param->input_zero_point, "constant");
  }
  return padded_data;
}
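
// Illustrative example (values assumed): for NCHW data and padding = (1, 1),
// pad_width becomes {{0, 0}, {0, 0}, {1, 1}, {1, 1}} and the border is filled
// with input_zero_point rather than 0, since a quantized value equal to the
// zero point dequantizes to real-valued zero.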

/*
 * \brief Calculates the second term in the qnn.conv2d depthwise lowering sequence.
 * \param padded_data The padded data expr.
 * \param param The qnn conv2d attributes.
 * \param kernel_h The height of kernel.
 * \param kernel_w The width of kernel.
 * \param channel_multiplier The channel/depth multiplier.
 * \return The sequence of Relay operators for term2.
 * \note The term2 looks like this
 *
 *       Sigma(r, s) zp_w * Qa(n, oc/cm, oh + r, ow + s)
 *
 *       Second term is not directly representable by one Relay operator.
 *       However, deeper analysis shows that we can reduce r,s using avg_pool2d,
 *       followed by a repeat along the C axis cm times.
 */
Expr DepthwiseConv2DSecondTerm(const Expr& padded_data, const QnnConv2DAttrs* param, int kernel_h,
                               int kernel_w, int channel_multiplier) {
  // Constant Expr for the kernel zero point.
  auto zp_kernel = MakeConstantScalar(Int(32), param->kernel_zero_point);

  auto casted_t2 = Cast(padded_data, Int(32));

  // We can reduce the H and W axis by using avg_pool2d. However, avg_pool2d averages, and its
  // division is integer (floor) division. So we first multiply the data by the pool_size and
  // then perform avg_pool2d; reversing the order loses precision to the floor division. If the
  // pool_size is 1x1, we don't need avg_pool2d.
  auto reduced_t2 = casted_t2;
  if (kernel_h * kernel_w != 1) {
    auto scaled_hw_t2 = Multiply(casted_t2, MakeConstantScalar(Int(32), kernel_h * kernel_w));
    Array<IndexExpr> padding({0, 0});
    reduced_t2 =
        AvgPool2D(scaled_hw_t2, param->kernel_size, param->strides, padding, param->data_layout,
                  false,   // ceil_mode
                  false);  // count_include_pad
  }

  auto multiplied_t2 = reduced_t2;
  if (param->kernel_zero_point != 1) {
    multiplied_t2 = Multiply(zp_kernel, reduced_t2);
  }

  // Find the C axis so that the tensor can be repeated along it.
  int axis_t2 = 0;
  if (param->data_layout == "NCHW") {
    axis_t2 = 1;
  } else if (param->data_layout == "NHWC") {
    axis_t2 = 3;
  } else {
    LOG(FATAL) << "qnn.conv2d does not support " << param->data_layout << " layout";
  }
  auto repeated_t2 = multiplied_t2;
  if (channel_multiplier != 1) {
    repeated_t2 = MakeRepeat(multiplied_t2, channel_multiplier, axis_t2);
  }
  return repeated_t2;
}
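
// Worked example (window values assumed): for a 3x3 kernel the pool_size is 9.
// If one pooling window sums to 44, avg_pool2d alone gives floor(44 / 9) = 4,
// and multiplying back by 9 yields 36, not 44. Scaling the data by 9 first
// gives floor((44 * 9) / 9) = 44, i.e. the exact window sum.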

/*
 * \brief Calculates the third term in the qnn.conv2d depthwise lowering sequence.
 * \param weight The weight expr.
 * \param param The qnn conv2d attributes.
 * \param out_channels The number of output channels.
 * \param channel_multiplier The channel/depth multiplier.
 * \return The sequence of Relay operators for term3.
 * \note The term3 looks like this
 *
 *       Sigma(r, s) zp_a * Qw(oc/cm, oc%cm, r, s)
 *
 *       This can be achieved by calling reduce on r and s axis. The tensor can be then reshaped to
 *       (1, oc, 1, 1) as (oc/cm, oc%cm) are just contiguous memory locations.
 */
Expr DepthwiseConv2DThirdTerm(const Expr& weight, const QnnConv2DAttrs* param, int out_channels,
                              int channel_multiplier) {
  // Constant expr for input zero point.
  auto zp_data = MakeConstantScalar(Int(32), param->input_zero_point);

  // Find which dimensions are R, S.
  Array<Integer> axes_t3;
  if (param->kernel_layout == "OIHW") {
    // For OIHW kernel layout, HW are reduce axis
    axes_t3 = {2, 3};
  } else if (param->kernel_layout == "HWIO") {
    axes_t3 = {0, 1};
  } else if (param->kernel_layout == "HWOI") {
    axes_t3 = {0, 1};
  } else {
    LOG(FATAL) << "qnn.conv2d does not support " << param->kernel_layout << " layout";
  }
  auto reduced_t3 = Sum(Cast(weight, Int(32)), axes_t3, false, false);

  // Find the newshape depending on NCHW/NHWC layout.
  Array<Integer> newshape;
  if (param->data_layout == "NCHW") {
    newshape = {1, out_channels * channel_multiplier, 1, 1};
  } else if (param->data_layout == "NHWC") {
    newshape = {1, 1, 1, out_channels * channel_multiplier};
  } else {
    LOG(FATAL) << "qnn.conv2d does not support " << param->data_layout << " layout";
  }
  auto reshaped_t3 = Reshape(reduced_t3, newshape);

  if (param->input_zero_point == 1) {
    return reshaped_t3;
  }
  return Multiply(zp_data, reshaped_t3);
}
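
// Illustrative example (shapes assumed): for kernel_layout = "OIHW" and a
// depthwise kernel of shape (8, 2, 3, 3), i.e. out_channels = 8 and
// channel_multiplier = 2, summing over axes {2, 3} leaves an (8, 2) tensor.
// Reshaping it to (1, 16, 1, 1) for NCHW is valid because the (oc, cm) pair
// is laid out contiguously and 8 * 2 = 16 is the effective channel count.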

/*
 * \brief Calculates the fourth term in the qnn.conv2d depthwise lowering sequence.
 * \param param The qnn conv2d attributes.
 * \param kernel_h The height of kernel.
 * \param kernel_w The width of kernel.
 * \return The sequence of Relay operators for term4.
 * \note The term4 looks like this
 *
 *       Sigma(r, s) zp_a * zp_w
 */
Expr DepthwiseConv2DFourthTerm(const QnnConv2DAttrs* param, int kernel_h, int kernel_w) {
  int scalar_term4 = param->input_zero_point * param->kernel_zero_point * kernel_h * kernel_w;
  return MakeConstantScalar(Int(32), scalar_term4);
}
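
// Arithmetic example (zero points assumed): with zp_a = 2, zp_w = 3 and a 3x3
// kernel, term4 = 2 * 3 * 3 * 3 = 54, emitted as a single int32 constant that
// the constant-folding pass can absorb.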

/*
 * \brief Calculates the first term in the qnn.conv2d lowering sequence.
 * \param padded_data The padded input expr.
 * \param weight The weight expr.
 * \param param The qnn conv2d attributes.
 * \return The sequence of Relay operators for term1.
 * \note The term1 is
 *       Sigma(c,r,s) QW(k, c, r, s) * QA(n, c, h + r, w + s)
 *       This is just conv2d on int tensors.
 */
Expr Conv2DFirstTerm(const Expr& padded_data, const Expr& weight, const QnnConv2DAttrs* param) {
  // Lowering for Term 1
  Array<IndexExpr> padding({0, 0});
  return Conv2D(padded_data, weight, param->strides, padding, param->dilation, param->groups,
                param->channels, param->kernel_size, param->data_layout, param->kernel_layout,
                param->out_layout, param->out_dtype);
}

/*
 * \brief Calculates the second term in the qnn.conv2d lowering sequence.
 * \param padded_data The padded data expr.
 * \param param The qnn conv2d attributes.
 * \param kernel_h The height of kernel.
 * \param kernel_w The width of kernel.
 * \return The sequence of Relay operators for term2.
 * \note The term2 looks like this
 *
 *       Sigma(c,r,s) zp_w * QA(n, c, h + r, w + s)
 *
 *       Second term is not directly representable by one Relay operator.
 *       However, deeper analysis shows that we can reduce r,s using avg_pool2d,
 *       followed by a reduce on the C axis. Using avg_pool2d also gives an
 *       opportunity to reuse alter_op_layout infrastructure.
 */
Expr Conv2DSecondTerm(const Expr& padded_data, const QnnConv2DAttrs* param, int kernel_h,
                      int kernel_w, int out_channels) {
  // Constant Expr for the kernel zero point.
  auto zp_kernel = MakeConstantScalar(Int(32), param->kernel_zero_point);

  auto casted_t2 = Cast(padded_data, Int(32));

  // We can reduce the H and W axis by using avg_pool2d. However, avg_pool2d averages, and its
  // division is integer (floor) division. So we first multiply the data by the pool_size and
  // then perform avg_pool2d; reversing the order loses precision to the floor division.
  Array<IndexExpr> padding({0, 0});

  // Reduce the C dimension. Find the dimension.
  Array<Integer> axes_t2;
  if (param->data_layout == "NCHW") {
    axes_t2 = {1};
  } else if (param->data_layout == "NHWC") {
    axes_t2 = {3};
  } else {
    LOG(FATAL) << "qnn.conv2d does not support " << param->data_layout << " layout";
  }
  // Keep dims true to retain 4D tensor
  auto reduced_c_t2 = Sum(casted_t2, axes_t2, true, false);

  // If the pool_size is 1x1, we don't need avg_pool2d.
  auto reduced_t2 = reduced_c_t2;
  if (kernel_h * kernel_w != 1) {
    reduced_c_t2 = Multiply(reduced_c_t2, MakeConstantScalar(Int(32), kernel_h * kernel_w));
    reduced_t2 =
        AvgPool2D(reduced_c_t2, param->kernel_size, param->strides, padding, param->data_layout,
                  false,   // ceil_mode
                  false);  // count_include_pad
  }

  auto multiplied_t2 = reduced_t2;
  if (param->kernel_zero_point != 1) {
    multiplied_t2 = Multiply(zp_kernel, reduced_t2);
  }
  return multiplied_t2;
}
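
// Worked example (shapes assumed): for NCHW input (1, 16, 34, 34) and a 3x3
// kernel, the Sum over axis {1} (keepdims) gives (1, 1, 34, 34); scaling by
// kernel_h * kernel_w = 9 and then applying avg_pool2d recovers the exact
// per-window sums, the same floor-division trick as in the depthwise variant.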

/*
 * \brief Calculates the third term in the qnn.conv2d lowering sequence.
 * \param weight The weight expr.
 * \param param The qnn conv2d attributes.
 * \param out_channels The number of output channels.
 * \return The sequence of Relay operators for term3.
 * \note The term3 looks like this
 *
 *       Sigma(c,r,s) zp_a * QW(k, c, r, s)
 *
 *       This can be achieved by calling reduce on c, r and s axis, resulting in
 *       a 1D tensor. The tensor is then reshaped to conform to NHWC/NCHW
 *       format.
 */
Expr Conv2DThirdTerm(const Expr& weight, const QnnConv2DAttrs* param, int out_channels) {
  // Constant expr for input zero point.
  auto zp_data = MakeConstantScalar(Int(32), param->input_zero_point);

  // Find which dimensions are C, R, S.
  Array<Integer> axes_t3;
  if (param->kernel_layout == "OIHW") {
    // For OIHW kernel layout, IHW are reduce axis
    axes_t3 = {1, 2, 3};
  } else if (param->kernel_layout == "HWIO") {
    axes_t3 = {0, 1, 2};
  } else if (param->kernel_layout == "HWOI") {
    axes_t3 = {0, 1, 3};
  } else {
    LOG(FATAL) << "qnn.conv2d does not support " << param->kernel_layout << " layout";
  }
  auto reduced_t3 = Sum(Cast(weight, Int(32)), axes_t3, false, false);

  // Find the newshape depending on NCHW/NHWC layout.
  Array<Integer> newshape;
  if (param->data_layout == "NCHW") {
    newshape = {1, out_channels, 1, 1};
  } else if (param->data_layout == "NHWC") {
    newshape = {1, 1, 1, out_channels};
  } else {
    LOG(FATAL) << "qnn.conv2d does not support " << param->data_layout << " layout";
  }
  auto reshaped_t3 = Reshape(reduced_t3, newshape);

  if (param->input_zero_point == 1) {
    return reshaped_t3;
  }
  return Multiply(zp_data, reshaped_t3);
}
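
// Illustrative example (shapes assumed): an OIHW kernel of shape (64, 32, 3, 3)
// summed over axes {1, 2, 3} yields a 1D tensor of length 64, which is
// reshaped to (1, 64, 1, 1) for NCHW so that it broadcasts against term1.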

/*
 * \brief Calculates the fourth term in the qnn.conv2d lowering sequence.
 * \param param The qnn conv2d attributes.
 * \param in_channels The number of input channels.
 * \param kernel_h The height of kernel.
 * \param kernel_w The width of kernel.
 * \return The sequence of Relay operators for term4.
 * \note The term4 looks like this
 *
 *       Sigma(c,r,s) zp_a * zp_w
 *
 */
Expr Conv2DFourthTerm(const QnnConv2DAttrs* param, int in_channels, int kernel_h, int kernel_w) {
  int scalar_term4 =
      param->input_zero_point * param->kernel_zero_point * in_channels * kernel_h * kernel_w;
  return MakeConstantScalar(Int(32), scalar_term4);
}
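
// Arithmetic example (values assumed): with zp_a = 1, zp_w = 2, in_channels =
// 32 and a 3x3 kernel, term4 = 1 * 2 * 32 * 3 * 3 = 576, a compile-time
// constant.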

/*
 * \brief Combines different terms of qnn conv2d lowering.
 * \param term1 The term1 of qnn conv2d lowering.
 * \param term2 The term2 of qnn conv2d lowering.
 * \param term3 The term3 of qnn conv2d lowering.
 * \param term4 The term4 of qnn conv2d lowering.
 * \param param The qnn conv2d attributes.
 * \return The combined sequence of relay operations.
 * \note The combined operation looks like this
 *
 *       Sigma(c,r,s) QW(k, c, r, s) * QA(n, c, h + r, w + s)  // Term1
 *     - Sigma(c,r,s) zp_w * QA(n, c, h + r, w + s)            // Term2
 *     - Sigma(c,r,s) zp_a * QW(k, c, r, s)                    // Term3
 *     + Sigma(c,r,s) zp_a * zp_w                              // Term4
 *
 */
Expr Conv2DCombineTerms(const Expr& term1, const Expr& term2, const Expr& term3, const Expr& term4,
                        const QnnConv2DAttrs* param) {
  if (param->input_zero_point == 0 && param->kernel_zero_point == 0) {
    // term 2, 3 and 4 become zero.
    return term1;
  } else if (param->input_zero_point == 0 && param->kernel_zero_point != 0) {
    // term 3 and term 4 become zero.
    return Subtract(term1, term2);
  } else if (param->input_zero_point != 0 && param->kernel_zero_point == 0) {
    // term 2 and term 4 become zero.
    return Subtract(term1, term3);
  } else {
    auto data_term = Subtract(term1, term2);
    // Putting constant terms together, so that constant folding can fold it.
    auto const_term = Subtract(term4, term3);
    return Add(data_term, const_term);
  }
}
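
// Example (zero points assumed): with zp_a = 0 and zp_w != 0, term3 and term4
// vanish and the lowered graph is simply term1 - term2. In the fully
// asymmetric case the result is (term1 - term2) + (term4 - term3), grouping
// the compile-time constants so that constant folding collapses them into a
// single tensor.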

/*
 * \brief Forward rewrite the qnn conv2d op.
 * \param attrs The QNN conv2d attrs.
 * \param new_args The new mutated args to the call node.
 * \param arg_types The types of input and output.
 * \return The sequence of Relay ops for qnn conv2d op.
 * \note Lowering of the qnn.conv2d operator
 *       A quantized tensor is represented in following manner
 *          A = scale_a x (QA - zp_A)
 *       where QA is quantized tensor, scale_a and zp_A are quantization
 *       params.
 *
 *       Quantized convolution convolves two quantized tensors and returns a
 *       quantized tensor of default dtype of int32, with a scale equal to the
 *       product of scales of input tensors, and a zero point of zero.
 *
 *       For symmetric quantization, the zp_* for all tensors is 0. So, the
 *       lowering of qnn.conv2d is
 *
 *          QA(n, ic, oh + r, ow + s) (conv) QW(oc, ic, r, s)
 *
 *       For asymmetric computation, we can perform similar unrolling. We can
 *       find more details at
 *       https://discuss.tvm.ai/t/tf-lite-quantized-conv2d-operator-conversion/2651/8?u=janimesh
 *       The computation gets unrolled into following 4 terms
 *
 *            Sigma(c,r,s) QW(k, c, r, s) * QA(n, c, h + r, w + s)  // Term1
 *          - Sigma(c,r,s) zp_w * QA(n, c, h + r, w + s)            // Term2
 *          - Sigma(c,r,s) zp_a * QW(k, c, r, s)                    // Term3
 *          + Sigma(c,r,s) zp_a * zp_w                              // Term4
 *
 *       Term3 and Term4 can be computed at compile time.
 *
 *       Key points to notice:
 *         1) Padding is done explicitly because the input has to be padded with
 *         the zero point. This might leave some performance opportunity on the
 *         table. It can be avoided by modifying the conv2d API to accept a
 *         pad_const value.
 *         2) Second term is not directly representable by one Relay operator.
 *         However, deeper analysis shows that we can reduce r,s using
 *         avg_pool2d, followed by a reduce on the C axis. Using avg_pool2d also
 *         gives an opportunity to reuse alter_op_layout infrastructure.
 *         3) For dilated conv, the current lowering would need a dilated pool.
 *         As a workaround, we fall back to the simpler int32 conv lowering if
 *         the conv is dilated. We also fall back for grouped conv.
 *
 *       For depthwise, we can similarly unroll the computation. The initial
 *       compute is as follows, where cm = channel_multiplier
 *
 *       Qc(n, oc, oh, ow) = Sigma(r, s) (Qw(oc/cm, oc%cm, r, s) - zp_w)
 *                                     * (Qa(n, oc/cm, oh + r, ow + s) - zp_a)
 *
 *       This can be written as
 *
 *            Sigma(r, s) Qw(oc/cm, oc%cm, r, s) * Qa(n, oc/cm, oh + r, ow + s)
 *          - Sigma(r, s) zp_w * Qa(n, oc/cm, oh + r, ow + s)
 *          - Sigma(r, s) zp_a * Qw(oc/cm, oc%cm, r, s)
 *          + Sigma(r, s) zp_a * zp_w
 *
 *       The whole process can be broken down into following steps
 *       * Assertion checks for existing support, fallback if necessary
 *       * Pad the input.
 *       * Get Term1.
 *       * Get Term2.
 *       * Get Term3.
 *       * Get Term4.
 *       * Combine the terms.
 */
Expr QnnConv2DCanonicalize(const Attrs& attrs, const Array<Expr>& new_args,
                           const Array<tvm::relay::Type>& arg_types) {
  CHECK_EQ(new_args.size(), 2);
  Expr data = new_args[0];
  Expr weight = new_args[1];
  const auto* param = attrs.as<QnnConv2DAttrs>();
  CHECK(param != nullptr);
  // Assertion checks for existing support.
  CHECK_EQ(param->padding.size(), 2) << "qnn.conv2d only supports 2D padding";
  CHECK(param->data_layout == "NCHW" || param->data_layout == "NHWC")
      << "qnn.conv2d supports only NCHW/NHWC input data layout.";
  CHECK(param->kernel_layout == "OIHW" || param->kernel_layout == "HWIO" ||
        param->kernel_layout == "HWOI")
      << "qnn.conv2d supports only OIHW/HWIO/HWOI kernel data layout.";

  int batch_size, in_channels, out_channels, kernel_h, kernel_w, channel_multiplier;
  std::tie(batch_size, in_channels, out_channels, kernel_h, kernel_w, channel_multiplier) =
      GetWorkload(arg_types, param);

  // Fallback to int32 conv if there is dilation or non-depthwise grouped conv2d.
  CHECK_EQ(param->dilation.size(), 2) << "qnn.conv2d only supports 2D dilation";
  auto dilation_h = get_const_int(param->dilation[0]);
  auto dilation_w = get_const_int(param->dilation[1]);
  if (dilation_h != 1 || dilation_w != 1 || (param->groups != 1 && !is_depthwise(param))) {
    return Conv2DFallBack(data, weight, param);
  } else if (is_depthwise(param)) {
    CHECK_NE(channel_multiplier, -1);
    auto padded_data = Conv2DPadInput(data, param);
    auto term1 = Conv2DFirstTerm(padded_data, weight, param);
    auto term2 =
        DepthwiseConv2DSecondTerm(padded_data, param, kernel_h, kernel_w, channel_multiplier);
    auto term3 = DepthwiseConv2DThirdTerm(weight, param, out_channels, channel_multiplier);
    auto term4 = DepthwiseConv2DFourthTerm(param, kernel_h, kernel_w);
    return Conv2DCombineTerms(term1, term2, term3, term4, param);
  }

  auto padded_data = Conv2DPadInput(data, param);
  auto term1 = Conv2DFirstTerm(padded_data, weight, param);
  auto term2 = Conv2DSecondTerm(padded_data, param, kernel_h, kernel_w, out_channels);
  auto term3 = Conv2DThirdTerm(weight, param, out_channels);
  auto term4 = Conv2DFourthTerm(param, in_channels, kernel_h, kernel_w);
  return Conv2DCombineTerms(term1, term2, term3, term4, param);
}
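
// End-to-end sketch (attribute values assumed, not from a real model): a uint8
// NCHW qnn.conv2d with zp_a = 128, zp_w = 127, a 3x3 kernel, groups = 1 and no
// dilation takes the final branch above and canonicalizes to roughly
//   conv2d(pad(data, zp_a), weight)              // term1
//   - zp_w * avg_pool2d-based sums of the data   // term2
//   - zp_a * per-output-channel kernel sums      // term3
//   + zp_a * zp_w * in_channels * 9              // term4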

// Positional relay function to create quantized conv2d operator
// used by frontend FFI.
Expr MakeQnnConv2D(Expr data, Expr weight, int32_t input_zero_point, int32_t kernel_zero_point,
                   double input_scale, double kernel_scale, Array<IndexExpr> strides,
                   Array<IndexExpr> padding, Array<IndexExpr> dilation,
                   int groups, IndexExpr channels, Array<IndexExpr> kernel_size,
                   std::string data_layout, std::string kernel_layout, std::string out_layout,
                   DataType out_dtype) {
  auto attrs = make_node<QnnConv2DAttrs>();
  attrs->strides = std::move(strides);
  attrs->padding = std::move(padding);
  attrs->dilation = std::move(dilation);
  attrs->groups = groups;
  attrs->channels = std::move(channels);
  attrs->kernel_size = std::move(kernel_size);
  attrs->data_layout = std::move(data_layout);
  attrs->kernel_layout = std::move(kernel_layout);
  attrs->out_layout = std::move(out_layout);
  attrs->out_dtype = std::move(out_dtype);
  attrs->input_zero_point = input_zero_point;
  attrs->kernel_zero_point = kernel_zero_point;
  attrs->input_scale = input_scale;
  attrs->kernel_scale = kernel_scale;
  static const Op& op = Op::Get("qnn.conv2d");
  return CallNode::make(op, {data, weight}, Attrs(attrs), {});
}

RELAY_REGISTER_OP("qnn.conv2d")
.describe(R"code(2D quantized convolution layer.
This operator convolves quantized weight with quantized data. The scale of the
output quantized tensor is the product of the weight_scale and input_scale of
the input quantized tensors. The zero point of the output quantized tensor is
0. By default, the dtype of output is int32. Please also refer to Requantize
operator to understand how to scale back the int32 output to (u)int8.
- **data**: This depends on the `layout` parameter. Input is 4D array of shape
            (batch_size, in_channels, height, width) if `layout` is `NCHW`.
- **weight**: (channels, in_channels, kernel_size[0], kernel_size[1])
- **out**:  This depends on the `layout` parameter. Output is 4D array of shape
            (batch_size, channels, out_height, out_width) if `layout` is `NCHW`.
)code" TVM_ADD_FILELINE)
.set_attrs_type<QnnConv2DAttrs>()
.set_num_inputs(2)
.add_argument("data", "Tensor", "The quantized input data tensor.")
.add_argument("weight", "Tensor", "The quantized weight tensor.")
.set_support_level(11)
.add_type_rel("QnnConv2D", QnnConv2DRel)
.set_attr<FTVMLegalize>("FTVMQnnCanonicalize", QnnConv2DCanonicalize);

TVM_REGISTER_API("relay.qnn.op._make.conv2d").set_body_typed(MakeQnnConv2D);

}  // namespace qnn
}  // namespace relay
}  // namespace tvm