/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include "./av1_rtcd.h"
#include "aom_dsp/inv_txfm.h"
#include "av1/common/enums.h"
#include "av1/common/av1_txfm.h"
#include "av1/common/av1_inv_txfm1d.h"
#include "av1/common/av1_inv_txfm1d_cfg.h"

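// Map a 1-D transform type to the function that computes its inverse.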
static INLINE TxfmFunc inv_txfm_type_to_func(TXFM_TYPE txfm_type) {
  switch (txfm_type) {
    case TXFM_TYPE_DCT4: return av1_idct4_new;
    case TXFM_TYPE_DCT8: return av1_idct8_new;
    case TXFM_TYPE_DCT16: return av1_idct16_new;
    case TXFM_TYPE_DCT32: return av1_idct32_new;
#if CONFIG_TX64X64
    case TXFM_TYPE_DCT64: return av1_idct64_new;
#endif  // CONFIG_TX64X64
    case TXFM_TYPE_ADST4: return av1_iadst4_new;
    case TXFM_TYPE_ADST8: return av1_iadst8_new;
    case TXFM_TYPE_ADST16: return av1_iadst16_new;
    case TXFM_TYPE_ADST32: return av1_iadst32_new;
#if CONFIG_EXT_TX
    case TXFM_TYPE_IDENTITY4: return av1_iidentity4_c;
    case TXFM_TYPE_IDENTITY8: return av1_iidentity8_c;
    case TXFM_TYPE_IDENTITY16: return av1_iidentity16_c;
    case TXFM_TYPE_IDENTITY32: return av1_iidentity32_c;
#if CONFIG_TX64X64
    case TXFM_TYPE_IDENTITY64: return av1_iidentity64_c;
#endif  // CONFIG_TX64X64
#endif  // CONFIG_EXT_TX
    default: assert(0); return NULL;
  }
}

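// 1-D column (vertical) configurations, indexed by [1-D tx type][tx size].
// NULL marks combinations with no 1-D config (e.g. there is no 64-point
// ADST). FLIPADST reuses the ADST configs; the flip itself is applied via
// the flip flags in TXFM_2D_FLIP_CFG.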
static const TXFM_1D_CFG *inv_txfm_col_cfg_ls[TX_TYPES_1D][TX_SIZES] = {
  // DCT
  {
#if CONFIG_CHROMA_2X2
      NULL,
#endif
      &inv_txfm_1d_col_cfg_dct_4, &inv_txfm_1d_col_cfg_dct_8,
      &inv_txfm_1d_col_cfg_dct_16, &inv_txfm_1d_col_cfg_dct_32,
#if CONFIG_TX64X64
      &inv_txfm_1d_col_cfg_dct_64
#endif  // CONFIG_TX64X64
  },
  // ADST
  {
#if CONFIG_CHROMA_2X2
      NULL,
#endif
      &inv_txfm_1d_col_cfg_adst_4, &inv_txfm_1d_col_cfg_adst_8,
      &inv_txfm_1d_col_cfg_adst_16, &inv_txfm_1d_col_cfg_adst_32,
#if CONFIG_TX64X64
      NULL
#endif  // CONFIG_TX64X64
  },
#if CONFIG_EXT_TX
  // FLIPADST
  {
#if CONFIG_CHROMA_2X2
      NULL,
#endif
      &inv_txfm_1d_col_cfg_adst_4, &inv_txfm_1d_col_cfg_adst_8,
      &inv_txfm_1d_col_cfg_adst_16, &inv_txfm_1d_col_cfg_adst_32,
#if CONFIG_TX64X64
      NULL
#endif  // CONFIG_TX64X64
  },
  // IDENTITY
  {
#if CONFIG_CHROMA_2X2
      NULL,
#endif
      &inv_txfm_1d_cfg_identity_4, &inv_txfm_1d_cfg_identity_8,
      &inv_txfm_1d_cfg_identity_16, &inv_txfm_1d_cfg_identity_32,
#if CONFIG_TX64X64
      &inv_txfm_1d_cfg_identity_64
#endif  // CONFIG_TX64X64
  },
#endif  // CONFIG_EXT_TX
};

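// 1-D row (horizontal) configurations, indexed the same way as the column
// table above.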
static const TXFM_1D_CFG *inv_txfm_row_cfg_ls[TX_TYPES_1D][TX_SIZES] = {
  // DCT
  {
#if CONFIG_CHROMA_2X2
      NULL,
#endif
      &inv_txfm_1d_row_cfg_dct_4, &inv_txfm_1d_row_cfg_dct_8,
      &inv_txfm_1d_row_cfg_dct_16, &inv_txfm_1d_row_cfg_dct_32,
#if CONFIG_TX64X64
      &inv_txfm_1d_row_cfg_dct_64,
#endif  // CONFIG_TX64X64
  },
  // ADST
  {
#if CONFIG_CHROMA_2X2
      NULL,
#endif
      &inv_txfm_1d_row_cfg_adst_4, &inv_txfm_1d_row_cfg_adst_8,
      &inv_txfm_1d_row_cfg_adst_16, &inv_txfm_1d_row_cfg_adst_32,
#if CONFIG_TX64X64
      NULL
#endif  // CONFIG_TX64X64
  },
#if CONFIG_EXT_TX
  // FLIPADST
  {
#if CONFIG_CHROMA_2X2
      NULL,
#endif
      &inv_txfm_1d_row_cfg_adst_4, &inv_txfm_1d_row_cfg_adst_8,
      &inv_txfm_1d_row_cfg_adst_16, &inv_txfm_1d_row_cfg_adst_32,
#if CONFIG_TX64X64
      NULL
#endif  // CONFIG_TX64X64
  },
  // IDENTITY
  {
#if CONFIG_CHROMA_2X2
      NULL,
#endif
      &inv_txfm_1d_cfg_identity_4, &inv_txfm_1d_cfg_identity_8,
      &inv_txfm_1d_cfg_identity_16, &inv_txfm_1d_cfg_identity_32,
#if CONFIG_TX64X64
      &inv_txfm_1d_cfg_identity_64
#endif  // CONFIG_TX64X64
  },
#endif  // CONFIG_EXT_TX
};

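// Assemble the 2-D inverse transform configuration: the per-dimension 1-D
// configs plus the left/right and up/down flip flags implied by tx_type.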
TXFM_2D_FLIP_CFG av1_get_inv_txfm_cfg(TX_TYPE tx_type, TX_SIZE tx_size) {
  TXFM_2D_FLIP_CFG cfg;
  set_flip_cfg(tx_type, &cfg);
  const TX_TYPE_1D tx_type_col = vtx_tab[tx_type];
  const TX_TYPE_1D tx_type_row = htx_tab[tx_type];
  const TX_SIZE tx_size_col = txsize_vert_map[tx_size];
  const TX_SIZE tx_size_row = txsize_horz_map[tx_size];
  cfg.col_cfg = inv_txfm_col_cfg_ls[tx_type_col][tx_size_col];
  cfg.row_cfg = inv_txfm_row_cfg_ls[tx_type_row][tx_size_row];
  return cfg;
}

#if CONFIG_TX64X64
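// Only DCT_DCT is supported at 64-point dimensions, so these configurations
// are built explicitly rather than through the lookup tables above.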
TXFM_2D_FLIP_CFG av1_get_inv_txfm_64x64_cfg(TX_TYPE tx_type) {
  TXFM_2D_FLIP_CFG cfg = { 0, 0, NULL, NULL };
  switch (tx_type) {
    case DCT_DCT:
      cfg.col_cfg = &inv_txfm_1d_col_cfg_dct_64;
      cfg.row_cfg = &inv_txfm_1d_row_cfg_dct_64;
      set_flip_cfg(tx_type, &cfg);
      break;
    default: assert(0);
  }
  return cfg;
}

TXFM_2D_FLIP_CFG av1_get_inv_txfm_32x64_cfg(int tx_type) {
  TXFM_2D_FLIP_CFG cfg = { 0, 0, NULL, NULL };
  switch (tx_type) {
    case DCT_DCT:
      cfg.col_cfg = &inv_txfm_1d_col_cfg_dct_64;
      cfg.row_cfg = &inv_txfm_1d_row_cfg_dct_32;
      set_flip_cfg(tx_type, &cfg);
      break;
    default: assert(0);
  }
  return cfg;
}

TXFM_2D_FLIP_CFG av1_get_inv_txfm_64x32_cfg(int tx_type) {
  TXFM_2D_FLIP_CFG cfg = { 0, 0, NULL, NULL };
  switch (tx_type) {
    case DCT_DCT:
      cfg.col_cfg = &inv_txfm_1d_col_cfg_dct_32;
      cfg.row_cfg = &inv_txfm_1d_row_cfg_dct_64;
      set_flip_cfg(tx_type, &cfg);
      break;
    default: assert(0);
  }
  return cfg;
}
#endif  // CONFIG_TX64X64

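// Compute the per-stage bit ranges for the row and column 1-D transforms,
// accounting for the forward-transform shift and the bit depth.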
void av1_gen_inv_stage_range(int8_t *stage_range_col, int8_t *stage_range_row,
                             const TXFM_2D_FLIP_CFG *cfg, int8_t fwd_shift,
                             int bd) {
  // Note when assigning txfm_size_col, we use the txfm_size from the
  // row configuration and vice versa. This is intentional: when the
  // transform is rectangular, the number of columns equals the txfm_size
  // stored in the row cfg struct. It makes no difference for square
  // transforms.
  const int txfm_size_col = cfg->row_cfg->txfm_size;
  const int txfm_size_row = cfg->col_cfg->txfm_size;
  // Take the shift from the larger dimension in the rectangular case.
  const int8_t *shift = (txfm_size_col > txfm_size_row) ? cfg->row_cfg->shift
                                                        : cfg->col_cfg->shift;
  // The i < MAX_TXFM_STAGE_NUM bound silences an array-bounds warning.
  for (int i = 0; i < cfg->row_cfg->stage_num && i < MAX_TXFM_STAGE_NUM; ++i) {
    stage_range_row[i] = cfg->row_cfg->stage_range[i] + fwd_shift + bd + 1;
  }
  // The i < MAX_TXFM_STAGE_NUM bound silences an array-bounds warning.
  for (int i = 0; i < cfg->col_cfg->stage_num && i < MAX_TXFM_STAGE_NUM; ++i) {
    stage_range_col[i] =
        cfg->col_cfg->stage_range[i] + fwd_shift + shift[0] + bd + 1;
  }
}

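// Core 2-D inverse transform: apply the row transform to every row of the
// input, then the column transform to every column of the intermediate
// buffer, and add the clipped result into the output frame.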
static INLINE void inv_txfm2d_add_c(const int32_t *input, uint16_t *output,
                                    int stride, TXFM_2D_FLIP_CFG *cfg,
                                    int32_t *txfm_buf, int8_t fwd_shift,
                                    int bd) {
  // Note when assigning txfm_size_col, we use the txfm_size from the
  // row configuration and vice versa. This is intentional: when the
  // transform is rectangular, the number of columns equals the txfm_size
  // stored in the row cfg struct. It makes no difference for square
  // transforms.
  const int txfm_size_col = cfg->row_cfg->txfm_size;
  const int txfm_size_row = cfg->col_cfg->txfm_size;
  // Take the shift from the larger dimension in the rectangular case.
  const int8_t *shift = (txfm_size_col > txfm_size_row) ? cfg->row_cfg->shift
                                                        : cfg->col_cfg->shift;
  int8_t stage_range_row[MAX_TXFM_STAGE_NUM];
  int8_t stage_range_col[MAX_TXFM_STAGE_NUM];
  assert(cfg->row_cfg->stage_num <= MAX_TXFM_STAGE_NUM);
  assert(cfg->col_cfg->stage_num <= MAX_TXFM_STAGE_NUM);
  av1_gen_inv_stage_range(stage_range_col, stage_range_row, cfg, fwd_shift, bd);

  const int8_t *cos_bit_col = cfg->col_cfg->cos_bit;
  const int8_t *cos_bit_row = cfg->row_cfg->cos_bit;
  const TxfmFunc txfm_func_col = inv_txfm_type_to_func(cfg->col_cfg->txfm_type);
  const TxfmFunc txfm_func_row = inv_txfm_type_to_func(cfg->row_cfg->txfm_type);

  // txfm_buf's length is txfm_size_row * txfm_size_col + 2 * txfm_size_row;
  // it is used for intermediate data buffering.
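  // Layout: [temp_in: txfm_size_row][temp_out: txfm_size_row]
  //         [buf: txfm_size_row * txfm_size_col]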
  int32_t *temp_in = txfm_buf;
  int32_t *temp_out = temp_in + txfm_size_row;
  int32_t *buf = temp_out + txfm_size_row;
  int32_t *buf_ptr = buf;
  int c, r;

  // Rows
  for (r = 0; r < txfm_size_row; ++r) {
    txfm_func_row(input, buf_ptr, cos_bit_row, stage_range_row);
    round_shift_array(buf_ptr, txfm_size_col, -shift[0]);
    // Multiply everything by Sqrt2 if the transform is rectangular
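    // (for the 2:1 shapes handled here, the 2-D gain picks up an odd power
    // of sqrt(2); the extra Sqrt2 keeps the overall scaling a power of two).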
    if (txfm_size_row != txfm_size_col) {
      for (c = 0; c < txfm_size_col; ++c)
        buf_ptr[c] = (int32_t)dct_const_round_shift(buf_ptr[c] * Sqrt2);
    }
    input += txfm_size_col;
    buf_ptr += txfm_size_col;
  }

  // Columns
  for (c = 0; c < txfm_size_col; ++c) {
    if (cfg->lr_flip == 0) {
      for (r = 0; r < txfm_size_row; ++r)
        temp_in[r] = buf[r * txfm_size_col + c];
    } else {
      // flip left right
      for (r = 0; r < txfm_size_row; ++r)
        temp_in[r] = buf[r * txfm_size_col + (txfm_size_col - c - 1)];
    }
    txfm_func_col(temp_in, temp_out, cos_bit_col, stage_range_col);
    round_shift_array(temp_out, txfm_size_row, -shift[1]);
    if (cfg->ud_flip == 0) {
      for (r = 0; r < txfm_size_row; ++r) {
        output[r * stride + c] =
            highbd_clip_pixel_add(output[r * stride + c], temp_out[r], bd);
      }
    } else {
      // flip upside down
      for (r = 0; r < txfm_size_row; ++r) {
        output[r * stride + c] = highbd_clip_pixel_add(
            output[r * stride + c], temp_out[txfm_size_row - r - 1], bd);
      }
    }
  }
}

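// Thin wrapper: look up the 2-D config for (tx_type, tx_size) and run the
// core transform. The forward shift is taken from the matching square size.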
static INLINE void inv_txfm2d_add_facade(const int32_t *input, uint16_t *output,
                                         int stride, int32_t *txfm_buf,
                                         TX_TYPE tx_type, TX_SIZE tx_size,
                                         int bd) {
  TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, tx_size);
  TX_SIZE tx_size_sqr = txsize_sqr_map[tx_size];
  inv_txfm2d_add_c(input, output, stride, &cfg, txfm_buf,
                   fwd_shift_sum[tx_size_sqr], bd);
}

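// Each per-size wrapper below sizes txfm_buf as width * height + 2 * height,
// matching the buffer layout expected by inv_txfm2d_add_c.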
void av1_inv_txfm2d_add_4x8_c(const int32_t *input, uint16_t *output,
                              int stride, TX_TYPE tx_type, int bd) {
  int txfm_buf[4 * 8 + 8 + 8];
  inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_4X8, bd);
}

void av1_inv_txfm2d_add_8x4_c(const int32_t *input, uint16_t *output,
                              int stride, TX_TYPE tx_type, int bd) {
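  // With CONFIG_TXMG, here and in the wide wrappers below, the transform is
  // computed by transposing the input and destination, running the rotated
  // (tall) transform, and transposing the result back.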
#if CONFIG_TXMG
  int txfm_buf[8 * 4 + 8 + 8];
  int32_t rinput[8 * 4];
  uint16_t routput[8 * 4];
  TX_SIZE tx_size = TX_8X4;
  TX_SIZE rtx_size = av1_rotate_tx_size(tx_size);
  TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
  int w = tx_size_wide[tx_size];
  int h = tx_size_high[tx_size];
  int rw = h;
  int rh = w;
  transpose_int32(rinput, rw, input, w, w, h);
  transpose_uint16(routput, rw, output, stride, w, h);
  inv_txfm2d_add_facade(rinput, routput, rw, txfm_buf, rtx_type, rtx_size, bd);
  transpose_uint16(output, stride, routput, rw, rw, rh);
#else
  int txfm_buf[8 * 4 + 4 + 4];
  inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_8X4, bd);
#endif
}

void av1_inv_txfm2d_add_8x16_c(const int32_t *input, uint16_t *output,
                               int stride, TX_TYPE tx_type, int bd) {
  int txfm_buf[8 * 16 + 16 + 16];
  inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_8X16, bd);
}

void av1_inv_txfm2d_add_16x8_c(const int32_t *input, uint16_t *output,
                               int stride, TX_TYPE tx_type, int bd) {
#if CONFIG_TXMG
  int txfm_buf[16 * 8 + 16 + 16];
  int32_t rinput[16 * 8];
  uint16_t routput[16 * 8];
  TX_SIZE tx_size = TX_16X8;
  TX_SIZE rtx_size = av1_rotate_tx_size(tx_size);
  TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
  int w = tx_size_wide[tx_size];
  int h = tx_size_high[tx_size];
  int rw = h;
  int rh = w;
  transpose_int32(rinput, rw, input, w, w, h);
  transpose_uint16(routput, rw, output, stride, w, h);
  inv_txfm2d_add_facade(rinput, routput, rw, txfm_buf, rtx_type, rtx_size, bd);
  transpose_uint16(output, stride, routput, rw, rw, rh);
#else
  int txfm_buf[16 * 8 + 8 + 8];
  inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_16X8, bd);
#endif
}

void av1_inv_txfm2d_add_16x32_c(const int32_t *input, uint16_t *output,
                                int stride, TX_TYPE tx_type, int bd) {
  int txfm_buf[16 * 32 + 32 + 32];
  inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_16X32, bd);
}

void av1_inv_txfm2d_add_32x16_c(const int32_t *input, uint16_t *output,
                                int stride, TX_TYPE tx_type, int bd) {
#if CONFIG_TXMG
  int txfm_buf[32 * 16 + 32 + 32];
  int32_t rinput[32 * 16];
  uint16_t routput[32 * 16];
  TX_SIZE tx_size = TX_32X16;
  TX_SIZE rtx_size = av1_rotate_tx_size(tx_size);
  TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
  int w = tx_size_wide[tx_size];
  int h = tx_size_high[tx_size];
  int rw = h;
  int rh = w;
  transpose_int32(rinput, rw, input, w, w, h);
  transpose_uint16(routput, rw, output, stride, w, h);
  inv_txfm2d_add_facade(rinput, routput, rw, txfm_buf, rtx_type, rtx_size, bd);
  transpose_uint16(output, stride, routput, rw, rw, rh);
#else
  int txfm_buf[32 * 16 + 16 + 16];
  inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_32X16, bd);
#endif
}

void av1_inv_txfm2d_add_4x4_c(const int32_t *input, uint16_t *output,
                              int stride, TX_TYPE tx_type, int bd) {
  int txfm_buf[4 * 4 + 4 + 4];
  inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_4X4, bd);
}

void av1_inv_txfm2d_add_8x8_c(const int32_t *input, uint16_t *output,
                              int stride, TX_TYPE tx_type, int bd) {
  int txfm_buf[8 * 8 + 8 + 8];
  inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_8X8, bd);
}

void av1_inv_txfm2d_add_16x16_c(const int32_t *input, uint16_t *output,
                                int stride, TX_TYPE tx_type, int bd) {
  int txfm_buf[16 * 16 + 16 + 16];
  inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_16X16, bd);
}

void av1_inv_txfm2d_add_32x32_c(const int32_t *input, uint16_t *output,
                                int stride, TX_TYPE tx_type, int bd) {
  int txfm_buf[32 * 32 + 32 + 32];
  inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_32X32, bd);
}

#if CONFIG_TX64X64
void av1_inv_txfm2d_add_64x64_c(const int32_t *input, uint16_t *output,
                                int stride, TX_TYPE tx_type, int bd) {
  int txfm_buf[64 * 64 + 64 + 64];
  inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_64X64, bd);
}

void av1_inv_txfm2d_add_64x32_c(const int32_t *input, uint16_t *output,
                                int stride, TX_TYPE tx_type, int bd) {
#if CONFIG_TXMG
  int txfm_buf[64 * 32 + 64 + 64];
  int32_t rinput[64 * 32];
  uint16_t routput[64 * 32];
  TX_SIZE tx_size = TX_64X32;
  TX_SIZE rtx_size = av1_rotate_tx_size(tx_size);
  TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
  int w = tx_size_wide[tx_size];
  int h = tx_size_high[tx_size];
  int rw = h;
  int rh = w;
  transpose_int32(rinput, rw, input, w, w, h);
  transpose_uint16(routput, rw, output, stride, w, h);
  inv_txfm2d_add_facade(rinput, routput, rw, txfm_buf, rtx_type, rtx_size, bd);
  transpose_uint16(output, stride, routput, rw, rw, rh);
#else
  int txfm_buf[64 * 32 + 64 + 64];
  inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_64X32, bd);
#endif
}

void av1_inv_txfm2d_add_32x64_c(const int32_t *input, uint16_t *output,
                                int stride, TX_TYPE tx_type, int bd) {
  int txfm_buf[64 * 32 + 64 + 64];
  inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_32X64, bd);
}
#endif  // CONFIG_TX64X64