/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AV1_COMMON_BLOCKD_H_
#define AV1_COMMON_BLOCKD_H_

#include "./aom_config.h"

#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/mem.h"
#include "aom_scale/yv12config.h"

#include "av1/common/common_data.h"
#include "av1/common/quant_common.h"
#include "av1/common/entropy.h"
#include "av1/common/entropymode.h"
#include "av1/common/mv.h"
#include "av1/common/scale.h"
#include "av1/common/seg_common.h"
#include "av1/common/tile_common.h"
#if CONFIG_PVQ
#include "av1/common/pvq.h"
#include "av1/common/pvq_state.h"
#include "av1/decoder/decint.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif

#if (CONFIG_CHROMA_SUB8X8 || CONFIG_CHROMA_2X2)
#define SUB8X8_COMP_REF 0
#else
#define SUB8X8_COMP_REF 1
#endif

#define MAX_MB_PLANE 3

#if CONFIG_COMPOUND_SEGMENT
// Set COMPOUND_SEGMENT_TYPE to one of the two:
// 0: Uniform
// 1: Difference weighted
#define COMPOUND_SEGMENT_TYPE 1
#define MAX_SEG_MASK_BITS 1

// SEG_MASK_TYPES should not surpass 1 << MAX_SEG_MASK_BITS
typedef enum {
#if COMPOUND_SEGMENT_TYPE == 0
  UNIFORM_45 = 0,
  UNIFORM_45_INV,
#elif COMPOUND_SEGMENT_TYPE == 1
  DIFFWTD_38 = 0,
  DIFFWTD_38_INV,
#endif  // COMPOUND_SEGMENT_TYPE
  SEG_MASK_TYPES,
} SEG_MASK_TYPE;
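// For example, with MAX_SEG_MASK_BITS == 1 the mask type is coded with a
// single bit, so at most two mask types (here DIFFWTD_38 and DIFFWTD_38_INV)
// can be signaled.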

#endif  // CONFIG_COMPOUND_SEGMENT

typedef enum {
  KEY_FRAME = 0,
  INTER_FRAME = 1,
#if CONFIG_OBU
  INTRA_ONLY_FRAME = 2,  // replaces intra-only
  S_FRAME = 3,
#endif
  FRAME_TYPES,
} FRAME_TYPE;

static INLINE int is_comp_ref_allowed(BLOCK_SIZE bsize) {
  (void)bsize;
#if SUB8X8_COMP_REF
  return 1;
#else
  return AOMMIN(block_size_wide[bsize], block_size_high[bsize]) >= 8;
#endif  // SUB8X8_COMP_REF
}

static INLINE int is_inter_mode(PREDICTION_MODE mode) {
  return mode >= NEARESTMV && mode <= NEW_NEWMV;
}

#if CONFIG_PVQ
typedef struct PVQ_INFO {
  int theta[PVQ_MAX_PARTITIONS];
  int qg[PVQ_MAX_PARTITIONS];
  int k[PVQ_MAX_PARTITIONS];
  od_coeff y[OD_TXSIZE_MAX * OD_TXSIZE_MAX];
  int nb_bands;
  int off[PVQ_MAX_PARTITIONS];
  int size[PVQ_MAX_PARTITIONS];
  int skip_rest;
  int skip_dir;
  int bs;  // log of the block size minus two,
           // i.e. equivalent to aom's TX_SIZE
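           // For example, bs is 0 for a 4x4 transform (log2(4) - 2), 1 for
           // 8x8, 2 for 16x16 and 3 for 32x32.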
  // Block skip info, indicating whether DC/AC is coded.
  PVQ_SKIP_TYPE ac_dc_coded;  // bit0: DC coded, bit1: AC coded (1 means coded)
  tran_low_t dq_dc_residue;
} PVQ_INFO;

typedef struct PVQ_QUEUE {
  PVQ_INFO *buf;  // buffer for pvq info, stored in encoding order
  int curr_pos;   // current position to write PVQ_INFO
  int buf_len;    // allocated buffer length
  int last_pos;   // last written position of PVQ_INFO in a tile
} PVQ_QUEUE;
#endif

#if CONFIG_NCOBMC_ADAPT_WEIGHT
typedef struct superblock_mi_boundaries {
  int mi_row_begin;
  int mi_col_begin;
  int mi_row_end;
  int mi_col_end;
} SB_MI_BD;

typedef struct { int16_t KERNEL[4][MAX_SB_SIZE][MAX_SB_SIZE]; } NCOBMC_KERNELS;
#endif

typedef struct {
  uint8_t *plane[MAX_MB_PLANE];
  int stride[MAX_MB_PLANE];
} BUFFER_SET;

static INLINE int is_inter_singleref_mode(PREDICTION_MODE mode) {
  return mode >= NEARESTMV && mode <= NEWMV;
}
static INLINE int is_inter_compound_mode(PREDICTION_MODE mode) {
  return mode >= NEAREST_NEARESTMV && mode <= NEW_NEWMV;
}
#if CONFIG_COMPOUND_SINGLEREF
static INLINE int is_inter_singleref_comp_mode(PREDICTION_MODE mode) {
  return mode >= SR_NEAREST_NEARMV && mode <= SR_NEW_NEWMV;
}
static INLINE int is_inter_anyref_comp_mode(PREDICTION_MODE mode) {
  return is_inter_compound_mode(mode) || is_inter_singleref_comp_mode(mode);
}
#endif  // CONFIG_COMPOUND_SINGLEREF

static INLINE PREDICTION_MODE compound_ref0_mode(PREDICTION_MODE mode) {
  static PREDICTION_MODE lut[] = {
    MB_MODE_COUNT,  // DC_PRED
    MB_MODE_COUNT,  // V_PRED
    MB_MODE_COUNT,  // H_PRED
    MB_MODE_COUNT,  // D45_PRED
    MB_MODE_COUNT,  // D135_PRED
    MB_MODE_COUNT,  // D117_PRED
    MB_MODE_COUNT,  // D153_PRED
    MB_MODE_COUNT,  // D207_PRED
    MB_MODE_COUNT,  // D63_PRED
    MB_MODE_COUNT,  // SMOOTH_PRED
#if CONFIG_SMOOTH_HV
    MB_MODE_COUNT,  // SMOOTH_V_PRED
    MB_MODE_COUNT,  // SMOOTH_H_PRED
#endif              // CONFIG_SMOOTH_HV
    MB_MODE_COUNT,  // TM_PRED
    MB_MODE_COUNT,  // NEARESTMV
    MB_MODE_COUNT,  // NEARMV
    MB_MODE_COUNT,  // ZEROMV
    MB_MODE_COUNT,  // NEWMV
#if CONFIG_COMPOUND_SINGLEREF
    NEARESTMV,  // SR_NEAREST_NEARMV
    // NEARESTMV,  // SR_NEAREST_NEWMV
    NEARMV,     // SR_NEAR_NEWMV
    ZEROMV,     // SR_ZERO_NEWMV
    NEWMV,      // SR_NEW_NEWMV
#endif          // CONFIG_COMPOUND_SINGLEREF
    NEARESTMV,  // NEAREST_NEARESTMV
    NEARMV,     // NEAR_NEARMV
    NEARESTMV,  // NEAREST_NEWMV
    NEWMV,      // NEW_NEARESTMV
    NEARMV,     // NEAR_NEWMV
    NEWMV,      // NEW_NEARMV
    ZEROMV,     // ZERO_ZEROMV
    NEWMV,      // NEW_NEWMV
  };
  assert(NELEMENTS(lut) == MB_MODE_COUNT);
#if CONFIG_COMPOUND_SINGLEREF
  assert(is_inter_anyref_comp_mode(mode));
#else   // !CONFIG_COMPOUND_SINGLEREF
  assert(is_inter_compound_mode(mode));
#endif  // CONFIG_COMPOUND_SINGLEREF
  return lut[mode];
}

static INLINE PREDICTION_MODE compound_ref1_mode(PREDICTION_MODE mode) {
  static PREDICTION_MODE lut[] = {
    MB_MODE_COUNT,  // DC_PRED
    MB_MODE_COUNT,  // V_PRED
    MB_MODE_COUNT,  // H_PRED
    MB_MODE_COUNT,  // D45_PRED
    MB_MODE_COUNT,  // D135_PRED
    MB_MODE_COUNT,  // D117_PRED
    MB_MODE_COUNT,  // D153_PRED
    MB_MODE_COUNT,  // D207_PRED
    MB_MODE_COUNT,  // D63_PRED
    MB_MODE_COUNT,  // SMOOTH_PRED
#if CONFIG_SMOOTH_HV
    MB_MODE_COUNT,  // SMOOTH_V_PRED
    MB_MODE_COUNT,  // SMOOTH_H_PRED
#endif              // CONFIG_SMOOTH_HV
    MB_MODE_COUNT,  // TM_PRED
    MB_MODE_COUNT,  // NEARESTMV
    MB_MODE_COUNT,  // NEARMV
    MB_MODE_COUNT,  // ZEROMV
    MB_MODE_COUNT,  // NEWMV
#if CONFIG_COMPOUND_SINGLEREF
    NEARMV,  // SR_NEAREST_NEARMV
    // NEWMV,      // SR_NEAREST_NEWMV
    NEWMV,      // SR_NEAR_NEWMV
    NEWMV,      // SR_ZERO_NEWMV
    NEWMV,      // SR_NEW_NEWMV
#endif          // CONFIG_COMPOUND_SINGLEREF
    NEARESTMV,  // NEAREST_NEARESTMV
    NEARMV,     // NEAR_NEARMV
    NEWMV,      // NEAREST_NEWMV
    NEARESTMV,  // NEW_NEARESTMV
    NEWMV,      // NEAR_NEWMV
    NEARMV,     // NEW_NEARMV
    ZEROMV,     // ZERO_ZEROMV
    NEWMV,      // NEW_NEWMV
  };
  assert(NELEMENTS(lut) == MB_MODE_COUNT);
#if CONFIG_COMPOUND_SINGLEREF
  assert(is_inter_anyref_comp_mode(mode));
#else   // !CONFIG_COMPOUND_SINGLEREF
  assert(is_inter_compound_mode(mode));
#endif  // CONFIG_COMPOUND_SINGLEREF
  return lut[mode];
}

static INLINE int have_nearmv_in_inter_mode(PREDICTION_MODE mode) {
  return (mode == NEARMV || mode == NEAR_NEARMV || mode == NEAR_NEWMV ||
#if CONFIG_COMPOUND_SINGLEREF
          mode == SR_NEAREST_NEARMV || mode == SR_NEAR_NEWMV ||
#endif  // CONFIG_COMPOUND_SINGLEREF
          mode == NEW_NEARMV);
}

static INLINE int have_newmv_in_inter_mode(PREDICTION_MODE mode) {
  return (mode == NEWMV || mode == NEW_NEWMV || mode == NEAREST_NEWMV ||
#if CONFIG_COMPOUND_SINGLEREF
          /* mode == SR_NEAREST_NEWMV || */ mode == SR_NEAR_NEWMV ||
          mode == SR_ZERO_NEWMV || mode == SR_NEW_NEWMV ||
#endif  // CONFIG_COMPOUND_SINGLEREF
          mode == NEW_NEARESTMV || mode == NEAR_NEWMV || mode == NEW_NEARMV);
}

static INLINE int use_masked_motion_search(COMPOUND_TYPE type) {
#if CONFIG_WEDGE
  return (type == COMPOUND_WEDGE);
#else
  (void)type;
  return 0;
#endif
}

static INLINE int is_masked_compound_type(COMPOUND_TYPE type) {
#if CONFIG_COMPOUND_SEGMENT && CONFIG_WEDGE
  return (type == COMPOUND_WEDGE || type == COMPOUND_SEG);
#elif !CONFIG_COMPOUND_SEGMENT && CONFIG_WEDGE
  return (type == COMPOUND_WEDGE);
#elif CONFIG_COMPOUND_SEGMENT && !CONFIG_WEDGE
  return (type == COMPOUND_SEG);
#endif  // CONFIG_COMPOUND_SEGMENT
  (void)type;
  return 0;
}

/* For keyframes, intra block modes are predicted by the (already decoded)
   modes for the Y blocks to the left and above us; for interframes, there
   is a single probability table. */

typedef struct {
  PREDICTION_MODE as_mode;
  int_mv as_mv[2];  // first, second inter predictor motion vectors
  int_mv pred_mv[2];
  int_mv ref_mv[2];
} b_mode_info;

typedef int8_t MV_REFERENCE_FRAME;

typedef struct {
  // Number of base colors for Y (0) and UV (1)
  uint8_t palette_size[2];
  // Value of base colors for Y, U, and V
  uint16_t palette_colors[3 * PALETTE_MAX_SIZE];
} PALETTE_MODE_INFO;

#if CONFIG_FILTER_INTRA
#define USE_3TAP_INTRA_FILTER 1  // 0: 4-tap; 1: 3-tap
typedef struct {
  // 1: an ext intra mode is used; 0: otherwise.
  uint8_t use_filter_intra_mode[PLANE_TYPES];
  FILTER_INTRA_MODE filter_intra_mode[PLANE_TYPES];
} FILTER_INTRA_MODE_INFO;
#endif  // CONFIG_FILTER_INTRA

#if CONFIG_VAR_TX
#if CONFIG_RD_DEBUG
#define TXB_COEFF_COST_MAP_SIZE (2 * MAX_MIB_SIZE)
#endif
#endif

typedef struct RD_STATS {
  int rate;
  int64_t dist;
  // Be careful when using rdcost; it is not guaranteed to be set all the
  // time.
  // TODO(angiebird): Create a set of functions to manipulate the RD_STATS. In
  // these functions, make sure rdcost is always up-to-date according to
  // rate/dist.
  int64_t rdcost;
  int64_t sse;
  int skip;  // sse should equal dist when skip == 1
  int64_t ref_rdcost;
  int zero_rate;
  uint8_t invalid_rate;
#if CONFIG_RD_DEBUG
  int txb_coeff_cost[MAX_MB_PLANE];
#if CONFIG_VAR_TX
  int txb_coeff_cost_map[MAX_MB_PLANE][TXB_COEFF_COST_MAP_SIZE]
                        [TXB_COEFF_COST_MAP_SIZE];
#endif  // CONFIG_VAR_TX
#endif  // CONFIG_RD_DEBUG
} RD_STATS;

// This struct is used to group function args that are commonly
// sent together in functions related to interinter compound modes
typedef struct {
#if CONFIG_WEDGE
  int wedge_index;
  int wedge_sign;
#endif  // CONFIG_WEDGE
#if CONFIG_COMPOUND_SEGMENT
  SEG_MASK_TYPE mask_type;
  uint8_t *seg_mask;
#endif  // CONFIG_COMPOUND_SEGMENT
  COMPOUND_TYPE interinter_compound_type;
} INTERINTER_COMPOUND_DATA;

// This structure now relates to 8x8 block regions.
typedef struct MB_MODE_INFO {
  // Common for both INTER and INTRA blocks
  BLOCK_SIZE sb_type;
  PREDICTION_MODE mode;
  TX_SIZE tx_size;
#if CONFIG_VAR_TX
  // TODO(jingning): This effectively assigns a separate entry to each
  // 8x8 block, which takes much more space than needed.
  TX_SIZE inter_tx_size[MAX_MIB_SIZE][MAX_MIB_SIZE];
  TX_SIZE min_tx_size;
#endif
  int8_t skip;
  int8_t segment_id;
#if CONFIG_SUPERTX
  // Minimum of all segment IDs under the current supertx block.
  int8_t segment_id_supertx;
#endif                      // CONFIG_SUPERTX
  int8_t seg_id_predicted;  // valid only when temporal_update is enabled

#if CONFIG_MRC_TX
  int valid_mrc_mask;
#endif  // CONFIG_MRC_TX

  // Only for INTRA blocks
  UV_PREDICTION_MODE uv_mode;

  PALETTE_MODE_INFO palette_mode_info;
#if CONFIG_INTRABC
  uint8_t use_intrabc;
#endif  // CONFIG_INTRABC

  // Only for INTER blocks
  InterpFilters interp_filters;
  MV_REFERENCE_FRAME ref_frame[2];
  TX_TYPE tx_type;
#if CONFIG_TXK_SEL
  TX_TYPE txk_type[MAX_SB_SQUARE / (TX_SIZE_W_MIN * TX_SIZE_H_MIN)];
#endif
#if CONFIG_LGT_FROM_PRED
  int use_lgt;
#endif

#if CONFIG_FILTER_INTRA
  FILTER_INTRA_MODE_INFO filter_intra_mode_info;
#endif  // CONFIG_FILTER_INTRA
#if CONFIG_EXT_INTRA
  // The actual prediction angle is the base angle + (angle_delta * step).
  int8_t angle_delta[2];
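  // For example, with ANGLE_STEP == 3, V_PRED (base angle 90) and
  // angle_delta[0] == -1 give a luma prediction angle of 90 - 3 = 87 degrees.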
#if CONFIG_INTRA_INTERP
  // TODO(huisu): this may be replaced by interp_filter
  INTRA_FILTER intra_filter;
#endif  // CONFIG_INTRA_INTERP
#endif  // CONFIG_EXT_INTRA

#if CONFIG_INTERINTRA
  // interintra members
  INTERINTRA_MODE interintra_mode;
#endif
  // TODO(debargha): Consolidate these flags
  int use_wedge_interintra;
  int interintra_wedge_index;
  int interintra_wedge_sign;
  // interinter members
  COMPOUND_TYPE interinter_compound_type;
#if CONFIG_WEDGE
  int wedge_index;
  int wedge_sign;
#endif  // CONFIG_WEDGE
#if CONFIG_COMPOUND_SEGMENT
  SEG_MASK_TYPE mask_type;
#endif  // CONFIG_COMPOUND_SEGMENT
  MOTION_MODE motion_mode;
#if CONFIG_MOTION_VAR
  int overlappable_neighbors[2];
#if CONFIG_NCOBMC_ADAPT_WEIGHT
  // Applies different weighting kernels in ncobmc.
  // In the current implementation, interpolation modes are only defined for
  // square blocks. A rectangular block is divided into two square blocks, and
  // each square block has its own interpolation mode.
  NCOBMC_MODE ncobmc_mode[2];
#endif  // CONFIG_NCOBMC_ADAPT_WEIGHT
#endif  // CONFIG_MOTION_VAR
  int_mv mv[2];
  int_mv pred_mv[2];
  uint8_t ref_mv_idx;
#if CONFIG_EXT_PARTITION_TYPES
  PARTITION_TYPE partition;
#endif
#if CONFIG_NEW_QUANT
  int dq_off_index;
  int send_dq_bit;
#endif  // CONFIG_NEW_QUANT
  /* deringing gain *per-superblock* */
  int8_t cdef_strength;
  int current_q_index;
#if CONFIG_EXT_DELTA_Q
  int current_delta_lf_from_base;
#if CONFIG_LOOPFILTER_LEVEL
  int curr_delta_lf[FRAME_LF_COUNT];
#endif  // CONFIG_LOOPFILTER_LEVEL
#endif
#if CONFIG_RD_DEBUG
  RD_STATS rd_stats;
  int mi_row;
  int mi_col;
#endif
#if CONFIG_WARPED_MOTION
  int num_proj_ref[2];
  WarpedMotionParams wm_params[2];
#endif  // CONFIG_WARPED_MOTION

#if CONFIG_CFL
  // Index of the alpha Cb and alpha Cr combination
  int cfl_alpha_idx;
  // Joint sign of alpha Cb and alpha Cr
  int cfl_alpha_signs;
#endif

  BOUNDARY_TYPE boundary_info;
#if CONFIG_LPF_SB
  uint8_t filt_lvl;
  int reuse_sb_lvl;
  int sign;
  int delta;
#endif
} MB_MODE_INFO;

typedef struct MODE_INFO {
  MB_MODE_INFO mbmi;
  b_mode_info bmi[4];
} MODE_INFO;

#if CONFIG_INTRABC
static INLINE int is_intrabc_block(const MB_MODE_INFO *mbmi) {
  return mbmi->use_intrabc;
}
#endif

static INLINE PREDICTION_MODE get_y_mode(const MODE_INFO *mi, int block) {
#if CONFIG_CB4X4
  (void)block;
  return mi->mbmi.mode;
#else
  return mi->mbmi.sb_type < BLOCK_8X8 ? mi->bmi[block].as_mode : mi->mbmi.mode;
#endif
}

#if CONFIG_CFL
static INLINE PREDICTION_MODE get_uv_mode(UV_PREDICTION_MODE mode) {
  static const PREDICTION_MODE uv2y[UV_INTRA_MODES] = {
    DC_PRED,      // UV_DC_PRED
    V_PRED,       // UV_V_PRED
    H_PRED,       // UV_H_PRED
    D45_PRED,     // UV_D45_PRED
    D135_PRED,    // UV_D135_PRED
    D117_PRED,    // UV_D117_PRED
    D153_PRED,    // UV_D153_PRED
    D207_PRED,    // UV_D207_PRED
    D63_PRED,     // UV_D63_PRED
    SMOOTH_PRED,  // UV_SMOOTH_PRED
#if CONFIG_SMOOTH_HV
    SMOOTH_V_PRED,  // UV_SMOOTH_V_PRED
    SMOOTH_H_PRED,  // UV_SMOOTH_H_PRED
#endif              // CONFIG_SMOOTH_HV
    TM_PRED,        // UV_TM_PRED
    DC_PRED,        // CFL_PRED
  };
  return uv2y[mode];
}
#else
static INLINE PREDICTION_MODE get_uv_mode(PREDICTION_MODE mode) { return mode; }
#endif  // CONFIG_CFL

static INLINE int is_inter_block(const MB_MODE_INFO *mbmi) {
#if CONFIG_INTRABC
  if (is_intrabc_block(mbmi)) return 1;
#endif
  return mbmi->ref_frame[0] > INTRA_FRAME;
}

static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) {
  return mbmi->ref_frame[1] > INTRA_FRAME;
}

#if CONFIG_EXT_COMP_REFS
static INLINE int has_uni_comp_refs(const MB_MODE_INFO *mbmi) {
  return has_second_ref(mbmi) && (!((mbmi->ref_frame[0] >= BWDREF_FRAME) ^
                                    (mbmi->ref_frame[1] >= BWDREF_FRAME)));
}

static INLINE MV_REFERENCE_FRAME comp_ref0(int ref_idx) {
  static const MV_REFERENCE_FRAME lut[] = {
    LAST_FRAME,    // LAST_LAST2_FRAMES,
    LAST_FRAME,    // LAST_LAST3_FRAMES,
    LAST_FRAME,    // LAST_GOLDEN_FRAMES,
    BWDREF_FRAME,  // BWDREF_ALTREF_FRAMES,
  };
  assert(NELEMENTS(lut) == UNIDIR_COMP_REFS);
  return lut[ref_idx];
}

static INLINE MV_REFERENCE_FRAME comp_ref1(int ref_idx) {
  static const MV_REFERENCE_FRAME lut[] = {
    LAST2_FRAME,   // LAST_LAST2_FRAMES,
    LAST3_FRAME,   // LAST_LAST3_FRAMES,
    GOLDEN_FRAME,  // LAST_GOLDEN_FRAMES,
    ALTREF_FRAME,  // BWDREF_ALTREF_FRAMES,
  };
  assert(NELEMENTS(lut) == UNIDIR_COMP_REFS);
  return lut[ref_idx];
}
#endif  // CONFIG_EXT_COMP_REFS

PREDICTION_MODE av1_left_block_mode(const MODE_INFO *cur_mi,
                                    const MODE_INFO *left_mi, int b);

PREDICTION_MODE av1_above_block_mode(const MODE_INFO *cur_mi,
                                     const MODE_INFO *above_mi, int b);

#if CONFIG_GLOBAL_MOTION
static INLINE int is_global_mv_block(const MODE_INFO *mi, int block,
                                     TransformationType type) {
  PREDICTION_MODE mode = get_y_mode(mi, block);
#if GLOBAL_SUB8X8_USED
  const int block_size_allowed = 1;
#else
  const BLOCK_SIZE bsize = mi->mbmi.sb_type;
  const int block_size_allowed =
      AOMMIN(block_size_wide[bsize], block_size_high[bsize]) >= 8;
#endif  // GLOBAL_SUB8X8_USED
  return (mode == ZEROMV || mode == ZERO_ZEROMV) && type > TRANSLATION &&
         block_size_allowed;
}
#endif  // CONFIG_GLOBAL_MOTION

enum mv_precision { MV_PRECISION_Q3, MV_PRECISION_Q4 };

struct buf_2d {
  uint8_t *buf;
  uint8_t *buf0;
  int width;
  int height;
  int stride;
};

typedef struct macroblockd_plane {
  tran_low_t *dqcoeff;
  PLANE_TYPE plane_type;
  int subsampling_x;
  int subsampling_y;
  struct buf_2d dst;
  struct buf_2d pre[2];
  ENTROPY_CONTEXT *above_context;
  ENTROPY_CONTEXT *left_context;
  int16_t seg_dequant[MAX_SEGMENTS][2];
#if CONFIG_NEW_QUANT
  dequant_val_type_nuq seg_dequant_nuq[MAX_SEGMENTS][QUANT_PROFILES]
                                      [COEF_BANDS];
#endif
  uint8_t *color_index_map;

  // number of 4x4s in current block
  uint16_t n4_w, n4_h;
  // log2 of n4_w, n4_h
  uint8_t n4_wl, n4_hl;
  // block size in pixels
  uint8_t width, height;
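  // For example, a 16x8 block on this plane has n4_w = 4, n4_h = 2,
  // n4_wl = 2, n4_hl = 1, width = 16 and height = 8.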

#if CONFIG_AOM_QM
  qm_val_t *seg_iqmatrix[MAX_SEGMENTS][2][TX_SIZES_ALL];
  qm_val_t *seg_qmatrix[MAX_SEGMENTS][2][TX_SIZES_ALL];
#endif
  // encoder
  const int16_t *dequant;
#if CONFIG_NEW_QUANT
  const dequant_val_type_nuq *dequant_val_nuq[QUANT_PROFILES];
#endif  // CONFIG_NEW_QUANT

#if CONFIG_PVQ || CONFIG_DIST_8X8
  DECLARE_ALIGNED(16, int16_t, pred[MAX_SB_SQUARE]);
#endif
#if CONFIG_PVQ
  // PVQ: forward transformed predicted image, a reference for PVQ.
  tran_low_t *pvq_ref_coeff;
#endif
} MACROBLOCKD_PLANE;

#define BLOCK_OFFSET(x, i) \
  ((x) + (i) * (1 << (tx_size_wide_log2[0] + tx_size_high_log2[0])))
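// For example, assuming the smallest transform size (index 0) is 4x4, both
// log2 values are 2 and BLOCK_OFFSET(x, i) == x + 16 * i, i.e. successive
// transform blocks' coefficients start 16 entries apart.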

typedef struct RefBuffer {
  int idx;
  YV12_BUFFER_CONFIG *buf;
  struct scale_factors sf;
#if CONFIG_VAR_REFS
  int is_valid;
#endif  // CONFIG_VAR_REFS
} RefBuffer;

#if CONFIG_ADAPT_SCAN
typedef int16_t EobThresholdMD[TX_TYPES][EOB_THRESHOLD_NUM];
#endif

#if CONFIG_LOOP_RESTORATION
typedef struct {
  DECLARE_ALIGNED(16, InterpKernel, vfilter);
  DECLARE_ALIGNED(16, InterpKernel, hfilter);
} WienerInfo;

typedef struct {
  int ep;
  int xqd[2];
} SgrprojInfo;
#endif  // CONFIG_LOOP_RESTORATION

#if CONFIG_CFL
#if CONFIG_CHROMA_SUB8X8 && CONFIG_DEBUG
#define CFL_SUB8X8_VAL_MI_SIZE (4)
#define CFL_SUB8X8_VAL_MI_SQUARE \
  (CFL_SUB8X8_VAL_MI_SIZE * CFL_SUB8X8_VAL_MI_SIZE)
#endif  // CONFIG_CHROMA_SUB8X8 && CONFIG_DEBUG
typedef struct cfl_ctx {
  // The CfL prediction buffer is used in two steps:
  //   1. Stores Q3 reconstructed luma pixels
  //      (only Q2 is required, but Q3 is used to avoid shifts)
  //   2. Stores Q3 AC contributions (step1 - tx block avg)
  int16_t pred_buf_q3[MAX_SB_SQUARE];
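  // For example, "Q3" is a fixed-point format with 3 fractional bits: a
  // reconstructed luma value of 35 is stored as 35 << 3 = 280.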

  // Height and width currently used in the CfL prediction buffer.
  int buf_height, buf_width;

  // Height and width of the chroma prediction block currently associated with
  // this context
  int uv_height, uv_width;

  int are_parameters_computed;

  // Chroma subsampling
  int subsampling_x, subsampling_y;

  // Block level DC_PRED for each chromatic plane
  int dc_pred[CFL_PRED_PLANES];

  int mi_row, mi_col;

  // Whether the reconstructed luma pixels need to be stored
  int store_y;

#if CONFIG_CB4X4
  int is_chroma_reference;
#if CONFIG_CHROMA_SUB8X8 && CONFIG_DEBUG
  // The prediction used for sub8x8 blocks originates from multiple luma
  // blocks; this array is used to validate that cfl_store() is called only
  // once for each luma block.
  uint8_t sub8x8_val[CFL_SUB8X8_VAL_MI_SQUARE];
#endif  // CONFIG_CHROMA_SUB8X8 && CONFIG_DEBUG
#endif  // CONFIG_CB4X4
} CFL_CTX;
#endif  // CONFIG_CFL

typedef struct macroblockd {
  struct macroblockd_plane plane[MAX_MB_PLANE];
  uint8_t bmode_blocks_wl;
  uint8_t bmode_blocks_hl;

  FRAME_COUNTS *counts;
  TileInfo tile;

  int mi_stride;

  MODE_INFO **mi;
  MODE_INFO *left_mi;
  MODE_INFO *above_mi;
  MB_MODE_INFO *left_mbmi;
  MB_MODE_INFO *above_mbmi;

  int up_available;
  int left_available;
#if CONFIG_CHROMA_SUB8X8
  int chroma_up_available;
  int chroma_left_available;
#endif

  const aom_prob (*partition_probs)[PARTITION_TYPES - 1];

  /* Distance of MB away from frame edges in subpixels (1/8th pixel)  */
  int mb_to_left_edge;
  int mb_to_right_edge;
  int mb_to_top_edge;
  int mb_to_bottom_edge;

  FRAME_CONTEXT *fc;

  /* pointers to reference frames */
  const RefBuffer *block_refs[2];

  /* pointer to current frame */
  const YV12_BUFFER_CONFIG *cur_buf;

#if CONFIG_INTRABC
  /* Scale of the current frame with respect to itself */
  struct scale_factors sf_identity;
#endif

  ENTROPY_CONTEXT *above_context[MAX_MB_PLANE];
  ENTROPY_CONTEXT left_context[MAX_MB_PLANE][2 * MAX_MIB_SIZE];

  PARTITION_CONTEXT *above_seg_context;
  PARTITION_CONTEXT left_seg_context[MAX_MIB_SIZE];

#if CONFIG_VAR_TX
  TXFM_CONTEXT *above_txfm_context;
  TXFM_CONTEXT *left_txfm_context;
  TXFM_CONTEXT left_txfm_context_buffer[2 * MAX_MIB_SIZE];

  TX_SIZE max_tx_size;
#if CONFIG_SUPERTX
  TX_SIZE supertx_size;
#endif
#endif

#if CONFIG_LOOP_RESTORATION
  WienerInfo wiener_info[MAX_MB_PLANE];
  SgrprojInfo sgrproj_info[MAX_MB_PLANE];
#endif  // CONFIG_LOOP_RESTORATION

  // block dimension in the unit of mode_info.
  uint8_t n8_w, n8_h;

  uint8_t ref_mv_count[MODE_CTX_REF_FRAMES];
  CANDIDATE_MV ref_mv_stack[MODE_CTX_REF_FRAMES][MAX_REF_MV_STACK_SIZE];
  uint8_t is_sec_rect;

#if CONFIG_PVQ
  daala_dec_ctx daala_dec;
#endif
  FRAME_CONTEXT *tile_ctx;
  /* Bit depth: 8, 10, 12 */
  int bd;

  int qindex[MAX_SEGMENTS];
  int lossless[MAX_SEGMENTS];
  int corrupted;
#if CONFIG_AMVR
  // Same as the corresponding field in AV1_COMMON.
  int cur_frame_mv_precision_level;
#endif
  struct aom_internal_error_info *error_info;
#if CONFIG_GLOBAL_MOTION
  WarpedMotionParams *global_motion;
#endif  // CONFIG_GLOBAL_MOTION
  int prev_qindex;
  int delta_qindex;
  int current_qindex;
#if CONFIG_EXT_DELTA_Q
  // Since the actual frame-level loop filter level is not available at the
  // beginning of the tile on the encoder side (it only becomes available
  // during actual filtering), we record the delta_lf (against the frame-level
  // loop filter level) and code the delta between the previous superblock's
  // delta lf and the current delta lf. This is equivalent to the delta between
  // the previous superblock's actual lf and the current lf.
  int prev_delta_lf_from_base;
  int current_delta_lf_from_base;
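  // For example, if the frame-level filter level is F, the previous superblock
  // used F + 1 (prev_delta_lf_from_base == 1) and the current superblock wants
  // F + 3 (current_delta_lf_from_base == 3), the bitstream codes the
  // difference 3 - 1 = +2.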
#if CONFIG_LOOPFILTER_LEVEL
  // For this experiment, we have four frame filter levels for different
  // planes and directions. So, to support the per-superblock update, we need
  // to add a few more params, as below.
  // 0: delta loop filter level for y plane vertical
  // 1: delta loop filter level for y plane horizontal
  // 2: delta loop filter level for u plane
  // 3: delta loop filter level for v plane
  // To stay consistent with how each filter level is referenced in the
  // segment features, subtract 1, since
  // SEG_LVL_ALT_LF_Y_V = 1;
  // SEG_LVL_ALT_LF_Y_H = 2;
  // SEG_LVL_ALT_LF_U   = 3;
  // SEG_LVL_ALT_LF_V   = 4;
  int prev_delta_lf[FRAME_LF_COUNT];
  int curr_delta_lf[FRAME_LF_COUNT];
#endif  // CONFIG_LOOPFILTER_LEVEL
#endif
#if CONFIG_ADAPT_SCAN
  const EobThresholdMD *eob_threshold_md;
#endif

#if CONFIG_COMPOUND_SEGMENT
  DECLARE_ALIGNED(16, uint8_t, seg_mask[2 * MAX_SB_SQUARE]);
#endif  // CONFIG_COMPOUND_SEGMENT

#if CONFIG_MRC_TX
  uint8_t *mrc_mask;
#endif  // CONFIG_MRC_TX

#if CONFIG_CFL
  CFL_CTX *cfl;
#endif

#if CONFIG_NCOBMC_ADAPT_WEIGHT
  uint8_t *ncobmc_pred_buf[MAX_MB_PLANE];
  int ncobmc_pred_buf_stride[MAX_MB_PLANE];
  SB_MI_BD sb_mi_bd;
#endif
} MACROBLOCKD;

static INLINE int get_bitdepth_data_path_index(const MACROBLOCKD *xd) {
  return xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH ? 1 : 0;
}

static INLINE BLOCK_SIZE get_subsize(BLOCK_SIZE bsize,
                                     PARTITION_TYPE partition) {
  if (partition == PARTITION_INVALID)
    return BLOCK_INVALID;
  else
    return subsize_lookup[partition][bsize];
}

static const TX_TYPE intra_mode_to_tx_type_context[INTRA_MODES] = {
  DCT_DCT,    // DC
  ADST_DCT,   // V
  DCT_ADST,   // H
  DCT_DCT,    // D45
  ADST_ADST,  // D135
  ADST_DCT,   // D117
  DCT_ADST,   // D153
  DCT_ADST,   // D207
  ADST_DCT,   // D63
  ADST_ADST,  // SMOOTH
#if CONFIG_SMOOTH_HV
  ADST_DCT,   // SMOOTH_V
  DCT_ADST,   // SMOOTH_H
#endif        // CONFIG_SMOOTH_HV
  ADST_ADST,  // TM
};

#if CONFIG_SUPERTX
static INLINE int supertx_enabled(const MB_MODE_INFO *mbmi) {
  TX_SIZE max_tx_size = txsize_sqr_map[mbmi->tx_size];
  return tx_size_wide[max_tx_size] >
         AOMMIN(block_size_wide[mbmi->sb_type], block_size_high[mbmi->sb_type]);
}
#endif  // CONFIG_SUPERTX

#define USE_TXTYPE_SEARCH_FOR_SUB8X8_IN_CB4X4 1

#if CONFIG_RECT_TX
static INLINE int is_rect_tx(TX_SIZE tx_size) { return tx_size >= TX_SIZES; }
#endif  // CONFIG_RECT_TX

static INLINE int block_signals_txsize(BLOCK_SIZE bsize) {
#if CONFIG_CB4X4 && (CONFIG_VAR_TX || CONFIG_EXT_TX) && CONFIG_RECT_TX
  return bsize > BLOCK_4X4;
#else
  return bsize >= BLOCK_8X8;
#endif
}

#if CONFIG_MRC_TX
#define USE_MRC_INTRA 0
#define USE_MRC_INTER 1
#define SIGNAL_MRC_MASK_INTRA (USE_MRC_INTRA && 0)
#define SIGNAL_MRC_MASK_INTER (USE_MRC_INTER && 1)
#define SIGNAL_ANY_MRC_MASK (SIGNAL_MRC_MASK_INTRA || SIGNAL_MRC_MASK_INTER)
#endif  // CONFIG_MRC_TX

#if CONFIG_EXT_TX
#define ALLOW_INTRA_EXT_TX 1

// Number of transform types in each set type
static const int av1_num_ext_tx_set[EXT_TX_SET_TYPES] = {
  1, 2,
#if CONFIG_MRC_TX
  2, 3,
#endif  // CONFIG_MRC_TX
  5, 7, 12, 16,
};

static const int av1_ext_tx_set_idx_to_type[2][AOMMAX(EXT_TX_SETS_INTRA,
                                                      EXT_TX_SETS_INTER)] = {
  {
      // Intra
      EXT_TX_SET_DCTONLY, EXT_TX_SET_DTT4_IDTX_1DDCT, EXT_TX_SET_DTT4_IDTX,
#if CONFIG_MRC_TX
      EXT_TX_SET_MRC_DCT,
#endif  // CONFIG_MRC_TX
  },
  {
      // Inter
      EXT_TX_SET_DCTONLY, EXT_TX_SET_ALL16, EXT_TX_SET_DTT9_IDTX_1DDCT,
      EXT_TX_SET_DCT_IDTX,
#if CONFIG_MRC_TX
      EXT_TX_SET_MRC_DCT_IDTX,
#endif  // CONFIG_MRC_TX
  }
};

#if CONFIG_MRC_TX
static const int av1_ext_tx_used[EXT_TX_SET_TYPES][TX_TYPES] = {
  {
      1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  },
  {
      1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
  },
  {
      1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
  },
  {
      1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1,
  },
  {
      1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
  },
  {
      1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0,
  },
  {
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
  },
  {
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
  },
};
#else   // CONFIG_MRC_TX
static const int av1_ext_tx_used[EXT_TX_SET_TYPES][TX_TYPES] = {
  {
      1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  },
  {
      1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
  },
  {
      1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
  },
  {
      1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0,
  },
  {
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
  },
  {
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  },
};
#endif  // CONFIG_MRC_TX

static INLINE TxSetType get_ext_tx_set_type(TX_SIZE tx_size, BLOCK_SIZE bs,
                                            int is_inter, int use_reduced_set) {
  const TX_SIZE tx_size_sqr_up = txsize_sqr_up_map[tx_size];
  const TX_SIZE tx_size_sqr = txsize_sqr_map[tx_size];
#if CONFIG_CB4X4 && USE_TXTYPE_SEARCH_FOR_SUB8X8_IN_CB4X4
  (void)bs;
  if (tx_size_sqr_up > TX_32X32) return EXT_TX_SET_DCTONLY;
#else
  if (tx_size_sqr_up > TX_32X32 || bs < BLOCK_8X8) return EXT_TX_SET_DCTONLY;
#endif
  if (use_reduced_set)
    return is_inter ? EXT_TX_SET_DCT_IDTX : EXT_TX_SET_DTT4_IDTX;
#if CONFIG_MRC_TX
  if (tx_size == TX_32X32) {
    if (is_inter && USE_MRC_INTER)
      return EXT_TX_SET_MRC_DCT_IDTX;
    else if (!is_inter && USE_MRC_INTRA)
      return EXT_TX_SET_MRC_DCT;
  }
#endif  // CONFIG_MRC_TX
  if (tx_size_sqr_up == TX_32X32)
    return is_inter ? EXT_TX_SET_DCT_IDTX : EXT_TX_SET_DCTONLY;
  if (is_inter)
    return (tx_size_sqr == TX_16X16 ? EXT_TX_SET_DTT9_IDTX_1DDCT
                                    : EXT_TX_SET_ALL16);
  else
    return (tx_size_sqr == TX_16X16 ? EXT_TX_SET_DTT4_IDTX
                                    : EXT_TX_SET_DTT4_IDTX_1DDCT);
}

// Maps tx set types to the indices.
static const int ext_tx_set_index[2][EXT_TX_SET_TYPES] = {
  {
      // Intra
      0, -1,
#if CONFIG_MRC_TX
      3, -1,
#endif  // CONFIG_MRC_TX
      2, 1, -1, -1,
  },
  {
      // Inter
      0, 3,
#if CONFIG_MRC_TX
      -1, 4,
#endif  // CONFIG_MRC_TX
      -1, -1, 2, 1,
  },
};
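// For example, this table acts as the inverse of av1_ext_tx_set_idx_to_type
// above: for inter blocks, EXT_TX_SET_ALL16 maps to index 1 and
// EXT_TX_SET_DCT_IDTX maps to index 3.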

static INLINE int get_ext_tx_set(TX_SIZE tx_size, BLOCK_SIZE bs, int is_inter,
                                 int use_reduced_set) {
  const TxSetType set_type =
      get_ext_tx_set_type(tx_size, bs, is_inter, use_reduced_set);
  return ext_tx_set_index[is_inter][set_type];
}

static INLINE int get_ext_tx_types(TX_SIZE tx_size, BLOCK_SIZE bs, int is_inter,
                                   int use_reduced_set) {
  const int set_type =
      get_ext_tx_set_type(tx_size, bs, is_inter, use_reduced_set);
  return av1_num_ext_tx_set[set_type];
}

#if CONFIG_LGT_FROM_PRED
static INLINE int is_lgt_allowed(PREDICTION_MODE mode, TX_SIZE tx_size) {
  if (!LGT_FROM_PRED_INTRA && !is_inter_mode(mode)) return 0;
  if (!LGT_FROM_PRED_INTER && is_inter_mode(mode)) return 0;

  switch (mode) {
    case D45_PRED:
    case D63_PRED:
    case D117_PRED:
    case V_PRED:
#if CONFIG_SMOOTH_HV
    case SMOOTH_V_PRED:
#endif
      return tx_size_wide[tx_size] <= 8;
    case D135_PRED:
    case D153_PRED:
    case D207_PRED:
    case H_PRED:
#if CONFIG_SMOOTH_HV
    case SMOOTH_H_PRED:
#endif
      return tx_size_high[tx_size] <= 8;
    case DC_PRED:
    case SMOOTH_PRED: return 0;
    case TM_PRED:
    default: return tx_size_wide[tx_size] <= 8 || tx_size_high[tx_size] <= 8;
  }
}
#endif  // CONFIG_LGT_FROM_PRED

#if CONFIG_RECT_TX
static INLINE int is_rect_tx_allowed_bsize(BLOCK_SIZE bsize) {
  static const char LUT[BLOCK_SIZES_ALL] = {
#if CONFIG_CHROMA_2X2 || CONFIG_CHROMA_SUB8X8
    0,  // BLOCK_2X2
    0,  // BLOCK_2X4
    0,  // BLOCK_4X2
#endif
    0,  // BLOCK_4X4
    1,  // BLOCK_4X8
    1,  // BLOCK_8X4
    0,  // BLOCK_8X8
    1,  // BLOCK_8X16
    1,  // BLOCK_16X8
    0,  // BLOCK_16X16
    1,  // BLOCK_16X32
    1,  // BLOCK_32X16
    0,  // BLOCK_32X32
    1,  // BLOCK_32X64
    1,  // BLOCK_64X32
    0,  // BLOCK_64X64
#if CONFIG_EXT_PARTITION
    0,  // BLOCK_64X128
    0,  // BLOCK_128X64
    0,  // BLOCK_128X128
#endif  // CONFIG_EXT_PARTITION
    0,  // BLOCK_4X16
    0,  // BLOCK_16X4
    0,  // BLOCK_8X32
    0,  // BLOCK_32X8
    0,  // BLOCK_16X64
    0,  // BLOCK_64X16
#if CONFIG_EXT_PARTITION
    0,  // BLOCK_32X128
    0,  // BLOCK_128X32
#endif  // CONFIG_EXT_PARTITION
  };

  return LUT[bsize];
}

static INLINE int is_rect_tx_allowed(const MACROBLOCKD *xd,
                                     const MB_MODE_INFO *mbmi) {
  return is_rect_tx_allowed_bsize(mbmi->sb_type) &&
         !xd->lossless[mbmi->segment_id];
}
#endif  // CONFIG_RECT_TX
#endif  // CONFIG_EXT_TX

#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
static INLINE int is_quarter_tx_allowed_bsize(BLOCK_SIZE bsize) {
  static const char LUT_QTTX[BLOCK_SIZES_ALL] = {
#if CONFIG_CHROMA_2X2 || CONFIG_CHROMA_SUB8X8
    0,  // BLOCK_2X2
    0,  // BLOCK_2X4
    0,  // BLOCK_4X2
#endif
    0,  // BLOCK_4X4
    0,  // BLOCK_4X8
    0,  // BLOCK_8X4
    0,  // BLOCK_8X8
    1,  // BLOCK_8X16
    1,  // BLOCK_16X8
    0,  // BLOCK_16X16
    0,  // BLOCK_16X32
    0,  // BLOCK_32X16
    0,  // BLOCK_32X32
    0,  // BLOCK_32X64
    0,  // BLOCK_64X32
    0,  // BLOCK_64X64
#if CONFIG_EXT_PARTITION
    0,  // BLOCK_64X128
    0,  // BLOCK_128X64
    0,  // BLOCK_128X128
#endif  // CONFIG_EXT_PARTITION
    0,  // BLOCK_4X16
    0,  // BLOCK_16X4
    0,  // BLOCK_8X32
    0,  // BLOCK_32X8
    0,  // BLOCK_16X64
    0,  // BLOCK_64X16
#if CONFIG_EXT_PARTITION
    0,  // BLOCK_32X128
    0,  // BLOCK_128X32
#endif  // CONFIG_EXT_PARTITION
  };

  return LUT_QTTX[bsize];
}

static INLINE int is_quarter_tx_allowed(const MACROBLOCKD *xd,
                                        const MB_MODE_INFO *mbmi,
                                        int is_inter) {
  return is_quarter_tx_allowed_bsize(mbmi->sb_type) && is_inter &&
         !xd->lossless[mbmi->segment_id];
}
#endif

static INLINE TX_SIZE tx_size_from_tx_mode(BLOCK_SIZE bsize, TX_MODE tx_mode,
                                           int is_inter) {
  const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
#if (CONFIG_VAR_TX || CONFIG_EXT_TX) && CONFIG_RECT_TX
  const TX_SIZE max_rect_tx_size = max_txsize_rect_lookup[bsize];
#else
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
#endif  // (CONFIG_VAR_TX || CONFIG_EXT_TX) && CONFIG_RECT_TX
  (void)is_inter;
#if CONFIG_VAR_TX && CONFIG_RECT_TX
#if CONFIG_CB4X4
  if (bsize == BLOCK_4X4)
    return AOMMIN(max_txsize_lookup[bsize], largest_tx_size);
#else
  if (bsize < BLOCK_8X8)
    return AOMMIN(max_txsize_lookup[bsize], largest_tx_size);
#endif
  if (txsize_sqr_map[max_rect_tx_size] <= largest_tx_size)
    return max_rect_tx_size;
  else
    return largest_tx_size;
#elif CONFIG_EXT_TX && CONFIG_RECT_TX
  if (txsize_sqr_up_map[max_rect_tx_size] <= largest_tx_size) {
    return max_rect_tx_size;
  } else {
    return largest_tx_size;
  }
#else
  return AOMMIN(max_tx_size, largest_tx_size);
#endif  // CONFIG_VAR_TX && CONFIG_RECT_TX
}

#if CONFIG_EXT_INTRA
#define MAX_ANGLE_DELTA 3
#define ANGLE_STEP 3
extern const int16_t dr_intra_derivative[90];
static const uint8_t mode_to_angle_map[] = {
  0, 90, 180, 45, 135, 111, 157, 203, 67, 0, 0,
#if CONFIG_SMOOTH_HV
  0, 0,
#endif  // CONFIG_SMOOTH_HV
};
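// For example, D45_PRED has a base angle of 45 degrees; with ANGLE_STEP == 3
// and MAX_ANGLE_DELTA == 3, coded angle deltas cover 45 +/- 9 degrees.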
#if CONFIG_INTRA_INTERP
// Returns whether filter selection is needed for a given
// intra prediction angle.
int av1_is_intra_filter_switchable(int angle);
#endif  // CONFIG_INTRA_INTERP
#endif  // CONFIG_EXT_INTRA

#if CONFIG_DCT_ONLY
#define FIXED_TX_TYPE 1
#else
#define FIXED_TX_TYPE 0
#endif

// Converts block_index for given transform size to index of the block in raster
// order.
static INLINE int av1_block_index_to_raster_order(TX_SIZE tx_size,
                                                  int block_idx) {
  // For transform size 4x8, the possible block_idx values are 0 & 2, because
  // block_idx values are incremented in steps of size 'tx_width_unit x
  // tx_height_unit'. But, for this transform size, block_idx = 2 corresponds to
  // block number 1 in raster order, inside an 8x8 MI block.
  // For any other transform size, the two indices are equivalent.
  return (tx_size == TX_4X8 && block_idx == 2) ? 1 : block_idx;
}

// Inverse of above function.
// Note: only implemented for transform sizes 4x4, 4x8 and 8x4 right now.
static INLINE int av1_raster_order_to_block_index(TX_SIZE tx_size,
                                                  int raster_order) {
  assert(tx_size == TX_4X4 || tx_size == TX_4X8 || tx_size == TX_8X4);
  // We ensure that block indices are 0 & 2 if tx size is 4x8 or 8x4.
  return (tx_size == TX_4X4) ? raster_order : (raster_order > 0) ? 2 : 0;
}

static INLINE TX_TYPE get_default_tx_type(PLANE_TYPE plane_type,
                                          const MACROBLOCKD *xd, int block_idx,
                                          TX_SIZE tx_size) {
  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;

  if (CONFIG_DCT_ONLY || is_inter_block(mbmi) || plane_type != PLANE_TYPE_Y ||
      xd->lossless[mbmi->segment_id] || tx_size >= TX_32X32)
    return DCT_DCT;

  return intra_mode_to_tx_type_context[plane_type == PLANE_TYPE_Y
                                           ? get_y_mode(xd->mi[0], block_idx)
                                           : get_uv_mode(mbmi->uv_mode)];
}

static INLINE TX_TYPE av1_get_tx_type(PLANE_TYPE plane_type,
                                      const MACROBLOCKD *xd, int blk_row,
                                      int blk_col, int block, TX_SIZE tx_size) {
  const MODE_INFO *const mi = xd->mi[0];
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  (void)blk_row;
  (void)blk_col;
#if CONFIG_INTRABC && (!CONFIG_EXT_TX || CONFIG_TXK_SEL)
  // TODO(aconverse@google.com): Handle INTRABC + EXT_TX + TXK_SEL
  if (is_intrabc_block(mbmi)) return DCT_DCT;
#endif  // CONFIG_INTRABC && (!CONFIG_EXT_TX || CONFIG_TXK_SEL)

#if CONFIG_TXK_SEL
  TX_TYPE tx_type;
  if (xd->lossless[mbmi->segment_id] || txsize_sqr_map[tx_size] >= TX_32X32) {
    tx_type = DCT_DCT;
  } else {
    if (plane_type == PLANE_TYPE_Y)
      tx_type = mbmi->txk_type[(blk_row << 4) + blk_col];
    else if (is_inter_block(mbmi))
      tx_type = mbmi->txk_type[(blk_row << 5) + (blk_col << 1)];
    else
      tx_type = intra_mode_to_tx_type_context[mbmi->uv_mode];
  }
  assert(tx_type >= DCT_DCT && tx_type < TX_TYPES);
  return tx_type;
#endif  // CONFIG_TXK_SEL

#if FIXED_TX_TYPE
  const int block_raster_idx = av1_block_index_to_raster_order(tx_size, block);
  return get_default_tx_type(plane_type, xd, block_raster_idx, tx_size);
#endif  // FIXED_TX_TYPE

#if CONFIG_EXT_TX
#if CONFIG_MRC_TX
  if (mbmi->tx_type == MRC_DCT) {
    assert(((is_inter_block(mbmi) && USE_MRC_INTER) ||
            (!is_inter_block(mbmi) && USE_MRC_INTRA)) &&
           "INVALID BLOCK TYPE FOR MRC_DCT");
    if (plane_type == PLANE_TYPE_Y) {
      assert(tx_size == TX_32X32);
      return mbmi->tx_type;
    }
    return DCT_DCT;
  }
#endif  // CONFIG_MRC_TX
  if (xd->lossless[mbmi->segment_id] || txsize_sqr_map[tx_size] > TX_32X32 ||
      (txsize_sqr_map[tx_size] >= TX_32X32 && !is_inter_block(mbmi)))
    return DCT_DCT;
  if (mbmi->sb_type >= BLOCK_8X8 || CONFIG_CB4X4) {
    if (plane_type == PLANE_TYPE_Y) {
#if !ALLOW_INTRA_EXT_TX
      if (is_inter_block(mbmi))
#endif  // ALLOW_INTRA_EXT_TX
        return mbmi->tx_type;
    }

    if (is_inter_block(mbmi)) {
// UV Inter only
#if CONFIG_CHROMA_2X2
      if (tx_size < TX_4X4) return DCT_DCT;
#endif
      return (mbmi->tx_type == IDTX && txsize_sqr_map[tx_size] >= TX_32X32)
                 ? DCT_DCT
                 : mbmi->tx_type;
    }
  }

#if CONFIG_CB4X4
  (void)block;
#if CONFIG_CHROMA_2X2
  if (tx_size < TX_4X4)
    return DCT_DCT;
  else
#endif  // CONFIG_CHROMA_2X2
    return intra_mode_to_tx_type_context[get_uv_mode(mbmi->uv_mode)];
#else   // CONFIG_CB4X4
  // Sub8x8-Inter/Intra OR UV-Intra
  if (is_inter_block(mbmi)) {  // Sub8x8-Inter
    return DCT_DCT;
  } else {  // Sub8x8 Intra OR UV-Intra
    const int block_raster_idx =
        av1_block_index_to_raster_order(tx_size, block);
    return intra_mode_to_tx_type_context[plane_type == PLANE_TYPE_Y
                                             ? get_y_mode(mi, block_raster_idx)
                                             : get_uv_mode(mbmi->uv_mode)];
  }
#endif  // CONFIG_CB4X4
#else   // CONFIG_EXT_TX
  (void)block;
#if CONFIG_MRC_TX
  if (mbmi->tx_type == MRC_DCT) {
    if (plane_type == PLANE_TYPE_Y && !xd->lossless[mbmi->segment_id]) {
      assert(tx_size == TX_32X32);
      return mbmi->tx_type;
    }
    return DCT_DCT;
  }
#endif  // CONFIG_MRC_TX
  if (plane_type != PLANE_TYPE_Y || xd->lossless[mbmi->segment_id] ||
      txsize_sqr_map[tx_size] >= TX_32X32)
    return DCT_DCT;
  return mbmi->tx_type;
#endif  // CONFIG_EXT_TX
}

void av1_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y);

static INLINE int tx_size_to_depth(TX_SIZE tx_size) {
  return (int)(tx_size - TX_SIZE_LUMA_MIN);
}

static INLINE TX_SIZE depth_to_tx_size(int depth) {
  return (TX_SIZE)(depth + TX_SIZE_LUMA_MIN);
}

static INLINE TX_SIZE av1_get_uv_tx_size(const MB_MODE_INFO *mbmi,
                                         const struct macroblockd_plane *pd) {
#if CONFIG_CHROMA_2X2
  assert(mbmi->tx_size > TX_2X2);
#endif  // CONFIG_CHROMA_2X2

#if CONFIG_SUPERTX
  if (supertx_enabled(mbmi))
    return uvsupertx_size_lookup[txsize_sqr_map[mbmi->tx_size]]
                                [pd->subsampling_x][pd->subsampling_y];
#endif  // CONFIG_SUPERTX

  const TX_SIZE uv_txsize =
      uv_txsize_lookup[mbmi->sb_type][mbmi->tx_size][pd->subsampling_x]
                      [pd->subsampling_y];
  assert(uv_txsize != TX_INVALID);
  return uv_txsize;
}

static INLINE TX_SIZE av1_get_tx_size(int plane, const MACROBLOCKD *xd) {
  const MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
  if (plane == 0) return mbmi->tx_size;
  const MACROBLOCKD_PLANE *pd = &xd->plane[plane];
  return av1_get_uv_tx_size(mbmi, pd);
}

static INLINE BLOCK_SIZE
get_plane_block_size(BLOCK_SIZE bsize, const struct macroblockd_plane *pd) {
  return ss_size_lookup[bsize][pd->subsampling_x][pd->subsampling_y];
}

void av1_reset_skip_context(MACROBLOCKD *xd, int mi_row, int mi_col,
                            BLOCK_SIZE bsize);

typedef void (*foreach_transformed_block_visitor)(int plane, int block,
                                                  int blk_row, int blk_col,
                                                  BLOCK_SIZE plane_bsize,
                                                  TX_SIZE tx_size, void *arg);

void av1_foreach_transformed_block_in_plane(
    const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
    foreach_transformed_block_visitor visit, void *arg);

#if CONFIG_LV_MAP
void av1_foreach_transformed_block(const MACROBLOCKD *const xd,
                                   BLOCK_SIZE bsize, int mi_row, int mi_col,
                                   foreach_transformed_block_visitor visit,
                                   void *arg);
#endif

#if CONFIG_COEF_INTERLEAVE
static INLINE int get_max_4x4_size(int num_4x4, int mb_to_edge,
                                   int subsampling) {
  return num_4x4 + (mb_to_edge >= 0 ? 0 : mb_to_edge >> (5 + subsampling));
}

void av1_foreach_transformed_block_interleave(
    const MACROBLOCKD *const xd, BLOCK_SIZE bsize,
    foreach_transformed_block_visitor visit, void *arg);
#endif

void av1_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
                      int plane, TX_SIZE tx_size, int has_eob, int aoff,
                      int loff);

static INLINE int is_interintra_allowed_bsize(const BLOCK_SIZE bsize) {
#if CONFIG_INTERINTRA
  // TODO(debargha): Should this be bsize < BLOCK_LARGEST?
  return (bsize >= BLOCK_8X8) && (bsize < BLOCK_64X64);
#else
  (void)bsize;
  return 0;
#endif  // CONFIG_INTERINTRA
}

static INLINE int is_interintra_allowed_mode(const PREDICTION_MODE mode) {
#if CONFIG_INTERINTRA
  return (mode >= NEARESTMV) && (mode <= NEWMV);
#else
  (void)mode;
  return 0;
#endif  // CONFIG_INTERINTRA
}

static INLINE int is_interintra_allowed_ref(const MV_REFERENCE_FRAME rf[2]) {
#if CONFIG_INTERINTRA
  return (rf[0] > INTRA_FRAME) && (rf[1] <= INTRA_FRAME);
#else
  (void)rf;
  return 0;
#endif  // CONFIG_INTERINTRA
}

static INLINE int is_interintra_allowed(const MB_MODE_INFO *mbmi) {
  return is_interintra_allowed_bsize(mbmi->sb_type) &&
         is_interintra_allowed_mode(mbmi->mode) &&
         is_interintra_allowed_ref(mbmi->ref_frame);
}

static INLINE int is_interintra_allowed_bsize_group(int group) {
  int i;
  for (i = 0; i < BLOCK_SIZES_ALL; i++) {
    if (size_group_lookup[i] == group &&
        is_interintra_allowed_bsize((BLOCK_SIZE)i)) {
      return 1;
    }
  }
  return 0;
}

static INLINE int is_interintra_pred(const MB_MODE_INFO *mbmi) {
  return (mbmi->ref_frame[1] == INTRA_FRAME) && is_interintra_allowed(mbmi);
}

#if CONFIG_VAR_TX
static INLINE int get_vartx_max_txsize(const MB_MODE_INFO *const mbmi,
                                       BLOCK_SIZE bsize, int subsampled) {
#if CONFIG_CB4X4
  (void)mbmi;
  TX_SIZE max_txsize = max_txsize_rect_lookup[bsize];
#else
  TX_SIZE max_txsize = mbmi->sb_type < BLOCK_8X8
                           ? max_txsize_rect_lookup[mbmi->sb_type]
                           : max_txsize_rect_lookup[bsize];
#endif  // CONFIG_CB4X4

#if CONFIG_EXT_PARTITION && CONFIG_TX64X64
  // The decoder is designed so that it can process 64x64 luma pixels at a
  // time. If this is a chroma plane with subsampling and bsize corresponds to
  // a subsampled BLOCK_128X128 then the lookup above will give TX_64X64. That
  // mustn't be used for the subsampled plane (because it would be bigger than
  // a 64x64 luma block) so we round down to TX_32X32.
  if (subsampled && max_txsize == TX_64X64) max_txsize = TX_32X32;
#else
  (void)subsampled;
#endif

  return max_txsize;
}
#endif  // CONFIG_VAR_TX

#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
static INLINE int is_motion_variation_allowed_bsize(BLOCK_SIZE bsize) {
  return AOMMIN(block_size_wide[bsize], block_size_high[bsize]) >= 8;
}

static INLINE int is_motion_variation_allowed_compound(
    const MB_MODE_INFO *mbmi) {
#if CONFIG_COMPOUND_SINGLEREF
  if (!has_second_ref(mbmi) && !is_inter_singleref_comp_mode(mbmi->mode))
#else
  if (!has_second_ref(mbmi))
#endif  // CONFIG_COMPOUND_SINGLEREF
    return 1;
  else
    return 0;
}

#if CONFIG_MOTION_VAR
// input: log2 of length, 0(4), 1(8), ...
static const int max_neighbor_obmc[6] = { 0, 1, 2, 3, 4, 4 };
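// For example, index 1 (an 8-pixel edge) allows at most 1 overlapping
// neighbor, while indices 4 and 5 (64- and 128-pixel edges) are capped at 4.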

static INLINE int check_num_overlappable_neighbors(const MB_MODE_INFO *mbmi) {
  return !(mbmi->overlappable_neighbors[0] == 0 &&
           mbmi->overlappable_neighbors[1] == 0);
}
#if CONFIG_NCOBMC_ADAPT_WEIGHT
static INLINE NCOBMC_MODE ncobmc_mode_allowed_bsize(BLOCK_SIZE bsize) {
  if (bsize < BLOCK_8X8 || bsize >= BLOCK_64X64)
    return NO_OVERLAP;
  else
    return MAX_NCOBMC_MODES;
}
#endif  // CONFIG_NCOBMC_ADAPT_WEIGHT
#endif  // CONFIG_MOTION_VAR
1568 
static INLINE MOTION_MODE motion_mode_allowed(
#if CONFIG_GLOBAL_MOTION
    int block, const WarpedMotionParams *gm_params,
#endif  // CONFIG_GLOBAL_MOTION
#if CONFIG_WARPED_MOTION
    const MACROBLOCKD *xd,
#endif
    const MODE_INFO *mi) {
  const MB_MODE_INFO *mbmi = &mi->mbmi;
#if CONFIG_AMVR
  if (xd->cur_frame_mv_precision_level == 0) {
#endif
#if CONFIG_GLOBAL_MOTION
    const TransformationType gm_type = gm_params[mbmi->ref_frame[0]].wmtype;
    if (is_global_mv_block(mi, block, gm_type)) return SIMPLE_TRANSLATION;
#endif  // CONFIG_GLOBAL_MOTION
#if CONFIG_AMVR
  }
#endif
  if (is_motion_variation_allowed_bsize(mbmi->sb_type) &&
      is_inter_mode(mbmi->mode) && mbmi->ref_frame[1] != INTRA_FRAME &&
      is_motion_variation_allowed_compound(mbmi)) {
#if CONFIG_MOTION_VAR
    if (!check_num_overlappable_neighbors(mbmi)) return SIMPLE_TRANSLATION;
#endif
#if CONFIG_WARPED_MOTION
    if (!has_second_ref(mbmi) && mbmi->num_proj_ref[0] >= 1 &&
        !av1_is_scaled(&(xd->block_refs[0]->sf))) {
#if CONFIG_AMVR
      if (xd->cur_frame_mv_precision_level) {
        return OBMC_CAUSAL;
      }
#endif
      return WARPED_CAUSAL;
    }

#endif  // CONFIG_WARPED_MOTION
#if CONFIG_MOTION_VAR
#if CONFIG_NCOBMC_ADAPT_WEIGHT
    if (ncobmc_mode_allowed_bsize(mbmi->sb_type) < NO_OVERLAP)
      return NCOBMC_ADAPT_WEIGHT;
    else
#endif
      return OBMC_CAUSAL;
#else
    return SIMPLE_TRANSLATION;
#endif  // CONFIG_MOTION_VAR
  } else {
    return SIMPLE_TRANSLATION;
  }
}

static INLINE void assert_motion_mode_valid(MOTION_MODE mode,
#if CONFIG_GLOBAL_MOTION
                                            int block,
                                            const WarpedMotionParams *gm_params,
#endif  // CONFIG_GLOBAL_MOTION
#if CONFIG_WARPED_MOTION
                                            const MACROBLOCKD *xd,
#endif
                                            const MODE_INFO *mi) {
  const MOTION_MODE last_motion_mode_allowed = motion_mode_allowed(
#if CONFIG_GLOBAL_MOTION
      block, gm_params,
#endif  // CONFIG_GLOBAL_MOTION
#if CONFIG_WARPED_MOTION
      xd,
#endif
      mi);

  // Check that the input mode is not illegal
  if (last_motion_mode_allowed < mode)
    assert(0 && "Illegal motion mode selected");
}

#if CONFIG_MOTION_VAR
static INLINE int is_neighbor_overlappable(const MB_MODE_INFO *mbmi) {
  return (is_inter_block(mbmi));
}
#endif  // CONFIG_MOTION_VAR
#endif  // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION

static INLINE int av1_allow_palette(int allow_screen_content_tools,
                                    BLOCK_SIZE sb_type) {
  return allow_screen_content_tools && sb_type >= BLOCK_8X8 &&
         sb_type <= BLOCK_LARGEST;
}

// Returns sub-sampled dimensions of the given block.
// The output values for 'rows_within_bounds' and 'cols_within_bounds' will
// differ from 'height' and 'width' when part of the block is outside the
// right and/or bottom image boundary.
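// Worked example (illustrative): a BLOCK_16X16 that extends 4 luma rows past
// the bottom of the frame has mb_to_bottom_edge == -32 (1/8-pel units, hence
// the >> 3 below), so block_rows == 12; for a 4:2:0 chroma plane this yields
// *height == 8 but *rows_within_bounds == 6.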
static INLINE void av1_get_block_dimensions(BLOCK_SIZE bsize, int plane,
                                            const MACROBLOCKD *xd, int *width,
                                            int *height,
                                            int *rows_within_bounds,
                                            int *cols_within_bounds) {
  const int block_height = block_size_high[bsize];
  const int block_width = block_size_wide[bsize];
  const int block_rows = (xd->mb_to_bottom_edge >= 0)
                             ? block_height
                             : (xd->mb_to_bottom_edge >> 3) + block_height;
  const int block_cols = (xd->mb_to_right_edge >= 0)
                             ? block_width
                             : (xd->mb_to_right_edge >> 3) + block_width;
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  assert(IMPLIES(plane == PLANE_TYPE_Y, pd->subsampling_x == 0));
  assert(IMPLIES(plane == PLANE_TYPE_Y, pd->subsampling_y == 0));
  assert(block_width >= block_cols);
  assert(block_height >= block_rows);
  if (width) *width = block_width >> pd->subsampling_x;
  if (height) *height = block_height >> pd->subsampling_y;
  if (rows_within_bounds) *rows_within_bounds = block_rows >> pd->subsampling_y;
  if (cols_within_bounds) *cols_within_bounds = block_cols >> pd->subsampling_x;
}

/* clang-format off */
typedef aom_cdf_prob (*MapCdf)[PALETTE_COLOR_INDEX_CONTEXTS]
                              [CDF_SIZE(PALETTE_COLORS)];
typedef const int (*ColorCost)[PALETTE_SIZES][PALETTE_COLOR_INDEX_CONTEXTS]
                              [PALETTE_COLORS];
/* clang-format on */

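// Av1ColorMapParam bundles the state needed to code a palette color-index map:
// the map's dimensions and number of palette colors, the index buffer itself,
// and (as the field names suggest) the CDFs used for entropy coding plus the
// per-context color costs used for encoder-side rate estimation.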
typedef struct {
  int rows;
  int cols;
  int n_colors;
  int plane_width;
  int plane_height;
  uint8_t *color_map;
  MapCdf map_cdf;
  ColorCost color_cost;
} Av1ColorMapParam;

#if CONFIG_GLOBAL_MOTION
static INLINE int is_nontrans_global_motion(const MACROBLOCKD *xd) {
  const MODE_INFO *mi = xd->mi[0];
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  int ref;
#if CONFIG_CB4X4
  const int unify_bsize = 1;
#else
  const int unify_bsize = 0;
#endif

  // First check if all modes are ZEROMV
  if (mbmi->sb_type >= BLOCK_8X8 || unify_bsize) {
    if (mbmi->mode != ZEROMV && mbmi->mode != ZERO_ZEROMV) return 0;
  } else {
    if ((mi->bmi[0].as_mode != ZEROMV && mi->bmi[0].as_mode != ZERO_ZEROMV) ||
        (mi->bmi[1].as_mode != ZEROMV && mi->bmi[1].as_mode != ZERO_ZEROMV) ||
        (mi->bmi[2].as_mode != ZEROMV && mi->bmi[2].as_mode != ZERO_ZEROMV) ||
        (mi->bmi[3].as_mode != ZEROMV && mi->bmi[3].as_mode != ZERO_ZEROMV))
      return 0;
  }

#if !GLOBAL_SUB8X8_USED
  if (mbmi->sb_type < BLOCK_8X8) return 0;
#endif

  // Now check if all global motion is non-translational
  for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
    if (xd->global_motion[mbmi->ref_frame[ref]].wmtype <= TRANSLATION) return 0;
  }
  return 1;
}
#endif  // CONFIG_GLOBAL_MOTION

static INLINE PLANE_TYPE get_plane_type(int plane) {
  return (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
}

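// Transpose helpers: each copies a w x h source block into an h x w block in
// 'dst'. For example, a 2-row by 3-column source { {1, 2, 3}, {4, 5, 6} }
// with w == 3, h == 2 and src_stride == 3 becomes { {1, 4}, {2, 5}, {3, 6} }
// when written to a dst with dst_stride == 2.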
static INLINE void transpose_uint8(uint8_t *dst, int dst_stride,
                                   const uint8_t *src, int src_stride, int w,
                                   int h) {
  int r, c;
  for (r = 0; r < h; ++r)
    for (c = 0; c < w; ++c) dst[c * dst_stride + r] = src[r * src_stride + c];
}

static INLINE void transpose_uint16(uint16_t *dst, int dst_stride,
                                    const uint16_t *src, int src_stride, int w,
                                    int h) {
  int r, c;
  for (r = 0; r < h; ++r)
    for (c = 0; c < w; ++c) dst[c * dst_stride + r] = src[r * src_stride + c];
}

static INLINE void transpose_int16(int16_t *dst, int dst_stride,
                                   const int16_t *src, int src_stride, int w,
                                   int h) {
  int r, c;
  for (r = 0; r < h; ++r)
    for (c = 0; c < w; ++c) dst[c * dst_stride + r] = src[r * src_stride + c];
}

static INLINE void transpose_int32(int32_t *dst, int dst_stride,
                                   const int32_t *src, int src_stride, int w,
                                   int h) {
  int r, c;
  for (r = 0; r < h; ++r)
    for (c = 0; c < w; ++c) dst[c * dst_stride + r] = src[r * src_stride + c];
}

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // AV1_COMMON_BLOCKD_H_