/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_AV1_COMMON_BLOCKD_H_
#define AOM_AV1_COMMON_BLOCKD_H_

#include "config/aom_config.h"

#include "aom_dsp/aom_dsp_common.h"
#include "aom_ports/mem.h"
#include "aom_scale/yv12config.h"

#include "av1/common/common_data.h"
#include "av1/common/quant_common.h"
#include "av1/common/entropy.h"
#include "av1/common/entropymode.h"
#include "av1/common/mv.h"
#include "av1/common/scale.h"
#include "av1/common/seg_common.h"
#include "av1/common/tile_common.h"

#ifdef __cplusplus
extern "C" {
#endif

#define USE_B_QUANT_NO_TRELLIS 1

#define MAX_MB_PLANE 3

#define MAX_DIFFWTD_MASK_BITS 1

// DIFFWTD_MASK_TYPES should not surpass 1 << MAX_DIFFWTD_MASK_BITS
typedef enum ATTRIBUTE_PACKED {
  DIFFWTD_38 = 0,
  DIFFWTD_38_INV,
  DIFFWTD_MASK_TYPES,
} DIFFWTD_MASK_TYPE;

typedef enum ATTRIBUTE_PACKED {
  KEY_FRAME = 0,
  INTER_FRAME = 1,
  INTRA_ONLY_FRAME = 2,  // replaces intra-only
  S_FRAME = 3,
  FRAME_TYPES,
} FRAME_TYPE;

static INLINE int is_comp_ref_allowed(BLOCK_SIZE bsize) {
  return AOMMIN(block_size_wide[bsize], block_size_high[bsize]) >= 8;
}

static INLINE int is_inter_mode(PREDICTION_MODE mode) {
  return mode >= INTER_MODE_START && mode < INTER_MODE_END;
}

typedef struct {
  uint8_t *plane[MAX_MB_PLANE];
  int stride[MAX_MB_PLANE];
} BUFFER_SET;

static INLINE int is_inter_singleref_mode(PREDICTION_MODE mode) {
  return mode >= SINGLE_INTER_MODE_START && mode < SINGLE_INTER_MODE_END;
}
static INLINE int is_inter_compound_mode(PREDICTION_MODE mode) {
  return mode >= COMP_INTER_MODE_START && mode < COMP_INTER_MODE_END;
}

static INLINE PREDICTION_MODE compound_ref0_mode(PREDICTION_MODE mode) {
  static PREDICTION_MODE lut[] = {
    MB_MODE_COUNT,  // DC_PRED
    MB_MODE_COUNT,  // V_PRED
    MB_MODE_COUNT,  // H_PRED
    MB_MODE_COUNT,  // D45_PRED
    MB_MODE_COUNT,  // D135_PRED
    MB_MODE_COUNT,  // D113_PRED
    MB_MODE_COUNT,  // D157_PRED
    MB_MODE_COUNT,  // D203_PRED
    MB_MODE_COUNT,  // D67_PRED
    MB_MODE_COUNT,  // SMOOTH_PRED
    MB_MODE_COUNT,  // SMOOTH_V_PRED
    MB_MODE_COUNT,  // SMOOTH_H_PRED
    MB_MODE_COUNT,  // PAETH_PRED
    MB_MODE_COUNT,  // NEARESTMV
    MB_MODE_COUNT,  // NEARMV
    MB_MODE_COUNT,  // GLOBALMV
    MB_MODE_COUNT,  // NEWMV
    NEARESTMV,      // NEAREST_NEARESTMV
    NEARMV,         // NEAR_NEARMV
    NEARESTMV,      // NEAREST_NEWMV
    NEWMV,          // NEW_NEARESTMV
    NEARMV,         // NEAR_NEWMV
    NEWMV,          // NEW_NEARMV
    GLOBALMV,       // GLOBAL_GLOBALMV
    NEWMV,          // NEW_NEWMV
  };
  assert(NELEMENTS(lut) == MB_MODE_COUNT);
  assert(is_inter_compound_mode(mode));
  return lut[mode];
}

static INLINE PREDICTION_MODE compound_ref1_mode(PREDICTION_MODE mode) {
  static PREDICTION_MODE lut[] = {
    MB_MODE_COUNT,  // DC_PRED
    MB_MODE_COUNT,  // V_PRED
    MB_MODE_COUNT,  // H_PRED
    MB_MODE_COUNT,  // D45_PRED
    MB_MODE_COUNT,  // D135_PRED
    MB_MODE_COUNT,  // D113_PRED
    MB_MODE_COUNT,  // D157_PRED
    MB_MODE_COUNT,  // D203_PRED
    MB_MODE_COUNT,  // D67_PRED
    MB_MODE_COUNT,  // SMOOTH_PRED
    MB_MODE_COUNT,  // SMOOTH_V_PRED
    MB_MODE_COUNT,  // SMOOTH_H_PRED
    MB_MODE_COUNT,  // PAETH_PRED
    MB_MODE_COUNT,  // NEARESTMV
    MB_MODE_COUNT,  // NEARMV
    MB_MODE_COUNT,  // GLOBALMV
    MB_MODE_COUNT,  // NEWMV
    NEARESTMV,      // NEAREST_NEARESTMV
    NEARMV,         // NEAR_NEARMV
    NEWMV,          // NEAREST_NEWMV
    NEARESTMV,      // NEW_NEARESTMV
    NEWMV,          // NEAR_NEWMV
    NEARMV,         // NEW_NEARMV
    GLOBALMV,       // GLOBAL_GLOBALMV
    NEWMV,          // NEW_NEWMV
  };
  assert(NELEMENTS(lut) == MB_MODE_COUNT);
  assert(is_inter_compound_mode(mode));
  return lut[mode];
}
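
// For illustration, the two tables above decompose a compound mode into one
// single-reference mode per reference, e.g.
//   compound_ref0_mode(NEAR_NEWMV) == NEARMV  (motion of ref_frame[0])
//   compound_ref1_mode(NEAR_NEWMV) == NEWMV   (motion of ref_frame[1])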

static INLINE int have_nearmv_in_inter_mode(PREDICTION_MODE mode) {
  return (mode == NEARMV || mode == NEAR_NEARMV || mode == NEAR_NEWMV ||
          mode == NEW_NEARMV);
}

static INLINE int have_newmv_in_inter_mode(PREDICTION_MODE mode) {
  return (mode == NEWMV || mode == NEW_NEWMV || mode == NEAREST_NEWMV ||
          mode == NEW_NEARESTMV || mode == NEAR_NEWMV || mode == NEW_NEARMV);
}

static INLINE int is_masked_compound_type(COMPOUND_TYPE type) {
  return (type == COMPOUND_WEDGE || type == COMPOUND_DIFFWTD);
}

/* For keyframes, intra block modes are predicted by the (already decoded)
   modes for the Y blocks to the left and above us; for interframes, there
   is a single probability table. */

typedef int8_t MV_REFERENCE_FRAME;

typedef struct {
  // Number of base colors for Y (0) and UV (1)
  uint8_t palette_size[2];
  // Value of base colors for Y, U, and V
  uint16_t palette_colors[3 * PALETTE_MAX_SIZE];
} PALETTE_MODE_INFO;

typedef struct {
  uint8_t use_filter_intra;
  FILTER_INTRA_MODE filter_intra_mode;
} FILTER_INTRA_MODE_INFO;

static const PREDICTION_MODE fimode_to_intradir[FILTER_INTRA_MODES] = {
  DC_PRED, V_PRED, H_PRED, D157_PRED, DC_PRED
};

#if CONFIG_RD_DEBUG
#define TXB_COEFF_COST_MAP_SIZE (MAX_MIB_SIZE)
#endif

typedef struct RD_STATS {
  int rate;
  int64_t dist;
  // Please be careful when using rdcost; it is not guaranteed to be set all
  // the time.
  // TODO(angiebird): Create a set of functions to manipulate the RD_STATS. In
  // these functions, make sure rdcost is always up-to-date according to
  // rate/dist.
  int64_t rdcost;
  int64_t sse;
  int skip;  // sse should equal dist when skip == 1
  int64_t ref_rdcost;
  int zero_rate;
  uint8_t invalid_rate;
#if CONFIG_RD_DEBUG
  int txb_coeff_cost[MAX_MB_PLANE];
  int txb_coeff_cost_map[MAX_MB_PLANE][TXB_COEFF_COST_MAP_SIZE]
                        [TXB_COEFF_COST_MAP_SIZE];
#endif  // CONFIG_RD_DEBUG
} RD_STATS;

// This struct is used to group function args that are commonly
// sent together in functions related to interinter compound modes
typedef struct {
  int wedge_index;
  int wedge_sign;
  DIFFWTD_MASK_TYPE mask_type;
  uint8_t *seg_mask;
  COMPOUND_TYPE type;
} INTERINTER_COMPOUND_DATA;

#define INTER_TX_SIZE_BUF_LEN 16
#define TXK_TYPE_BUF_LEN 64
// This structure now relates to 4x4 block regions.
typedef struct MB_MODE_INFO {
  // Common for both INTER and INTRA blocks
  BLOCK_SIZE sb_type;
  PREDICTION_MODE mode;
  TX_SIZE tx_size;
  uint8_t inter_tx_size[INTER_TX_SIZE_BUF_LEN];
  int8_t skip;
  int8_t skip_mode;
  int8_t segment_id;
  int8_t seg_id_predicted;  // valid only when temporal_update is enabled

  // Only for INTRA blocks
  UV_PREDICTION_MODE uv_mode;

  PALETTE_MODE_INFO palette_mode_info;
  uint8_t use_intrabc;

  // Only for INTER blocks
  InterpFilters interp_filters;
  MV_REFERENCE_FRAME ref_frame[2];

  TX_TYPE txk_type[TXK_TYPE_BUF_LEN];

  FILTER_INTRA_MODE_INFO filter_intra_mode_info;

  // The actual prediction angle is the base angle + (angle_delta * step).
  int8_t angle_delta[PLANE_TYPES];
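  // For example (a sketch assuming the usual 3-degree angle step): V_PRED has
  // a base angle of 90 degrees, so angle_delta = +2 gives a prediction angle
  // of 90 + 2 * 3 = 96 degrees.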

  // interintra members
  INTERINTRA_MODE interintra_mode;
  // TODO(debargha): Consolidate these flags
  int use_wedge_interintra;
  int interintra_wedge_index;
  int interintra_wedge_sign;
  // interinter members
  INTERINTER_COMPOUND_DATA interinter_comp;
  MOTION_MODE motion_mode;
  int overlappable_neighbors[2];
  int_mv mv[2];
  uint8_t ref_mv_idx;
  PARTITION_TYPE partition;
  /* deringing gain *per-superblock* */
  int8_t cdef_strength;
  int current_qindex;
  int delta_lf_from_base;
  int delta_lf[FRAME_LF_COUNT];
#if CONFIG_RD_DEBUG
  RD_STATS rd_stats;
  int mi_row;
  int mi_col;
#endif
  int num_proj_ref;
  WarpedMotionParams wm_params;

  // Index of the alpha Cb and alpha Cr combination
  int cfl_alpha_idx;
  // Joint sign of alpha Cb and alpha Cr
  int cfl_alpha_signs;

  int compound_idx;
  int comp_group_idx;
} MB_MODE_INFO;

static INLINE int is_intrabc_block(const MB_MODE_INFO *mbmi) {
  return mbmi->use_intrabc;
}

static INLINE PREDICTION_MODE get_uv_mode(UV_PREDICTION_MODE mode) {
  assert(mode < UV_INTRA_MODES);
  static const PREDICTION_MODE uv2y[] = {
    DC_PRED,        // UV_DC_PRED
    V_PRED,         // UV_V_PRED
    H_PRED,         // UV_H_PRED
    D45_PRED,       // UV_D45_PRED
    D135_PRED,      // UV_D135_PRED
    D113_PRED,      // UV_D113_PRED
    D157_PRED,      // UV_D157_PRED
    D203_PRED,      // UV_D203_PRED
    D67_PRED,       // UV_D67_PRED
    SMOOTH_PRED,    // UV_SMOOTH_PRED
    SMOOTH_V_PRED,  // UV_SMOOTH_V_PRED
    SMOOTH_H_PRED,  // UV_SMOOTH_H_PRED
    PAETH_PRED,     // UV_PAETH_PRED
    DC_PRED,        // UV_CFL_PRED
    INTRA_INVALID,  // UV_INTRA_MODES
    INTRA_INVALID,  // UV_MODE_INVALID
  };
  return uv2y[mode];
}

static INLINE int is_inter_block(const MB_MODE_INFO *mbmi) {
  return is_intrabc_block(mbmi) || mbmi->ref_frame[0] > INTRA_FRAME;
}

static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) {
  return mbmi->ref_frame[1] > INTRA_FRAME;
}

static INLINE int has_uni_comp_refs(const MB_MODE_INFO *mbmi) {
  return has_second_ref(mbmi) && (!((mbmi->ref_frame[0] >= BWDREF_FRAME) ^
                                    (mbmi->ref_frame[1] >= BWDREF_FRAME)));
}
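
// In other words, both references must lie on the same temporal side of the
// current frame. For example, { LAST_FRAME, LAST3_FRAME } (both past) and
// { BWDREF_FRAME, ALTREF_FRAME } (both future) count as unidirectional
// compound references, while { LAST_FRAME, ALTREF_FRAME } does not.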

static INLINE MV_REFERENCE_FRAME comp_ref0(int ref_idx) {
  static const MV_REFERENCE_FRAME lut[] = {
    LAST_FRAME,     // LAST_LAST2_FRAMES,
    LAST_FRAME,     // LAST_LAST3_FRAMES,
    LAST_FRAME,     // LAST_GOLDEN_FRAMES,
    BWDREF_FRAME,   // BWDREF_ALTREF_FRAMES,
    LAST2_FRAME,    // LAST2_LAST3_FRAMES
    LAST2_FRAME,    // LAST2_GOLDEN_FRAMES,
    LAST3_FRAME,    // LAST3_GOLDEN_FRAMES,
    BWDREF_FRAME,   // BWDREF_ALTREF2_FRAMES,
    ALTREF2_FRAME,  // ALTREF2_ALTREF_FRAMES,
  };
  assert(NELEMENTS(lut) == TOTAL_UNIDIR_COMP_REFS);
  return lut[ref_idx];
}

static INLINE MV_REFERENCE_FRAME comp_ref1(int ref_idx) {
  static const MV_REFERENCE_FRAME lut[] = {
    LAST2_FRAME,    // LAST_LAST2_FRAMES,
    LAST3_FRAME,    // LAST_LAST3_FRAMES,
    GOLDEN_FRAME,   // LAST_GOLDEN_FRAMES,
    ALTREF_FRAME,   // BWDREF_ALTREF_FRAMES,
    LAST3_FRAME,    // LAST2_LAST3_FRAMES
    GOLDEN_FRAME,   // LAST2_GOLDEN_FRAMES,
    GOLDEN_FRAME,   // LAST3_GOLDEN_FRAMES,
    ALTREF2_FRAME,  // BWDREF_ALTREF2_FRAMES,
    ALTREF_FRAME,   // ALTREF2_ALTREF_FRAMES,
  };
  assert(NELEMENTS(lut) == TOTAL_UNIDIR_COMP_REFS);
  return lut[ref_idx];
}

PREDICTION_MODE av1_left_block_mode(const MB_MODE_INFO *left_mi);

PREDICTION_MODE av1_above_block_mode(const MB_MODE_INFO *above_mi);

static INLINE int is_global_mv_block(const MB_MODE_INFO *const mbmi,
                                     TransformationType type) {
  const PREDICTION_MODE mode = mbmi->mode;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const int block_size_allowed =
      AOMMIN(block_size_wide[bsize], block_size_high[bsize]) >= 8;
  return (mode == GLOBALMV || mode == GLOBAL_GLOBALMV) && type > TRANSLATION &&
         block_size_allowed;
}

#if CONFIG_MISMATCH_DEBUG
static INLINE void mi_to_pixel_loc(int *pixel_c, int *pixel_r, int mi_col,
                                   int mi_row, int tx_blk_col, int tx_blk_row,
                                   int subsampling_x, int subsampling_y) {
  *pixel_c = ((mi_col >> subsampling_x) << MI_SIZE_LOG2) +
             (tx_blk_col << tx_size_wide_log2[0]);
  *pixel_r = ((mi_row >> subsampling_y) << MI_SIZE_LOG2) +
             (tx_blk_row << tx_size_high_log2[0]);
}
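// Worked example (a sketch assuming MI_SIZE_LOG2 == 2 and a 4x4 base transform
// unit, i.e. tx_size_wide_log2[0] == 2): for a 4:2:0 chroma plane
// (subsampling_x == 1), mi_col == 6 and tx_blk_col == 1 give
// *pixel_c == ((6 >> 1) << 2) + (1 << 2) == 16.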
#endif

enum ATTRIBUTE_PACKED mv_precision { MV_PRECISION_Q3, MV_PRECISION_Q4 };

struct buf_2d {
  uint8_t *buf;
  uint8_t *buf0;
  int width;
  int height;
  int stride;
};

typedef struct eob_info {
  uint16_t eob;
  uint16_t max_scan_line;
} eob_info;

typedef struct {
  DECLARE_ALIGNED(32, tran_low_t, dqcoeff[MAX_MB_PLANE][MAX_SB_SQUARE]);
  eob_info eob_data[MAX_MB_PLANE]
                   [MAX_SB_SQUARE / (TX_SIZE_W_MIN * TX_SIZE_H_MIN)];
  DECLARE_ALIGNED(16, uint8_t, color_index_map[2][MAX_SB_SQUARE]);
} CB_BUFFER;

typedef struct macroblockd_plane {
  tran_low_t *dqcoeff;
  tran_low_t *dqcoeff_block;
  eob_info *eob_data;
  PLANE_TYPE plane_type;
  int subsampling_x;
  int subsampling_y;
  struct buf_2d dst;
  struct buf_2d pre[2];
  ENTROPY_CONTEXT *above_context;
  ENTROPY_CONTEXT *left_context;

  // The dequantizers below are true dequantizers used only in the
  // dequantization process.  They have the same coefficient
  // shift/scale as TX.
  int16_t seg_dequant_QTX[MAX_SEGMENTS][2];
  uint8_t *color_index_map;

  // block size in pixels
  uint8_t width, height;

  qm_val_t *seg_iqmatrix[MAX_SEGMENTS][TX_SIZES_ALL];
  qm_val_t *seg_qmatrix[MAX_SEGMENTS][TX_SIZES_ALL];

  // the 'dequantizers' below are not literal dequantizer values.
  // They're used by encoder RDO to generate ad-hoc lambda values.
  // They use a hardwired Q3 coeff shift and do not necessarily match
  // the TX scale in use.
  const int16_t *dequant_Q3;
} MACROBLOCKD_PLANE;

#define BLOCK_OFFSET(x, i) \
  ((x) + (i) * (1 << (tx_size_wide_log2[0] + tx_size_high_log2[0])))
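// As a sketch, assuming the base transform unit is 4x4 (so
// tx_size_wide_log2[0] == tx_size_high_log2[0] == 2), each increment of 'i'
// advances by 1 << (2 + 2) == 16 coefficients; e.g. BLOCK_OFFSET(dqcoeff, 3)
// is dqcoeff + 48.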

typedef struct RefBuffer {
  int idx;      // frame buf idx
  int map_idx;  // frame map idx
  YV12_BUFFER_CONFIG *buf;
  struct scale_factors sf;
} RefBuffer;

typedef struct {
  DECLARE_ALIGNED(16, InterpKernel, vfilter);
  DECLARE_ALIGNED(16, InterpKernel, hfilter);
} WienerInfo;

typedef struct {
  int ep;
  int xqd[2];
} SgrprojInfo;

#if CONFIG_DEBUG
#define CFL_SUB8X8_VAL_MI_SIZE (4)
#define CFL_SUB8X8_VAL_MI_SQUARE \
  (CFL_SUB8X8_VAL_MI_SIZE * CFL_SUB8X8_VAL_MI_SIZE)
#endif  // CONFIG_DEBUG
#define CFL_MAX_BLOCK_SIZE (BLOCK_32X32)
#define CFL_BUF_LINE (32)
#define CFL_BUF_LINE_I128 (CFL_BUF_LINE >> 3)
#define CFL_BUF_LINE_I256 (CFL_BUF_LINE >> 4)
#define CFL_BUF_SQUARE (CFL_BUF_LINE * CFL_BUF_LINE)
typedef struct cfl_ctx {
  // Q3 reconstructed luma pixels (only Q2 is required, but Q3 is used to avoid
  // shifts)
  uint16_t recon_buf_q3[CFL_BUF_SQUARE];
  // Q3 AC contributions (reconstructed luma pixels - tx block avg)
  int16_t ac_buf_q3[CFL_BUF_SQUARE];

  // Cache the DC_PRED when performing RDO, so it does not have to be recomputed
  // for every scaling parameter
  int dc_pred_is_cached[CFL_PRED_PLANES];
  // The DC_PRED cache is disabled when decoding
  int use_dc_pred_cache;
  // Only cache the first row of the DC_PRED
  int16_t dc_pred_cache[CFL_PRED_PLANES][CFL_BUF_LINE];

  // Height and width currently used in the CfL prediction buffer.
  int buf_height, buf_width;

  int are_parameters_computed;

  // Chroma subsampling
  int subsampling_x, subsampling_y;

  int mi_row, mi_col;

  // Whether the reconstructed luma pixels need to be stored
  int store_y;

#if CONFIG_DEBUG
  int rate;
#endif  // CONFIG_DEBUG

  int is_chroma_reference;
} CFL_CTX;

typedef struct jnt_comp_params {
  int use_jnt_comp_avg;
  int fwd_offset;
  int bck_offset;
} JNT_COMP_PARAMS;

// Most/all of the pointers are mere pointers to actual arrays that are
// allocated elsewhere. This is mostly for coding convenience.
typedef struct macroblockd {
  struct macroblockd_plane plane[MAX_MB_PLANE];

  TileInfo tile;

  int mi_stride;

  MB_MODE_INFO **mi;
  MB_MODE_INFO *left_mbmi;
  MB_MODE_INFO *above_mbmi;
  MB_MODE_INFO *chroma_left_mbmi;
  MB_MODE_INFO *chroma_above_mbmi;

  int up_available;
  int left_available;
  int chroma_up_available;
  int chroma_left_available;

  /* Distance of MB away from frame edges in subpixels (1/8th pixel)  */
  int mb_to_left_edge;
  int mb_to_right_edge;
  int mb_to_top_edge;
  int mb_to_bottom_edge;

  /* pointers to reference frames */
  const RefBuffer *block_refs[2];

  /* pointer to current frame */
  const YV12_BUFFER_CONFIG *cur_buf;

  ENTROPY_CONTEXT *above_context[MAX_MB_PLANE];
  ENTROPY_CONTEXT left_context[MAX_MB_PLANE][MAX_MIB_SIZE];

  PARTITION_CONTEXT *above_seg_context;
  PARTITION_CONTEXT left_seg_context[MAX_MIB_SIZE];

  TXFM_CONTEXT *above_txfm_context;
  TXFM_CONTEXT *left_txfm_context;
  TXFM_CONTEXT left_txfm_context_buffer[MAX_MIB_SIZE];

  WienerInfo wiener_info[MAX_MB_PLANE];
  SgrprojInfo sgrproj_info[MAX_MB_PLANE];

  // block dimension in the unit of mode_info.
  uint8_t n4_w, n4_h;

  uint8_t ref_mv_count[MODE_CTX_REF_FRAMES];
  CANDIDATE_MV ref_mv_stack[MODE_CTX_REF_FRAMES][MAX_REF_MV_STACK_SIZE];
  uint8_t is_sec_rect;

  // Counts of each reference frame in the above and left neighboring blocks.
  // NOTE: Take into account both single and comp references.
  uint8_t neighbors_ref_counts[REF_FRAMES];

  FRAME_CONTEXT *tile_ctx;
  /* Bit depth: 8, 10, 12 */
  int bd;

  int qindex[MAX_SEGMENTS];
  int lossless[MAX_SEGMENTS];
  int corrupted;
  int cur_frame_force_integer_mv;
  // same as that in AV1_COMMON
  struct aom_internal_error_info *error_info;
  const WarpedMotionParams *global_motion;
  int delta_qindex;
  int current_qindex;
  // Since the actual frame-level loop filter level is not available at the
  // beginning of the tile on the encoder side (it only becomes available
  // during actual filtering), we record the delta_lf (against the frame-level
  // loop filter level) and code the delta between the previous superblock's
  // delta lf and the current delta lf. This is equivalent to the delta
  // between the previous superblock's actual lf and the current lf.
  int delta_lf_from_base;
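  // Illustrative example with hypothetical numbers: with a frame-level filter
  // level of 20, a previous superblock filtered at 18 (delta of -2) and the
  // current superblock at 21 (delta of +1), the value coded for the current
  // superblock is (+1) - (-2) = +3.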
  // We have four frame filter levels for different planes and directions. So,
  // to support the per-superblock update, we need to add a few more params
  // as below.
  // 0: delta loop filter level for y plane vertical
  // 1: delta loop filter level for y plane horizontal
  // 2: delta loop filter level for u plane
  // 3: delta loop filter level for v plane
  // To make it consistent with the reference to each filter level in segment,
  // we need to subtract 1, since
  // SEG_LVL_ALT_LF_Y_V = 1;
  // SEG_LVL_ALT_LF_Y_H = 2;
  // SEG_LVL_ALT_LF_U   = 3;
  // SEG_LVL_ALT_LF_V   = 4;
  int delta_lf[FRAME_LF_COUNT];
  int cdef_preset[4];

  DECLARE_ALIGNED(16, uint8_t, seg_mask[2 * MAX_SB_SQUARE]);
  uint8_t *mc_buf[2];
  CFL_CTX cfl;

  JNT_COMP_PARAMS jcp_param;

  uint16_t cb_offset[MAX_MB_PLANE];
  uint16_t txb_offset[MAX_MB_PLANE];
  uint16_t color_index_map_offset[2];

  CONV_BUF_TYPE *tmp_conv_dst;
  uint8_t *tmp_obmc_bufs[2];
} MACROBLOCKD;

static INLINE int get_bitdepth_data_path_index(const MACROBLOCKD *xd) {
  return xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH ? 1 : 0;
}

static INLINE uint8_t *get_buf_by_bd(const MACROBLOCKD *xd, uint8_t *buf16) {
  return (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
             ? CONVERT_TO_BYTEPTR(buf16)
             : buf16;
}

static INLINE int get_sqr_bsize_idx(BLOCK_SIZE bsize) {
  switch (bsize) {
    case BLOCK_4X4: return 0;
    case BLOCK_8X8: return 1;
    case BLOCK_16X16: return 2;
    case BLOCK_32X32: return 3;
    case BLOCK_64X64: return 4;
    case BLOCK_128X128: return 5;
    default: return SQR_BLOCK_SIZES;
  }
}

// For a square block size 'bsize', returns the size of the sub-blocks used by
// the given partition type. If the partition produces sub-blocks of different
// sizes, then the function returns the largest sub-block size.
// Implements the Partition_Subsize lookup table in the spec (Section 9.3.
// Conversion tables).
// Note: the input block size should be square; otherwise it is considered
// invalid.
static INLINE BLOCK_SIZE get_partition_subsize(BLOCK_SIZE bsize,
                                               PARTITION_TYPE partition) {
  if (partition == PARTITION_INVALID) {
    return BLOCK_INVALID;
  } else {
    const int sqr_bsize_idx = get_sqr_bsize_idx(bsize);
    return sqr_bsize_idx >= SQR_BLOCK_SIZES
               ? BLOCK_INVALID
               : subsize_lookup[partition][sqr_bsize_idx];
  }
}
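
// Usage sketch (values as in the spec's Partition_Subsize table):
//   get_partition_subsize(BLOCK_64X64, PARTITION_SPLIT) == BLOCK_32X32
//   get_partition_subsize(BLOCK_64X64, PARTITION_HORZ)  == BLOCK_64X32
//   get_partition_subsize(BLOCK_8X8,   PARTITION_VERT)  == BLOCK_4X8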

static TX_TYPE intra_mode_to_tx_type(const MB_MODE_INFO *mbmi,
                                     PLANE_TYPE plane_type) {
  static const TX_TYPE _intra_mode_to_tx_type[INTRA_MODES] = {
    DCT_DCT,    // DC
    ADST_DCT,   // V
    DCT_ADST,   // H
    DCT_DCT,    // D45
    ADST_ADST,  // D135
    ADST_DCT,   // D113
    DCT_ADST,   // D157
    DCT_ADST,   // D203
    ADST_DCT,   // D67
    ADST_ADST,  // SMOOTH
    ADST_DCT,   // SMOOTH_V
    DCT_ADST,   // SMOOTH_H
    ADST_ADST,  // PAETH
  };
  const PREDICTION_MODE mode =
      (plane_type == PLANE_TYPE_Y) ? mbmi->mode : get_uv_mode(mbmi->uv_mode);
  assert(mode < INTRA_MODES);
  return _intra_mode_to_tx_type[mode];
}

static INLINE int is_rect_tx(TX_SIZE tx_size) { return tx_size >= TX_SIZES; }

static INLINE int block_signals_txsize(BLOCK_SIZE bsize) {
  return bsize > BLOCK_4X4;
}

// Number of transform types in each set type
static const int av1_num_ext_tx_set[EXT_TX_SET_TYPES] = {
  1, 2, 5, 7, 12, 16,
};

static const int av1_ext_tx_used[EXT_TX_SET_TYPES][TX_TYPES] = {
  { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
  { 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0 },
  { 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0 },
  { 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0 },
  { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 },
  { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
};

static const uint16_t av1_ext_tx_used_flag[EXT_TX_SET_TYPES] = {
  0x0001,  // 0000 0000 0000 0001
  0x0201,  // 0000 0010 0000 0001
  0x020F,  // 0000 0010 0000 1111
  0x0E0F,  // 0000 1110 0000 1111
  0x0FFF,  // 0000 1111 1111 1111
  0xFFFF,  // 1111 1111 1111 1111
};
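
// The bit patterns above mirror av1_ext_tx_used: bit 'tx_type' of
// av1_ext_tx_used_flag[set] equals av1_ext_tx_used[set][tx_type], so
// membership can be tested as (av1_ext_tx_used_flag[set] >> tx_type) & 1.
// For example, set 1 (0x0201) contains only DCT_DCT (0) and IDTX (9).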

static INLINE TxSetType av1_get_ext_tx_set_type(TX_SIZE tx_size, int is_inter,
                                                int use_reduced_set) {
  const TX_SIZE tx_size_sqr_up = txsize_sqr_up_map[tx_size];
  if (tx_size_sqr_up > TX_32X32) return EXT_TX_SET_DCTONLY;
  if (tx_size_sqr_up == TX_32X32)
    return is_inter ? EXT_TX_SET_DCT_IDTX : EXT_TX_SET_DCTONLY;
  if (use_reduced_set)
    return is_inter ? EXT_TX_SET_DCT_IDTX : EXT_TX_SET_DTT4_IDTX;
  const TX_SIZE tx_size_sqr = txsize_sqr_map[tx_size];
  if (is_inter) {
    return (tx_size_sqr == TX_16X16 ? EXT_TX_SET_DTT9_IDTX_1DDCT
                                    : EXT_TX_SET_ALL16);
  } else {
    return (tx_size_sqr == TX_16X16 ? EXT_TX_SET_DTT4_IDTX
                                    : EXT_TX_SET_DTT4_IDTX_1DDCT);
  }
}
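
// For example, following the branches above with use_reduced_set == 0:
//   av1_get_ext_tx_set_type(TX_4X4, /*is_inter=*/1, 0) == EXT_TX_SET_ALL16
//   av1_get_ext_tx_set_type(TX_16X16, /*is_inter=*/0, 0) == EXT_TX_SET_DTT4_IDTX
//   av1_get_ext_tx_set_type(TX_64X64, /*is_inter=*/1, 0) == EXT_TX_SET_DCTONLY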

// Maps tx set types to the indices.
static const int ext_tx_set_index[2][EXT_TX_SET_TYPES] = {
  { // Intra
    0, -1, 2, 1, -1, -1 },
  { // Inter
    0, 3, -1, -1, 2, 1 },
};

static INLINE int get_ext_tx_set(TX_SIZE tx_size, int is_inter,
                                 int use_reduced_set) {
  const TxSetType set_type =
      av1_get_ext_tx_set_type(tx_size, is_inter, use_reduced_set);
  return ext_tx_set_index[is_inter][set_type];
}

static INLINE int get_ext_tx_types(TX_SIZE tx_size, int is_inter,
                                   int use_reduced_set) {
  const int set_type =
      av1_get_ext_tx_set_type(tx_size, is_inter, use_reduced_set);
  return av1_num_ext_tx_set[set_type];
}

#define TXSIZEMAX(t1, t2) (tx_size_2d[(t1)] >= tx_size_2d[(t2)] ? (t1) : (t2))
#define TXSIZEMIN(t1, t2) (tx_size_2d[(t1)] <= tx_size_2d[(t2)] ? (t1) : (t2))
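
// These compare by pixel area (tx_size_2d), not by enum order; e.g.
// TXSIZEMAX(TX_16X8, TX_8X8) is TX_16X8 (128 >= 64 pixels) and
// TXSIZEMIN(TX_16X8, TX_8X8) is TX_8X8.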

static INLINE TX_SIZE tx_size_from_tx_mode(BLOCK_SIZE bsize, TX_MODE tx_mode) {
  const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  const TX_SIZE max_rect_tx_size = max_txsize_rect_lookup[bsize];
  if (bsize == BLOCK_4X4)
    return AOMMIN(max_txsize_lookup[bsize], largest_tx_size);
  if (txsize_sqr_map[max_rect_tx_size] <= largest_tx_size)
    return max_rect_tx_size;
  else
    return largest_tx_size;
}

extern const int16_t dr_intra_derivative[90];
static const uint8_t mode_to_angle_map[] = {
  0, 90, 180, 45, 135, 113, 157, 203, 67, 0, 0, 0, 0,
};

// Converts block_index for given transform size to index of the block in
// raster order.
static INLINE int av1_block_index_to_raster_order(TX_SIZE tx_size,
                                                  int block_idx) {
  // For transform size 4x8, the possible block_idx values are 0 & 2, because
  // block_idx values are incremented in steps of size 'tx_width_unit x
  // tx_height_unit'. But, for this transform size, block_idx = 2 corresponds to
  // block number 1 in raster order, inside an 8x8 MI block.
  // For any other transform size, the two indices are equivalent.
  return (tx_size == TX_4X8 && block_idx == 2) ? 1 : block_idx;
}

// Inverse of above function.
// Note: only implemented for transform sizes 4x4, 4x8 and 8x4 right now.
static INLINE int av1_raster_order_to_block_index(TX_SIZE tx_size,
                                                  int raster_order) {
  assert(tx_size == TX_4X4 || tx_size == TX_4X8 || tx_size == TX_8X4);
  // We ensure that block indices are 0 & 2 if tx size is 4x8 or 8x4.
  return (tx_size == TX_4X4) ? raster_order : (raster_order > 0) ? 2 : 0;
}

static INLINE TX_TYPE get_default_tx_type(PLANE_TYPE plane_type,
                                          const MACROBLOCKD *xd,
                                          TX_SIZE tx_size) {
  const MB_MODE_INFO *const mbmi = xd->mi[0];

  if (is_inter_block(mbmi) || plane_type != PLANE_TYPE_Y ||
      xd->lossless[mbmi->segment_id] || tx_size >= TX_32X32)
    return DCT_DCT;

  return intra_mode_to_tx_type(mbmi, plane_type);
}

// Implements the get_plane_residual_size() function in the spec (Section
// 5.11.38. Get plane residual size function).
static INLINE BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize,
                                              int subsampling_x,
                                              int subsampling_y) {
  if (bsize == BLOCK_INVALID) return BLOCK_INVALID;
  return ss_size_lookup[bsize][subsampling_x][subsampling_y];
}

static INLINE int av1_get_txb_size_index(BLOCK_SIZE bsize, int blk_row,
                                         int blk_col) {
  TX_SIZE txs = max_txsize_rect_lookup[bsize];
  for (int level = 0; level < MAX_VARTX_DEPTH - 1; ++level)
    txs = sub_tx_size_map[txs];
  const int tx_w_log2 = tx_size_wide_log2[txs] - MI_SIZE_LOG2;
  const int tx_h_log2 = tx_size_high_log2[txs] - MI_SIZE_LOG2;
  const int bw_log2 = mi_size_wide_log2[bsize];
  const int stride_log2 = bw_log2 - tx_w_log2;
  const int index =
      ((blk_row >> tx_h_log2) << stride_log2) + (blk_col >> tx_w_log2);
  assert(index < INTER_TX_SIZE_BUF_LEN);
  return index;
}

static INLINE int av1_get_txk_type_index(BLOCK_SIZE bsize, int blk_row,
                                         int blk_col) {
  TX_SIZE txs = max_txsize_rect_lookup[bsize];
  for (int level = 0; level < MAX_VARTX_DEPTH; ++level)
    txs = sub_tx_size_map[txs];
  const int tx_w_log2 = tx_size_wide_log2[txs] - MI_SIZE_LOG2;
  const int tx_h_log2 = tx_size_high_log2[txs] - MI_SIZE_LOG2;
  const int bw_uint_log2 = mi_size_wide_log2[bsize];
  const int stride_log2 = bw_uint_log2 - tx_w_log2;
  const int index =
      ((blk_row >> tx_h_log2) << stride_log2) + (blk_col >> tx_w_log2);
  assert(index < TXK_TYPE_BUF_LEN);
  return index;
}

static INLINE void update_txk_array(TX_TYPE *txk_type, BLOCK_SIZE bsize,
                                    int blk_row, int blk_col, TX_SIZE tx_size,
                                    TX_TYPE tx_type) {
  const int txk_type_idx = av1_get_txk_type_index(bsize, blk_row, blk_col);
  txk_type[txk_type_idx] = tx_type;

  const int txw = tx_size_wide_unit[tx_size];
  const int txh = tx_size_high_unit[tx_size];
  // The 16x16 unit is due to the constraint from tx_64x64 which sets the
  // maximum tx size for chroma as 32x32. Coupled with 4x1 transform block
  // size, the constraint takes effect in 32x16 / 16x32 size too. To solve
  // the intricacy, cover all the 16x16 units inside a 64 level transform.
  if (txw == tx_size_wide_unit[TX_64X64] ||
      txh == tx_size_high_unit[TX_64X64]) {
    const int tx_unit = tx_size_wide_unit[TX_16X16];
    for (int idy = 0; idy < txh; idy += tx_unit) {
      for (int idx = 0; idx < txw; idx += tx_unit) {
        const int this_index =
            av1_get_txk_type_index(bsize, blk_row + idy, blk_col + idx);
        txk_type[this_index] = tx_type;
      }
    }
  }
}

static INLINE TX_TYPE av1_get_tx_type(PLANE_TYPE plane_type,
                                      const MACROBLOCKD *xd, int blk_row,
                                      int blk_col, TX_SIZE tx_size,
                                      int reduced_tx_set) {
  const MB_MODE_INFO *const mbmi = xd->mi[0];
  const struct macroblockd_plane *const pd = &xd->plane[plane_type];
  const TxSetType tx_set_type =
      av1_get_ext_tx_set_type(tx_size, is_inter_block(mbmi), reduced_tx_set);

  TX_TYPE tx_type;
  if (xd->lossless[mbmi->segment_id] || txsize_sqr_up_map[tx_size] > TX_32X32) {
    tx_type = DCT_DCT;
  } else {
    if (plane_type == PLANE_TYPE_Y) {
      const int txk_type_idx =
          av1_get_txk_type_index(mbmi->sb_type, blk_row, blk_col);
      tx_type = mbmi->txk_type[txk_type_idx];
    } else if (is_inter_block(mbmi)) {
      // scale back to y plane's coordinate
      blk_row <<= pd->subsampling_y;
      blk_col <<= pd->subsampling_x;
      const int txk_type_idx =
          av1_get_txk_type_index(mbmi->sb_type, blk_row, blk_col);
      tx_type = mbmi->txk_type[txk_type_idx];
    } else {
      // In intra mode, uv planes don't share the same prediction mode as y
      // plane, so the tx_type should not be shared
      tx_type = intra_mode_to_tx_type(mbmi, PLANE_TYPE_UV);
    }
  }
  assert(tx_type < TX_TYPES);
  if (!av1_ext_tx_used[tx_set_type][tx_type]) return DCT_DCT;
  return tx_type;
}

void av1_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y,
                            const int num_planes);

static INLINE int bsize_to_max_depth(BLOCK_SIZE bsize) {
  TX_SIZE tx_size = max_txsize_rect_lookup[bsize];
  int depth = 0;
  while (depth < MAX_TX_DEPTH && tx_size != TX_4X4) {
    depth++;
    tx_size = sub_tx_size_map[tx_size];
  }
  return depth;
}

static INLINE int bsize_to_tx_size_cat(BLOCK_SIZE bsize) {
  TX_SIZE tx_size = max_txsize_rect_lookup[bsize];
  assert(tx_size != TX_4X4);
  int depth = 0;
  while (tx_size != TX_4X4) {
    depth++;
    tx_size = sub_tx_size_map[tx_size];
    assert(depth < 10);
  }
  assert(depth <= MAX_TX_CATS);
  return depth - 1;
}

static INLINE TX_SIZE depth_to_tx_size(int depth, BLOCK_SIZE bsize) {
  TX_SIZE max_tx_size = max_txsize_rect_lookup[bsize];
  TX_SIZE tx_size = max_tx_size;
  for (int d = 0; d < depth; ++d) tx_size = sub_tx_size_map[tx_size];
  return tx_size;
}

static INLINE TX_SIZE av1_get_adjusted_tx_size(TX_SIZE tx_size) {
  switch (tx_size) {
    case TX_64X64:
    case TX_64X32:
    case TX_32X64: return TX_32X32;
    case TX_64X16: return TX_32X16;
    case TX_16X64: return TX_16X32;
    default: return tx_size;
  }
}

static INLINE TX_SIZE av1_get_max_uv_txsize(BLOCK_SIZE bsize, int subsampling_x,
                                            int subsampling_y) {
  const BLOCK_SIZE plane_bsize =
      get_plane_block_size(bsize, subsampling_x, subsampling_y);
  assert(plane_bsize < BLOCK_SIZES_ALL);
  const TX_SIZE uv_tx = max_txsize_rect_lookup[plane_bsize];
  return av1_get_adjusted_tx_size(uv_tx);
}

static INLINE TX_SIZE av1_get_tx_size(int plane, const MACROBLOCKD *xd) {
  const MB_MODE_INFO *mbmi = xd->mi[0];
  if (xd->lossless[mbmi->segment_id]) return TX_4X4;
  if (plane == 0) return mbmi->tx_size;
  const MACROBLOCKD_PLANE *pd = &xd->plane[plane];
  return av1_get_max_uv_txsize(mbmi->sb_type, pd->subsampling_x,
                               pd->subsampling_y);
}

void av1_reset_skip_context(MACROBLOCKD *xd, int mi_row, int mi_col,
                            BLOCK_SIZE bsize, const int num_planes);

void av1_reset_loop_filter_delta(MACROBLOCKD *xd, int num_planes);

void av1_reset_loop_restoration(MACROBLOCKD *xd, const int num_planes);

typedef void (*foreach_transformed_block_visitor)(int plane, int block,
                                                  int blk_row, int blk_col,
                                                  BLOCK_SIZE plane_bsize,
                                                  TX_SIZE tx_size, void *arg);

void av1_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
                      int plane, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                      int has_eob, int aoff, int loff);

#define MAX_INTERINTRA_SB_SQUARE 32 * 32
static INLINE int is_interintra_mode(const MB_MODE_INFO *mbmi) {
  return (mbmi->ref_frame[0] > INTRA_FRAME &&
          mbmi->ref_frame[1] == INTRA_FRAME);
}

static INLINE int is_interintra_allowed_bsize(const BLOCK_SIZE bsize) {
  return (bsize >= BLOCK_8X8) && (bsize <= BLOCK_32X32);
}

static INLINE int is_interintra_allowed_mode(const PREDICTION_MODE mode) {
  return (mode >= SINGLE_INTER_MODE_START) && (mode < SINGLE_INTER_MODE_END);
}

static INLINE int is_interintra_allowed_ref(const MV_REFERENCE_FRAME rf[2]) {
  return (rf[0] > INTRA_FRAME) && (rf[1] <= INTRA_FRAME);
}

static INLINE int is_interintra_allowed(const MB_MODE_INFO *mbmi) {
  return is_interintra_allowed_bsize(mbmi->sb_type) &&
         is_interintra_allowed_mode(mbmi->mode) &&
         is_interintra_allowed_ref(mbmi->ref_frame);
}

static INLINE int is_interintra_allowed_bsize_group(int group) {
  int i;
  for (i = 0; i < BLOCK_SIZES_ALL; i++) {
    if (size_group_lookup[i] == group &&
        is_interintra_allowed_bsize((BLOCK_SIZE)i)) {
      return 1;
    }
  }
  return 0;
}

static INLINE int is_interintra_pred(const MB_MODE_INFO *mbmi) {
  return mbmi->ref_frame[0] > INTRA_FRAME &&
         mbmi->ref_frame[1] == INTRA_FRAME && is_interintra_allowed(mbmi);
}

static INLINE int get_vartx_max_txsize(const MACROBLOCKD *xd, BLOCK_SIZE bsize,
                                       int plane) {
  if (xd->lossless[xd->mi[0]->segment_id]) return TX_4X4;
  const TX_SIZE max_txsize = max_txsize_rect_lookup[bsize];
  if (plane == 0) return max_txsize;            // luma
  return av1_get_adjusted_tx_size(max_txsize);  // chroma
}

static INLINE int is_motion_variation_allowed_bsize(BLOCK_SIZE bsize) {
  return AOMMIN(block_size_wide[bsize], block_size_high[bsize]) >= 8;
}

static INLINE int is_motion_variation_allowed_compound(
    const MB_MODE_INFO *mbmi) {
  if (!has_second_ref(mbmi))
    return 1;
  else
    return 0;
}

// input: log2 of length, 0(4), 1(8), ...
static const int max_neighbor_obmc[6] = { 0, 1, 2, 3, 4, 4 };

static INLINE int check_num_overlappable_neighbors(const MB_MODE_INFO *mbmi) {
  return !(mbmi->overlappable_neighbors[0] == 0 &&
           mbmi->overlappable_neighbors[1] == 0);
}

static INLINE MOTION_MODE
motion_mode_allowed(const WarpedMotionParams *gm_params, const MACROBLOCKD *xd,
                    const MB_MODE_INFO *mbmi, int allow_warped_motion) {
  if (xd->cur_frame_force_integer_mv == 0) {
    const TransformationType gm_type = gm_params[mbmi->ref_frame[0]].wmtype;
    if (is_global_mv_block(mbmi, gm_type)) return SIMPLE_TRANSLATION;
  }
  if (is_motion_variation_allowed_bsize(mbmi->sb_type) &&
      is_inter_mode(mbmi->mode) && mbmi->ref_frame[1] != INTRA_FRAME &&
      is_motion_variation_allowed_compound(mbmi)) {
    if (!check_num_overlappable_neighbors(mbmi)) return SIMPLE_TRANSLATION;
    assert(!has_second_ref(mbmi));
    if (mbmi->num_proj_ref >= 1 &&
        (allow_warped_motion && !av1_is_scaled(&(xd->block_refs[0]->sf)))) {
      if (xd->cur_frame_force_integer_mv) {
        return OBMC_CAUSAL;
      }
      return WARPED_CAUSAL;
    }
    return OBMC_CAUSAL;
  } else {
    return SIMPLE_TRANSLATION;
  }
}

static INLINE void assert_motion_mode_valid(MOTION_MODE mode,
                                            const WarpedMotionParams *gm_params,
                                            const MACROBLOCKD *xd,
                                            const MB_MODE_INFO *mbmi,
                                            int allow_warped_motion) {
  const MOTION_MODE last_motion_mode_allowed =
      motion_mode_allowed(gm_params, xd, mbmi, allow_warped_motion);

  // Check that the input mode is not illegal
  if (last_motion_mode_allowed < mode)
    assert(0 && "Illegal motion mode selected");
}

static INLINE int is_neighbor_overlappable(const MB_MODE_INFO *mbmi) {
  return (is_inter_block(mbmi));
}

static INLINE int av1_allow_palette(int allow_screen_content_tools,
                                    BLOCK_SIZE sb_type) {
  return allow_screen_content_tools && block_size_wide[sb_type] <= 64 &&
         block_size_high[sb_type] <= 64 && sb_type >= BLOCK_8X8;
}

// Returns sub-sampled dimensions of the given block.
// The output values for 'rows_within_bounds' and 'cols_within_bounds' will
// differ from 'height' and 'width' when part of the block is outside the
// right and/or bottom image boundary.
static INLINE void av1_get_block_dimensions(BLOCK_SIZE bsize, int plane,
                                            const MACROBLOCKD *xd, int *width,
                                            int *height,
                                            int *rows_within_bounds,
                                            int *cols_within_bounds) {
  const int block_height = block_size_high[bsize];
  const int block_width = block_size_wide[bsize];
  const int block_rows = (xd->mb_to_bottom_edge >= 0)
                             ? block_height
                             : (xd->mb_to_bottom_edge >> 3) + block_height;
  const int block_cols = (xd->mb_to_right_edge >= 0)
                             ? block_width
                             : (xd->mb_to_right_edge >> 3) + block_width;
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  assert(IMPLIES(plane == PLANE_TYPE_Y, pd->subsampling_x == 0));
  assert(IMPLIES(plane == PLANE_TYPE_Y, pd->subsampling_y == 0));
  assert(block_width >= block_cols);
  assert(block_height >= block_rows);
  const int plane_block_width = block_width >> pd->subsampling_x;
  const int plane_block_height = block_height >> pd->subsampling_y;
  // Special handling for chroma sub8x8.
  const int is_chroma_sub8_x = plane > 0 && plane_block_width < 4;
  const int is_chroma_sub8_y = plane > 0 && plane_block_height < 4;
  if (width) *width = plane_block_width + 2 * is_chroma_sub8_x;
  if (height) *height = plane_block_height + 2 * is_chroma_sub8_y;
  if (rows_within_bounds) {
    *rows_within_bounds =
        (block_rows >> pd->subsampling_y) + 2 * is_chroma_sub8_y;
  }
  if (cols_within_bounds) {
    *cols_within_bounds =
        (block_cols >> pd->subsampling_x) + 2 * is_chroma_sub8_x;
  }
}
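
// Worked example (a sketch): for a BLOCK_16X16 luma block whose bottom four
// pixel rows lie below the frame, xd->mb_to_bottom_edge == -4 * 8 == -32
// (edge distances are in 1/8th-pel units), so block_rows == (-32 >> 3) + 16 ==
// 12, and *rows_within_bounds is 12 while *height stays 16.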

/* clang-format off */
typedef aom_cdf_prob (*MapCdf)[PALETTE_COLOR_INDEX_CONTEXTS]
                              [CDF_SIZE(PALETTE_COLORS)];
typedef const int (*ColorCost)[PALETTE_SIZES][PALETTE_COLOR_INDEX_CONTEXTS]
                              [PALETTE_COLORS];
/* clang-format on */

typedef struct {
  int rows;
  int cols;
  int n_colors;
  int plane_width;
  int plane_height;
  uint8_t *color_map;
  MapCdf map_cdf;
  ColorCost color_cost;
} Av1ColorMapParam;

static INLINE int is_nontrans_global_motion(const MACROBLOCKD *xd,
                                            const MB_MODE_INFO *mbmi) {
  int ref;

  // First check if all modes are GLOBALMV
  if (mbmi->mode != GLOBALMV && mbmi->mode != GLOBAL_GLOBALMV) return 0;

  if (AOMMIN(mi_size_wide[mbmi->sb_type], mi_size_high[mbmi->sb_type]) < 2)
    return 0;

  // Now check if all global motion is non translational
  for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
    if (xd->global_motion[mbmi->ref_frame[ref]].wmtype == TRANSLATION) return 0;
  }
  return 1;
}

static INLINE PLANE_TYPE get_plane_type(int plane) {
  return (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
}

static INLINE int av1_get_max_eob(TX_SIZE tx_size) {
  if (tx_size == TX_64X64 || tx_size == TX_64X32 || tx_size == TX_32X64) {
    return 1024;
  }
  if (tx_size == TX_16X64 || tx_size == TX_64X16) {
    return 512;
  }
  return tx_size_2d[tx_size];
}
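
// The caps above reflect that 64-point transforms only keep their top-left
// 32x32 (or 32-wide/32-high) region of coefficients, so at most 32 * 32 = 1024
// (or 32 * 16 = 512) of them can be nonzero.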

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // AOM_AV1_COMMON_BLOCKD_H_