1 /*
2  * Copyright (c) 2016, Alliance for Open Media. All rights reserved
3  *
4  * This source code is subject to the terms of the BSD 2 Clause License and
5  * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6  * was not distributed with this source code in the LICENSE file, you can
7  * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8  * Media Patent License 1.0 was not distributed with this source code in the
9  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10  */
11 
12 #include <assert.h>
13 #include <stdlib.h>  // qsort()
14 
15 #include "./aom_config.h"
16 #include "./aom_dsp_rtcd.h"
17 #include "./aom_scale_rtcd.h"
18 #include "./av1_rtcd.h"
19 
20 #include "aom/aom_codec.h"
21 #include "aom_dsp/aom_dsp_common.h"
22 #include "aom_dsp/binary_codes_reader.h"
23 #include "aom_dsp/bitreader.h"
24 #include "aom_dsp/bitreader_buffer.h"
25 #include "aom_mem/aom_mem.h"
26 #include "aom_ports/mem.h"
27 #include "aom_ports/mem_ops.h"
28 #include "aom_scale/aom_scale.h"
29 #include "aom_util/aom_thread.h"
30 
31 #if CONFIG_BITSTREAM_DEBUG
32 #include "aom_util/debug_util.h"
33 #endif  // CONFIG_BITSTREAM_DEBUG
34 
35 #include "av1/common/alloccommon.h"
36 #if CONFIG_CDEF
37 #include "av1/common/cdef.h"
38 #endif
39 #if CONFIG_INSPECTION
40 #include "av1/decoder/inspection.h"
41 #endif
42 #include "av1/common/common.h"
43 #include "av1/common/entropy.h"
44 #include "av1/common/entropymode.h"
45 #include "av1/common/entropymv.h"
46 #include "av1/common/idct.h"
47 #include "av1/common/mvref_common.h"
48 #include "av1/common/pred_common.h"
49 #include "av1/common/quant_common.h"
50 #include "av1/common/reconinter.h"
51 #include "av1/common/reconintra.h"
52 #if CONFIG_FRAME_SUPERRES
53 #include "av1/common/resize.h"
54 #endif  // CONFIG_FRAME_SUPERRES
55 #include "av1/common/seg_common.h"
56 #include "av1/common/thread_common.h"
57 #include "av1/common/tile_common.h"
58 
59 #include "av1/decoder/decodeframe.h"
60 #include "av1/decoder/decodemv.h"
61 #include "av1/decoder/decoder.h"
62 #if CONFIG_LV_MAP
63 #include "av1/decoder/decodetxb.h"
64 #endif
65 #include "av1/decoder/detokenize.h"
66 #include "av1/decoder/dsubexp.h"
67 #include "av1/decoder/symbolrate.h"
68 
69 #if CONFIG_WARPED_MOTION || CONFIG_GLOBAL_MOTION
70 #include "av1/common/warped_motion.h"
71 #endif  // CONFIG_WARPED_MOTION || CONFIG_GLOBAL_MOTION
72 
73 #define MAX_AV1_HEADER_SIZE 80
74 #define ACCT_STR __func__
75 
76 #if CONFIG_PVQ
77 #include "av1/common/partition.h"
78 #include "av1/common/pvq.h"
79 #include "av1/common/scan.h"
80 #include "av1/decoder/decint.h"
81 #include "av1/decoder/pvq_decoder.h"
82 #include "av1/encoder/encodemb.h"
83 #include "av1/encoder/hybrid_fwd_txfm.h"
84 #endif
85 
86 #if CONFIG_CFL
87 #include "av1/common/cfl.h"
88 #endif
89 
90 #if CONFIG_STRIPED_LOOP_RESTORATION && !CONFIG_LOOP_RESTORATION
91 #error "striped_loop_restoration requires loop_restoration"
92 #endif
93 
94 #if CONFIG_LOOP_RESTORATION
95 static void loop_restoration_read_sb_coeffs(const AV1_COMMON *const cm,
96                                             MACROBLOCKD *xd,
97                                             aom_reader *const r, int plane,
98                                             int rtile_idx);
99 #endif
100 
101 static struct aom_read_bit_buffer *init_read_bit_buffer(
102     AV1Decoder *pbi, struct aom_read_bit_buffer *rb, const uint8_t *data,
103     const uint8_t *data_end, uint8_t clear_data[MAX_AV1_HEADER_SIZE]);
104 static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
105                                   size_t partition_size);
106 static size_t read_uncompressed_header(AV1Decoder *pbi,
107                                        struct aom_read_bit_buffer *rb);
108 
is_compound_reference_allowed(const AV1_COMMON * cm)109 static int is_compound_reference_allowed(const AV1_COMMON *cm) {
110 #if CONFIG_ONE_SIDED_COMPOUND  // Normative in decoder
111   return !frame_is_intra_only(cm);
112 #else
113   int i;
114   if (frame_is_intra_only(cm)) return 0;
115   for (i = 1; i < INTER_REFS_PER_FRAME; ++i)
116     if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1]) return 1;
117 
118   return 0;
119 #endif  // CONFIG_ONE_SIDED_COMPOUND
120 }
121 
setup_compound_reference_mode(AV1_COMMON * cm)122 static void setup_compound_reference_mode(AV1_COMMON *cm) {
123 #if CONFIG_EXT_REFS
124   cm->comp_fwd_ref[0] = LAST_FRAME;
125   cm->comp_fwd_ref[1] = LAST2_FRAME;
126   cm->comp_fwd_ref[2] = LAST3_FRAME;
127   cm->comp_fwd_ref[3] = GOLDEN_FRAME;
128 
129   cm->comp_bwd_ref[0] = BWDREF_FRAME;
130   cm->comp_bwd_ref[1] = ALTREF2_FRAME;
131   cm->comp_bwd_ref[2] = ALTREF_FRAME;
132 #else   // !CONFIG_EXT_REFS
133   if (cm->ref_frame_sign_bias[LAST_FRAME] ==
134       cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
135     cm->comp_fixed_ref = ALTREF_FRAME;
136     cm->comp_var_ref[0] = LAST_FRAME;
137     cm->comp_var_ref[1] = GOLDEN_FRAME;
138   } else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
139              cm->ref_frame_sign_bias[ALTREF_FRAME]) {
140     cm->comp_fixed_ref = GOLDEN_FRAME;
141     cm->comp_var_ref[0] = LAST_FRAME;
142     cm->comp_var_ref[1] = ALTREF_FRAME;
143   } else {
144     cm->comp_fixed_ref = LAST_FRAME;
145     cm->comp_var_ref[0] = GOLDEN_FRAME;
146     cm->comp_var_ref[1] = ALTREF_FRAME;
147   }
148 #endif  // CONFIG_EXT_REFS
149 }
150 
read_is_valid(const uint8_t * start,size_t len,const uint8_t * end)151 static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
152   return len != 0 && len <= (size_t)(end - start);
153 }
154 
decode_unsigned_max(struct aom_read_bit_buffer * rb,int max)155 static int decode_unsigned_max(struct aom_read_bit_buffer *rb, int max) {
156   const int data = aom_rb_read_literal(rb, get_unsigned_bits(max));
157   return data > max ? max : data;
158 }
159 
read_tx_mode(AV1_COMMON * cm,struct aom_read_bit_buffer * rb)160 static TX_MODE read_tx_mode(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
161 #if CONFIG_TX64X64
162   TX_MODE tx_mode;
163 #endif
164   if (cm->all_lossless) return ONLY_4X4;
165 #if CONFIG_VAR_TX_NO_TX_MODE
166   (void)rb;
167   return TX_MODE_SELECT;
168 #else
169 #if CONFIG_TX64X64
170   tx_mode = aom_rb_read_bit(rb) ? TX_MODE_SELECT : aom_rb_read_literal(rb, 2);
171   if (tx_mode == ALLOW_32X32) tx_mode += aom_rb_read_bit(rb);
172   return tx_mode;
173 #else
174   return aom_rb_read_bit(rb) ? TX_MODE_SELECT : aom_rb_read_literal(rb, 2);
175 #endif  // CONFIG_TX64X64
176 #endif  // CONFIG_VAR_TX_NO_TX_MODE
177 }
178 
179 #if !CONFIG_RESTRICT_COMPRESSED_HDR
read_inter_mode_probs(FRAME_CONTEXT * fc,aom_reader * r)180 static void read_inter_mode_probs(FRAME_CONTEXT *fc, aom_reader *r) {
181   int i;
182   for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
183     av1_diff_update_prob(r, &fc->newmv_prob[i], ACCT_STR);
184   for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
185     av1_diff_update_prob(r, &fc->zeromv_prob[i], ACCT_STR);
186   for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
187     av1_diff_update_prob(r, &fc->refmv_prob[i], ACCT_STR);
188   for (i = 0; i < DRL_MODE_CONTEXTS; ++i)
189     av1_diff_update_prob(r, &fc->drl_prob[i], ACCT_STR);
190 }
191 #endif
192 
read_frame_reference_mode(const AV1_COMMON * cm,struct aom_read_bit_buffer * rb)193 static REFERENCE_MODE read_frame_reference_mode(
194     const AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
195   if (is_compound_reference_allowed(cm)) {
196 #if CONFIG_REF_ADAPT
197     return aom_rb_read_bit(rb) ? REFERENCE_MODE_SELECT : SINGLE_REFERENCE;
198 #else
199     return aom_rb_read_bit(rb)
200                ? REFERENCE_MODE_SELECT
201                : (aom_rb_read_bit(rb) ? COMPOUND_REFERENCE : SINGLE_REFERENCE);
202 #endif  // CONFIG_REF_ADAPT
203   } else {
204     return SINGLE_REFERENCE;
205   }
206 }
207 
208 #if !CONFIG_RESTRICT_COMPRESSED_HDR
read_frame_reference_mode_probs(AV1_COMMON * cm,aom_reader * r)209 static void read_frame_reference_mode_probs(AV1_COMMON *cm, aom_reader *r) {
210   FRAME_CONTEXT *const fc = cm->fc;
211   int i;
212 
213   if (cm->reference_mode == REFERENCE_MODE_SELECT)
214     for (i = 0; i < COMP_INTER_CONTEXTS; ++i)
215       av1_diff_update_prob(r, &fc->comp_inter_prob[i], ACCT_STR);
216 
217   if (cm->reference_mode != COMPOUND_REFERENCE) {
218     for (i = 0; i < REF_CONTEXTS; ++i) {
219       int j;
220       for (j = 0; j < (SINGLE_REFS - 1); ++j) {
221         av1_diff_update_prob(r, &fc->single_ref_prob[i][j], ACCT_STR);
222       }
223     }
224   }
225 
226   if (cm->reference_mode != SINGLE_REFERENCE) {
227 #if CONFIG_EXT_COMP_REFS
228     for (i = 0; i < COMP_REF_TYPE_CONTEXTS; ++i)
229       av1_diff_update_prob(r, &fc->comp_ref_type_prob[i], ACCT_STR);
230 
231     for (i = 0; i < UNI_COMP_REF_CONTEXTS; ++i) {
232       int j;
233       for (j = 0; j < (UNIDIR_COMP_REFS - 1); ++j)
234         av1_diff_update_prob(r, &fc->uni_comp_ref_prob[i][j], ACCT_STR);
235     }
236 #endif  // CONFIG_EXT_COMP_REFS
237 
238     for (i = 0; i < REF_CONTEXTS; ++i) {
239       int j;
240 #if CONFIG_EXT_REFS
241       for (j = 0; j < (FWD_REFS - 1); ++j)
242         av1_diff_update_prob(r, &fc->comp_ref_prob[i][j], ACCT_STR);
243       for (j = 0; j < (BWD_REFS - 1); ++j)
244         av1_diff_update_prob(r, &fc->comp_bwdref_prob[i][j], ACCT_STR);
245 #else
246       for (j = 0; j < (COMP_REFS - 1); ++j)
247         av1_diff_update_prob(r, &fc->comp_ref_prob[i][j], ACCT_STR);
248 #endif  // CONFIG_EXT_REFS
249     }
250   }
251 }
252 
update_mv_probs(aom_prob * p,int n,aom_reader * r)253 static void update_mv_probs(aom_prob *p, int n, aom_reader *r) {
254   int i;
255   for (i = 0; i < n; ++i) av1_diff_update_prob(r, &p[i], ACCT_STR);
256 }
257 
read_mv_probs(nmv_context * ctx,int allow_hp,aom_reader * r)258 static void read_mv_probs(nmv_context *ctx, int allow_hp, aom_reader *r) {
259   int i;
260   if (allow_hp) {
261     for (i = 0; i < 2; ++i) {
262       nmv_component *const comp_ctx = &ctx->comps[i];
263       update_mv_probs(&comp_ctx->class0_hp, 1, r);
264       update_mv_probs(&comp_ctx->hp, 1, r);
265     }
266   }
267 }
268 #endif
269 
inverse_transform_block(MACROBLOCKD * xd,int plane,PREDICTION_MODE mode,const TX_TYPE tx_type,const TX_SIZE tx_size,uint8_t * dst,int stride,int16_t scan_line,int eob)270 static void inverse_transform_block(MACROBLOCKD *xd, int plane,
271 #if CONFIG_LGT_FROM_PRED
272                                     PREDICTION_MODE mode,
273 #endif
274                                     const TX_TYPE tx_type,
275                                     const TX_SIZE tx_size, uint8_t *dst,
276                                     int stride, int16_t scan_line, int eob) {
277   struct macroblockd_plane *const pd = &xd->plane[plane];
278   tran_low_t *const dqcoeff = pd->dqcoeff;
279   av1_inverse_transform_block(xd, dqcoeff,
280 #if CONFIG_LGT_FROM_PRED
281                               mode,
282 #endif
283 #if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
284                               xd->mrc_mask,
285 #endif  // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
286                               tx_type, tx_size, dst, stride, eob);
287   memset(dqcoeff, 0, (scan_line + 1) * sizeof(dqcoeff[0]));
288 }
289 
get_block_idx(const MACROBLOCKD * xd,int plane,int row,int col)290 static int get_block_idx(const MACROBLOCKD *xd, int plane, int row, int col) {
291   const int bsize = xd->mi[0]->mbmi.sb_type;
292   const struct macroblockd_plane *pd = &xd->plane[plane];
293 #if CONFIG_CHROMA_SUB8X8
294   const BLOCK_SIZE plane_bsize =
295       AOMMAX(BLOCK_4X4, get_plane_block_size(bsize, pd));
296 #elif CONFIG_CB4X4
297   const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
298 #else
299   const BLOCK_SIZE plane_bsize =
300       get_plane_block_size(AOMMAX(BLOCK_8X8, bsize), pd);
301 #endif
302   const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
303   const TX_SIZE tx_size = av1_get_tx_size(plane, xd);
304   const uint8_t txh_unit = tx_size_high_unit[tx_size];
305   return row * max_blocks_wide + col * txh_unit;
306 }
307 
308 #if CONFIG_PVQ
av1_pvq_decode_helper(MACROBLOCKD * xd,tran_low_t * ref_coeff,tran_low_t * dqcoeff,int16_t * quant,int pli,int bs,TX_TYPE tx_type,int xdec,PVQ_SKIP_TYPE ac_dc_coded)309 static int av1_pvq_decode_helper(MACROBLOCKD *xd, tran_low_t *ref_coeff,
310                                  tran_low_t *dqcoeff, int16_t *quant, int pli,
311                                  int bs, TX_TYPE tx_type, int xdec,
312                                  PVQ_SKIP_TYPE ac_dc_coded) {
313   unsigned int flags;  // used for daala's stream analyzer.
314   int off;
315   const int is_keyframe = 0;
316   const int has_dc_skip = 1;
317   int coeff_shift = 3 - av1_get_tx_scale(bs);
318   int hbd_downshift = 0;
319   int rounding_mask;
320   // DC quantizer for PVQ
321   int pvq_dc_quant;
322   int lossless = (quant[0] == 0);
323   const int blk_size = tx_size_wide[bs];
324   int eob = 0;
325   int i;
326   od_dec_ctx *dec = &xd->daala_dec;
327   int use_activity_masking = dec->use_activity_masking;
328   DECLARE_ALIGNED(16, tran_low_t, dqcoeff_pvq[OD_TXSIZE_MAX * OD_TXSIZE_MAX]);
329   DECLARE_ALIGNED(16, tran_low_t, ref_coeff_pvq[OD_TXSIZE_MAX * OD_TXSIZE_MAX]);
330 
331   od_coeff ref_int32[OD_TXSIZE_MAX * OD_TXSIZE_MAX];
332   od_coeff out_int32[OD_TXSIZE_MAX * OD_TXSIZE_MAX];
333 
334   hbd_downshift = xd->bd - 8;
335 
336   od_raster_to_coding_order(ref_coeff_pvq, blk_size, tx_type, ref_coeff,
337                             blk_size);
338 
339   assert(OD_COEFF_SHIFT >= 4);
340   if (lossless)
341     pvq_dc_quant = 1;
342   else {
343     if (use_activity_masking)
344       pvq_dc_quant =
345           OD_MAXI(1,
346                   (quant[0] << (OD_COEFF_SHIFT - 3) >> hbd_downshift) *
347                           dec->state.pvq_qm_q4[pli][od_qm_get_index(bs, 0)] >>
348                       4);
349     else
350       pvq_dc_quant =
351           OD_MAXI(1, quant[0] << (OD_COEFF_SHIFT - 3) >> hbd_downshift);
352   }
353 
354   off = od_qm_offset(bs, xdec);
355 
356   // copy int16 inputs to int32
357   for (i = 0; i < blk_size * blk_size; i++) {
358     ref_int32[i] =
359         AOM_SIGNED_SHL(ref_coeff_pvq[i], OD_COEFF_SHIFT - coeff_shift) >>
360         hbd_downshift;
361   }
362 
363   od_pvq_decode(dec, ref_int32, out_int32,
364                 OD_MAXI(1, quant[1] << (OD_COEFF_SHIFT - 3) >> hbd_downshift),
365                 pli, bs, OD_PVQ_BETA[use_activity_masking][pli][bs],
366                 is_keyframe, &flags, ac_dc_coded, dec->state.qm + off,
367                 dec->state.qm_inv + off);
368 
369   if (!has_dc_skip || out_int32[0]) {
370     out_int32[0] =
371         has_dc_skip + generic_decode(dec->r, &dec->state.adapt->model_dc[pli],
372                                      &dec->state.adapt->ex_dc[pli][bs][0], 2,
373                                      "dc:mag");
374     if (out_int32[0]) out_int32[0] *= aom_read_bit(dec->r, "dc:sign") ? -1 : 1;
375   }
376   out_int32[0] = out_int32[0] * pvq_dc_quant + ref_int32[0];
377 
378   // copy int32 result back to int16
379   assert(OD_COEFF_SHIFT > coeff_shift);
380   rounding_mask = (1 << (OD_COEFF_SHIFT - coeff_shift - 1)) - 1;
381   for (i = 0; i < blk_size * blk_size; i++) {
382     out_int32[i] = AOM_SIGNED_SHL(out_int32[i], hbd_downshift);
383     dqcoeff_pvq[i] = (out_int32[i] + (out_int32[i] < 0) + rounding_mask) >>
384                      (OD_COEFF_SHIFT - coeff_shift);
385   }
386 
387   od_coding_order_to_raster(dqcoeff, blk_size, tx_type, dqcoeff_pvq, blk_size);
388 
389   eob = blk_size * blk_size;
390 
391   return eob;
392 }
393 
read_pvq_skip(AV1_COMMON * cm,MACROBLOCKD * const xd,int plane,TX_SIZE tx_size)394 static PVQ_SKIP_TYPE read_pvq_skip(AV1_COMMON *cm, MACROBLOCKD *const xd,
395                                    int plane, TX_SIZE tx_size) {
396   // decode ac/dc coded flag. bit0: DC coded, bit1 : AC coded
397   // NOTE : we don't use 5 symbols for luma here in aom codebase,
398   // since block partition is taken care of by aom.
399   // So, only AC/DC skip info is coded
400   const int ac_dc_coded = aom_read_symbol(
401       xd->daala_dec.r,
402       xd->daala_dec.state.adapt->skip_cdf[2 * tx_size + (plane != 0)], 4,
403       "skip");
404   if (ac_dc_coded < 0 || ac_dc_coded > 3) {
405     aom_internal_error(&cm->error, AOM_CODEC_INVALID_PARAM,
406                        "Invalid PVQ Skip Type");
407   }
408   return ac_dc_coded;
409 }
410 
av1_pvq_decode_helper2(AV1_COMMON * cm,MACROBLOCKD * const xd,MB_MODE_INFO * const mbmi,int plane,int row,int col,TX_SIZE tx_size,TX_TYPE tx_type)411 static int av1_pvq_decode_helper2(AV1_COMMON *cm, MACROBLOCKD *const xd,
412                                   MB_MODE_INFO *const mbmi, int plane, int row,
413                                   int col, TX_SIZE tx_size, TX_TYPE tx_type) {
414   struct macroblockd_plane *const pd = &xd->plane[plane];
415   // transform block size in pixels
416   int tx_blk_size = tx_size_wide[tx_size];
417   int i, j;
418   tran_low_t *pvq_ref_coeff = pd->pvq_ref_coeff;
419   const int diff_stride = tx_blk_size;
420   int16_t *pred = pd->pred;
421   tran_low_t *const dqcoeff = pd->dqcoeff;
422   uint8_t *dst;
423   int eob;
424   const PVQ_SKIP_TYPE ac_dc_coded = read_pvq_skip(cm, xd, plane, tx_size);
425 
426   eob = 0;
427   dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col];
428 
429   if (ac_dc_coded) {
430     int xdec = pd->subsampling_x;
431     int seg_id = mbmi->segment_id;
432     int16_t *quant;
433     TxfmParam txfm_param;
434     // ToDo(yaowu): correct this with optimal number from decoding process.
435     const int max_scan_line = tx_size_2d[tx_size];
436 #if CONFIG_HIGHBITDEPTH
437     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
438       for (j = 0; j < tx_blk_size; j++)
439         for (i = 0; i < tx_blk_size; i++)
440           pred[diff_stride * j + i] =
441               CONVERT_TO_SHORTPTR(dst)[pd->dst.stride * j + i];
442     } else {
443 #endif
444       for (j = 0; j < tx_blk_size; j++)
445         for (i = 0; i < tx_blk_size; i++)
446           pred[diff_stride * j + i] = dst[pd->dst.stride * j + i];
447 #if CONFIG_HIGHBITDEPTH
448     }
449 #endif
450 
451     txfm_param.tx_type = tx_type;
452     txfm_param.tx_size = tx_size;
453     txfm_param.lossless = xd->lossless[seg_id];
454 
455 #if CONFIG_HIGHBITDEPTH
456     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
457       txfm_param.bd = xd->bd;
458       av1_highbd_fwd_txfm(pred, pvq_ref_coeff, diff_stride, &txfm_param);
459     } else {
460 #endif  // CONFIG_HIGHBITDEPTH
461       av1_fwd_txfm(pred, pvq_ref_coeff, diff_stride, &txfm_param);
462 #if CONFIG_HIGHBITDEPTH
463     }
464 #endif  // CONFIG_HIGHBITDEPTH
465 
466     quant = &pd->seg_dequant[seg_id][0];  // aom's quantizer
467 
468     eob = av1_pvq_decode_helper(xd, pvq_ref_coeff, dqcoeff, quant, plane,
469                                 tx_size, tx_type, xdec, ac_dc_coded);
470 
471     inverse_transform_block(xd, plane, tx_type, tx_size, dst, pd->dst.stride,
472                             max_scan_line, eob);
473   }
474 
475   return eob;
476 }
477 #endif
478 
predict_and_reconstruct_intra_block(AV1_COMMON * cm,MACROBLOCKD * const xd,aom_reader * const r,MB_MODE_INFO * const mbmi,int plane,int row,int col,TX_SIZE tx_size)479 static void predict_and_reconstruct_intra_block(
480     AV1_COMMON *cm, MACROBLOCKD *const xd, aom_reader *const r,
481     MB_MODE_INFO *const mbmi, int plane, int row, int col, TX_SIZE tx_size) {
482   PLANE_TYPE plane_type = get_plane_type(plane);
483   const int block_idx = get_block_idx(xd, plane, row, col);
484 #if CONFIG_PVQ
485   (void)r;
486 #endif
487   av1_predict_intra_block_facade(cm, xd, plane, block_idx, col, row, tx_size);
488 
489   if (!mbmi->skip) {
490 #if !CONFIG_PVQ
491     struct macroblockd_plane *const pd = &xd->plane[plane];
492 #if CONFIG_LV_MAP
493     int16_t max_scan_line = 0;
494     int eob;
495     av1_read_coeffs_txb_facade(cm, xd, r, row, col, block_idx, plane,
496                                pd->dqcoeff, tx_size, &max_scan_line, &eob);
497     // tx_type will be read out in av1_read_coeffs_txb_facade
498     const TX_TYPE tx_type =
499         av1_get_tx_type(plane_type, xd, row, col, block_idx, tx_size);
500 #else   // CONFIG_LV_MAP
501     const TX_TYPE tx_type =
502         av1_get_tx_type(plane_type, xd, row, col, block_idx, tx_size);
503     const SCAN_ORDER *scan_order = get_scan(cm, tx_size, tx_type, mbmi);
504     int16_t max_scan_line = 0;
505     const int eob =
506         av1_decode_block_tokens(cm, xd, plane, scan_order, col, row, tx_size,
507                                 tx_type, &max_scan_line, r, mbmi->segment_id);
508 #endif  // CONFIG_LV_MAP
509     if (eob) {
510       uint8_t *dst =
511           &pd->dst.buf[(row * pd->dst.stride + col) << tx_size_wide_log2[0]];
512       inverse_transform_block(xd, plane,
513 #if CONFIG_LGT_FROM_PRED
514                               mbmi->mode,
515 #endif
516                               tx_type, tx_size, dst, pd->dst.stride,
517                               max_scan_line, eob);
518     }
519 #else   // !CONFIG_PVQ
520     const TX_TYPE tx_type =
521         av1_get_tx_type(plane_type, xd, row, col, block_idx, tx_size);
522     av1_pvq_decode_helper2(cm, xd, mbmi, plane, row, col, tx_size, tx_type);
523 #endif  // !CONFIG_PVQ
524   }
525 #if CONFIG_CFL
526   if (plane == AOM_PLANE_Y && xd->cfl->store_y) {
527     cfl_store_tx(xd, row, col, tx_size, mbmi->sb_type);
528   }
529 #endif  // CONFIG_CFL
530 }
531 
532 #if CONFIG_VAR_TX && !CONFIG_COEF_INTERLEAVE
decode_reconstruct_tx(AV1_COMMON * cm,MACROBLOCKD * const xd,aom_reader * r,MB_MODE_INFO * const mbmi,int plane,BLOCK_SIZE plane_bsize,int blk_row,int blk_col,int block,TX_SIZE tx_size,int * eob_total)533 static void decode_reconstruct_tx(AV1_COMMON *cm, MACROBLOCKD *const xd,
534                                   aom_reader *r, MB_MODE_INFO *const mbmi,
535                                   int plane, BLOCK_SIZE plane_bsize,
536                                   int blk_row, int blk_col, int block,
537                                   TX_SIZE tx_size, int *eob_total) {
538   const struct macroblockd_plane *const pd = &xd->plane[plane];
539   const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
540   const int tx_row = blk_row >> (1 - pd->subsampling_y);
541   const int tx_col = blk_col >> (1 - pd->subsampling_x);
542   const TX_SIZE plane_tx_size =
543       plane ? uv_txsize_lookup[bsize][mbmi->inter_tx_size[tx_row][tx_col]][0][0]
544             : mbmi->inter_tx_size[tx_row][tx_col];
545   // Scale to match transform block unit.
546   const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
547   const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
548 
549   if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
550 
551   if (tx_size == plane_tx_size) {
552     PLANE_TYPE plane_type = get_plane_type(plane);
553 #if CONFIG_LV_MAP
554     int16_t max_scan_line = 0;
555     int eob;
556     av1_read_coeffs_txb_facade(cm, xd, r, blk_row, blk_col, block, plane,
557                                pd->dqcoeff, tx_size, &max_scan_line, &eob);
558     // tx_type will be read out in av1_read_coeffs_txb_facade
559     const TX_TYPE tx_type =
560         av1_get_tx_type(plane_type, xd, blk_row, blk_col, block, plane_tx_size);
561 #else   // CONFIG_LV_MAP
562     const TX_TYPE tx_type =
563         av1_get_tx_type(plane_type, xd, blk_row, blk_col, block, plane_tx_size);
564     const SCAN_ORDER *sc = get_scan(cm, plane_tx_size, tx_type, mbmi);
565     int16_t max_scan_line = 0;
566     const int eob = av1_decode_block_tokens(
567         cm, xd, plane, sc, blk_col, blk_row, plane_tx_size, tx_type,
568         &max_scan_line, r, mbmi->segment_id);
569 #endif  // CONFIG_LV_MAP
570     inverse_transform_block(xd, plane,
571 #if CONFIG_LGT_FROM_PRED
572                             mbmi->mode,
573 #endif
574                             tx_type, plane_tx_size,
575                             &pd->dst.buf[(blk_row * pd->dst.stride + blk_col)
576                                          << tx_size_wide_log2[0]],
577                             pd->dst.stride, max_scan_line, eob);
578     *eob_total += eob;
579   } else {
580 #if CONFIG_RECT_TX_EXT
581     int is_qttx = plane_tx_size == quarter_txsize_lookup[plane_bsize];
582     const TX_SIZE sub_txs = is_qttx ? plane_tx_size : sub_tx_size_map[tx_size];
583     if (is_qttx) assert(blk_row == 0 && blk_col == 0 && block == 0);
584 #else
585     const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
586     assert(IMPLIES(tx_size <= TX_4X4, sub_txs == tx_size));
587     assert(IMPLIES(tx_size > TX_4X4, sub_txs < tx_size));
588 #endif
589     const int bsl = tx_size_wide_unit[sub_txs];
590     int sub_step = tx_size_wide_unit[sub_txs] * tx_size_high_unit[sub_txs];
591     int i;
592 
593     assert(bsl > 0);
594 
595     for (i = 0; i < 4; ++i) {
596 #if CONFIG_RECT_TX_EXT
597       int is_wide_tx = tx_size_wide_unit[sub_txs] > tx_size_high_unit[sub_txs];
598       const int offsetr =
599           is_qttx ? (is_wide_tx ? i * tx_size_high_unit[sub_txs] : 0)
600                   : blk_row + ((i >> 1) * bsl);
601       const int offsetc =
602           is_qttx ? (is_wide_tx ? 0 : i * tx_size_wide_unit[sub_txs])
603                   : blk_col + (i & 0x01) * bsl;
604 #else
605       const int offsetr = blk_row + (i >> 1) * bsl;
606       const int offsetc = blk_col + (i & 0x01) * bsl;
607 #endif
608 
609       if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
610 
611       decode_reconstruct_tx(cm, xd, r, mbmi, plane, plane_bsize, offsetr,
612                             offsetc, block, sub_txs, eob_total);
613       block += sub_step;
614     }
615   }
616 }
617 #endif  // CONFIG_VAR_TX
618 
619 #if !CONFIG_VAR_TX || CONFIG_SUPERTX || CONFIG_COEF_INTERLEAVE || \
620     (!CONFIG_VAR_TX && CONFIG_EXT_TX && CONFIG_RECT_TX)
reconstruct_inter_block(AV1_COMMON * cm,MACROBLOCKD * const xd,aom_reader * const r,int segment_id,int plane,int row,int col,TX_SIZE tx_size)621 static int reconstruct_inter_block(AV1_COMMON *cm, MACROBLOCKD *const xd,
622                                    aom_reader *const r, int segment_id,
623                                    int plane, int row, int col,
624                                    TX_SIZE tx_size) {
625   PLANE_TYPE plane_type = get_plane_type(plane);
626   int block_idx = get_block_idx(xd, plane, row, col);
627 #if CONFIG_PVQ
628   int eob;
629   (void)r;
630   (void)segment_id;
631 #else
632   struct macroblockd_plane *const pd = &xd->plane[plane];
633 #endif
634 
635 #if !CONFIG_PVQ
636 #if CONFIG_LV_MAP
637   (void)segment_id;
638   int16_t max_scan_line = 0;
639   int eob;
640   av1_read_coeffs_txb_facade(cm, xd, r, row, col, block_idx, plane, pd->dqcoeff,
641                              tx_size, &max_scan_line, &eob);
642   // tx_type will be read out in av1_read_coeffs_txb_facade
643   const TX_TYPE tx_type =
644       av1_get_tx_type(plane_type, xd, row, col, block_idx, tx_size);
645 #else   // CONFIG_LV_MAP
646   int16_t max_scan_line = 0;
647   const TX_TYPE tx_type =
648       av1_get_tx_type(plane_type, xd, row, col, block_idx, tx_size);
649   const SCAN_ORDER *scan_order =
650       get_scan(cm, tx_size, tx_type, &xd->mi[0]->mbmi);
651   const int eob =
652       av1_decode_block_tokens(cm, xd, plane, scan_order, col, row, tx_size,
653                               tx_type, &max_scan_line, r, segment_id);
654 #endif  // CONFIG_LV_MAP
655   uint8_t *dst =
656       &pd->dst.buf[(row * pd->dst.stride + col) << tx_size_wide_log2[0]];
657   if (eob)
658     inverse_transform_block(xd, plane,
659 #if CONFIG_LGT_FROM_PRED
660                             xd->mi[0]->mbmi.mode,
661 #endif
662                             tx_type, tx_size, dst, pd->dst.stride,
663                             max_scan_line, eob);
664 #else
665   const TX_TYPE tx_type =
666       av1_get_tx_type(plane_type, xd, row, col, block_idx, tx_size);
667   eob = av1_pvq_decode_helper2(cm, xd, &xd->mi[0]->mbmi, plane, row, col,
668                                tx_size, tx_type);
669 #endif
670   return eob;
671 }
672 #endif  // !CONFIG_VAR_TX || CONFIG_SUPER_TX
673 
set_offsets(AV1_COMMON * const cm,MACROBLOCKD * const xd,BLOCK_SIZE bsize,int mi_row,int mi_col,int bw,int bh,int x_mis,int y_mis)674 static void set_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd,
675                         BLOCK_SIZE bsize, int mi_row, int mi_col, int bw,
676                         int bh, int x_mis, int y_mis) {
677   const int offset = mi_row * cm->mi_stride + mi_col;
678   int x, y;
679   const TileInfo *const tile = &xd->tile;
680 
681   xd->mi = cm->mi_grid_visible + offset;
682   xd->mi[0] = &cm->mi[offset];
683   // TODO(slavarnway): Generate sb_type based on bwl and bhl, instead of
684   // passing bsize from decode_partition().
685   xd->mi[0]->mbmi.sb_type = bsize;
686 #if CONFIG_RD_DEBUG
687   xd->mi[0]->mbmi.mi_row = mi_row;
688   xd->mi[0]->mbmi.mi_col = mi_col;
689 #endif
690 #if CONFIG_CFL
691   xd->cfl->mi_row = mi_row;
692   xd->cfl->mi_col = mi_col;
693 #endif
694   for (y = 0; y < y_mis; ++y)
695     for (x = !y; x < x_mis; ++x) xd->mi[y * cm->mi_stride + x] = xd->mi[0];
696 
697   set_plane_n4(xd, bw, bh);
698   set_skip_context(xd, mi_row, mi_col);
699 
700 #if CONFIG_VAR_TX
701   xd->max_tx_size = max_txsize_lookup[bsize];
702 #endif
703 
704   // Distance of Mb to the various image edges. These are specified to 8th pel
705   // as they are always compared to values that are in 1/8th pel units
706   set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw,
707 #if CONFIG_DEPENDENT_HORZTILES
708                  cm->dependent_horz_tiles,
709 #endif  // CONFIG_DEPENDENT_HORZTILES
710                  cm->mi_rows, cm->mi_cols);
711 
712   av1_setup_dst_planes(xd->plane, bsize, get_frame_new_buffer(cm), mi_row,
713                        mi_col);
714 }
715 
716 #if CONFIG_SUPERTX
set_offsets_extend(AV1_COMMON * const cm,MACROBLOCKD * const xd,const TileInfo * const tile,BLOCK_SIZE bsize_pred,int mi_row_pred,int mi_col_pred,int mi_row_ori,int mi_col_ori)717 static MB_MODE_INFO *set_offsets_extend(AV1_COMMON *const cm,
718                                         MACROBLOCKD *const xd,
719                                         const TileInfo *const tile,
720                                         BLOCK_SIZE bsize_pred, int mi_row_pred,
721                                         int mi_col_pred, int mi_row_ori,
722                                         int mi_col_ori) {
723   // Used in supertx
724   // (mi_row_ori, mi_col_ori): location for mv
725   // (mi_row_pred, mi_col_pred, bsize_pred): region to predict
726   const int bw = mi_size_wide[bsize_pred];
727   const int bh = mi_size_high[bsize_pred];
728   const int offset = mi_row_ori * cm->mi_stride + mi_col_ori;
729   xd->mi = cm->mi_grid_visible + offset;
730   xd->mi[0] = cm->mi + offset;
731   set_mi_row_col(xd, tile, mi_row_pred, bh, mi_col_pred, bw,
732 #if CONFIG_DEPENDENT_HORZTILES
733                  cm->dependent_horz_tiles,
734 #endif  // CONFIG_DEPENDENT_HORZTILES
735                  cm->mi_rows, cm->mi_cols);
736 
737   xd->up_available = (mi_row_ori > tile->mi_row_start);
738   xd->left_available = (mi_col_ori > tile->mi_col_start);
739 
740   set_plane_n4(xd, bw, bh);
741 
742   return &xd->mi[0]->mbmi;
743 }
744 
745 #if CONFIG_SUPERTX
set_mb_offsets(AV1_COMMON * const cm,MACROBLOCKD * const xd,BLOCK_SIZE bsize,int mi_row,int mi_col,int bw,int bh,int x_mis,int y_mis)746 static MB_MODE_INFO *set_mb_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd,
747                                     BLOCK_SIZE bsize, int mi_row, int mi_col,
748                                     int bw, int bh, int x_mis, int y_mis) {
749   const int offset = mi_row * cm->mi_stride + mi_col;
750   const TileInfo *const tile = &xd->tile;
751   int x, y;
752 
753   xd->mi = cm->mi_grid_visible + offset;
754   xd->mi[0] = cm->mi + offset;
755   xd->mi[0]->mbmi.sb_type = bsize;
756   for (y = 0; y < y_mis; ++y)
757     for (x = !y; x < x_mis; ++x) xd->mi[y * cm->mi_stride + x] = xd->mi[0];
758 
759   set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw,
760 #if CONFIG_DEPENDENT_HORZTILES
761                  cm->dependent_horz_tiles,
762 #endif  // CONFIG_DEPENDENT_HORZTILES
763                  cm->mi_rows, cm->mi_cols);
764   return &xd->mi[0]->mbmi;
765 }
766 #endif
767 
set_offsets_topblock(AV1_COMMON * const cm,MACROBLOCKD * const xd,const TileInfo * const tile,BLOCK_SIZE bsize,int mi_row,int mi_col)768 static void set_offsets_topblock(AV1_COMMON *const cm, MACROBLOCKD *const xd,
769                                  const TileInfo *const tile, BLOCK_SIZE bsize,
770                                  int mi_row, int mi_col) {
771   const int bw = mi_size_wide[bsize];
772   const int bh = mi_size_high[bsize];
773   const int offset = mi_row * cm->mi_stride + mi_col;
774 
775   xd->mi = cm->mi_grid_visible + offset;
776   xd->mi[0] = cm->mi + offset;
777 
778   set_plane_n4(xd, bw, bh);
779 
780   set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw,
781 #if CONFIG_DEPENDENT_HORZTILES
782                  cm->dependent_horz_tiles,
783 #endif  // CONFIG_DEPENDENT_HORZTILES
784                  cm->mi_rows, cm->mi_cols);
785 
786   av1_setup_dst_planes(xd->plane, bsize, get_frame_new_buffer(cm), mi_row,
787                        mi_col);
788 }
789 
set_param_topblock(AV1_COMMON * const cm,MACROBLOCKD * const xd,BLOCK_SIZE bsize,int mi_row,int mi_col,int txfm,int skip)790 static void set_param_topblock(AV1_COMMON *const cm, MACROBLOCKD *const xd,
791                                BLOCK_SIZE bsize, int mi_row, int mi_col,
792                                int txfm, int skip) {
793   const int bw = mi_size_wide[bsize];
794   const int bh = mi_size_high[bsize];
795   const int x_mis = AOMMIN(bw, cm->mi_cols - mi_col);
796   const int y_mis = AOMMIN(bh, cm->mi_rows - mi_row);
797   const int offset = mi_row * cm->mi_stride + mi_col;
798   int x, y;
799 
800   xd->mi = cm->mi_grid_visible + offset;
801   xd->mi[0] = cm->mi + offset;
802 
803   for (y = 0; y < y_mis; ++y)
804     for (x = 0; x < x_mis; ++x) {
805       xd->mi[y * cm->mi_stride + x]->mbmi.skip = skip;
806       xd->mi[y * cm->mi_stride + x]->mbmi.tx_type = txfm;
807     }
808 #if CONFIG_VAR_TX
809   xd->above_txfm_context = cm->above_txfm_context + mi_col;
810   xd->left_txfm_context =
811       xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
812   set_txfm_ctxs(xd->mi[0]->mbmi.tx_size, bw, bh, skip, xd);
813 #endif
814 }
815 
set_ref(AV1_COMMON * const cm,MACROBLOCKD * const xd,int idx,int mi_row,int mi_col)816 static void set_ref(AV1_COMMON *const cm, MACROBLOCKD *const xd, int idx,
817                     int mi_row, int mi_col) {
818   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
819 #if CONFIG_COMPOUND_SINGLEREF
820   RefBuffer *ref_buffer =
821       has_second_ref(mbmi) ? &cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME]
822                            : &cm->frame_refs[mbmi->ref_frame[0] - LAST_FRAME];
823 #else
824   RefBuffer *ref_buffer = &cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME];
825 #endif  // CONFIG_COMPOUND_SINGLEREF
826   xd->block_refs[idx] = ref_buffer;
827   if (!av1_is_valid_scale(&ref_buffer->sf))
828     aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
829                        "Invalid scale factors");
830   av1_setup_pre_planes(xd, idx, ref_buffer->buf, mi_row, mi_col,
831                        &ref_buffer->sf);
832   aom_merge_corrupted_flag(&xd->corrupted, ref_buffer->buf->corrupted);
833 }
834 
dec_predict_b_extend(AV1Decoder * const pbi,MACROBLOCKD * const xd,const TileInfo * const tile,int block,int mi_row_ori,int mi_col_ori,int mi_row_pred,int mi_col_pred,int mi_row_top,int mi_col_top,int plane,uint8_t * dst_buf,int dst_stride,BLOCK_SIZE bsize_top,BLOCK_SIZE bsize_pred,int b_sub8x8,int bextend)835 static void dec_predict_b_extend(
836     AV1Decoder *const pbi, MACROBLOCKD *const xd, const TileInfo *const tile,
837     int block, int mi_row_ori, int mi_col_ori, int mi_row_pred, int mi_col_pred,
838     int mi_row_top, int mi_col_top, int plane, uint8_t *dst_buf, int dst_stride,
839     BLOCK_SIZE bsize_top, BLOCK_SIZE bsize_pred, int b_sub8x8, int bextend) {
840   // Used in supertx
841   // (mi_row_ori, mi_col_ori): location for mv
842   // (mi_row_pred, mi_col_pred, bsize_pred): region to predict
843   // (mi_row_top, mi_col_top, bsize_top): region of the top partition size
844   // block: sub location of sub8x8 blocks
845   // b_sub8x8: 1: ori is sub8x8; 0: ori is not sub8x8
846   // bextend: 1: region to predict is an extension of ori; 0: not
847   int r = (mi_row_pred - mi_row_top) * MI_SIZE;
848   int c = (mi_col_pred - mi_col_top) * MI_SIZE;
849   const int mi_width_top = mi_size_wide[bsize_top];
850   const int mi_height_top = mi_size_high[bsize_top];
851   MB_MODE_INFO *mbmi;
852   AV1_COMMON *const cm = &pbi->common;
853 
854   if (mi_row_pred < mi_row_top || mi_col_pred < mi_col_top ||
855       mi_row_pred >= mi_row_top + mi_height_top ||
856       mi_col_pred >= mi_col_top + mi_width_top || mi_row_pred >= cm->mi_rows ||
857       mi_col_pred >= cm->mi_cols)
858     return;
859 
860   mbmi = set_offsets_extend(cm, xd, tile, bsize_pred, mi_row_pred, mi_col_pred,
861                             mi_row_ori, mi_col_ori);
862   set_ref(cm, xd, 0, mi_row_pred, mi_col_pred);
863   if (has_second_ref(&xd->mi[0]->mbmi)
864 #if CONFIG_COMPOUND_SINGLEREF
865       || is_inter_singleref_comp_mode(xd->mi[0]->mbmi.mode)
866 #endif  // CONFIG_COMPOUND_SINGLEREF
867           )
868     set_ref(cm, xd, 1, mi_row_pred, mi_col_pred);
869   if (!bextend) mbmi->tx_size = max_txsize_lookup[bsize_top];
870 
871   xd->plane[plane].dst.stride = dst_stride;
872   xd->plane[plane].dst.buf =
873       dst_buf + (r >> xd->plane[plane].subsampling_y) * dst_stride +
874       (c >> xd->plane[plane].subsampling_x);
875 
876   if (!b_sub8x8)
877     av1_build_inter_predictor_sb_extend(&pbi->common, xd, mi_row_ori,
878                                         mi_col_ori, mi_row_pred, mi_col_pred,
879                                         plane, bsize_pred);
880   else
881     av1_build_inter_predictor_sb_sub8x8_extend(
882         &pbi->common, xd, mi_row_ori, mi_col_ori, mi_row_pred, mi_col_pred,
883         plane, bsize_pred, block);
884 }
885 
dec_extend_dir(AV1Decoder * const pbi,MACROBLOCKD * const xd,const TileInfo * const tile,int block,BLOCK_SIZE bsize,BLOCK_SIZE top_bsize,int mi_row_ori,int mi_col_ori,int mi_row,int mi_col,int mi_row_top,int mi_col_top,int plane,uint8_t * dst_buf,int dst_stride,int dir)886 static void dec_extend_dir(AV1Decoder *const pbi, MACROBLOCKD *const xd,
887                            const TileInfo *const tile, int block,
888                            BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
889                            int mi_row_ori, int mi_col_ori, int mi_row,
890                            int mi_col, int mi_row_top, int mi_col_top,
891                            int plane, uint8_t *dst_buf, int dst_stride,
892                            int dir) {
893   // dir: 0-lower, 1-upper, 2-left, 3-right
894   //      4-lowerleft, 5-upperleft, 6-lowerright, 7-upperright
895   const int mi_width = mi_size_wide[bsize];
896   const int mi_height = mi_size_high[bsize];
897   int xss = xd->plane[1].subsampling_x;
898   int yss = xd->plane[1].subsampling_y;
899 #if CONFIG_CB4X4
900   const int unify_bsize = 1;
901 #else
902   const int unify_bsize = 0;
903 #endif
904   int b_sub8x8 = (bsize < BLOCK_8X8) && !unify_bsize ? 1 : 0;
905   BLOCK_SIZE extend_bsize;
906   int mi_row_pred, mi_col_pred;
907 
908   int wide_unit, high_unit;
909   int i, j;
910   int ext_offset = 0;
911 
912   if (dir == 0 || dir == 1) {
913     extend_bsize =
914         (mi_width == mi_size_wide[BLOCK_8X8] || bsize < BLOCK_8X8 || xss < yss)
915             ? BLOCK_8X8
916             : BLOCK_16X8;
917 #if CONFIG_CB4X4
918     if (bsize < BLOCK_8X8) {
919       extend_bsize = BLOCK_4X4;
920       ext_offset = mi_size_wide[BLOCK_8X8];
921     }
922 #endif
923 
924     wide_unit = mi_size_wide[extend_bsize];
925     high_unit = mi_size_high[extend_bsize];
926 
927     mi_row_pred = mi_row + ((dir == 0) ? mi_height : -(mi_height + ext_offset));
928     mi_col_pred = mi_col;
929 
930     for (j = 0; j < mi_height + ext_offset; j += high_unit)
931       for (i = 0; i < mi_width + ext_offset; i += wide_unit)
932         dec_predict_b_extend(pbi, xd, tile, block, mi_row_ori, mi_col_ori,
933                              mi_row_pred + j, mi_col_pred + i, mi_row_top,
934                              mi_col_top, plane, dst_buf, dst_stride, top_bsize,
935                              extend_bsize, b_sub8x8, 1);
936   } else if (dir == 2 || dir == 3) {
937     extend_bsize =
938         (mi_height == mi_size_high[BLOCK_8X8] || bsize < BLOCK_8X8 || yss < xss)
939             ? BLOCK_8X8
940             : BLOCK_8X16;
941 #if CONFIG_CB4X4
942     if (bsize < BLOCK_8X8) {
943       extend_bsize = BLOCK_4X4;
944       ext_offset = mi_size_wide[BLOCK_8X8];
945     }
946 #endif
947 
948     wide_unit = mi_size_wide[extend_bsize];
949     high_unit = mi_size_high[extend_bsize];
950 
951     mi_row_pred = mi_row;
952     mi_col_pred = mi_col + ((dir == 3) ? mi_width : -(mi_width + ext_offset));
953 
954     for (j = 0; j < mi_height + ext_offset; j += high_unit)
955       for (i = 0; i < mi_width + ext_offset; i += wide_unit)
956         dec_predict_b_extend(pbi, xd, tile, block, mi_row_ori, mi_col_ori,
957                              mi_row_pred + j, mi_col_pred + i, mi_row_top,
958                              mi_col_top, plane, dst_buf, dst_stride, top_bsize,
959                              extend_bsize, b_sub8x8, 1);
960   } else {
961     extend_bsize = BLOCK_8X8;
962 #if CONFIG_CB4X4
963     if (bsize < BLOCK_8X8) {
964       extend_bsize = BLOCK_4X4;
965       ext_offset = mi_size_wide[BLOCK_8X8];
966     }
967 #endif
968     wide_unit = mi_size_wide[extend_bsize];
969     high_unit = mi_size_high[extend_bsize];
970 
971     mi_row_pred = mi_row + ((dir == 4 || dir == 6) ? mi_height
972                                                    : -(mi_height + ext_offset));
973     mi_col_pred =
974         mi_col + ((dir == 6 || dir == 7) ? mi_width : -(mi_width + ext_offset));
975 
976     for (j = 0; j < mi_height + ext_offset; j += high_unit)
977       for (i = 0; i < mi_width + ext_offset; i += wide_unit)
978         dec_predict_b_extend(pbi, xd, tile, block, mi_row_ori, mi_col_ori,
979                              mi_row_pred + j, mi_col_pred + i, mi_row_top,
980                              mi_col_top, plane, dst_buf, dst_stride, top_bsize,
981                              extend_bsize, b_sub8x8, 1);
982   }
983 }
984 
dec_extend_all(AV1Decoder * const pbi,MACROBLOCKD * const xd,const TileInfo * const tile,int block,BLOCK_SIZE bsize,BLOCK_SIZE top_bsize,int mi_row_ori,int mi_col_ori,int mi_row,int mi_col,int mi_row_top,int mi_col_top,int plane,uint8_t * dst_buf,int dst_stride)985 static void dec_extend_all(AV1Decoder *const pbi, MACROBLOCKD *const xd,
986                            const TileInfo *const tile, int block,
987                            BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
988                            int mi_row_ori, int mi_col_ori, int mi_row,
989                            int mi_col, int mi_row_top, int mi_col_top,
990                            int plane, uint8_t *dst_buf, int dst_stride) {
991   for (int i = 0; i < 8; ++i) {
992     dec_extend_dir(pbi, xd, tile, block, bsize, top_bsize, mi_row_ori,
993                    mi_col_ori, mi_row, mi_col, mi_row_top, mi_col_top, plane,
994                    dst_buf, dst_stride, i);
995   }
996 }
997 
dec_predict_sb_complex(AV1Decoder * const pbi,MACROBLOCKD * const xd,const TileInfo * const tile,int mi_row,int mi_col,int mi_row_top,int mi_col_top,BLOCK_SIZE bsize,BLOCK_SIZE top_bsize,uint8_t * dst_buf[3],int dst_stride[3])998 static void dec_predict_sb_complex(AV1Decoder *const pbi, MACROBLOCKD *const xd,
999                                    const TileInfo *const tile, int mi_row,
1000                                    int mi_col, int mi_row_top, int mi_col_top,
1001                                    BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
1002                                    uint8_t *dst_buf[3], int dst_stride[3]) {
1003   const AV1_COMMON *const cm = &pbi->common;
1004   const int hbs = mi_size_wide[bsize] / 2;
1005   const PARTITION_TYPE partition = get_partition(cm, mi_row, mi_col, bsize);
1006   const BLOCK_SIZE subsize = get_subsize(bsize, partition);
1007 #if CONFIG_EXT_PARTITION_TYPES
1008   const BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT);
1009 #endif
1010   int i;
1011   const int mi_offset = mi_row * cm->mi_stride + mi_col;
1012   uint8_t *dst_buf1[3], *dst_buf2[3], *dst_buf3[3];
1013 #if CONFIG_CB4X4
1014   const int unify_bsize = 1;
1015 #else
1016   const int unify_bsize = 0;
1017 #endif
1018 
1019   DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_TX_SQUARE * 2]);
1020   DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_TX_SQUARE * 2]);
1021   DECLARE_ALIGNED(16, uint8_t, tmp_buf3[MAX_MB_PLANE * MAX_TX_SQUARE * 2]);
1022   int dst_stride1[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE };
1023   int dst_stride2[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE };
1024   int dst_stride3[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE };
1025 
1026 #if CONFIG_HIGHBITDEPTH
1027   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1028     int len = sizeof(uint16_t);
1029     dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
1030     dst_buf1[1] = CONVERT_TO_BYTEPTR(tmp_buf1 + MAX_TX_SQUARE * len);
1031     dst_buf1[2] = CONVERT_TO_BYTEPTR(tmp_buf1 + 2 * MAX_TX_SQUARE * len);
1032     dst_buf2[0] = CONVERT_TO_BYTEPTR(tmp_buf2);
1033     dst_buf2[1] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_TX_SQUARE * len);
1034     dst_buf2[2] = CONVERT_TO_BYTEPTR(tmp_buf2 + 2 * MAX_TX_SQUARE * len);
1035     dst_buf3[0] = CONVERT_TO_BYTEPTR(tmp_buf3);
1036     dst_buf3[1] = CONVERT_TO_BYTEPTR(tmp_buf3 + MAX_TX_SQUARE * len);
1037     dst_buf3[2] = CONVERT_TO_BYTEPTR(tmp_buf3 + 2 * MAX_TX_SQUARE * len);
1038   } else {
1039 #endif
1040     dst_buf1[0] = tmp_buf1;
1041     dst_buf1[1] = tmp_buf1 + MAX_TX_SQUARE;
1042     dst_buf1[2] = tmp_buf1 + 2 * MAX_TX_SQUARE;
1043     dst_buf2[0] = tmp_buf2;
1044     dst_buf2[1] = tmp_buf2 + MAX_TX_SQUARE;
1045     dst_buf2[2] = tmp_buf2 + 2 * MAX_TX_SQUARE;
1046     dst_buf3[0] = tmp_buf3;
1047     dst_buf3[1] = tmp_buf3 + MAX_TX_SQUARE;
1048     dst_buf3[2] = tmp_buf3 + 2 * MAX_TX_SQUARE;
1049 #if CONFIG_HIGHBITDEPTH
1050   }
1051 #endif
1052 
1053   if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
1054 
1055   xd->mi = cm->mi_grid_visible + mi_offset;
1056   xd->mi[0] = cm->mi + mi_offset;
1057 
1058   for (i = 0; i < MAX_MB_PLANE; i++) {
1059     xd->plane[i].dst.buf = dst_buf[i];
1060     xd->plane[i].dst.stride = dst_stride[i];
1061   }
1062 
1063   switch (partition) {
1064     case PARTITION_NONE:
1065       assert(bsize < top_bsize);
1066       for (i = 0; i < MAX_MB_PLANE; i++) {
1067         dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col,
1068                              mi_row_top, mi_col_top, i, dst_buf[i],
1069                              dst_stride[i], top_bsize, bsize, 0, 0);
1070         dec_extend_all(pbi, xd, tile, 0, bsize, top_bsize, mi_row, mi_col,
1071                        mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf[i],
1072                        dst_stride[i]);
1073       }
1074       break;
1075     case PARTITION_HORZ:
1076       if (bsize == BLOCK_8X8 && !unify_bsize) {
1077         for (i = 0; i < MAX_MB_PLANE; i++) {
1078           // For sub8x8, predict in 8x8 unit
1079           // First half
1080           dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col,
1081                                mi_row_top, mi_col_top, i, dst_buf[i],
1082                                dst_stride[i], top_bsize, BLOCK_8X8, 1, 0);
1083           if (bsize < top_bsize)
1084             dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, mi_col,
1085                            mi_row, mi_col, mi_row_top, mi_col_top, i,
1086                            dst_buf[i], dst_stride[i]);
1087 
1088           // Second half
1089           dec_predict_b_extend(pbi, xd, tile, 2, mi_row, mi_col, mi_row, mi_col,
1090                                mi_row_top, mi_col_top, i, dst_buf1[i],
1091                                dst_stride1[i], top_bsize, BLOCK_8X8, 1, 1);
1092           if (bsize < top_bsize)
1093             dec_extend_all(pbi, xd, tile, 2, subsize, top_bsize, mi_row, mi_col,
1094                            mi_row, mi_col, mi_row_top, mi_col_top, i,
1095                            dst_buf1[i], dst_stride1[i]);
1096         }
1097 
1098         // weighted average to smooth the boundary
1099         xd->plane[0].dst.buf = dst_buf[0];
1100         xd->plane[0].dst.stride = dst_stride[0];
1101         av1_build_masked_inter_predictor_complex(
1102             xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
1103             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
1104             0);
1105       } else {
1106         for (i = 0; i < MAX_MB_PLANE; i++) {
1107 #if CONFIG_CB4X4
1108           const struct macroblockd_plane *pd = &xd->plane[i];
1109           int handle_chroma_sub8x8 = need_handle_chroma_sub8x8(
1110               subsize, pd->subsampling_x, pd->subsampling_y);
1111 
1112           if (handle_chroma_sub8x8) {
1113             int mode_offset_row = CONFIG_CHROMA_SUB8X8 ? hbs : 0;
1114 
1115             dec_predict_b_extend(pbi, xd, tile, 0, mi_row + mode_offset_row,
1116                                  mi_col, mi_row, mi_col, mi_row_top, mi_col_top,
1117                                  i, dst_buf[i], dst_stride[i], top_bsize, bsize,
1118                                  0, 0);
1119             if (bsize < top_bsize)
1120               dec_extend_all(pbi, xd, tile, 0, bsize, top_bsize,
1121                              mi_row + mode_offset_row, mi_col, mi_row, mi_col,
1122                              mi_row_top, mi_col_top, i, dst_buf[i],
1123                              dst_stride[i]);
1124           } else {
1125 #endif
1126             // First half
1127             dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row,
1128                                  mi_col, mi_row_top, mi_col_top, i, dst_buf[i],
1129                                  dst_stride[i], top_bsize, subsize, 0, 0);
1130             if (bsize < top_bsize)
1131               dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row,
1132                              mi_col, mi_row, mi_col, mi_row_top, mi_col_top, i,
1133                              dst_buf[i], dst_stride[i]);
1134             else
1135               dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row,
1136                              mi_col, mi_row, mi_col, mi_row_top, mi_col_top, i,
1137                              dst_buf[i], dst_stride[i], 0);
1138 
1139             if (mi_row + hbs < cm->mi_rows) {
1140               // Second half
1141               dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col,
1142                                    mi_row + hbs, mi_col, mi_row_top, mi_col_top,
1143                                    i, dst_buf1[i], dst_stride1[i], top_bsize,
1144                                    subsize, 0, 0);
1145               if (bsize < top_bsize)
1146                 dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize,
1147                                mi_row + hbs, mi_col, mi_row + hbs, mi_col,
1148                                mi_row_top, mi_col_top, i, dst_buf1[i],
1149                                dst_stride1[i]);
1150               else
1151                 dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize,
1152                                mi_row + hbs, mi_col, mi_row + hbs, mi_col,
1153                                mi_row_top, mi_col_top, i, dst_buf1[i],
1154                                dst_stride1[i], 1);
1155 
1156               // weighted average to smooth the boundary
1157               xd->plane[i].dst.buf = dst_buf[i];
1158               xd->plane[i].dst.stride = dst_stride[i];
1159               av1_build_masked_inter_predictor_complex(
1160                   xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
1161                   mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
1162                   PARTITION_HORZ, i);
1163             }
1164 #if CONFIG_CB4X4
1165           }
1166 #endif
1167         }
1168       }
1169       break;
1170     case PARTITION_VERT:
1171       if (bsize == BLOCK_8X8 && !unify_bsize) {
1172         for (i = 0; i < MAX_MB_PLANE; i++) {
1173           // First half
1174           dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col,
1175                                mi_row_top, mi_col_top, i, dst_buf[i],
1176                                dst_stride[i], top_bsize, BLOCK_8X8, 1, 0);
1177           if (bsize < top_bsize)
1178             dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, mi_col,
1179                            mi_row, mi_col, mi_row_top, mi_col_top, i,
1180                            dst_buf[i], dst_stride[i]);
1181 
1182           // Second half
1183           dec_predict_b_extend(pbi, xd, tile, 1, mi_row, mi_col, mi_row, mi_col,
1184                                mi_row_top, mi_col_top, i, dst_buf1[i],
1185                                dst_stride1[i], top_bsize, BLOCK_8X8, 1, 1);
1186           if (bsize < top_bsize)
1187             dec_extend_all(pbi, xd, tile, 1, subsize, top_bsize, mi_row, mi_col,
1188                            mi_row, mi_col, mi_row_top, mi_col_top, i,
1189                            dst_buf1[i], dst_stride1[i]);
1190         }
1191 
1192         // Smooth
1193         xd->plane[0].dst.buf = dst_buf[0];
1194         xd->plane[0].dst.stride = dst_stride[0];
1195         av1_build_masked_inter_predictor_complex(
1196             xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
1197             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
1198             0);
1199       } else {
1200         for (i = 0; i < MAX_MB_PLANE; i++) {
1201 #if CONFIG_CB4X4
1202           const struct macroblockd_plane *pd = &xd->plane[i];
1203           int handle_chroma_sub8x8 = need_handle_chroma_sub8x8(
1204               subsize, pd->subsampling_x, pd->subsampling_y);
1205 
1206           if (handle_chroma_sub8x8) {
1207             int mode_offset_col = CONFIG_CHROMA_SUB8X8 ? hbs : 0;
1208             assert(i > 0 && bsize == BLOCK_8X8);
1209 
1210             dec_predict_b_extend(pbi, xd, tile, 0, mi_row,
1211                                  mi_col + mode_offset_col, mi_row, mi_col,
1212                                  mi_row_top, mi_col_top, i, dst_buf[i],
1213                                  dst_stride[i], top_bsize, bsize, 0, 0);
1214             if (bsize < top_bsize)
1215               dec_extend_all(pbi, xd, tile, 0, bsize, top_bsize, mi_row,
1216                              mi_col + mode_offset_col, mi_row, mi_col,
1217                              mi_row_top, mi_col_top, i, dst_buf[i],
1218                              dst_stride[i]);
1219           } else {
1220 #endif
1221             // First half
1222             dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row,
1223                                  mi_col, mi_row_top, mi_col_top, i, dst_buf[i],
1224                                  dst_stride[i], top_bsize, subsize, 0, 0);
1225             if (bsize < top_bsize)
1226               dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row,
1227                              mi_col, mi_row, mi_col, mi_row_top, mi_col_top, i,
1228                              dst_buf[i], dst_stride[i]);
1229             else
1230               dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row,
1231                              mi_col, mi_row, mi_col, mi_row_top, mi_col_top, i,
1232                              dst_buf[i], dst_stride[i], 3);
1233 
1234             // Second half
1235             if (mi_col + hbs < cm->mi_cols) {
1236               dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs,
1237                                    mi_row, mi_col + hbs, mi_row_top, mi_col_top,
1238                                    i, dst_buf1[i], dst_stride1[i], top_bsize,
1239                                    subsize, 0, 0);
1240               if (bsize < top_bsize)
1241                 dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row,
1242                                mi_col + hbs, mi_row, mi_col + hbs, mi_row_top,
1243                                mi_col_top, i, dst_buf1[i], dst_stride1[i]);
1244               else
1245                 dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row,
1246                                mi_col + hbs, mi_row, mi_col + hbs, mi_row_top,
1247                                mi_col_top, i, dst_buf1[i], dst_stride1[i], 2);
1248 
1249               // Smooth
1250               xd->plane[i].dst.buf = dst_buf[i];
1251               xd->plane[i].dst.stride = dst_stride[i];
1252               av1_build_masked_inter_predictor_complex(
1253                   xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
1254                   mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
1255                   PARTITION_VERT, i);
1256             }
1257 #if CONFIG_CB4X4
1258           }
1259 #endif
1260         }
1261       }
1262       break;
1263     case PARTITION_SPLIT:
1264       if (bsize == BLOCK_8X8 && !unify_bsize) {
1265         for (i = 0; i < MAX_MB_PLANE; i++) {
1266           dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col,
1267                                mi_row_top, mi_col_top, i, dst_buf[i],
1268                                dst_stride[i], top_bsize, BLOCK_8X8, 1, 0);
1269           dec_predict_b_extend(pbi, xd, tile, 1, mi_row, mi_col, mi_row, mi_col,
1270                                mi_row_top, mi_col_top, i, dst_buf1[i],
1271                                dst_stride1[i], top_bsize, BLOCK_8X8, 1, 1);
1272           dec_predict_b_extend(pbi, xd, tile, 2, mi_row, mi_col, mi_row, mi_col,
1273                                mi_row_top, mi_col_top, i, dst_buf2[i],
1274                                dst_stride2[i], top_bsize, BLOCK_8X8, 1, 1);
1275           dec_predict_b_extend(pbi, xd, tile, 3, mi_row, mi_col, mi_row, mi_col,
1276                                mi_row_top, mi_col_top, i, dst_buf3[i],
1277                                dst_stride3[i], top_bsize, BLOCK_8X8, 1, 1);
1278           if (bsize < top_bsize) {
1279             dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, mi_col,
1280                            mi_row, mi_col, mi_row_top, mi_col_top, i,
1281                            dst_buf[i], dst_stride[i]);
1282             dec_extend_all(pbi, xd, tile, 1, subsize, top_bsize, mi_row, mi_col,
1283                            mi_row, mi_col, mi_row_top, mi_col_top, i,
1284                            dst_buf1[i], dst_stride1[i]);
1285             dec_extend_all(pbi, xd, tile, 2, subsize, top_bsize, mi_row, mi_col,
1286                            mi_row, mi_col, mi_row_top, mi_col_top, i,
1287                            dst_buf2[i], dst_stride2[i]);
1288             dec_extend_all(pbi, xd, tile, 3, subsize, top_bsize, mi_row, mi_col,
1289                            mi_row, mi_col, mi_row_top, mi_col_top, i,
1290                            dst_buf3[i], dst_stride3[i]);
1291           }
1292         }
1293 #if CONFIG_CB4X4
1294       } else if (bsize == BLOCK_8X8) {
1295         for (i = 0; i < MAX_MB_PLANE; i++) {
1296           const struct macroblockd_plane *pd = &xd->plane[i];
1297           int handle_chroma_sub8x8 = need_handle_chroma_sub8x8(
1298               subsize, pd->subsampling_x, pd->subsampling_y);
1299 
1300           if (handle_chroma_sub8x8) {
1301             int mode_offset_row =
1302                 CONFIG_CHROMA_SUB8X8 && mi_row + hbs < cm->mi_rows ? hbs : 0;
1303             int mode_offset_col =
1304                 CONFIG_CHROMA_SUB8X8 && mi_col + hbs < cm->mi_cols ? hbs : 0;
1305 
1306             dec_predict_b_extend(pbi, xd, tile, 0, mi_row + mode_offset_row,
1307                                  mi_col + mode_offset_col, mi_row, mi_col,
1308                                  mi_row_top, mi_col_top, i, dst_buf[i],
1309                                  dst_stride[i], top_bsize, BLOCK_8X8, 0, 0);
1310             if (bsize < top_bsize)
1311               dec_extend_all(pbi, xd, tile, 0, BLOCK_8X8, top_bsize,
1312                              mi_row + mode_offset_row, mi_col + mode_offset_col,
1313                              mi_row, mi_col, mi_row_top, mi_col_top, i,
1314                              dst_buf[i], dst_stride[i]);
1315           } else {
1316             dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row,
1317                                  mi_col, mi_row_top, mi_col_top, i, dst_buf[i],
1318                                  dst_stride[i], top_bsize, subsize, 0, 0);
1319             if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols)
1320               dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs,
1321                                    mi_row, mi_col + hbs, mi_row_top, mi_col_top,
1322                                    i, dst_buf1[i], dst_stride1[i], top_bsize,
1323                                    subsize, 0, 0);
1324             if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols)
1325               dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col,
1326                                    mi_row + hbs, mi_col, mi_row_top, mi_col_top,
1327                                    i, dst_buf2[i], dst_stride2[i], top_bsize,
1328                                    subsize, 0, 0);
1329             if (mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols)
1330               dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col + hbs,
1331                                    mi_row + hbs, mi_col + hbs, mi_row_top,
1332                                    mi_col_top, i, dst_buf3[i], dst_stride3[i],
1333                                    top_bsize, subsize, 0, 0);
1334 
1335             if (bsize < top_bsize) {
1336               dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row,
1337                              mi_col, mi_row, mi_col, mi_row_top, mi_col_top, i,
1338                              dst_buf[i], dst_stride[i]);
1339               if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols)
1340                 dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row,
1341                                mi_col + hbs, mi_row, mi_col + hbs, mi_row_top,
1342                                mi_col_top, i, dst_buf1[i], dst_stride1[i]);
1343               if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols)
1344                 dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize,
1345                                mi_row + hbs, mi_col, mi_row + hbs, mi_col,
1346                                mi_row_top, mi_col_top, i, dst_buf2[i],
1347                                dst_stride2[i]);
1348               if (mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols)
1349                 dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize,
1350                                mi_row + hbs, mi_col + hbs, mi_row + hbs,
1351                                mi_col + hbs, mi_row_top, mi_col_top, i,
1352                                dst_buf3[i], dst_stride3[i]);
1353             }
1354           }
1355         }
1356 #endif
1357       } else {
1358         dec_predict_sb_complex(pbi, xd, tile, mi_row, mi_col, mi_row_top,
1359                                mi_col_top, subsize, top_bsize, dst_buf,
1360                                dst_stride);
1361         if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols)
1362           dec_predict_sb_complex(pbi, xd, tile, mi_row, mi_col + hbs,
1363                                  mi_row_top, mi_col_top, subsize, top_bsize,
1364                                  dst_buf1, dst_stride1);
1365         if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols)
1366           dec_predict_sb_complex(pbi, xd, tile, mi_row + hbs, mi_col,
1367                                  mi_row_top, mi_col_top, subsize, top_bsize,
1368                                  dst_buf2, dst_stride2);
1369         if (mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols)
1370           dec_predict_sb_complex(pbi, xd, tile, mi_row + hbs, mi_col + hbs,
1371                                  mi_row_top, mi_col_top, subsize, top_bsize,
1372                                  dst_buf3, dst_stride3);
1373       }
1374       for (i = 0; i < MAX_MB_PLANE; i++) {
1375 #if CONFIG_CB4X4
1376         const struct macroblockd_plane *pd = &xd->plane[i];
1377         int handle_chroma_sub8x8 = need_handle_chroma_sub8x8(
1378             subsize, pd->subsampling_x, pd->subsampling_y);
1379         if (handle_chroma_sub8x8) continue;  // Skip <4x4 chroma smoothing
1380 #else
1381         if (bsize == BLOCK_8X8 && i != 0)
1382           continue;  // Skip <4x4 chroma smoothing
1383 #endif
1384         if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) {
1385           av1_build_masked_inter_predictor_complex(
1386               xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
1387               mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
1388               PARTITION_VERT, i);
1389           if (mi_row + hbs < cm->mi_rows) {
1390             av1_build_masked_inter_predictor_complex(
1391                 xd, dst_buf2[i], dst_stride2[i], dst_buf3[i], dst_stride3[i],
1392                 mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
1393                 PARTITION_VERT, i);
1394             av1_build_masked_inter_predictor_complex(
1395                 xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
1396                 mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
1397                 PARTITION_HORZ, i);
1398           }
1399         } else if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) {
1400           av1_build_masked_inter_predictor_complex(
1401               xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
1402               mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
1403               PARTITION_HORZ, i);
1404         }
1405       }
1406       break;
1407 #if CONFIG_EXT_PARTITION_TYPES
1408 #if CONFIG_EXT_PARTITION_TYPES_AB
1409 #error HORZ/VERT_A/B partitions not yet updated in superres code
1410 #endif
1411     case PARTITION_HORZ_A:
1412       dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col,
1413                            mi_row_top, mi_col_top, dst_buf, dst_stride,
1414                            top_bsize, bsize2, 0, 0);
1415       dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row, mi_col,
1416                      mi_row_top, mi_col_top, dst_buf, dst_stride);
1417 
1418       dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs, mi_row,
1419                            mi_col + hbs, mi_row_top, mi_col_top, dst_buf1,
1420                            dst_stride1, top_bsize, bsize2, 0, 0);
1421       dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row, mi_col + hbs,
1422                      mi_row_top, mi_col_top, dst_buf1, dst_stride1);
1423 
1424       dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col, mi_row + hbs,
1425                            mi_col, mi_row_top, mi_col_top, dst_buf2,
1426                            dst_stride2, top_bsize, subsize, 0, 0);
1427       if (bsize < top_bsize)
1428         dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row + hbs,
1429                        mi_col, mi_row_top, mi_col_top, dst_buf2, dst_stride2);
1430       else
1431         dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row + hbs,
1432                        mi_col, mi_row_top, mi_col_top, dst_buf2, dst_stride2,
1433                        1);
1434 
1435       for (i = 0; i < MAX_MB_PLANE; i++) {
1436         xd->plane[i].dst.buf = dst_buf[i];
1437         xd->plane[i].dst.stride = dst_stride[i];
1438         av1_build_masked_inter_predictor_complex(
1439             xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
1440             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
1441             i);
1442       }
1443       for (i = 0; i < MAX_MB_PLANE; i++) {
1444         av1_build_masked_inter_predictor_complex(
1445             xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
1446             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
1447             i);
1448       }
1449       break;
1450     case PARTITION_VERT_A:
1451 
1452       dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col,
1453                            mi_row_top, mi_col_top, dst_buf, dst_stride,
1454                            top_bsize, bsize2, 0, 0);
1455       dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row, mi_col,
1456                      mi_row_top, mi_col_top, dst_buf, dst_stride);
1457 
1458       dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col, mi_row + hbs,
1459                            mi_col, mi_row_top, mi_col_top, dst_buf1,
1460                            dst_stride1, top_bsize, bsize2, 0, 0);
1461       dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row + hbs, mi_col,
1462                      mi_row_top, mi_col_top, dst_buf1, dst_stride1);
1463 
1464       dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs, mi_row,
1465                            mi_col + hbs, mi_row_top, mi_col_top, dst_buf2,
1466                            dst_stride2, top_bsize, subsize, 0, 0);
1467       if (bsize < top_bsize)
1468         dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row,
1469                        mi_col + hbs, mi_row_top, mi_col_top, dst_buf2,
1470                        dst_stride2);
1471       else
1472         dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row,
1473                        mi_col + hbs, mi_row_top, mi_col_top, dst_buf2,
1474                        dst_stride2, 2);
1475 
1476       for (i = 0; i < MAX_MB_PLANE; i++) {
1477         xd->plane[i].dst.buf = dst_buf[i];
1478         xd->plane[i].dst.stride = dst_stride[i];
1479         av1_build_masked_inter_predictor_complex(
1480             xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
1481             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
1482             i);
1483       }
1484       for (i = 0; i < MAX_MB_PLANE; i++) {
1485         av1_build_masked_inter_predictor_complex(
1486             xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
1487             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
1488             i);
1489       }
1490       break;
1491     case PARTITION_HORZ_B:
1492       dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col,
1493                            mi_row_top, mi_col_top, dst_buf, dst_stride,
1494                            top_bsize, subsize, 0, 0);
1495       if (bsize < top_bsize)
1496         dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, mi_col,
1497                        mi_row_top, mi_col_top, dst_buf, dst_stride);
1498       else
1499         dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row, mi_col,
1500                        mi_row_top, mi_col_top, dst_buf, dst_stride, 0);
1501 
1502       dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col, mi_row + hbs,
1503                            mi_col, mi_row_top, mi_col_top, dst_buf1,
1504                            dst_stride1, top_bsize, bsize2, 0, 0);
1505       dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row + hbs, mi_col,
1506                      mi_row_top, mi_col_top, dst_buf1, dst_stride1);
1507 
1508       dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col + hbs,
1509                            mi_row + hbs, mi_col + hbs, mi_row_top, mi_col_top,
1510                            dst_buf2, dst_stride2, top_bsize, bsize2, 0, 0);
1511       dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row + hbs,
1512                      mi_col + hbs, mi_row_top, mi_col_top, dst_buf2,
1513                      dst_stride2);
1514 
1515       for (i = 0; i < MAX_MB_PLANE; i++) {
1516         xd->plane[i].dst.buf = dst_buf1[i];
1517         xd->plane[i].dst.stride = dst_stride1[i];
1518         av1_build_masked_inter_predictor_complex(
1519             xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
1520             mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
1521             PARTITION_VERT, i);
1522       }
1523       for (i = 0; i < MAX_MB_PLANE; i++) {
1524         xd->plane[i].dst.buf = dst_buf[i];
1525         xd->plane[i].dst.stride = dst_stride[i];
1526         av1_build_masked_inter_predictor_complex(
1527             xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
1528             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
1529             i);
1530       }
1531       break;
1532     case PARTITION_VERT_B:
1533       dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col,
1534                            mi_row_top, mi_col_top, dst_buf, dst_stride,
1535                            top_bsize, subsize, 0, 0);
1536       if (bsize < top_bsize)
1537         dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, mi_col,
1538                        mi_row_top, mi_col_top, dst_buf, dst_stride);
1539       else
1540         dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row, mi_col,
1541                        mi_row_top, mi_col_top, dst_buf, dst_stride, 3);
1542 
1543       dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs, mi_row,
1544                            mi_col + hbs, mi_row_top, mi_col_top, dst_buf1,
1545                            dst_stride1, top_bsize, bsize2, 0, 0);
1546       dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row, mi_col + hbs,
1547                      mi_row_top, mi_col_top, dst_buf1, dst_stride1);
1548 
1549       dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col + hbs,
1550                            mi_row + hbs, mi_col + hbs, mi_row_top, mi_col_top,
1551                            dst_buf2, dst_stride2, top_bsize, bsize2, 0, 0);
1552       dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row + hbs,
1553                      mi_col + hbs, mi_row_top, mi_col_top, dst_buf2,
1554                      dst_stride2);
1555 
1556       for (i = 0; i < MAX_MB_PLANE; i++) {
1557         xd->plane[i].dst.buf = dst_buf1[i];
1558         xd->plane[i].dst.stride = dst_stride1[i];
1559         av1_build_masked_inter_predictor_complex(
1560             xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
1561             mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
1562             PARTITION_HORZ, i);
1563       }
1564       for (i = 0; i < MAX_MB_PLANE; i++) {
1565         xd->plane[i].dst.buf = dst_buf[i];
1566         xd->plane[i].dst.stride = dst_stride[i];
1567         av1_build_masked_inter_predictor_complex(
1568             xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
1569             mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
1570             i);
1571       }
1572       break;
1573 #endif  // CONFIG_EXT_PARTITION_TYPES
1574     default: assert(0);
1575   }
1576 }
1577 
set_segment_id_supertx(const AV1_COMMON * const cm,int mi_row,int mi_col,BLOCK_SIZE bsize)1578 static void set_segment_id_supertx(const AV1_COMMON *const cm, int mi_row,
1579                                    int mi_col, BLOCK_SIZE bsize) {
1580   const struct segmentation *seg = &cm->seg;
1581   const int miw = AOMMIN(mi_size_wide[bsize], cm->mi_cols - mi_col);
1582   const int mih = AOMMIN(mi_size_high[bsize], cm->mi_rows - mi_row);
1583   const int mi_offset = mi_row * cm->mi_stride + mi_col;
1584   MODE_INFO **const mip = cm->mi_grid_visible + mi_offset;
1585   int r, c;
1586   int seg_id_supertx = MAX_SEGMENTS;
1587 
1588   if (!seg->enabled) {
1589     seg_id_supertx = 0;
1590   } else {
1591     // Find the minimum segment_id
1592     for (r = 0; r < mih; r++)
1593       for (c = 0; c < miw; c++)
1594         seg_id_supertx =
1595             AOMMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id, seg_id_supertx);
1596     assert(0 <= seg_id_supertx && seg_id_supertx < MAX_SEGMENTS);
1597   }
1598 
1599   // Assign the the segment_id back to segment_id_supertx
1600   for (r = 0; r < mih; r++)
1601     for (c = 0; c < miw; c++)
1602       mip[r * cm->mi_stride + c]->mbmi.segment_id_supertx = seg_id_supertx;
1603 }
1604 #endif  // CONFIG_SUPERTX
1605 
decode_mbmi_block(AV1Decoder * const pbi,MACROBLOCKD * const xd,int supertx_enabled,int mi_row,int mi_col,aom_reader * r,PARTITION_TYPE partition,BLOCK_SIZE bsize)1606 static void decode_mbmi_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
1607 #if CONFIG_SUPERTX
1608                               int supertx_enabled,
1609 #endif  // CONFIG_SUPERTX
1610                               int mi_row, int mi_col, aom_reader *r,
1611 #if CONFIG_EXT_PARTITION_TYPES
1612                               PARTITION_TYPE partition,
1613 #endif  // CONFIG_EXT_PARTITION_TYPES
1614                               BLOCK_SIZE bsize) {
1615   AV1_COMMON *const cm = &pbi->common;
1616   const int bw = mi_size_wide[bsize];
1617   const int bh = mi_size_high[bsize];
1618   const int x_mis = AOMMIN(bw, cm->mi_cols - mi_col);
1619   const int y_mis = AOMMIN(bh, cm->mi_rows - mi_row);
1620 
1621 #if CONFIG_ACCOUNTING
1622   aom_accounting_set_context(&pbi->accounting, mi_col, mi_row);
1623 #endif
1624 #if CONFIG_SUPERTX
1625   if (supertx_enabled) {
1626     set_mb_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis);
1627   } else {
1628     set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis);
1629   }
1630 #if CONFIG_EXT_PARTITION_TYPES
1631   xd->mi[0]->mbmi.partition = partition;
1632 #endif
1633   av1_read_mode_info(pbi, xd, supertx_enabled, mi_row, mi_col, r, x_mis, y_mis);
1634 #else
1635   set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis);
1636 #if CONFIG_EXT_PARTITION_TYPES
1637   xd->mi[0]->mbmi.partition = partition;
1638 #endif
1639   av1_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
1640 #endif  // CONFIG_SUPERTX
1641   if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) {
1642     const BLOCK_SIZE uv_subsize =
1643         ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y];
1644     if (uv_subsize == BLOCK_INVALID)
1645       aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
1646                          "Invalid block size.");
1647   }
1648 
1649 #if CONFIG_SUPERTX
1650   xd->mi[0]->mbmi.segment_id_supertx = MAX_SEGMENTS;
1651 #endif  // CONFIG_SUPERTX
1652 
1653   int reader_corrupted_flag = aom_reader_has_error(r);
1654   aom_merge_corrupted_flag(&xd->corrupted, reader_corrupted_flag);
1655 }
1656 
1657 #if CONFIG_NCOBMC_ADAPT_WEIGHT
set_mode_info_offsets(AV1_COMMON * const cm,MACROBLOCKD * const xd,int mi_row,int mi_col)1658 static void set_mode_info_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd,
1659                                   int mi_row, int mi_col) {
1660   const int offset = mi_row * cm->mi_stride + mi_col;
1661   xd->mi = cm->mi_grid_visible + offset;
1662   xd->mi[0] = &cm->mi[offset];
1663 }
1664 
get_ncobmc_recon(AV1_COMMON * const cm,MACROBLOCKD * xd,int mi_row,int mi_col,int bsize,int mode)1665 static void get_ncobmc_recon(AV1_COMMON *const cm, MACROBLOCKD *xd, int mi_row,
1666                              int mi_col, int bsize, int mode) {
1667   uint8_t *pred_buf[4][MAX_MB_PLANE];
1668   int pred_stride[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
1669   // target block in pxl
1670   int pxl_row = mi_row << MI_SIZE_LOG2;
1671   int pxl_col = mi_col << MI_SIZE_LOG2;
1672 
1673   int plane;
1674 #if CONFIG_HIGHBITDEPTH
1675   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
1676     int len = sizeof(uint16_t);
1677     ASSIGN_ALIGNED_PTRS_HBD(pred_buf[0], cm->ncobmcaw_buf[0], MAX_SB_SQUARE,
1678                             len);
1679     ASSIGN_ALIGNED_PTRS_HBD(pred_buf[1], cm->ncobmcaw_buf[1], MAX_SB_SQUARE,
1680                             len);
1681     ASSIGN_ALIGNED_PTRS_HBD(pred_buf[2], cm->ncobmcaw_buf[2], MAX_SB_SQUARE,
1682                             len);
1683     ASSIGN_ALIGNED_PTRS_HBD(pred_buf[3], cm->ncobmcaw_buf[3], MAX_SB_SQUARE,
1684                             len);
1685   } else {
1686 #endif  // CONFIG_HIGHBITDEPTH
1687     ASSIGN_ALIGNED_PTRS(pred_buf[0], cm->ncobmcaw_buf[0], MAX_SB_SQUARE);
1688     ASSIGN_ALIGNED_PTRS(pred_buf[1], cm->ncobmcaw_buf[1], MAX_SB_SQUARE);
1689     ASSIGN_ALIGNED_PTRS(pred_buf[2], cm->ncobmcaw_buf[2], MAX_SB_SQUARE);
1690     ASSIGN_ALIGNED_PTRS(pred_buf[3], cm->ncobmcaw_buf[3], MAX_SB_SQUARE);
1691 #if CONFIG_HIGHBITDEPTH
1692   }
1693 #endif
1694   av1_get_ext_blk_preds(cm, xd, bsize, mi_row, mi_col, pred_buf, pred_stride);
1695   av1_get_ori_blk_pred(cm, xd, bsize, mi_row, mi_col, pred_buf[3], pred_stride);
1696   for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
1697     build_ncobmc_intrpl_pred(cm, xd, plane, pxl_row, pxl_col, bsize, pred_buf,
1698                              pred_stride, mode);
1699   }
1700 }
1701 
av1_get_ncobmc_recon(AV1_COMMON * const cm,MACROBLOCKD * const xd,int bsize,const int mi_row,const int mi_col,const NCOBMC_MODE modes)1702 static void av1_get_ncobmc_recon(AV1_COMMON *const cm, MACROBLOCKD *const xd,
1703                                  int bsize, const int mi_row, const int mi_col,
1704                                  const NCOBMC_MODE modes) {
1705   const int mi_width = mi_size_wide[bsize];
1706   const int mi_height = mi_size_high[bsize];
1707 
1708   assert(bsize >= BLOCK_8X8);
1709 
1710   reset_xd_boundary(xd, mi_row, mi_height, mi_col, mi_width, cm->mi_rows,
1711                     cm->mi_cols);
1712   get_ncobmc_recon(cm, xd, mi_row, mi_col, bsize, modes);
1713 }
1714 
recon_ncobmc_intrpl_pred(AV1_COMMON * const cm,MACROBLOCKD * const xd,int mi_row,int mi_col,BLOCK_SIZE bsize)1715 static void recon_ncobmc_intrpl_pred(AV1_COMMON *const cm,
1716                                      MACROBLOCKD *const xd, int mi_row,
1717                                      int mi_col, BLOCK_SIZE bsize) {
1718   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
1719   const int mi_width = mi_size_wide[bsize];
1720   const int mi_height = mi_size_high[bsize];
1721   const int hbs = AOMMAX(mi_size_wide[bsize] / 2, mi_size_high[bsize] / 2);
1722   const BLOCK_SIZE sqr_blk = bsize_2_sqr_bsize[bsize];
1723   if (mi_width > mi_height) {
1724     // horizontal partition
1725     av1_get_ncobmc_recon(cm, xd, sqr_blk, mi_row, mi_col, mbmi->ncobmc_mode[0]);
1726     xd->mi += hbs;
1727     av1_get_ncobmc_recon(cm, xd, sqr_blk, mi_row, mi_col + hbs,
1728                          mbmi->ncobmc_mode[1]);
1729   } else if (mi_height > mi_width) {
1730     // vertical partition
1731     av1_get_ncobmc_recon(cm, xd, sqr_blk, mi_row, mi_col, mbmi->ncobmc_mode[0]);
1732     xd->mi += hbs * xd->mi_stride;
1733     av1_get_ncobmc_recon(cm, xd, sqr_blk, mi_row + hbs, mi_col,
1734                          mbmi->ncobmc_mode[1]);
1735   } else {
1736     av1_get_ncobmc_recon(cm, xd, sqr_blk, mi_row, mi_col, mbmi->ncobmc_mode[0]);
1737   }
1738   set_mode_info_offsets(cm, xd, mi_row, mi_col);
1739   // restore dst buffer and mode info
1740   av1_setup_dst_planes(xd->plane, bsize, get_frame_new_buffer(cm), mi_row,
1741                        mi_col);
1742 }
1743 #endif  // CONFIG_NCOBMC_ADAPT_WEIGHT
1744 
decode_token_and_recon_block(AV1Decoder * const pbi,MACROBLOCKD * const xd,int mi_row,int mi_col,aom_reader * r,BLOCK_SIZE bsize)1745 static void decode_token_and_recon_block(AV1Decoder *const pbi,
1746                                          MACROBLOCKD *const xd, int mi_row,
1747                                          int mi_col, aom_reader *r,
1748                                          BLOCK_SIZE bsize) {
1749   AV1_COMMON *const cm = &pbi->common;
1750   const int bw = mi_size_wide[bsize];
1751   const int bh = mi_size_high[bsize];
1752   const int x_mis = AOMMIN(bw, cm->mi_cols - mi_col);
1753   const int y_mis = AOMMIN(bh, cm->mi_rows - mi_row);
1754 
1755   set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis);
1756   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
1757 #if CONFIG_CFL && CONFIG_CHROMA_SUB8X8
1758   CFL_CTX *const cfl = xd->cfl;
1759   cfl->is_chroma_reference = is_chroma_reference(
1760       mi_row, mi_col, bsize, cfl->subsampling_x, cfl->subsampling_y);
1761 #endif  // CONFIG_CFL && CONFIG_CHROMA_SUB8X8
1762 
1763   if (cm->delta_q_present_flag) {
1764     int i;
1765     for (i = 0; i < MAX_SEGMENTS; i++) {
1766 #if CONFIG_EXT_DELTA_Q
1767       const int current_qindex =
1768           av1_get_qindex(&cm->seg, i, xd->current_qindex);
1769 #else
1770       const int current_qindex = xd->current_qindex;
1771 #endif  // CONFIG_EXT_DELTA_Q
1772       int j;
1773       for (j = 0; j < MAX_MB_PLANE; ++j) {
1774         const int dc_delta_q = j == 0 ? cm->y_dc_delta_q : cm->uv_dc_delta_q;
1775         const int ac_delta_q = j == 0 ? 0 : cm->uv_ac_delta_q;
1776 
1777         xd->plane[j].seg_dequant[i][0] =
1778             av1_dc_quant(current_qindex, dc_delta_q, cm->bit_depth);
1779         xd->plane[j].seg_dequant[i][1] =
1780             av1_ac_quant(current_qindex, ac_delta_q, cm->bit_depth);
1781       }
1782     }
1783   }
1784 
1785 #if CONFIG_CB4X4
1786   if (mbmi->skip) av1_reset_skip_context(xd, mi_row, mi_col, bsize);
1787 #else
1788   if (mbmi->skip) {
1789     av1_reset_skip_context(xd, mi_row, mi_col, AOMMAX(BLOCK_8X8, bsize));
1790   }
1791 #endif
1792 
1793 #if CONFIG_COEF_INTERLEAVE
1794   {
1795     const struct macroblockd_plane *const pd_y = &xd->plane[0];
1796     const struct macroblockd_plane *const pd_c = &xd->plane[1];
1797     const TX_SIZE tx_log2_y = mbmi->tx_size;
1798     const TX_SIZE tx_log2_c = av1_get_uv_tx_size(mbmi, pd_c);
1799     const int tx_sz_y = (1 << tx_log2_y);
1800     const int tx_sz_c = (1 << tx_log2_c);
1801     const int num_4x4_w_y = pd_y->n4_w;
1802     const int num_4x4_h_y = pd_y->n4_h;
1803     const int num_4x4_w_c = pd_c->n4_w;
1804     const int num_4x4_h_c = pd_c->n4_h;
1805     const int max_4x4_w_y = get_max_4x4_size(num_4x4_w_y, xd->mb_to_right_edge,
1806                                              pd_y->subsampling_x);
1807     const int max_4x4_h_y = get_max_4x4_size(num_4x4_h_y, xd->mb_to_bottom_edge,
1808                                              pd_y->subsampling_y);
1809     const int max_4x4_w_c = get_max_4x4_size(num_4x4_w_c, xd->mb_to_right_edge,
1810                                              pd_c->subsampling_x);
1811     const int max_4x4_h_c = get_max_4x4_size(num_4x4_h_c, xd->mb_to_bottom_edge,
1812                                              pd_c->subsampling_y);
1813 
1814     // The max_4x4_w/h may be smaller than tx_sz under some corner cases,
1815     // i.e. when the SB is splitted by tile boundaries.
1816     const int tu_num_w_y = (max_4x4_w_y + tx_sz_y - 1) / tx_sz_y;
1817     const int tu_num_h_y = (max_4x4_h_y + tx_sz_y - 1) / tx_sz_y;
1818     const int tu_num_w_c = (max_4x4_w_c + tx_sz_c - 1) / tx_sz_c;
1819     const int tu_num_h_c = (max_4x4_h_c + tx_sz_c - 1) / tx_sz_c;
1820     const int tu_num_c = tu_num_w_c * tu_num_h_c;
1821 
1822     if (!is_inter_block(mbmi)) {
1823       int tu_idx_c = 0;
1824       int row_y, col_y, row_c, col_c;
1825       int plane;
1826 
1827 // TODO(anybody) : remove this flag when PVQ supports pallete coding tool
1828 #if !CONFIG_PVQ
1829       for (plane = 0; plane <= 1; ++plane) {
1830         if (mbmi->palette_mode_info.palette_size[plane])
1831           av1_decode_palette_tokens(xd, plane, r);
1832       }
1833 #endif  // !CONFIG_PVQ
1834 
1835       for (row_y = 0; row_y < tu_num_h_y; row_y++) {
1836         for (col_y = 0; col_y < tu_num_w_y; col_y++) {
1837           // luma
1838           predict_and_reconstruct_intra_block(
1839               cm, xd, r, mbmi, 0, row_y * tx_sz_y, col_y * tx_sz_y, tx_log2_y);
1840           // chroma
1841           if (tu_idx_c < tu_num_c) {
1842             row_c = (tu_idx_c / tu_num_w_c) * tx_sz_c;
1843             col_c = (tu_idx_c % tu_num_w_c) * tx_sz_c;
1844             predict_and_reconstruct_intra_block(cm, xd, r, mbmi, 1, row_c,
1845                                                 col_c, tx_log2_c);
1846             predict_and_reconstruct_intra_block(cm, xd, r, mbmi, 2, row_c,
1847                                                 col_c, tx_log2_c);
1848             tu_idx_c++;
1849           }
1850         }
1851       }
1852 
1853       // In 422 case, it's possilbe that Chroma has more TUs than Luma
1854       while (tu_idx_c < tu_num_c) {
1855         row_c = (tu_idx_c / tu_num_w_c) * tx_sz_c;
1856         col_c = (tu_idx_c % tu_num_w_c) * tx_sz_c;
1857         predict_and_reconstruct_intra_block(cm, xd, r, mbmi, 1, row_c, col_c,
1858                                             tx_log2_c);
1859         predict_and_reconstruct_intra_block(cm, xd, r, mbmi, 2, row_c, col_c,
1860                                             tx_log2_c);
1861         tu_idx_c++;
1862       }
1863     } else {
1864       // Prediction
1865       av1_build_inter_predictors_sb(cm, xd, mi_row, mi_col, NULL,
1866                                     AOMMAX(bsize, BLOCK_8X8));
1867 
1868       // Reconstruction
1869       if (!mbmi->skip) {
1870         int eobtotal = 0;
1871         int tu_idx_c = 0;
1872         int row_y, col_y, row_c, col_c;
1873 
1874         for (row_y = 0; row_y < tu_num_h_y; row_y++) {
1875           for (col_y = 0; col_y < tu_num_w_y; col_y++) {
1876             // luma
1877             eobtotal += reconstruct_inter_block(cm, xd, r, mbmi->segment_id, 0,
1878                                                 row_y * tx_sz_y,
1879                                                 col_y * tx_sz_y, tx_log2_y);
1880             // chroma
1881             if (tu_idx_c < tu_num_c) {
1882               row_c = (tu_idx_c / tu_num_w_c) * tx_sz_c;
1883               col_c = (tu_idx_c % tu_num_w_c) * tx_sz_c;
1884               eobtotal += reconstruct_inter_block(cm, xd, r, mbmi->segment_id,
1885                                                   1, row_c, col_c, tx_log2_c);
1886               eobtotal += reconstruct_inter_block(cm, xd, r, mbmi->segment_id,
1887                                                   2, row_c, col_c, tx_log2_c);
1888               tu_idx_c++;
1889             }
1890           }
1891         }
1892 
1893         // In 422 case, it's possilbe that Chroma has more TUs than Luma
1894         while (tu_idx_c < tu_num_c) {
1895           row_c = (tu_idx_c / tu_num_w_c) * tx_sz_c;
1896           col_c = (tu_idx_c % tu_num_w_c) * tx_sz_c;
1897           eobtotal += reconstruct_inter_block(cm, xd, r, mbmi->segment_id, 1,
1898                                               row_c, col_c, tx_log2_c);
1899           eobtotal += reconstruct_inter_block(cm, xd, r, mbmi->segment_id, 2,
1900                                               row_c, col_c, tx_log2_c);
1901           tu_idx_c++;
1902         }
1903 
1904         // TODO(CONFIG_COEF_INTERLEAVE owners): bring eob == 0 corner case
1905         // into line with the defaut configuration
1906         if (bsize >= BLOCK_8X8 && eobtotal == 0) mbmi->skip = 1;
1907       }
1908     }
1909   }
1910 #else  // CONFIG_COEF_INTERLEAVE
1911   if (!is_inter_block(mbmi)) {
1912     int plane;
1913 
1914 // TODO(anybody) : remove this flag when PVQ supports pallete coding tool
1915 #if !CONFIG_PVQ
1916     for (plane = 0; plane <= 1; ++plane) {
1917       if (mbmi->palette_mode_info.palette_size[plane])
1918         av1_decode_palette_tokens(xd, plane, r);
1919     }
1920 #endif  // #if !CONFIG_PVQ
1921 
1922     for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
1923       const struct macroblockd_plane *const pd = &xd->plane[plane];
1924       const TX_SIZE tx_size = av1_get_tx_size(plane, xd);
1925       const int stepr = tx_size_high_unit[tx_size];
1926       const int stepc = tx_size_wide_unit[tx_size];
1927 #if CONFIG_CHROMA_SUB8X8
1928       const BLOCK_SIZE plane_bsize =
1929           AOMMAX(BLOCK_4X4, get_plane_block_size(bsize, pd));
1930 #elif CONFIG_CB4X4
1931       const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
1932 #else
1933       const BLOCK_SIZE plane_bsize =
1934           get_plane_block_size(AOMMAX(BLOCK_8X8, bsize), pd);
1935 #endif
1936       int row, col;
1937       const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
1938       const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
1939 #if CONFIG_CB4X4
1940       if (!is_chroma_reference(mi_row, mi_col, bsize, pd->subsampling_x,
1941                                pd->subsampling_y))
1942         continue;
1943 #endif
1944       int blk_row, blk_col;
1945       const BLOCK_SIZE max_unit_bsize = get_plane_block_size(BLOCK_64X64, pd);
1946       int mu_blocks_wide =
1947           block_size_wide[max_unit_bsize] >> tx_size_wide_log2[0];
1948       int mu_blocks_high =
1949           block_size_high[max_unit_bsize] >> tx_size_high_log2[0];
1950       mu_blocks_wide = AOMMIN(max_blocks_wide, mu_blocks_wide);
1951       mu_blocks_high = AOMMIN(max_blocks_high, mu_blocks_high);
1952 
1953       for (row = 0; row < max_blocks_high; row += mu_blocks_high) {
1954         const int unit_height = AOMMIN(mu_blocks_high + row, max_blocks_high);
1955         for (col = 0; col < max_blocks_wide; col += mu_blocks_wide) {
1956           const int unit_width = AOMMIN(mu_blocks_wide + col, max_blocks_wide);
1957 
1958           for (blk_row = row; blk_row < unit_height; blk_row += stepr)
1959             for (blk_col = col; blk_col < unit_width; blk_col += stepc)
1960               predict_and_reconstruct_intra_block(cm, xd, r, mbmi, plane,
1961                                                   blk_row, blk_col, tx_size);
1962         }
1963       }
1964     }
1965   } else {
1966     int ref;
1967 
1968 #if CONFIG_COMPOUND_SINGLEREF
1969     for (ref = 0; ref < 1 + is_inter_anyref_comp_mode(mbmi->mode); ++ref)
1970 #else
1971     for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref)
1972 #endif  // CONFIG_COMPOUND_SINGLEREF
1973     {
1974       const MV_REFERENCE_FRAME frame =
1975 #if CONFIG_COMPOUND_SINGLEREF
1976           has_second_ref(mbmi) ? mbmi->ref_frame[ref] : mbmi->ref_frame[0];
1977 #else
1978           mbmi->ref_frame[ref];
1979 #endif  // CONFIG_COMPOUND_SINGLEREF
1980       if (frame < LAST_FRAME) {
1981 #if CONFIG_INTRABC
1982         assert(is_intrabc_block(mbmi));
1983         assert(frame == INTRA_FRAME);
1984         assert(ref == 0);
1985 #else
1986         assert(0);
1987 #endif  // CONFIG_INTRABC
1988       } else {
1989         RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
1990 
1991         xd->block_refs[ref] = ref_buf;
1992         if ((!av1_is_valid_scale(&ref_buf->sf)))
1993           aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
1994                              "Reference frame has invalid dimensions");
1995         av1_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col,
1996                              &ref_buf->sf);
1997       }
1998     }
1999 
2000 #if CONFIG_CB4X4
2001     av1_build_inter_predictors_sb(cm, xd, mi_row, mi_col, NULL, bsize);
2002 #else
2003     av1_build_inter_predictors_sb(cm, xd, mi_row, mi_col, NULL,
2004                                   AOMMAX(bsize, BLOCK_8X8));
2005 #endif
2006 
2007 #if CONFIG_MOTION_VAR
2008     if (mbmi->motion_mode == OBMC_CAUSAL) {
2009 #if CONFIG_NCOBMC
2010       av1_build_ncobmc_inter_predictors_sb(cm, xd, mi_row, mi_col);
2011 #else
2012       av1_build_obmc_inter_predictors_sb(cm, xd, mi_row, mi_col);
2013 #endif
2014     }
2015 #endif  // CONFIG_MOTION_VAR
2016 #if CONFIG_NCOBMC_ADAPT_WEIGHT
2017     if (mbmi->motion_mode == NCOBMC_ADAPT_WEIGHT) {
2018       int plane;
2019       recon_ncobmc_intrpl_pred(cm, xd, mi_row, mi_col, bsize);
2020       for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
2021         get_pred_from_intrpl_buf(xd, mi_row, mi_col, bsize, plane);
2022       }
2023     }
2024 #endif
2025     // Reconstruction
2026     if (!mbmi->skip) {
2027       int eobtotal = 0;
2028       int plane;
2029 
2030       for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
2031         const struct macroblockd_plane *const pd = &xd->plane[plane];
2032 #if CONFIG_CHROMA_SUB8X8
2033         const BLOCK_SIZE plane_bsize =
2034             AOMMAX(BLOCK_4X4, get_plane_block_size(bsize, pd));
2035 #elif CONFIG_CB4X4
2036         const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
2037 #else
2038         const BLOCK_SIZE plane_bsize =
2039             get_plane_block_size(AOMMAX(BLOCK_8X8, bsize), pd);
2040 #endif
2041         const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
2042         const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
2043         int row, col;
2044 
2045 #if CONFIG_CB4X4
2046         if (!is_chroma_reference(mi_row, mi_col, bsize, pd->subsampling_x,
2047                                  pd->subsampling_y))
2048           continue;
2049 #endif
2050 
2051 #if CONFIG_VAR_TX
2052         const BLOCK_SIZE max_unit_bsize = get_plane_block_size(BLOCK_64X64, pd);
2053         int mu_blocks_wide =
2054             block_size_wide[max_unit_bsize] >> tx_size_wide_log2[0];
2055         int mu_blocks_high =
2056             block_size_high[max_unit_bsize] >> tx_size_high_log2[0];
2057 
2058         mu_blocks_wide = AOMMIN(max_blocks_wide, mu_blocks_wide);
2059         mu_blocks_high = AOMMIN(max_blocks_high, mu_blocks_high);
2060 
2061         const TX_SIZE max_tx_size = get_vartx_max_txsize(
2062             mbmi, plane_bsize, pd->subsampling_x || pd->subsampling_y);
2063         const int bh_var_tx = tx_size_high_unit[max_tx_size];
2064         const int bw_var_tx = tx_size_wide_unit[max_tx_size];
2065         int block = 0;
2066         int step =
2067             tx_size_wide_unit[max_tx_size] * tx_size_high_unit[max_tx_size];
2068 
2069         for (row = 0; row < max_blocks_high; row += mu_blocks_high) {
2070           for (col = 0; col < max_blocks_wide; col += mu_blocks_wide) {
2071             int blk_row, blk_col;
2072             const int unit_height =
2073                 AOMMIN(mu_blocks_high + row, max_blocks_high);
2074             const int unit_width =
2075                 AOMMIN(mu_blocks_wide + col, max_blocks_wide);
2076             for (blk_row = row; blk_row < unit_height; blk_row += bh_var_tx) {
2077               for (blk_col = col; blk_col < unit_width; blk_col += bw_var_tx) {
2078                 decode_reconstruct_tx(cm, xd, r, mbmi, plane, plane_bsize,
2079                                       blk_row, blk_col, block, max_tx_size,
2080                                       &eobtotal);
2081                 block += step;
2082               }
2083             }
2084           }
2085         }
2086 #else
2087         const TX_SIZE tx_size = av1_get_tx_size(plane, xd);
2088         const int stepr = tx_size_high_unit[tx_size];
2089         const int stepc = tx_size_wide_unit[tx_size];
2090         for (row = 0; row < max_blocks_high; row += stepr)
2091           for (col = 0; col < max_blocks_wide; col += stepc)
2092             eobtotal += reconstruct_inter_block(cm, xd, r, mbmi->segment_id,
2093                                                 plane, row, col, tx_size);
2094 #endif
2095       }
2096     }
2097   }
2098 #if CONFIG_CFL && CONFIG_CHROMA_SUB8X8
2099   if (mbmi->uv_mode != UV_CFL_PRED) {
2100 #if CONFIG_DEBUG
2101     if (cfl->is_chroma_reference) {
2102       cfl_clear_sub8x8_val(cfl);
2103     }
2104 #endif
2105     if (!cfl->is_chroma_reference && is_inter_block(mbmi)) {
2106       cfl_store_block(xd, mbmi->sb_type, mbmi->tx_size);
2107     }
2108   }
2109 #endif  // CONFIG_CFL && CONFIG_CHROMA_SUB8X8
2110 #endif  // CONFIG_COEF_INTERLEAVE
2111 
2112   int reader_corrupted_flag = aom_reader_has_error(r);
2113   aom_merge_corrupted_flag(&xd->corrupted, reader_corrupted_flag);
2114 }
2115 
2116 #if NC_MODE_INFO && CONFIG_MOTION_VAR
detoken_and_recon_sb(AV1Decoder * const pbi,MACROBLOCKD * const xd,int mi_row,int mi_col,aom_reader * r,BLOCK_SIZE bsize)2117 static void detoken_and_recon_sb(AV1Decoder *const pbi, MACROBLOCKD *const xd,
2118                                  int mi_row, int mi_col, aom_reader *r,
2119                                  BLOCK_SIZE bsize) {
2120   AV1_COMMON *const cm = &pbi->common;
2121   const int hbs = mi_size_wide[bsize] >> 1;
2122 #if CONFIG_CB4X4
2123   const int unify_bsize = 1;
2124 #else
2125   const int unify_bsize = 0;
2126 #endif
2127 #if CONFIG_EXT_PARTITION_TYPES
2128   BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT);
2129 #endif
2130   PARTITION_TYPE partition;
2131   BLOCK_SIZE subsize;
2132   const int has_rows = (mi_row + hbs) < cm->mi_rows;
2133   const int has_cols = (mi_col + hbs) < cm->mi_cols;
2134 
2135   if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
2136 
2137   partition = get_partition(cm, mi_row, mi_col, bsize);
2138   subsize = subsize_lookup[partition][bsize];
2139 
2140   if (!hbs && !unify_bsize) {
2141     xd->bmode_blocks_wl = 1 >> !!(partition & PARTITION_VERT);
2142     xd->bmode_blocks_hl = 1 >> !!(partition & PARTITION_HORZ);
2143     decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, subsize);
2144   } else {
2145     switch (partition) {
2146       case PARTITION_NONE:
2147         decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, bsize);
2148         break;
2149       case PARTITION_HORZ:
2150         decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, subsize);
2151         if (has_rows)
2152           decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col, r,
2153                                        subsize);
2154         break;
2155       case PARTITION_VERT:
2156         decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, subsize);
2157         if (has_cols)
2158           decode_token_and_recon_block(pbi, xd, mi_row, mi_col + hbs, r,
2159                                        subsize);
2160         break;
2161       case PARTITION_SPLIT:
2162         detoken_and_recon_sb(pbi, xd, mi_row, mi_col, r, subsize);
2163         detoken_and_recon_sb(pbi, xd, mi_row, mi_col + hbs, r, subsize);
2164         detoken_and_recon_sb(pbi, xd, mi_row + hbs, mi_col, r, subsize);
2165         detoken_and_recon_sb(pbi, xd, mi_row + hbs, mi_col + hbs, r, subsize);
2166         break;
2167 #if CONFIG_EXT_PARTITION_TYPES
2168 #if CONFIG_EXT_PARTITION_TYPES_AB
2169 #error NC_MODE_INFO+MOTION_VAR not yet supported for new HORZ/VERT_AB partitions
2170 #endif
2171       case PARTITION_HORZ_A:
2172         decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, bsize2);
2173         decode_token_and_recon_block(pbi, xd, mi_row, mi_col + hbs, r, bsize2);
2174         decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col, r, subsize);
2175         break;
2176       case PARTITION_HORZ_B:
2177         decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, subsize);
2178         decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col, r, bsize2);
2179         decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col + hbs, r,
2180                                      bsize2);
2181         break;
2182       case PARTITION_VERT_A:
2183         decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, bsize2);
2184         decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col, r, bsize2);
2185         decode_token_and_recon_block(pbi, xd, mi_row, mi_col + hbs, r, subsize);
2186         break;
2187       case PARTITION_VERT_B:
2188         decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, subsize);
2189         decode_token_and_recon_block(pbi, xd, mi_row, mi_col + hbs, r, bsize2);
2190         decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col + hbs, r,
2191                                      bsize2);
2192         break;
2193 #endif
2194       default: assert(0 && "Invalid partition type");
2195     }
2196   }
2197 }
2198 #endif
2199 
decode_block(AV1Decoder * const pbi,MACROBLOCKD * const xd,int supertx_enabled,int mi_row,int mi_col,aom_reader * r,PARTITION_TYPE partition,BLOCK_SIZE bsize)2200 static void decode_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
2201 #if CONFIG_SUPERTX
2202                          int supertx_enabled,
2203 #endif  // CONFIG_SUPERTX
2204                          int mi_row, int mi_col, aom_reader *r,
2205 #if CONFIG_EXT_PARTITION_TYPES
2206                          PARTITION_TYPE partition,
2207 #endif  // CONFIG_EXT_PARTITION_TYPES
2208                          BLOCK_SIZE bsize) {
2209   decode_mbmi_block(pbi, xd,
2210 #if CONFIG_SUPERTX
2211                     supertx_enabled,
2212 #endif
2213                     mi_row, mi_col, r,
2214 #if CONFIG_EXT_PARTITION_TYPES
2215                     partition,
2216 #endif
2217                     bsize);
2218 
2219 #if !(CONFIG_MOTION_VAR && NC_MODE_INFO)
2220 #if CONFIG_SUPERTX
2221   if (!supertx_enabled)
2222 #endif  // CONFIG_SUPERTX
2223     decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, bsize);
2224 #endif
2225 }
2226 
read_partition(AV1_COMMON * cm,MACROBLOCKD * xd,int mi_row,int mi_col,aom_reader * r,int has_rows,int has_cols,BLOCK_SIZE bsize)2227 static PARTITION_TYPE read_partition(AV1_COMMON *cm, MACROBLOCKD *xd,
2228                                      int mi_row, int mi_col, aom_reader *r,
2229                                      int has_rows, int has_cols,
2230                                      BLOCK_SIZE bsize) {
2231 #if CONFIG_UNPOISON_PARTITION_CTX
2232   const int ctx =
2233       partition_plane_context(xd, mi_row, mi_col, has_rows, has_cols, bsize);
2234 #else
2235   const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
2236 #endif
2237   PARTITION_TYPE p;
2238   FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
2239   (void)cm;
2240 
2241   aom_cdf_prob *partition_cdf = (ctx >= 0) ? ec_ctx->partition_cdf[ctx] : NULL;
2242 
2243   if (has_rows && has_cols) {
2244 #if CONFIG_EXT_PARTITION_TYPES
2245     const int num_partition_types =
2246         (mi_width_log2_lookup[bsize] > mi_width_log2_lookup[BLOCK_8X8])
2247             ? EXT_PARTITION_TYPES
2248             : PARTITION_TYPES;
2249 #else
2250     const int num_partition_types = PARTITION_TYPES;
2251 #endif  // CONFIG_EXT_PARTITION_TYPES
2252     p = (PARTITION_TYPE)aom_read_symbol(r, partition_cdf, num_partition_types,
2253                                         ACCT_STR);
2254   } else if (!has_rows && has_cols) {
2255     assert(bsize > BLOCK_8X8);
2256     aom_cdf_prob cdf[2];
2257     partition_gather_vert_alike(cdf, partition_cdf);
2258     assert(cdf[1] == AOM_ICDF(CDF_PROB_TOP));
2259     p = aom_read_cdf(r, cdf, 2, ACCT_STR) ? PARTITION_SPLIT : PARTITION_HORZ;
2260     // gather cols
2261   } else if (has_rows && !has_cols) {
2262     assert(bsize > BLOCK_8X8);
2263     aom_cdf_prob cdf[2];
2264     partition_gather_horz_alike(cdf, partition_cdf);
2265     assert(cdf[1] == AOM_ICDF(CDF_PROB_TOP));
2266     p = aom_read_cdf(r, cdf, 2, ACCT_STR) ? PARTITION_SPLIT : PARTITION_VERT;
2267   } else {
2268     p = PARTITION_SPLIT;
2269   }
2270 
2271   return p;
2272 }
2273 
2274 #if CONFIG_SUPERTX
read_skip(AV1_COMMON * cm,const MACROBLOCKD * xd,int segment_id,aom_reader * r)2275 static int read_skip(AV1_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
2276                      aom_reader *r) {
2277   if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
2278     return 1;
2279   } else {
2280     const int ctx = av1_get_skip_context(xd);
2281 #if CONFIG_NEW_MULTISYMBOL
2282     FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
2283     const int skip = aom_read_symbol(r, ec_ctx->skip_cdfs[ctx], 2, ACCT_STR);
2284 #else
2285     const int skip = aom_read(r, cm->fc->skip_probs[ctx], ACCT_STR);
2286 #endif
2287     FRAME_COUNTS *counts = xd->counts;
2288     if (counts) ++counts->skip[ctx][skip];
2289     return skip;
2290   }
2291 }
2292 #endif  // CONFIG_SUPERTX
2293 
2294 // TODO(slavarnway): eliminate bsize and subsize in future commits
decode_partition(AV1Decoder * const pbi,MACROBLOCKD * const xd,int supertx_enabled,int mi_row,int mi_col,aom_reader * r,BLOCK_SIZE bsize)2295 static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
2296 #if CONFIG_SUPERTX
2297                              int supertx_enabled,
2298 #endif
2299                              int mi_row, int mi_col, aom_reader *r,
2300                              BLOCK_SIZE bsize) {
2301   AV1_COMMON *const cm = &pbi->common;
2302   const int num_8x8_wh = mi_size_wide[bsize];
2303   const int hbs = num_8x8_wh >> 1;
2304 #if CONFIG_EXT_PARTITION_TYPES && CONFIG_EXT_PARTITION_TYPES_AB
2305   const int qbs = num_8x8_wh >> 2;
2306 #endif
2307 #if CONFIG_CB4X4
2308   const int unify_bsize = 1;
2309 #else
2310   const int unify_bsize = 0;
2311 #endif
2312   PARTITION_TYPE partition;
2313   BLOCK_SIZE subsize;
2314 #if CONFIG_EXT_PARTITION_TYPES
2315   const int quarter_step = num_8x8_wh / 4;
2316   int i;
2317 #if !CONFIG_EXT_PARTITION_TYPES_AB
2318   BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT);
2319 #endif
2320 #endif
2321   const int has_rows = (mi_row + hbs) < cm->mi_rows;
2322   const int has_cols = (mi_col + hbs) < cm->mi_cols;
2323 #if CONFIG_SUPERTX
2324   const int read_token = !supertx_enabled;
2325   int skip = 0;
2326   TX_SIZE supertx_size = max_txsize_lookup[bsize];
2327   const TileInfo *const tile = &xd->tile;
2328   int txfm = DCT_DCT;
2329 #endif  // CONFIG_SUPERTX
2330 
2331   if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
2332 
2333   partition = (bsize < BLOCK_8X8) ? PARTITION_NONE
2334                                   : read_partition(cm, xd, mi_row, mi_col, r,
2335                                                    has_rows, has_cols, bsize);
2336   subsize = subsize_lookup[partition][bsize];  // get_subsize(bsize, partition);
2337 
2338   // Check the bitstream is conformant: if there is subsampling on the
2339   // chroma planes, subsize must subsample to a valid block size.
2340   const struct macroblockd_plane *const pd_u = &xd->plane[1];
2341   if (get_plane_block_size(subsize, pd_u) == BLOCK_INVALID) {
2342     aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
2343                        "Block size %dx%d invalid with this subsampling mode",
2344                        block_size_wide[subsize], block_size_high[subsize]);
2345   }
2346 
2347 #if CONFIG_PVQ
2348   assert(partition < PARTITION_TYPES);
2349   assert(subsize < BLOCK_SIZES_ALL);
2350 #endif
2351 #if CONFIG_SUPERTX
2352   if (!frame_is_intra_only(cm) && partition != PARTITION_NONE &&
2353       bsize <= MAX_SUPERTX_BLOCK_SIZE && !supertx_enabled && !xd->lossless[0]) {
2354     const int supertx_context = partition_supertx_context_lookup[partition];
2355     supertx_enabled = aom_read(
2356         r, cm->fc->supertx_prob[supertx_context][supertx_size], ACCT_STR);
2357     if (xd->counts)
2358       xd->counts->supertx[supertx_context][supertx_size][supertx_enabled]++;
2359 #if CONFIG_VAR_TX
2360     if (supertx_enabled) xd->supertx_size = supertx_size;
2361 #endif
2362   }
2363 #endif  // CONFIG_SUPERTX
2364 
2365 #if CONFIG_SUPERTX
2366 #define DEC_BLOCK_STX_ARG supertx_enabled,
2367 #else
2368 #define DEC_BLOCK_STX_ARG
2369 #endif
2370 #if CONFIG_EXT_PARTITION_TYPES
2371 #define DEC_BLOCK_EPT_ARG partition,
2372 #else
2373 #define DEC_BLOCK_EPT_ARG
2374 #endif
2375 #define DEC_BLOCK(db_r, db_c, db_subsize)                   \
2376   decode_block(pbi, xd, DEC_BLOCK_STX_ARG(db_r), (db_c), r, \
2377                DEC_BLOCK_EPT_ARG(db_subsize))
2378 #define DEC_PARTITION(db_r, db_c, db_subsize) \
2379   decode_partition(pbi, xd, DEC_BLOCK_STX_ARG(db_r), (db_c), r, (db_subsize))
2380 
2381   if (!hbs && !unify_bsize) {
2382     // calculate bmode block dimensions (log 2)
2383     xd->bmode_blocks_wl = 1 >> !!(partition & PARTITION_VERT);
2384     xd->bmode_blocks_hl = 1 >> !!(partition & PARTITION_HORZ);
2385     DEC_BLOCK(mi_row, mi_col, subsize);
2386   } else {
2387     switch (partition) {
2388       case PARTITION_NONE: DEC_BLOCK(mi_row, mi_col, subsize); break;
2389       case PARTITION_HORZ:
2390         DEC_BLOCK(mi_row, mi_col, subsize);
2391         if (has_rows) DEC_BLOCK(mi_row + hbs, mi_col, subsize);
2392         break;
2393       case PARTITION_VERT:
2394         DEC_BLOCK(mi_row, mi_col, subsize);
2395         if (has_cols) DEC_BLOCK(mi_row, mi_col + hbs, subsize);
2396         break;
2397       case PARTITION_SPLIT:
2398         DEC_PARTITION(mi_row, mi_col, subsize);
2399         DEC_PARTITION(mi_row, mi_col + hbs, subsize);
2400         DEC_PARTITION(mi_row + hbs, mi_col, subsize);
2401         DEC_PARTITION(mi_row + hbs, mi_col + hbs, subsize);
2402         break;
2403 #if CONFIG_EXT_PARTITION_TYPES
2404 #if CONFIG_EXT_PARTITION_TYPES_AB
2405       case PARTITION_HORZ_A:
2406         DEC_BLOCK(mi_row, mi_col, get_subsize(bsize, PARTITION_HORZ_4));
2407         DEC_BLOCK(mi_row + qbs, mi_col, get_subsize(bsize, PARTITION_HORZ_4));
2408         DEC_BLOCK(mi_row + hbs, mi_col, subsize);
2409         break;
2410       case PARTITION_HORZ_B:
2411         DEC_BLOCK(mi_row, mi_col, subsize);
2412         DEC_BLOCK(mi_row + hbs, mi_col, get_subsize(bsize, PARTITION_HORZ_4));
2413         if (mi_row + 3 * qbs < cm->mi_rows)
2414           DEC_BLOCK(mi_row + 3 * qbs, mi_col,
2415                     get_subsize(bsize, PARTITION_HORZ_4));
2416         break;
2417       case PARTITION_VERT_A:
2418         DEC_BLOCK(mi_row, mi_col, get_subsize(bsize, PARTITION_VERT_4));
2419         DEC_BLOCK(mi_row, mi_col + qbs, get_subsize(bsize, PARTITION_VERT_4));
2420         DEC_BLOCK(mi_row, mi_col + hbs, subsize);
2421         break;
2422       case PARTITION_VERT_B:
2423         DEC_BLOCK(mi_row, mi_col, subsize);
2424         DEC_BLOCK(mi_row, mi_col + hbs, get_subsize(bsize, PARTITION_VERT_4));
2425         if (mi_col + 3 * qbs < cm->mi_cols)
2426           DEC_BLOCK(mi_row, mi_col + 3 * qbs,
2427                     get_subsize(bsize, PARTITION_VERT_4));
2428         break;
2429 #else
2430       case PARTITION_HORZ_A:
2431         DEC_BLOCK(mi_row, mi_col, bsize2);
2432         DEC_BLOCK(mi_row, mi_col + hbs, bsize2);
2433         DEC_BLOCK(mi_row + hbs, mi_col, subsize);
2434         break;
2435       case PARTITION_HORZ_B:
2436         DEC_BLOCK(mi_row, mi_col, subsize);
2437         DEC_BLOCK(mi_row + hbs, mi_col, bsize2);
2438         DEC_BLOCK(mi_row + hbs, mi_col + hbs, bsize2);
2439         break;
2440       case PARTITION_VERT_A:
2441         DEC_BLOCK(mi_row, mi_col, bsize2);
2442         DEC_BLOCK(mi_row + hbs, mi_col, bsize2);
2443         DEC_BLOCK(mi_row, mi_col + hbs, subsize);
2444         break;
2445       case PARTITION_VERT_B:
2446         DEC_BLOCK(mi_row, mi_col, subsize);
2447         DEC_BLOCK(mi_row, mi_col + hbs, bsize2);
2448         DEC_BLOCK(mi_row + hbs, mi_col + hbs, bsize2);
2449         break;
2450 #endif
2451       case PARTITION_HORZ_4:
2452         for (i = 0; i < 4; ++i) {
2453           int this_mi_row = mi_row + i * quarter_step;
2454           if (i > 0 && this_mi_row >= cm->mi_rows) break;
2455           DEC_BLOCK(this_mi_row, mi_col, subsize);
2456         }
2457         break;
2458       case PARTITION_VERT_4:
2459         for (i = 0; i < 4; ++i) {
2460           int this_mi_col = mi_col + i * quarter_step;
2461           if (i > 0 && this_mi_col >= cm->mi_cols) break;
2462           DEC_BLOCK(mi_row, this_mi_col, subsize);
2463         }
2464         break;
2465 #endif  // CONFIG_EXT_PARTITION_TYPES
2466       default: assert(0 && "Invalid partition type");
2467     }
2468   }
2469 
2470 #undef DEC_PARTITION
2471 #undef DEC_BLOCK
2472 #undef DEC_BLOCK_EPT_ARG
2473 #undef DEC_BLOCK_STX_ARG
2474 
2475 #if CONFIG_SUPERTX
2476   if (supertx_enabled && read_token) {
2477     uint8_t *dst_buf[3];
2478     int dst_stride[3], i;
2479     int offset = mi_row * cm->mi_stride + mi_col;
2480 
2481     set_segment_id_supertx(cm, mi_row, mi_col, bsize);
2482 
2483     if (cm->delta_q_present_flag) {
2484       for (i = 0; i < MAX_SEGMENTS; i++) {
2485         int j;
2486         for (j = 0; j < MAX_MB_PLANE; ++j) {
2487           const int dc_delta_q = j == 0 ? cm->y_dc_delta_q : cm->uv_dc_delta_q;
2488           const int ac_delta_q = j == 0 ? 0 : cm->uv_ac_delta_q;
2489 
2490           xd->plane[j].seg_dequant[i][0] =
2491               av1_dc_quant(xd->current_qindex, dc_delta_q, cm->bit_depth);
2492           xd->plane[j].seg_dequant[i][1] =
2493               av1_ac_quant(xd->current_qindex, ac_delta_q, cm->bit_depth);
2494         }
2495       }
2496     }
2497 
2498     xd->mi = cm->mi_grid_visible + offset;
2499     xd->mi[0] = cm->mi + offset;
2500     set_mi_row_col(xd, tile, mi_row, mi_size_high[bsize], mi_col,
2501                    mi_size_wide[bsize],
2502 #if CONFIG_DEPENDENT_HORZTILES
2503                    cm->dependent_horz_tiles,
2504 #endif  // CONFIG_DEPENDENT_HORZTILES
2505                    cm->mi_rows, cm->mi_cols);
2506     set_skip_context(xd, mi_row, mi_col);
2507     skip = read_skip(cm, xd, xd->mi[0]->mbmi.segment_id_supertx, r);
2508     if (skip) {
2509       av1_reset_skip_context(xd, mi_row, mi_col, bsize);
2510     } else {
2511       FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
2512 #if CONFIG_EXT_TX
2513       if (get_ext_tx_types(supertx_size, bsize, 1, cm->reduced_tx_set_used) >
2514           1) {
2515         const int eset =
2516             get_ext_tx_set(supertx_size, bsize, 1, cm->reduced_tx_set_used);
2517         if (eset > 0) {
2518           const TxSetType tx_set_type = get_ext_tx_set_type(
2519               supertx_size, bsize, 1, cm->reduced_tx_set_used);
2520           const int packed_sym =
2521               aom_read_symbol(r, ec_ctx->inter_ext_tx_cdf[eset][supertx_size],
2522                               av1_num_ext_tx_set[tx_set_type], ACCT_STR);
2523           txfm = av1_ext_tx_inv[tx_set_type][packed_sym];
2524 #if CONFIG_ENTROPY_STATS
2525           if (xd->counts) ++xd->counts->inter_ext_tx[eset][supertx_size][txfm];
2526 #endif  // CONFIG_ENTROPY_STATS
2527         }
2528       }
2529 #else
2530       if (supertx_size < TX_32X32) {
2531         txfm = aom_read_symbol(r, ec_ctx->inter_ext_tx_cdf[supertx_size],
2532                                TX_TYPES, ACCT_STR);
2533 #if CONFIG_ENTROPY_STATS
2534         if (xd->counts) ++xd->counts->inter_ext_tx[supertx_size][txfm];
2535 #endif  // CONFIG_ENTROPY_STATS
2536       }
2537 #endif  // CONFIG_EXT_TX
2538     }
2539 
2540     av1_setup_dst_planes(xd->plane, bsize, get_frame_new_buffer(cm), mi_row,
2541                          mi_col);
2542     for (i = 0; i < MAX_MB_PLANE; i++) {
2543       dst_buf[i] = xd->plane[i].dst.buf;
2544       dst_stride[i] = xd->plane[i].dst.stride;
2545     }
2546     dec_predict_sb_complex(pbi, xd, tile, mi_row, mi_col, mi_row, mi_col, bsize,
2547                            bsize, dst_buf, dst_stride);
2548 
2549     if (!skip) {
2550       int eobtotal = 0;
2551       MB_MODE_INFO *mbmi;
2552       set_offsets_topblock(cm, xd, tile, bsize, mi_row, mi_col);
2553       mbmi = &xd->mi[0]->mbmi;
2554       mbmi->tx_type = txfm;
2555       assert(mbmi->segment_id_supertx != MAX_SEGMENTS);
2556       for (i = 0; i < MAX_MB_PLANE; ++i) {
2557         const struct macroblockd_plane *const pd = &xd->plane[i];
2558         int row, col;
2559         const TX_SIZE tx_size = av1_get_tx_size(i, xd);
2560         const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
2561         const int stepr = tx_size_high_unit[tx_size];
2562         const int stepc = tx_size_wide_unit[tx_size];
2563         const int max_blocks_wide = max_block_wide(xd, plane_bsize, i);
2564         const int max_blocks_high = max_block_high(xd, plane_bsize, i);
2565 
2566         for (row = 0; row < max_blocks_high; row += stepr)
2567           for (col = 0; col < max_blocks_wide; col += stepc)
2568             eobtotal += reconstruct_inter_block(
2569                 cm, xd, r, mbmi->segment_id_supertx, i, row, col, tx_size);
2570       }
2571       if ((unify_bsize || !(subsize < BLOCK_8X8)) && eobtotal == 0) skip = 1;
2572     }
2573     set_param_topblock(cm, xd, bsize, mi_row, mi_col, txfm, skip);
2574   }
2575 #endif  // CONFIG_SUPERTX
2576 
2577 #if CONFIG_EXT_PARTITION_TYPES
2578   update_ext_partition_context(xd, mi_row, mi_col, subsize, bsize, partition);
2579 #else
2580   // update partition context
2581   if (bsize >= BLOCK_8X8 &&
2582       (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
2583     update_partition_context(xd, mi_row, mi_col, subsize, bsize);
2584 #endif  // CONFIG_EXT_PARTITION_TYPES
2585 
2586 #if CONFIG_LPF_SB
2587   if (bsize == cm->sb_size) {
2588     int filt_lvl;
2589     if (mi_row == 0 && mi_col == 0) {
2590       filt_lvl = aom_read_literal(r, 6, ACCT_STR);
2591       cm->mi_grid_visible[0]->mbmi.reuse_sb_lvl = 0;
2592       cm->mi_grid_visible[0]->mbmi.delta = 0;
2593       cm->mi_grid_visible[0]->mbmi.sign = 0;
2594     } else {
2595       int prev_mi_row, prev_mi_col;
2596       if (mi_col - MAX_MIB_SIZE < 0) {
2597         prev_mi_row = mi_row - MAX_MIB_SIZE;
2598         prev_mi_col = mi_col;
2599       } else {
2600         prev_mi_row = mi_row;
2601         prev_mi_col = mi_col - MAX_MIB_SIZE;
2602       }
2603 
2604       MB_MODE_INFO *curr_mbmi =
2605           &cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi;
2606       MB_MODE_INFO *prev_mbmi =
2607           &cm->mi_grid_visible[prev_mi_row * cm->mi_stride + prev_mi_col]->mbmi;
2608       const uint8_t prev_lvl = prev_mbmi->filt_lvl;
2609 
2610       const int reuse_ctx = prev_mbmi->reuse_sb_lvl;
2611       const int reuse_prev_lvl = aom_read_symbol(
2612           r, xd->tile_ctx->lpf_reuse_cdf[reuse_ctx], 2, ACCT_STR);
2613       curr_mbmi->reuse_sb_lvl = reuse_prev_lvl;
2614 
2615       if (reuse_prev_lvl) {
2616         filt_lvl = prev_lvl;
2617         curr_mbmi->delta = 0;
2618         curr_mbmi->sign = 0;
2619       } else {
2620         const int delta_ctx = prev_mbmi->delta;
2621         unsigned int delta = aom_read_symbol(
2622             r, xd->tile_ctx->lpf_delta_cdf[delta_ctx], DELTA_RANGE, ACCT_STR);
2623         curr_mbmi->delta = delta;
2624         delta *= LPF_STEP;
2625 
2626         if (delta) {
2627           const int sign_ctx = prev_mbmi->sign;
2628           const int sign = aom_read_symbol(
2629               r, xd->tile_ctx->lpf_sign_cdf[reuse_ctx][sign_ctx], 2, ACCT_STR);
2630           curr_mbmi->sign = sign;
2631           filt_lvl = sign ? prev_lvl + delta : prev_lvl - delta;
2632         } else {
2633           filt_lvl = prev_lvl;
2634           curr_mbmi->sign = 0;
2635         }
2636       }
2637     }
2638 
2639     av1_loop_filter_sb_level_init(cm, mi_row, mi_col, filt_lvl);
2640   }
2641 #endif
2642 
2643 #if CONFIG_CDEF
2644   if (bsize == cm->sb_size) {
2645     int width_step = mi_size_wide[BLOCK_64X64];
2646     int height_step = mi_size_wide[BLOCK_64X64];
2647     int w, h;
2648     for (h = 0; (h < mi_size_high[cm->sb_size]) && (mi_row + h < cm->mi_rows);
2649          h += height_step) {
2650       for (w = 0; (w < mi_size_wide[cm->sb_size]) && (mi_col + w < cm->mi_cols);
2651            w += width_step) {
2652         if (!cm->all_lossless && !sb_all_skip(cm, mi_row + h, mi_col + w))
2653           cm->mi_grid_visible[(mi_row + h) * cm->mi_stride + (mi_col + w)]
2654               ->mbmi.cdef_strength =
2655               aom_read_literal(r, cm->cdef_bits, ACCT_STR);
2656         else
2657           cm->mi_grid_visible[(mi_row + h) * cm->mi_stride + (mi_col + w)]
2658               ->mbmi.cdef_strength = -1;
2659       }
2660     }
2661   }
2662 #endif  // CONFIG_CDEF
2663 #if CONFIG_LOOP_RESTORATION
2664   for (int plane = 0; plane < MAX_MB_PLANE; ++plane) {
2665     int rcol0, rcol1, rrow0, rrow1, nhtiles;
2666     if (av1_loop_restoration_corners_in_sb(cm, plane, mi_row, mi_col, bsize,
2667                                            &rcol0, &rcol1, &rrow0, &rrow1,
2668                                            &nhtiles)) {
2669       for (int rrow = rrow0; rrow < rrow1; ++rrow) {
2670         for (int rcol = rcol0; rcol < rcol1; ++rcol) {
2671           int rtile_idx = rcol + rrow * nhtiles;
2672           loop_restoration_read_sb_coeffs(cm, xd, r, plane, rtile_idx);
2673         }
2674       }
2675     }
2676   }
2677 #endif
2678 }
2679 
setup_bool_decoder(const uint8_t * data,const uint8_t * data_end,const size_t read_size,struct aom_internal_error_info * error_info,aom_reader * r,int window_size,aom_decrypt_cb decrypt_cb,void * decrypt_state)2680 static void setup_bool_decoder(const uint8_t *data, const uint8_t *data_end,
2681                                const size_t read_size,
2682                                struct aom_internal_error_info *error_info,
2683                                aom_reader *r,
2684 #if CONFIG_ANS && ANS_MAX_SYMBOLS
2685                                int window_size,
2686 #endif  // CONFIG_ANS && ANS_MAX_SYMBOLS
2687                                aom_decrypt_cb decrypt_cb, void *decrypt_state) {
2688   // Validate the calculated partition length. If the buffer
2689   // described by the partition can't be fully read, then restrict
2690   // it to the portion that can be (for EC mode) or throw an error.
2691   if (!read_is_valid(data, read_size, data_end))
2692     aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
2693                        "Truncated packet or corrupt tile length");
2694 
2695 #if CONFIG_ANS && ANS_MAX_SYMBOLS
2696   r->window_size = window_size;
2697 #endif
2698   if (aom_reader_init(r, data, read_size, decrypt_cb, decrypt_state))
2699     aom_internal_error(error_info, AOM_CODEC_MEM_ERROR,
2700                        "Failed to allocate bool decoder %d", 1);
2701 }
2702 
setup_segmentation(AV1_COMMON * const cm,struct aom_read_bit_buffer * rb)2703 static void setup_segmentation(AV1_COMMON *const cm,
2704                                struct aom_read_bit_buffer *rb) {
2705   struct segmentation *const seg = &cm->seg;
2706   int i, j;
2707 
2708   seg->update_map = 0;
2709   seg->update_data = 0;
2710   seg->temporal_update = 0;
2711 
2712   seg->enabled = aom_rb_read_bit(rb);
2713   if (!seg->enabled) return;
2714 
2715   // Segmentation map update
2716   if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
2717     seg->update_map = 1;
2718   } else {
2719     seg->update_map = aom_rb_read_bit(rb);
2720   }
2721   if (seg->update_map) {
2722     if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
2723       seg->temporal_update = 0;
2724     } else {
2725       seg->temporal_update = aom_rb_read_bit(rb);
2726     }
2727   }
2728 
2729   // Segmentation data update
2730   seg->update_data = aom_rb_read_bit(rb);
2731   if (seg->update_data) {
2732     seg->abs_delta = aom_rb_read_bit(rb);
2733 
2734     av1_clearall_segfeatures(seg);
2735 
2736     for (i = 0; i < MAX_SEGMENTS; i++) {
2737       for (j = 0; j < SEG_LVL_MAX; j++) {
2738         int data = 0;
2739         const int feature_enabled = aom_rb_read_bit(rb);
2740         if (feature_enabled) {
2741           av1_enable_segfeature(seg, i, j);
2742           data = decode_unsigned_max(rb, av1_seg_feature_data_max(j));
2743           if (av1_is_segfeature_signed(j))
2744             data = aom_rb_read_bit(rb) ? -data : data;
2745         }
2746         av1_set_segdata(seg, i, j, data);
2747       }
2748     }
2749   }
2750 }
2751 
2752 #if CONFIG_LOOP_RESTORATION
decode_restoration_mode(AV1_COMMON * cm,struct aom_read_bit_buffer * rb)2753 static void decode_restoration_mode(AV1_COMMON *cm,
2754                                     struct aom_read_bit_buffer *rb) {
2755   int p;
2756   RestorationInfo *rsi = &cm->rst_info[0];
2757   if (aom_rb_read_bit(rb)) {
2758     rsi->frame_restoration_type =
2759         aom_rb_read_bit(rb) ? RESTORE_SGRPROJ : RESTORE_WIENER;
2760   } else {
2761     rsi->frame_restoration_type =
2762         aom_rb_read_bit(rb) ? RESTORE_SWITCHABLE : RESTORE_NONE;
2763   }
2764   for (p = 1; p < MAX_MB_PLANE; ++p) {
2765     rsi = &cm->rst_info[p];
2766     if (aom_rb_read_bit(rb)) {
2767       rsi->frame_restoration_type =
2768           aom_rb_read_bit(rb) ? RESTORE_SGRPROJ : RESTORE_WIENER;
2769     } else {
2770       rsi->frame_restoration_type = RESTORE_NONE;
2771     }
2772   }
2773 
2774   cm->rst_info[0].restoration_tilesize = RESTORATION_TILESIZE_MAX;
2775   cm->rst_info[1].restoration_tilesize = RESTORATION_TILESIZE_MAX;
2776   cm->rst_info[2].restoration_tilesize = RESTORATION_TILESIZE_MAX;
2777   if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
2778       cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
2779       cm->rst_info[2].frame_restoration_type != RESTORE_NONE) {
2780     rsi = &cm->rst_info[0];
2781     rsi->restoration_tilesize >>= aom_rb_read_bit(rb);
2782     if (rsi->restoration_tilesize != RESTORATION_TILESIZE_MAX) {
2783       rsi->restoration_tilesize >>= aom_rb_read_bit(rb);
2784     }
2785   }
2786   int s = AOMMIN(cm->subsampling_x, cm->subsampling_y);
2787   if (s && (cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
2788             cm->rst_info[2].frame_restoration_type != RESTORE_NONE)) {
2789     cm->rst_info[1].restoration_tilesize =
2790         cm->rst_info[0].restoration_tilesize >> (aom_rb_read_bit(rb) * s);
2791   } else {
2792     cm->rst_info[1].restoration_tilesize = cm->rst_info[0].restoration_tilesize;
2793   }
2794   cm->rst_info[2].restoration_tilesize = cm->rst_info[1].restoration_tilesize;
2795 
2796   cm->rst_info[0].procunit_width = cm->rst_info[0].procunit_height =
2797       RESTORATION_PROC_UNIT_SIZE;
2798   cm->rst_info[1].procunit_width = cm->rst_info[2].procunit_width =
2799       RESTORATION_PROC_UNIT_SIZE >> cm->subsampling_x;
2800   cm->rst_info[1].procunit_height = cm->rst_info[2].procunit_height =
2801       RESTORATION_PROC_UNIT_SIZE >> cm->subsampling_y;
2802 }
2803 
read_wiener_filter(int wiener_win,WienerInfo * wiener_info,WienerInfo * ref_wiener_info,aom_reader * rb)2804 static void read_wiener_filter(int wiener_win, WienerInfo *wiener_info,
2805                                WienerInfo *ref_wiener_info, aom_reader *rb) {
2806   if (wiener_win == WIENER_WIN)
2807     wiener_info->vfilter[0] = wiener_info->vfilter[WIENER_WIN - 1] =
2808         aom_read_primitive_refsubexpfin(
2809             rb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1,
2810             WIENER_FILT_TAP0_SUBEXP_K,
2811             ref_wiener_info->vfilter[0] - WIENER_FILT_TAP0_MINV, ACCT_STR) +
2812         WIENER_FILT_TAP0_MINV;
2813   else
2814     wiener_info->vfilter[0] = wiener_info->vfilter[WIENER_WIN - 1] = 0;
2815   wiener_info->vfilter[1] = wiener_info->vfilter[WIENER_WIN - 2] =
2816       aom_read_primitive_refsubexpfin(
2817           rb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1,
2818           WIENER_FILT_TAP1_SUBEXP_K,
2819           ref_wiener_info->vfilter[1] - WIENER_FILT_TAP1_MINV, ACCT_STR) +
2820       WIENER_FILT_TAP1_MINV;
2821   wiener_info->vfilter[2] = wiener_info->vfilter[WIENER_WIN - 3] =
2822       aom_read_primitive_refsubexpfin(
2823           rb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1,
2824           WIENER_FILT_TAP2_SUBEXP_K,
2825           ref_wiener_info->vfilter[2] - WIENER_FILT_TAP2_MINV, ACCT_STR) +
2826       WIENER_FILT_TAP2_MINV;
2827   // The central element has an implicit +WIENER_FILT_STEP
2828   wiener_info->vfilter[WIENER_HALFWIN] =
2829       -2 * (wiener_info->vfilter[0] + wiener_info->vfilter[1] +
2830             wiener_info->vfilter[2]);
2831 
2832   if (wiener_win == WIENER_WIN)
2833     wiener_info->hfilter[0] = wiener_info->hfilter[WIENER_WIN - 1] =
2834         aom_read_primitive_refsubexpfin(
2835             rb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1,
2836             WIENER_FILT_TAP0_SUBEXP_K,
2837             ref_wiener_info->hfilter[0] - WIENER_FILT_TAP0_MINV, ACCT_STR) +
2838         WIENER_FILT_TAP0_MINV;
2839   else
2840     wiener_info->hfilter[0] = wiener_info->hfilter[WIENER_WIN - 1] = 0;
2841   wiener_info->hfilter[1] = wiener_info->hfilter[WIENER_WIN - 2] =
2842       aom_read_primitive_refsubexpfin(
2843           rb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1,
2844           WIENER_FILT_TAP1_SUBEXP_K,
2845           ref_wiener_info->hfilter[1] - WIENER_FILT_TAP1_MINV, ACCT_STR) +
2846       WIENER_FILT_TAP1_MINV;
2847   wiener_info->hfilter[2] = wiener_info->hfilter[WIENER_WIN - 3] =
2848       aom_read_primitive_refsubexpfin(
2849           rb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1,
2850           WIENER_FILT_TAP2_SUBEXP_K,
2851           ref_wiener_info->hfilter[2] - WIENER_FILT_TAP2_MINV, ACCT_STR) +
2852       WIENER_FILT_TAP2_MINV;
2853   // The central element has an implicit +WIENER_FILT_STEP
2854   wiener_info->hfilter[WIENER_HALFWIN] =
2855       -2 * (wiener_info->hfilter[0] + wiener_info->hfilter[1] +
2856             wiener_info->hfilter[2]);
2857   memcpy(ref_wiener_info, wiener_info, sizeof(*wiener_info));
2858 }
2859 
read_sgrproj_filter(SgrprojInfo * sgrproj_info,SgrprojInfo * ref_sgrproj_info,aom_reader * rb)2860 static void read_sgrproj_filter(SgrprojInfo *sgrproj_info,
2861                                 SgrprojInfo *ref_sgrproj_info, aom_reader *rb) {
2862   sgrproj_info->ep = aom_read_literal(rb, SGRPROJ_PARAMS_BITS, ACCT_STR);
2863   sgrproj_info->xqd[0] =
2864       aom_read_primitive_refsubexpfin(
2865           rb, SGRPROJ_PRJ_MAX0 - SGRPROJ_PRJ_MIN0 + 1, SGRPROJ_PRJ_SUBEXP_K,
2866           ref_sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0, ACCT_STR) +
2867       SGRPROJ_PRJ_MIN0;
2868   sgrproj_info->xqd[1] =
2869       aom_read_primitive_refsubexpfin(
2870           rb, SGRPROJ_PRJ_MAX1 - SGRPROJ_PRJ_MIN1 + 1, SGRPROJ_PRJ_SUBEXP_K,
2871           ref_sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1, ACCT_STR) +
2872       SGRPROJ_PRJ_MIN1;
2873   memcpy(ref_sgrproj_info, sgrproj_info, sizeof(*sgrproj_info));
2874 }
2875 
loop_restoration_read_sb_coeffs(const AV1_COMMON * const cm,MACROBLOCKD * xd,aom_reader * const r,int plane,int rtile_idx)2876 static void loop_restoration_read_sb_coeffs(const AV1_COMMON *const cm,
2877                                             MACROBLOCKD *xd,
2878                                             aom_reader *const r, int plane,
2879                                             int rtile_idx) {
2880   const RestorationInfo *rsi = cm->rst_info + plane;
2881   if (rsi->frame_restoration_type == RESTORE_NONE) return;
2882 
2883   const int wiener_win = (plane > 0) ? WIENER_WIN_CHROMA : WIENER_WIN;
2884   WienerInfo *wiener_info = xd->wiener_info + plane;
2885   SgrprojInfo *sgrproj_info = xd->sgrproj_info + plane;
2886 
2887   if (rsi->frame_restoration_type == RESTORE_SWITCHABLE) {
2888     assert(plane == 0);
2889     rsi->restoration_type[rtile_idx] =
2890         aom_read_tree(r, av1_switchable_restore_tree,
2891                       cm->fc->switchable_restore_prob, ACCT_STR);
2892 
2893     if (rsi->restoration_type[rtile_idx] == RESTORE_WIENER) {
2894       read_wiener_filter(wiener_win, &rsi->wiener_info[rtile_idx], wiener_info,
2895                          r);
2896     } else if (rsi->restoration_type[rtile_idx] == RESTORE_SGRPROJ) {
2897       read_sgrproj_filter(&rsi->sgrproj_info[rtile_idx], sgrproj_info, r);
2898     }
2899   } else if (rsi->frame_restoration_type == RESTORE_WIENER) {
2900     if (aom_read(r, RESTORE_NONE_WIENER_PROB, ACCT_STR)) {
2901       rsi->restoration_type[rtile_idx] = RESTORE_WIENER;
2902       read_wiener_filter(wiener_win, &rsi->wiener_info[rtile_idx], wiener_info,
2903                          r);
2904     } else {
2905       rsi->restoration_type[rtile_idx] = RESTORE_NONE;
2906     }
2907   } else if (rsi->frame_restoration_type == RESTORE_SGRPROJ) {
2908     if (aom_read(r, RESTORE_NONE_SGRPROJ_PROB, ACCT_STR)) {
2909       rsi->restoration_type[rtile_idx] = RESTORE_SGRPROJ;
2910       read_sgrproj_filter(&rsi->sgrproj_info[rtile_idx], sgrproj_info, r);
2911     } else {
2912       rsi->restoration_type[rtile_idx] = RESTORE_NONE;
2913     }
2914   }
2915 }
2916 #endif  // CONFIG_LOOP_RESTORATION
2917 
setup_loopfilter(AV1_COMMON * cm,struct aom_read_bit_buffer * rb)2918 static void setup_loopfilter(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
2919   struct loopfilter *lf = &cm->lf;
2920 #if !CONFIG_LPF_SB
2921 #if CONFIG_LOOPFILTER_LEVEL
2922   lf->filter_level[0] = aom_rb_read_literal(rb, 6);
2923   lf->filter_level[1] = aom_rb_read_literal(rb, 6);
2924   if (lf->filter_level[0] || lf->filter_level[1]) {
2925     lf->filter_level_u = aom_rb_read_literal(rb, 6);
2926     lf->filter_level_v = aom_rb_read_literal(rb, 6);
2927   }
2928 #else
2929   lf->filter_level = aom_rb_read_literal(rb, 6);
2930 #endif
2931 #endif  // CONFIG_LPF_SB
2932   lf->sharpness_level = aom_rb_read_literal(rb, 3);
2933 
2934   // Read in loop filter deltas applied at the MB level based on mode or ref
2935   // frame.
2936   lf->mode_ref_delta_update = 0;
2937 
2938   lf->mode_ref_delta_enabled = aom_rb_read_bit(rb);
2939   if (lf->mode_ref_delta_enabled) {
2940     lf->mode_ref_delta_update = aom_rb_read_bit(rb);
2941     if (lf->mode_ref_delta_update) {
2942       int i;
2943 
2944       for (i = 0; i < TOTAL_REFS_PER_FRAME; i++)
2945         if (aom_rb_read_bit(rb))
2946           lf->ref_deltas[i] = aom_rb_read_inv_signed_literal(rb, 6);
2947 
2948       for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
2949         if (aom_rb_read_bit(rb))
2950           lf->mode_deltas[i] = aom_rb_read_inv_signed_literal(rb, 6);
2951     }
2952   }
2953 }
2954 
2955 #if CONFIG_CDEF
setup_cdef(AV1_COMMON * cm,struct aom_read_bit_buffer * rb)2956 static void setup_cdef(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
2957   int i;
2958 #if CONFIG_CDEF_SINGLEPASS
2959   cm->cdef_pri_damping = cm->cdef_sec_damping = aom_rb_read_literal(rb, 2) + 3;
2960 #else
2961   cm->cdef_pri_damping = aom_rb_read_literal(rb, 1) + 5;
2962   cm->cdef_sec_damping = aom_rb_read_literal(rb, 2) + 3;
2963 #endif
2964   cm->cdef_bits = aom_rb_read_literal(rb, 2);
2965   cm->nb_cdef_strengths = 1 << cm->cdef_bits;
2966   for (i = 0; i < cm->nb_cdef_strengths; i++) {
2967     cm->cdef_strengths[i] = aom_rb_read_literal(rb, CDEF_STRENGTH_BITS);
2968     cm->cdef_uv_strengths[i] = cm->subsampling_x == cm->subsampling_y
2969                                    ? aom_rb_read_literal(rb, CDEF_STRENGTH_BITS)
2970                                    : 0;
2971   }
2972 }
2973 #endif  // CONFIG_CDEF
2974 
read_delta_q(struct aom_read_bit_buffer * rb)2975 static INLINE int read_delta_q(struct aom_read_bit_buffer *rb) {
2976   return aom_rb_read_bit(rb) ? aom_rb_read_inv_signed_literal(rb, 6) : 0;
2977 }
2978 
setup_quantization(AV1_COMMON * const cm,struct aom_read_bit_buffer * rb)2979 static void setup_quantization(AV1_COMMON *const cm,
2980                                struct aom_read_bit_buffer *rb) {
2981   cm->base_qindex = aom_rb_read_literal(rb, QINDEX_BITS);
2982   cm->y_dc_delta_q = read_delta_q(rb);
2983   cm->uv_dc_delta_q = read_delta_q(rb);
2984   cm->uv_ac_delta_q = read_delta_q(rb);
2985   cm->dequant_bit_depth = cm->bit_depth;
2986 #if CONFIG_AOM_QM
2987   cm->using_qmatrix = aom_rb_read_bit(rb);
2988   if (cm->using_qmatrix) {
2989     cm->min_qmlevel = aom_rb_read_literal(rb, QM_LEVEL_BITS);
2990     cm->max_qmlevel = aom_rb_read_literal(rb, QM_LEVEL_BITS);
2991   } else {
2992     cm->min_qmlevel = 0;
2993     cm->max_qmlevel = 0;
2994   }
2995 #endif
2996 }
2997 
2998 // Build y/uv dequant values based on segmentation.
setup_segmentation_dequant(AV1_COMMON * const cm)2999 static void setup_segmentation_dequant(AV1_COMMON *const cm) {
3000 #if CONFIG_AOM_QM
3001   const int using_qm = cm->using_qmatrix;
3002   const int minqm = cm->min_qmlevel;
3003   const int maxqm = cm->max_qmlevel;
3004 #endif
3005   // When segmentation is disabled, only the first value is used.  The
3006   // remaining are don't cares.
3007   const int max_segments = cm->seg.enabled ? MAX_SEGMENTS : 1;
3008   for (int i = 0; i < max_segments; ++i) {
3009     const int qindex = av1_get_qindex(&cm->seg, i, cm->base_qindex);
3010     cm->y_dequant[i][0] = av1_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
3011     cm->y_dequant[i][1] = av1_ac_quant(qindex, 0, cm->bit_depth);
3012     cm->uv_dequant[i][0] =
3013         av1_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
3014     cm->uv_dequant[i][1] =
3015         av1_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
3016 #if CONFIG_AOM_QM
3017     const int lossless = qindex == 0 && cm->y_dc_delta_q == 0 &&
3018                          cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
3019     // NB: depends on base index so there is only 1 set per frame
3020     // No quant weighting when lossless or signalled not using QM
3021     const int qmlevel = (lossless || using_qm == 0)
3022                             ? NUM_QM_LEVELS - 1
3023                             : aom_get_qmlevel(cm->base_qindex, minqm, maxqm);
3024     for (int j = 0; j < TX_SIZES_ALL; ++j) {
3025       cm->y_iqmatrix[i][1][j] = aom_iqmatrix(cm, qmlevel, 0, j, 1);
3026       cm->y_iqmatrix[i][0][j] = aom_iqmatrix(cm, qmlevel, 0, j, 0);
3027       cm->uv_iqmatrix[i][1][j] = aom_iqmatrix(cm, qmlevel, 1, j, 1);
3028       cm->uv_iqmatrix[i][0][j] = aom_iqmatrix(cm, qmlevel, 1, j, 0);
3029     }
3030 #endif  // CONFIG_AOM_QM
3031 #if CONFIG_NEW_QUANT
3032     for (int dq = 0; dq < QUANT_PROFILES; dq++) {
3033       for (int b = 0; b < COEF_BANDS; ++b) {
3034         av1_get_dequant_val_nuq(cm->y_dequant[i][b != 0], b,
3035                                 cm->y_dequant_nuq[i][dq][b], NULL, dq);
3036         av1_get_dequant_val_nuq(cm->uv_dequant[i][b != 0], b,
3037                                 cm->uv_dequant_nuq[i][dq][b], NULL, dq);
3038       }
3039     }
3040 #endif  //  CONFIG_NEW_QUANT
3041   }
3042 }
3043 
read_frame_interp_filter(struct aom_read_bit_buffer * rb)3044 static InterpFilter read_frame_interp_filter(struct aom_read_bit_buffer *rb) {
3045   return aom_rb_read_bit(rb) ? SWITCHABLE
3046                              : aom_rb_read_literal(rb, LOG_SWITCHABLE_FILTERS);
3047 }
3048 
setup_render_size(AV1_COMMON * cm,struct aom_read_bit_buffer * rb)3049 static void setup_render_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
3050 #if CONFIG_FRAME_SUPERRES
3051   cm->render_width = cm->superres_upscaled_width;
3052   cm->render_height = cm->superres_upscaled_height;
3053 #else
3054   cm->render_width = cm->width;
3055   cm->render_height = cm->height;
3056 #endif  // CONFIG_FRAME_SUPERRES
3057   if (aom_rb_read_bit(rb))
3058     av1_read_frame_size(rb, &cm->render_width, &cm->render_height);
3059 }
3060 
3061 #if CONFIG_FRAME_SUPERRES
3062 // TODO(afergs): make "struct aom_read_bit_buffer *const rb"?
setup_superres(AV1_COMMON * const cm,struct aom_read_bit_buffer * rb,int * width,int * height)3063 static void setup_superres(AV1_COMMON *const cm, struct aom_read_bit_buffer *rb,
3064                            int *width, int *height) {
3065   cm->superres_upscaled_width = *width;
3066   cm->superres_upscaled_height = *height;
3067   if (aom_rb_read_bit(rb)) {
3068     cm->superres_scale_denominator =
3069         (uint8_t)aom_rb_read_literal(rb, SUPERRES_SCALE_BITS);
3070     cm->superres_scale_denominator += SUPERRES_SCALE_DENOMINATOR_MIN;
3071     // Don't edit cm->width or cm->height directly, or the buffers won't get
3072     // resized correctly
3073     av1_calculate_scaled_superres_size(width, height,
3074                                        cm->superres_scale_denominator);
3075   } else {
3076     // 1:1 scaling - ie. no scaling, scale not provided
3077     cm->superres_scale_denominator = SCALE_NUMERATOR;
3078   }
3079 }
3080 #endif  // CONFIG_FRAME_SUPERRES
3081 
resize_context_buffers(AV1_COMMON * cm,int width,int height)3082 static void resize_context_buffers(AV1_COMMON *cm, int width, int height) {
3083 #if CONFIG_SIZE_LIMIT
3084   if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
3085     aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
3086                        "Dimensions of %dx%d beyond allowed size of %dx%d.",
3087                        width, height, DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT);
3088 #endif
3089   if (cm->width != width || cm->height != height) {
3090     const int new_mi_rows =
3091         ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
3092     const int new_mi_cols =
3093         ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
3094 
3095     // Allocations in av1_alloc_context_buffers() depend on individual
3096     // dimensions as well as the overall size.
3097     if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
3098       if (av1_alloc_context_buffers(cm, width, height))
3099         aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
3100                            "Failed to allocate context buffers");
3101     } else {
3102       av1_set_mb_mi(cm, width, height);
3103     }
3104     av1_init_context_buffers(cm);
3105     cm->width = width;
3106     cm->height = height;
3107   }
3108 
3109   ensure_mv_buffer(cm->cur_frame, cm);
3110   cm->cur_frame->width = cm->width;
3111   cm->cur_frame->height = cm->height;
3112 }
3113 
setup_frame_size(AV1_COMMON * cm,struct aom_read_bit_buffer * rb)3114 static void setup_frame_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
3115   int width, height;
3116   BufferPool *const pool = cm->buffer_pool;
3117   av1_read_frame_size(rb, &width, &height);
3118 #if CONFIG_FRAME_SUPERRES
3119   setup_superres(cm, rb, &width, &height);
3120 #endif  // CONFIG_FRAME_SUPERRES
3121   setup_render_size(cm, rb);
3122   resize_context_buffers(cm, width, height);
3123 
3124   lock_buffer_pool(pool);
3125   if (aom_realloc_frame_buffer(
3126           get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
3127           cm->subsampling_y,
3128 #if CONFIG_HIGHBITDEPTH
3129           cm->use_highbitdepth,
3130 #endif
3131           AOM_BORDER_IN_PIXELS, cm->byte_alignment,
3132           &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
3133           pool->cb_priv)) {
3134     unlock_buffer_pool(pool);
3135     aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
3136                        "Failed to allocate frame buffer");
3137   }
3138   unlock_buffer_pool(pool);
3139 
3140   pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
3141   pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
3142   pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
3143   pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
3144 #if CONFIG_COLORSPACE_HEADERS
3145   pool->frame_bufs[cm->new_fb_idx].buf.transfer_function =
3146       cm->transfer_function;
3147   pool->frame_bufs[cm->new_fb_idx].buf.chroma_sample_position =
3148       cm->chroma_sample_position;
3149 #endif
3150   pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range;
3151   pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
3152   pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
3153 }
3154 
setup_sb_size(AV1_COMMON * cm,struct aom_read_bit_buffer * rb)3155 static void setup_sb_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
3156   (void)rb;
3157 #if CONFIG_EXT_PARTITION
3158   set_sb_size(cm, aom_rb_read_bit(rb) ? BLOCK_128X128 : BLOCK_64X64);
3159 #else
3160   set_sb_size(cm, BLOCK_64X64);
3161 #endif  // CONFIG_EXT_PARTITION
3162 }
3163 
valid_ref_frame_img_fmt(aom_bit_depth_t ref_bit_depth,int ref_xss,int ref_yss,aom_bit_depth_t this_bit_depth,int this_xss,int this_yss)3164 static INLINE int valid_ref_frame_img_fmt(aom_bit_depth_t ref_bit_depth,
3165                                           int ref_xss, int ref_yss,
3166                                           aom_bit_depth_t this_bit_depth,
3167                                           int this_xss, int this_yss) {
3168   return ref_bit_depth == this_bit_depth && ref_xss == this_xss &&
3169          ref_yss == this_yss;
3170 }
3171 
setup_frame_size_with_refs(AV1_COMMON * cm,struct aom_read_bit_buffer * rb)3172 static void setup_frame_size_with_refs(AV1_COMMON *cm,
3173                                        struct aom_read_bit_buffer *rb) {
3174   int width, height;
3175   int found = 0, i;
3176   int has_valid_ref_frame = 0;
3177   BufferPool *const pool = cm->buffer_pool;
3178   for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
3179     if (aom_rb_read_bit(rb)) {
3180       YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
3181       width = buf->y_crop_width;
3182       height = buf->y_crop_height;
3183       cm->render_width = buf->render_width;
3184       cm->render_height = buf->render_height;
3185 #if CONFIG_FRAME_SUPERRES
3186       setup_superres(cm, rb, &width, &height);
3187 #endif  // CONFIG_FRAME_SUPERRES
3188       found = 1;
3189       break;
3190     }
3191   }
3192 
3193   if (!found) {
3194     av1_read_frame_size(rb, &width, &height);
3195 #if CONFIG_FRAME_SUPERRES
3196     setup_superres(cm, rb, &width, &height);
3197 #endif  // CONFIG_FRAME_SUPERRES
3198     setup_render_size(cm, rb);
3199   }
3200 
3201   if (width <= 0 || height <= 0)
3202     aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
3203                        "Invalid frame size");
3204 
3205   // Check to make sure at least one of frames that this frame references
3206   // has valid dimensions.
3207   for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
3208     RefBuffer *const ref_frame = &cm->frame_refs[i];
3209     has_valid_ref_frame |=
3210         valid_ref_frame_size(ref_frame->buf->y_crop_width,
3211                              ref_frame->buf->y_crop_height, width, height);
3212   }
3213   if (!has_valid_ref_frame)
3214     aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
3215                        "Referenced frame has invalid size");
3216   for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
3217     RefBuffer *const ref_frame = &cm->frame_refs[i];
3218     if (!valid_ref_frame_img_fmt(ref_frame->buf->bit_depth,
3219                                  ref_frame->buf->subsampling_x,
3220                                  ref_frame->buf->subsampling_y, cm->bit_depth,
3221                                  cm->subsampling_x, cm->subsampling_y))
3222       aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
3223                          "Referenced frame has incompatible color format");
3224   }
3225 
3226   resize_context_buffers(cm, width, height);
3227 
3228   lock_buffer_pool(pool);
3229   if (aom_realloc_frame_buffer(
3230           get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
3231           cm->subsampling_y,
3232 #if CONFIG_HIGHBITDEPTH
3233           cm->use_highbitdepth,
3234 #endif
3235           AOM_BORDER_IN_PIXELS, cm->byte_alignment,
3236           &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
3237           pool->cb_priv)) {
3238     unlock_buffer_pool(pool);
3239     aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
3240                        "Failed to allocate frame buffer");
3241   }
3242   unlock_buffer_pool(pool);
3243 
3244   pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
3245   pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
3246   pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
3247   pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
3248 #if CONFIG_COLORSPACE_HEADERS
3249   pool->frame_bufs[cm->new_fb_idx].buf.transfer_function =
3250       cm->transfer_function;
3251   pool->frame_bufs[cm->new_fb_idx].buf.chroma_sample_position =
3252       cm->chroma_sample_position;
3253 #endif
3254   pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range;
3255   pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width;
3256   pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
3257 }
3258 
read_tile_group_range(AV1Decoder * pbi,struct aom_read_bit_buffer * const rb)3259 static void read_tile_group_range(AV1Decoder *pbi,
3260                                   struct aom_read_bit_buffer *const rb) {
3261   AV1_COMMON *const cm = &pbi->common;
3262   const int num_bits = cm->log2_tile_rows + cm->log2_tile_cols;
3263   const int num_tiles =
3264       cm->tile_rows * cm->tile_cols;  // Note: May be < (1<<num_bits)
3265   pbi->tg_start = aom_rb_read_literal(rb, num_bits);
3266   pbi->tg_size = 1 + aom_rb_read_literal(rb, num_bits);
3267   if (pbi->tg_start + pbi->tg_size > num_tiles)
3268     aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
3269                        "Tile group extends past last tile in frame");
3270 }
3271 
3272 #if CONFIG_MAX_TILE
3273 
3274 // Same function as av1_read_uniform but reading from uncompresses header wb
rb_read_uniform(struct aom_read_bit_buffer * const rb,int n)3275 static int rb_read_uniform(struct aom_read_bit_buffer *const rb, int n) {
3276   const int l = get_unsigned_bits(n);
3277   const int m = (1 << l) - n;
3278   const int v = aom_rb_read_literal(rb, l - 1);
3279   assert(l != 0);
3280   if (v < m)
3281     return v;
3282   else
3283     return (v << 1) - m + aom_rb_read_literal(rb, 1);
3284 }
3285 
read_tile_info_max_tile(AV1_COMMON * const cm,struct aom_read_bit_buffer * const rb)3286 static void read_tile_info_max_tile(AV1_COMMON *const cm,
3287                                     struct aom_read_bit_buffer *const rb) {
3288   int width_mi = ALIGN_POWER_OF_TWO(cm->mi_cols, MAX_MIB_SIZE_LOG2);
3289   int height_mi = ALIGN_POWER_OF_TWO(cm->mi_rows, MAX_MIB_SIZE_LOG2);
3290   int width_sb = width_mi >> MAX_MIB_SIZE_LOG2;
3291   int height_sb = height_mi >> MAX_MIB_SIZE_LOG2;
3292   int start_sb, size_sb, i;
3293 
3294   av1_get_tile_limits(cm);
3295   cm->uniform_tile_spacing_flag = aom_rb_read_bit(rb);
3296 
3297   // Read tile columns
3298   if (cm->uniform_tile_spacing_flag) {
3299     cm->log2_tile_cols = cm->min_log2_tile_cols;
3300     while (cm->log2_tile_cols < cm->max_log2_tile_cols) {
3301       if (!aom_rb_read_bit(rb)) {
3302         break;
3303       }
3304       cm->log2_tile_cols++;
3305     }
3306   } else {
3307     for (i = 0, start_sb = 0; width_sb > 0 && i < MAX_TILE_COLS; i++) {
3308       size_sb = 1 + rb_read_uniform(rb, AOMMIN(width_sb, MAX_TILE_WIDTH_SB));
3309       cm->tile_col_start_sb[i] = start_sb;
3310       start_sb += size_sb;
3311       width_sb -= size_sb;
3312     }
3313     cm->tile_cols = i;
3314     cm->tile_col_start_sb[i] = start_sb + width_sb;
3315   }
3316   av1_calculate_tile_cols(cm);
3317 
3318   // Read tile rows
3319   if (cm->uniform_tile_spacing_flag) {
3320     cm->log2_tile_rows = cm->min_log2_tile_rows;
3321     while (cm->log2_tile_rows < cm->max_log2_tile_rows) {
3322       if (!aom_rb_read_bit(rb)) {
3323         break;
3324       }
3325       cm->log2_tile_rows++;
3326     }
3327   } else {
3328     for (i = 0, start_sb = 0; height_sb > 0 && i < MAX_TILE_ROWS; i++) {
3329       size_sb =
3330           1 + rb_read_uniform(rb, AOMMIN(height_sb, cm->max_tile_height_sb));
3331       cm->tile_row_start_sb[i] = start_sb;
3332       start_sb += size_sb;
3333       height_sb -= size_sb;
3334     }
3335     cm->tile_rows = i;
3336     cm->tile_row_start_sb[i] = start_sb + height_sb;
3337   }
3338   av1_calculate_tile_rows(cm);
3339 }
3340 #endif
3341 
read_tile_info(AV1Decoder * const pbi,struct aom_read_bit_buffer * const rb)3342 static void read_tile_info(AV1Decoder *const pbi,
3343                            struct aom_read_bit_buffer *const rb) {
3344   AV1_COMMON *const cm = &pbi->common;
3345 #if CONFIG_EXT_TILE
3346   cm->single_tile_decoding = 0;
3347   if (cm->large_scale_tile) {
3348     struct loopfilter *lf = &cm->lf;
3349 
3350     // Figure out single_tile_decoding by loopfilter_level.
3351     cm->single_tile_decoding = (!lf->filter_level) ? 1 : 0;
3352 // Read the tile width/height
3353 #if CONFIG_EXT_PARTITION
3354     if (cm->sb_size == BLOCK_128X128) {
3355       cm->tile_width = aom_rb_read_literal(rb, 5) + 1;
3356       cm->tile_height = aom_rb_read_literal(rb, 5) + 1;
3357     } else {
3358 #endif  // CONFIG_EXT_PARTITION
3359       cm->tile_width = aom_rb_read_literal(rb, 6) + 1;
3360       cm->tile_height = aom_rb_read_literal(rb, 6) + 1;
3361 #if CONFIG_EXT_PARTITION
3362     }
3363 #endif  // CONFIG_EXT_PARTITION
3364 
3365 #if CONFIG_LOOPFILTERING_ACROSS_TILES
3366     cm->loop_filter_across_tiles_enabled = aom_rb_read_bit(rb);
3367 #endif  // CONFIG_LOOPFILTERING_ACROSS_TILES
3368 
3369     cm->tile_width <<= cm->mib_size_log2;
3370     cm->tile_height <<= cm->mib_size_log2;
3371 
3372     cm->tile_width = AOMMIN(cm->tile_width, cm->mi_cols);
3373     cm->tile_height = AOMMIN(cm->tile_height, cm->mi_rows);
3374 
3375     // Get the number of tiles
3376     cm->tile_cols = 1;
3377     while (cm->tile_cols * cm->tile_width < cm->mi_cols) ++cm->tile_cols;
3378 
3379     cm->tile_rows = 1;
3380     while (cm->tile_rows * cm->tile_height < cm->mi_rows) ++cm->tile_rows;
3381 
3382     if (cm->tile_cols * cm->tile_rows > 1) {
3383       // Read the number of bytes used to store tile size
3384       pbi->tile_col_size_bytes = aom_rb_read_literal(rb, 2) + 1;
3385       pbi->tile_size_bytes = aom_rb_read_literal(rb, 2) + 1;
3386     }
3387 
3388 #if CONFIG_DEPENDENT_HORZTILES
3389     cm->dependent_horz_tiles = 0;
3390 #endif
3391   } else {
3392 #endif  // CONFIG_EXT_TILE
3393 
3394 #if CONFIG_MAX_TILE
3395     read_tile_info_max_tile(cm, rb);
3396 #else
3397   int min_log2_tile_cols, max_log2_tile_cols, max_ones;
3398   av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
3399 
3400   // columns
3401   max_ones = max_log2_tile_cols - min_log2_tile_cols;
3402   cm->log2_tile_cols = min_log2_tile_cols;
3403   while (max_ones-- && aom_rb_read_bit(rb)) cm->log2_tile_cols++;
3404 
3405   if (cm->log2_tile_cols > 6)
3406     aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
3407                        "Invalid number of tile columns");
3408 
3409   // rows
3410   cm->log2_tile_rows = aom_rb_read_bit(rb);
3411   if (cm->log2_tile_rows) cm->log2_tile_rows += aom_rb_read_bit(rb);
3412 
3413   cm->tile_width =
3414       get_tile_size(cm->mi_cols, cm->log2_tile_cols, &cm->tile_cols);
3415   cm->tile_height =
3416       get_tile_size(cm->mi_rows, cm->log2_tile_rows, &cm->tile_rows);
3417 
3418 #endif  // CONFIG_MAX_TILE
3419 #if CONFIG_DEPENDENT_HORZTILES
3420     if (cm->tile_rows > 1)
3421       cm->dependent_horz_tiles = aom_rb_read_bit(rb);
3422     else
3423       cm->dependent_horz_tiles = 0;
3424 #endif
3425 #if CONFIG_LOOPFILTERING_ACROSS_TILES
3426     cm->loop_filter_across_tiles_enabled = aom_rb_read_bit(rb);
3427 #endif  // CONFIG_LOOPFILTERING_ACROSS_TILES
3428 
3429     // tile size magnitude
3430     pbi->tile_size_bytes = aom_rb_read_literal(rb, 2) + 1;
3431 #if CONFIG_EXT_TILE
3432   }
3433 #endif  // CONFIG_EXT_TILE
3434 
3435 // each tile group header is in its own tile group OBU
3436 #if !CONFIG_OBU
3437   // Store an index to the location of the tile group information
3438   pbi->tg_size_bit_offset = rb->bit_offset;
3439   read_tile_group_range(pbi, rb);
3440 #endif
3441 }
3442 
mem_get_varsize(const uint8_t * src,int sz)3443 static int mem_get_varsize(const uint8_t *src, int sz) {
3444   switch (sz) {
3445     case 1: return src[0];
3446     case 2: return mem_get_le16(src);
3447     case 3: return mem_get_le24(src);
3448     case 4: return mem_get_le32(src);
3449     default: assert(0 && "Invalid size"); return -1;
3450   }
3451 }
3452 
3453 #if CONFIG_EXT_TILE
3454 // Reads the next tile returning its size and adjusting '*data' accordingly
3455 // based on 'is_last'.
get_ls_tile_buffer(const uint8_t * const data_end,struct aom_internal_error_info * error_info,const uint8_t ** data,aom_decrypt_cb decrypt_cb,void * decrypt_state,TileBufferDec (* const tile_buffers)[MAX_TILE_COLS],int tile_size_bytes,int col,int row,int tile_copy_mode)3456 static void get_ls_tile_buffer(
3457     const uint8_t *const data_end, struct aom_internal_error_info *error_info,
3458     const uint8_t **data, aom_decrypt_cb decrypt_cb, void *decrypt_state,
3459     TileBufferDec (*const tile_buffers)[MAX_TILE_COLS], int tile_size_bytes,
3460     int col, int row, int tile_copy_mode) {
3461   size_t size;
3462 
3463   size_t copy_size = 0;
3464   const uint8_t *copy_data = NULL;
3465 
3466   if (!read_is_valid(*data, tile_size_bytes, data_end))
3467     aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
3468                        "Truncated packet or corrupt tile length");
3469   if (decrypt_cb) {
3470     uint8_t be_data[4];
3471     decrypt_cb(decrypt_state, *data, be_data, tile_size_bytes);
3472 
3473     // Only read number of bytes in cm->tile_size_bytes.
3474     size = mem_get_varsize(be_data, tile_size_bytes);
3475   } else {
3476     size = mem_get_varsize(*data, tile_size_bytes);
3477   }
3478 
3479   // If tile_copy_mode = 1, then the top bit of the tile header indicates copy
3480   // mode.
3481   if (tile_copy_mode && (size >> (tile_size_bytes * 8 - 1)) == 1) {
3482     // The remaining bits in the top byte signal the row offset
3483     int offset = (size >> (tile_size_bytes - 1) * 8) & 0x7f;
3484 
3485     // Currently, only use tiles in same column as reference tiles.
3486     copy_data = tile_buffers[row - offset][col].data;
3487     copy_size = tile_buffers[row - offset][col].size;
3488     size = 0;
3489   }
3490 
3491   *data += tile_size_bytes;
3492 
3493   if (size > (size_t)(data_end - *data))
3494     aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
3495                        "Truncated packet or corrupt tile size");
3496 
3497   if (size > 0) {
3498     tile_buffers[row][col].data = *data;
3499     tile_buffers[row][col].size = size;
3500   } else {
3501     tile_buffers[row][col].data = copy_data;
3502     tile_buffers[row][col].size = copy_size;
3503   }
3504 
3505   *data += size;
3506 
3507   tile_buffers[row][col].raw_data_end = *data;
3508 }
3509 
get_ls_tile_buffers(AV1Decoder * pbi,const uint8_t * data,const uint8_t * data_end,TileBufferDec (* const tile_buffers)[MAX_TILE_COLS])3510 static void get_ls_tile_buffers(
3511     AV1Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
3512     TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
3513   AV1_COMMON *const cm = &pbi->common;
3514   const int tile_cols = cm->tile_cols;
3515   const int tile_rows = cm->tile_rows;
3516   const int have_tiles = tile_cols * tile_rows > 1;
3517 
3518   if (!have_tiles) {
3519     const size_t tile_size = data_end - data;
3520     tile_buffers[0][0].data = data;
3521     tile_buffers[0][0].size = tile_size;
3522     tile_buffers[0][0].raw_data_end = NULL;
3523   } else {
3524     // We locate only the tile buffers that are required, which are the ones
3525     // specified by pbi->dec_tile_col and pbi->dec_tile_row. Also, we always
3526     // need the last (bottom right) tile buffer, as we need to know where the
3527     // end of the compressed frame buffer is for proper superframe decoding.
3528 
3529     const uint8_t *tile_col_data_end[MAX_TILE_COLS];
3530     const uint8_t *const data_start = data;
3531 
3532     const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
3533     const int single_row = pbi->dec_tile_row >= 0;
3534     const int tile_rows_start = single_row ? dec_tile_row : 0;
3535     const int tile_rows_end = single_row ? tile_rows_start + 1 : tile_rows;
3536     const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
3537     const int single_col = pbi->dec_tile_col >= 0;
3538     const int tile_cols_start = single_col ? dec_tile_col : 0;
3539     const int tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
3540 
3541     const int tile_col_size_bytes = pbi->tile_col_size_bytes;
3542     const int tile_size_bytes = pbi->tile_size_bytes;
3543     const int tile_copy_mode =
3544         ((AOMMAX(cm->tile_width, cm->tile_height) << MI_SIZE_LOG2) <= 256) ? 1
3545                                                                            : 0;
3546     size_t tile_col_size;
3547     int r, c;
3548 
3549     // Read tile column sizes for all columns (we need the last tile buffer)
3550     for (c = 0; c < tile_cols; ++c) {
3551       const int is_last = c == tile_cols - 1;
3552       if (!is_last) {
3553         tile_col_size = mem_get_varsize(data, tile_col_size_bytes);
3554         data += tile_col_size_bytes;
3555         tile_col_data_end[c] = data + tile_col_size;
3556       } else {
3557         tile_col_size = data_end - data;
3558         tile_col_data_end[c] = data_end;
3559       }
3560       data += tile_col_size;
3561     }
3562 
3563     data = data_start;
3564 
3565     // Read the required tile sizes.
3566     for (c = tile_cols_start; c < tile_cols_end; ++c) {
3567       const int is_last = c == tile_cols - 1;
3568 
3569       if (c > 0) data = tile_col_data_end[c - 1];
3570 
3571       if (!is_last) data += tile_col_size_bytes;
3572 
3573       // Get the whole of the last column, otherwise stop at the required tile.
3574       for (r = 0; r < (is_last ? tile_rows : tile_rows_end); ++r) {
3575         tile_buffers[r][c].col = c;
3576 
3577         get_ls_tile_buffer(tile_col_data_end[c], &pbi->common.error, &data,
3578                            pbi->decrypt_cb, pbi->decrypt_state, tile_buffers,
3579                            tile_size_bytes, c, r, tile_copy_mode);
3580       }
3581     }
3582 
3583     // If we have not read the last column, then read it to get the last tile.
3584     if (tile_cols_end != tile_cols) {
3585       c = tile_cols - 1;
3586 
3587       data = tile_col_data_end[c - 1];
3588 
3589       for (r = 0; r < tile_rows; ++r) {
3590         tile_buffers[r][c].col = c;
3591 
3592         get_ls_tile_buffer(tile_col_data_end[c], &pbi->common.error, &data,
3593                            pbi->decrypt_cb, pbi->decrypt_state, tile_buffers,
3594                            tile_size_bytes, c, r, tile_copy_mode);
3595       }
3596     }
3597   }
3598 }
3599 #endif  // CONFIG_EXT_TILE
3600 
3601 // Reads the next tile returning its size and adjusting '*data' accordingly
3602 // based on 'is_last'.
get_tile_buffer(const uint8_t * const data_end,const int tile_size_bytes,int is_last,struct aom_internal_error_info * error_info,const uint8_t ** data,aom_decrypt_cb decrypt_cb,void * decrypt_state,TileBufferDec * const buf)3603 static void get_tile_buffer(const uint8_t *const data_end,
3604                             const int tile_size_bytes, int is_last,
3605                             struct aom_internal_error_info *error_info,
3606                             const uint8_t **data, aom_decrypt_cb decrypt_cb,
3607                             void *decrypt_state, TileBufferDec *const buf) {
3608   size_t size;
3609 
3610   if (!is_last) {
3611     if (!read_is_valid(*data, tile_size_bytes, data_end))
3612       aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
3613                          "Truncated packet or corrupt tile length");
3614 
3615     if (decrypt_cb) {
3616       uint8_t be_data[4];
3617       decrypt_cb(decrypt_state, *data, be_data, tile_size_bytes);
3618       size = mem_get_varsize(be_data, tile_size_bytes);
3619     } else {
3620       size = mem_get_varsize(*data, tile_size_bytes);
3621     }
3622     *data += tile_size_bytes;
3623 
3624     if (size > (size_t)(data_end - *data))
3625       aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
3626                          "Truncated packet or corrupt tile size");
3627   } else {
3628     size = data_end - *data;
3629   }
3630 
3631   buf->data = *data;
3632   buf->size = size;
3633 
3634   *data += size;
3635 }
3636 
get_tile_buffers(AV1Decoder * pbi,const uint8_t * data,const uint8_t * data_end,TileBufferDec (* const tile_buffers)[MAX_TILE_COLS],int startTile,int endTile)3637 static void get_tile_buffers(AV1Decoder *pbi, const uint8_t *data,
3638                              const uint8_t *data_end,
3639                              TileBufferDec (*const tile_buffers)[MAX_TILE_COLS],
3640                              int startTile, int endTile) {
3641   AV1_COMMON *const cm = &pbi->common;
3642   int r, c;
3643   const int tile_cols = cm->tile_cols;
3644   const int tile_rows = cm->tile_rows;
3645   int tc = 0;
3646   int first_tile_in_tg = 0;
3647   struct aom_read_bit_buffer rb_tg_hdr;
3648   uint8_t clear_data[MAX_AV1_HEADER_SIZE];
3649 #if !CONFIG_OBU
3650   const size_t hdr_size = pbi->uncomp_hdr_size + pbi->first_partition_size;
3651   const int tg_size_bit_offset = pbi->tg_size_bit_offset;
3652 #else
3653   const int tg_size_bit_offset = 0;
3654 #endif
3655 
3656 #if CONFIG_DEPENDENT_HORZTILES
3657   int tile_group_start_col = 0;
3658   int tile_group_start_row = 0;
3659 #endif
3660 
3661   for (r = 0; r < tile_rows; ++r) {
3662     for (c = 0; c < tile_cols; ++c, ++tc) {
3663       TileBufferDec *const buf = &tile_buffers[r][c];
3664 #if CONFIG_OBU
3665       const int is_last = (tc == endTile);
3666       const size_t hdr_offset = 0;
3667 #else
3668       const int is_last = (r == tile_rows - 1) && (c == tile_cols - 1);
3669       const size_t hdr_offset = (tc && tc == first_tile_in_tg) ? hdr_size : 0;
3670 #endif
3671 
3672       if (tc < startTile || tc > endTile) continue;
3673 
3674       if (data + hdr_offset >= data_end)
3675         aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
3676                            "Data ended before all tiles were read.");
3677       buf->col = c;
3678       if (hdr_offset) {
3679         init_read_bit_buffer(pbi, &rb_tg_hdr, data, data_end, clear_data);
3680         rb_tg_hdr.bit_offset = tg_size_bit_offset;
3681         read_tile_group_range(pbi, &rb_tg_hdr);
3682 #if CONFIG_DEPENDENT_HORZTILES
3683         tile_group_start_row = r;
3684         tile_group_start_col = c;
3685 #endif
3686       }
3687       first_tile_in_tg += tc == first_tile_in_tg ? pbi->tg_size : 0;
3688       data += hdr_offset;
3689       get_tile_buffer(data_end, pbi->tile_size_bytes, is_last,
3690                       &pbi->common.error, &data, pbi->decrypt_cb,
3691                       pbi->decrypt_state, buf);
3692 #if CONFIG_DEPENDENT_HORZTILES
3693       cm->tile_group_start_row[r][c] = tile_group_start_row;
3694       cm->tile_group_start_col[r][c] = tile_group_start_col;
3695 #endif
3696     }
3697   }
3698 }
3699 
3700 #if CONFIG_PVQ
daala_dec_init(AV1_COMMON * const cm,daala_dec_ctx * daala_dec,aom_reader * r)3701 static void daala_dec_init(AV1_COMMON *const cm, daala_dec_ctx *daala_dec,
3702                            aom_reader *r) {
3703   daala_dec->r = r;
3704 
3705   // TODO(yushin) : activity masking info needs be signaled by a bitstream
3706   daala_dec->use_activity_masking = AV1_PVQ_ENABLE_ACTIVITY_MASKING;
3707 
3708   if (daala_dec->use_activity_masking)
3709     daala_dec->qm = OD_HVS_QM;
3710   else
3711     daala_dec->qm = OD_FLAT_QM;
3712 
3713   od_init_qm(daala_dec->state.qm, daala_dec->state.qm_inv,
3714              daala_dec->qm == OD_HVS_QM ? OD_QM8_Q4_HVS : OD_QM8_Q4_FLAT);
3715 
3716   if (daala_dec->use_activity_masking) {
3717     int pli;
3718     int use_masking = daala_dec->use_activity_masking;
3719     int segment_id = 0;
3720     int qindex = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
3721 
3722     for (pli = 0; pli < MAX_MB_PLANE; pli++) {
3723       int i;
3724       int q;
3725 
3726       q = qindex;
3727       if (q <= OD_DEFAULT_QMS[use_masking][0][pli].interp_q << OD_COEFF_SHIFT) {
3728         od_interp_qm(&daala_dec->state.pvq_qm_q4[pli][0], q,
3729                      &OD_DEFAULT_QMS[use_masking][0][pli], NULL);
3730       } else {
3731         i = 0;
3732         while (OD_DEFAULT_QMS[use_masking][i + 1][pli].qm_q4 != NULL &&
3733                q > OD_DEFAULT_QMS[use_masking][i + 1][pli].interp_q
3734                        << OD_COEFF_SHIFT) {
3735           i++;
3736         }
3737         od_interp_qm(&daala_dec->state.pvq_qm_q4[pli][0], q,
3738                      &OD_DEFAULT_QMS[use_masking][i][pli],
3739                      &OD_DEFAULT_QMS[use_masking][i + 1][pli]);
3740       }
3741     }
3742   }
3743 }
3744 #endif  // #if CONFIG_PVQ
3745 
3746 #if CONFIG_LOOPFILTERING_ACROSS_TILES
dec_setup_across_tile_boundary_info(const AV1_COMMON * const cm,const TileInfo * const tile_info)3747 static void dec_setup_across_tile_boundary_info(
3748     const AV1_COMMON *const cm, const TileInfo *const tile_info) {
3749   if (tile_info->mi_row_start >= tile_info->mi_row_end ||
3750       tile_info->mi_col_start >= tile_info->mi_col_end)
3751     return;
3752 
3753   if (!cm->loop_filter_across_tiles_enabled) {
3754     av1_setup_across_tile_boundary_info(cm, tile_info);
3755   }
3756 }
3757 #endif  // CONFIG_LOOPFILTERING_ACROSS_TILES
3758 
decode_tiles(AV1Decoder * pbi,const uint8_t * data,const uint8_t * data_end,int startTile,int endTile)3759 static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
3760                                    const uint8_t *data_end, int startTile,
3761                                    int endTile) {
3762   AV1_COMMON *const cm = &pbi->common;
3763   const AVxWorkerInterface *const winterface = aom_get_worker_interface();
3764   const int tile_cols = cm->tile_cols;
3765   const int tile_rows = cm->tile_rows;
3766   const int n_tiles = tile_cols * tile_rows;
3767   TileBufferDec(*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
3768 #if CONFIG_EXT_TILE
3769   const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
3770   const int single_row = pbi->dec_tile_row >= 0;
3771   const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
3772   const int single_col = pbi->dec_tile_col >= 0;
3773 #endif  // CONFIG_EXT_TILE
3774   int tile_rows_start;
3775   int tile_rows_end;
3776   int tile_cols_start;
3777   int tile_cols_end;
3778   int inv_col_order;
3779   int inv_row_order;
3780   int tile_row, tile_col;
3781 
3782 #if CONFIG_EXT_TILE
3783   if (cm->large_scale_tile) {
3784     tile_rows_start = single_row ? dec_tile_row : 0;
3785     tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
3786     tile_cols_start = single_col ? dec_tile_col : 0;
3787     tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
3788     inv_col_order = pbi->inv_tile_order && !single_col;
3789     inv_row_order = pbi->inv_tile_order && !single_row;
3790   } else {
3791 #endif  // CONFIG_EXT_TILE
3792     tile_rows_start = 0;
3793     tile_rows_end = tile_rows;
3794     tile_cols_start = 0;
3795     tile_cols_end = tile_cols;
3796     inv_col_order = pbi->inv_tile_order;
3797     inv_row_order = pbi->inv_tile_order;
3798 #if CONFIG_EXT_TILE
3799   }
3800 #endif  // CONFIG_EXT_TILE
3801 
3802   if (cm->lf.filter_level && !cm->skip_loop_filter &&
3803       pbi->lf_worker.data1 == NULL) {
3804     CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
3805                     aom_memalign(32, sizeof(LFWorkerData)));
3806     pbi->lf_worker.hook = (AVxWorkerHook)av1_loop_filter_worker;
3807     if (pbi->max_threads > 1 && !winterface->reset(&pbi->lf_worker)) {
3808       aom_internal_error(&cm->error, AOM_CODEC_ERROR,
3809                          "Loop filter thread creation failed");
3810     }
3811   }
3812 
3813   if (cm->lf.filter_level && !cm->skip_loop_filter) {
3814     LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
3815     // Be sure to sync as we might be resuming after a failed frame decode.
3816     winterface->sync(&pbi->lf_worker);
3817     av1_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
3818                                pbi->mb.plane);
3819   }
3820 
3821   assert(tile_rows <= MAX_TILE_ROWS);
3822   assert(tile_cols <= MAX_TILE_COLS);
3823 
3824 #if CONFIG_EXT_TILE
3825   if (cm->large_scale_tile)
3826     get_ls_tile_buffers(pbi, data, data_end, tile_buffers);
3827   else
3828 #endif  // CONFIG_EXT_TILE
3829     get_tile_buffers(pbi, data, data_end, tile_buffers, startTile, endTile);
3830 
3831   if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
3832     aom_free(pbi->tile_data);
3833     CHECK_MEM_ERROR(cm, pbi->tile_data,
3834                     aom_memalign(32, n_tiles * (sizeof(*pbi->tile_data))));
3835     pbi->allocated_tiles = n_tiles;
3836   }
3837 #if CONFIG_ACCOUNTING
3838   if (pbi->acct_enabled) {
3839     aom_accounting_reset(&pbi->accounting);
3840   }
3841 #endif
3842   // Load all tile information into tile_data.
3843   for (tile_row = tile_rows_start; tile_row < tile_rows_end; ++tile_row) {
3844     for (tile_col = tile_cols_start; tile_col < tile_cols_end; ++tile_col) {
3845       const TileBufferDec *const buf = &tile_buffers[tile_row][tile_col];
3846       TileData *const td = pbi->tile_data + tile_cols * tile_row + tile_col;
3847 
3848       if (tile_row * cm->tile_cols + tile_col < startTile ||
3849           tile_row * cm->tile_cols + tile_col > endTile)
3850         continue;
3851 
3852       td->cm = cm;
3853       td->xd = pbi->mb;
3854       td->xd.corrupted = 0;
3855       td->xd.counts =
3856           cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
3857               ? &cm->counts
3858               : NULL;
3859       av1_zero(td->dqcoeff);
3860 #if CONFIG_PVQ
3861       av1_zero(td->pvq_ref_coeff);
3862 #endif
3863       av1_tile_init(&td->xd.tile, td->cm, tile_row, tile_col);
3864       setup_bool_decoder(buf->data, data_end, buf->size, &cm->error,
3865                          &td->bit_reader,
3866 #if CONFIG_ANS && ANS_MAX_SYMBOLS
3867                          1 << cm->ans_window_size_log2,
3868 #endif  // CONFIG_ANS && ANS_MAX_SYMBOLS
3869                          pbi->decrypt_cb, pbi->decrypt_state);
3870 #if CONFIG_ACCOUNTING
3871       if (pbi->acct_enabled) {
3872         td->bit_reader.accounting = &pbi->accounting;
3873       } else {
3874         td->bit_reader.accounting = NULL;
3875       }
3876 #endif
3877       av1_init_macroblockd(cm, &td->xd,
3878 #if CONFIG_PVQ
3879                            td->pvq_ref_coeff,
3880 #endif
3881 #if CONFIG_CFL
3882                            &td->cfl,
3883 #endif
3884                            td->dqcoeff);
3885 
3886       // Initialise the tile context from the frame context
3887       td->tctx = *cm->fc;
3888       td->xd.tile_ctx = &td->tctx;
3889 
3890 #if CONFIG_PVQ
3891       daala_dec_init(cm, &td->xd.daala_dec, &td->bit_reader);
3892       td->xd.daala_dec.state.adapt = &td->tctx.pvq_context;
3893 #endif
3894 
3895       td->xd.plane[0].color_index_map = td->color_index_map[0];
3896       td->xd.plane[1].color_index_map = td->color_index_map[1];
3897 #if CONFIG_MRC_TX
3898       td->xd.mrc_mask = td->mrc_mask;
3899 #endif  // CONFIG_MRC_TX
3900     }
3901   }
3902 
3903   for (tile_row = tile_rows_start; tile_row < tile_rows_end; ++tile_row) {
3904     const int row = inv_row_order ? tile_rows - 1 - tile_row : tile_row;
3905     int mi_row = 0;
3906     TileInfo tile_info;
3907 
3908     av1_tile_set_row(&tile_info, cm, row);
3909 
3910     for (tile_col = tile_cols_start; tile_col < tile_cols_end; ++tile_col) {
3911       const int col = inv_col_order ? tile_cols - 1 - tile_col : tile_col;
3912       TileData *const td = pbi->tile_data + tile_cols * row + col;
3913 
3914       if (tile_row * cm->tile_cols + tile_col < startTile ||
3915           tile_row * cm->tile_cols + tile_col > endTile)
3916         continue;
3917 
3918 #if CONFIG_ACCOUNTING
3919       if (pbi->acct_enabled) {
3920         td->bit_reader.accounting->last_tell_frac =
3921             aom_reader_tell_frac(&td->bit_reader);
3922       }
3923 #endif
3924 
3925       av1_tile_set_col(&tile_info, cm, col);
3926 
3927 #if CONFIG_DEPENDENT_HORZTILES
3928       av1_tile_set_tg_boundary(&tile_info, cm, tile_row, tile_col);
3929       if (!cm->dependent_horz_tiles || tile_row == 0 ||
3930           tile_info.tg_horz_boundary) {
3931         av1_zero_above_context(cm, tile_info.mi_col_start,
3932                                tile_info.mi_col_end);
3933       }
3934 #else
3935       av1_zero_above_context(cm, tile_info.mi_col_start, tile_info.mi_col_end);
3936 #endif
3937 #if CONFIG_LOOP_RESTORATION
3938       for (int p = 0; p < MAX_MB_PLANE; ++p) {
3939         set_default_wiener(td->xd.wiener_info + p);
3940         set_default_sgrproj(td->xd.sgrproj_info + p);
3941       }
3942 #endif  // CONFIG_LOOP_RESTORATION
3943 
3944 #if CONFIG_LOOPFILTERING_ACROSS_TILES
3945       dec_setup_across_tile_boundary_info(cm, &tile_info);
3946 #endif  // CONFIG_LOOPFILTERING_ACROSS_TILES
3947 
3948       for (mi_row = tile_info.mi_row_start; mi_row < tile_info.mi_row_end;
3949            mi_row += cm->mib_size) {
3950         int mi_col;
3951 
3952         av1_zero_left_context(&td->xd);
3953 
3954         for (mi_col = tile_info.mi_col_start; mi_col < tile_info.mi_col_end;
3955              mi_col += cm->mib_size) {
3956 #if CONFIG_NCOBMC_ADAPT_WEIGHT
3957           alloc_ncobmc_pred_buffer(&td->xd);
3958           set_sb_mi_boundaries(cm, &td->xd, mi_row, mi_col);
3959 #endif
3960           decode_partition(pbi, &td->xd,
3961 #if CONFIG_SUPERTX
3962                            0,
3963 #endif  // CONFIG_SUPERTX
3964                            mi_row, mi_col, &td->bit_reader, cm->sb_size);
3965 #if NC_MODE_INFO && CONFIG_MOTION_VAR
3966           detoken_and_recon_sb(pbi, &td->xd, mi_row, mi_col, &td->bit_reader,
3967                                cm->sb_size);
3968 #endif
3969 #if CONFIG_NCOBMC_ADAPT_WEIGHT
3970           free_ncobmc_pred_buffer(&td->xd);
3971 #endif
3972         }
3973         aom_merge_corrupted_flag(&pbi->mb.corrupted, td->xd.corrupted);
3974         if (pbi->mb.corrupted)
3975           aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
3976                              "Failed to decode tile data");
3977       }
3978     }
3979 
3980 #if !CONFIG_OBU
3981     assert(mi_row > 0);
3982 #endif
3983 
3984 // when Parallel deblocking is enabled, deblocking should not
3985 // be interleaved with decoding. Instead, deblocking should be done
3986 // after the entire frame is decoded.
3987 #if !CONFIG_VAR_TX && !CONFIG_PARALLEL_DEBLOCKING && !CONFIG_CB4X4
3988     // Loopfilter one tile row.
3989     // Note: If out-of-order tile decoding is used(for example, inv_row_order
3990     // = 1), the loopfiltering has be done after all tile rows are decoded.
3991     if (!inv_row_order && cm->lf.filter_level && !cm->skip_loop_filter) {
3992       LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
3993       const int lf_start = AOMMAX(0, tile_info.mi_row_start - cm->mib_size);
3994       const int lf_end = tile_info.mi_row_end - cm->mib_size;
3995 
3996       // Delay the loopfilter if the first tile row is only
3997       // a single superblock high.
3998       if (lf_end <= 0) continue;
3999 
4000       // Decoding has completed. Finish up the loop filter in this thread.
4001       if (tile_info.mi_row_end >= cm->mi_rows) continue;
4002 
4003       winterface->sync(&pbi->lf_worker);
4004       lf_data->start = lf_start;
4005       lf_data->stop = lf_end;
4006       if (pbi->max_threads > 1) {
4007         winterface->launch(&pbi->lf_worker);
4008       } else {
4009         winterface->execute(&pbi->lf_worker);
4010       }
4011     }
4012 #endif  // !CONFIG_VAR_TX && !CONFIG_PARALLEL_DEBLOCKING
4013 
4014     // After loopfiltering, the last 7 row pixels in each superblock row may
4015     // still be changed by the longest loopfilter of the next superblock row.
4016     if (cm->frame_parallel_decode)
4017       av1_frameworker_broadcast(pbi->cur_buf, mi_row << cm->mib_size_log2);
4018   }
4019 
4020 #if CONFIG_VAR_TX || CONFIG_CB4X4
4021 // Loopfilter the whole frame.
4022 #if CONFIG_LPF_SB
4023   av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
4024                         cm->lf.filter_level, 0, 0, 0, 0);
4025 #else
4026 #if CONFIG_LOOPFILTER_LEVEL
4027   if (cm->lf.filter_level[0] || cm->lf.filter_level[1]) {
4028     av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
4029                           cm->lf.filter_level[0], cm->lf.filter_level[1], 0, 0);
4030     av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
4031                           cm->lf.filter_level_u, cm->lf.filter_level_u, 1, 0);
4032     av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
4033                           cm->lf.filter_level_v, cm->lf.filter_level_v, 2, 0);
4034   }
4035 #else
4036 #if CONFIG_OBU
4037   if (endTile == cm->tile_rows * cm->tile_cols - 1)
4038 #endif
4039     av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
4040                           cm->lf.filter_level, 0, 0);
4041 #endif  // CONFIG_LOOPFILTER_LEVEL
4042 #endif  // CONFIG_LPF_SB
4043 #else
4044 #if CONFIG_PARALLEL_DEBLOCKING
4045   // Loopfilter all rows in the frame in the frame.
4046   if (cm->lf.filter_level && !cm->skip_loop_filter) {
4047     LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
4048     winterface->sync(&pbi->lf_worker);
4049     lf_data->start = 0;
4050     lf_data->stop = cm->mi_rows;
4051     winterface->execute(&pbi->lf_worker);
4052   }
4053 #else
4054   // Loopfilter remaining rows in the frame.
4055   if (cm->lf.filter_level && !cm->skip_loop_filter) {
4056     LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
4057     winterface->sync(&pbi->lf_worker);
4058     lf_data->start = lf_data->stop;
4059     lf_data->stop = cm->mi_rows;
4060     winterface->execute(&pbi->lf_worker);
4061   }
4062 #endif  // CONFIG_PARALLEL_DEBLOCKING
4063 #endif  // CONFIG_VAR_TX
4064   if (cm->frame_parallel_decode)
4065     av1_frameworker_broadcast(pbi->cur_buf, INT_MAX);
4066 
4067 #if CONFIG_EXT_TILE
4068   if (cm->large_scale_tile) {
4069     if (n_tiles == 1) {
4070 #if CONFIG_ANS
4071       return data_end;
4072 #else
4073       // Find the end of the single tile buffer
4074       return aom_reader_find_end(&pbi->tile_data->bit_reader);
4075 #endif  // CONFIG_ANS
4076     } else {
4077       // Return the end of the last tile buffer
4078       return tile_buffers[tile_rows - 1][tile_cols - 1].raw_data_end;
4079     }
4080   } else {
4081 #endif  // CONFIG_EXT_TILE
4082 #if CONFIG_ANS
4083     return data_end;
4084 #else
4085 #if !CONFIG_OBU
4086   {
4087     // Get last tile data.
4088     TileData *const td = pbi->tile_data + tile_cols * tile_rows - 1;
4089     return aom_reader_find_end(&td->bit_reader);
4090   }
4091 #else
4092   TileData *const td = pbi->tile_data + endTile;
4093   return aom_reader_find_end(&td->bit_reader);
4094 #endif
4095 #endif  // CONFIG_ANS
4096 #if CONFIG_EXT_TILE
4097   }
4098 #endif  // CONFIG_EXT_TILE
4099 }
4100 
tile_worker_hook(TileWorkerData * const tile_data,const TileInfo * const tile)4101 static int tile_worker_hook(TileWorkerData *const tile_data,
4102                             const TileInfo *const tile) {
4103   AV1Decoder *const pbi = tile_data->pbi;
4104   const AV1_COMMON *const cm = &pbi->common;
4105   int mi_row, mi_col;
4106 
4107   if (setjmp(tile_data->error_info.jmp)) {
4108     tile_data->error_info.setjmp = 0;
4109     aom_merge_corrupted_flag(&tile_data->xd.corrupted, 1);
4110     return 0;
4111   }
4112 
4113   tile_data->error_info.setjmp = 1;
4114   tile_data->xd.error_info = &tile_data->error_info;
4115 #if CONFIG_DEPENDENT_HORZTILES
4116   if (!cm->dependent_horz_tiles || tile->tg_horz_boundary) {
4117     av1_zero_above_context(&pbi->common, tile->mi_col_start, tile->mi_col_end);
4118   }
4119 #else
4120   av1_zero_above_context(&pbi->common, tile->mi_col_start, tile->mi_col_end);
4121 #endif
4122 
4123   for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
4124        mi_row += cm->mib_size) {
4125     av1_zero_left_context(&tile_data->xd);
4126 
4127     for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
4128          mi_col += cm->mib_size) {
4129       decode_partition(pbi, &tile_data->xd,
4130 #if CONFIG_SUPERTX
4131                        0,
4132 #endif
4133                        mi_row, mi_col, &tile_data->bit_reader, cm->sb_size);
4134 #if NC_MODE_INFO && CONFIG_MOTION_VAR
4135       detoken_and_recon_sb(pbi, &tile_data->xd, mi_row, mi_col,
4136                            &tile_data->bit_reader, cm->sb_size);
4137 #endif
4138     }
4139   }
4140   return !tile_data->xd.corrupted;
4141 }
4142 
4143 // sorts in descending order
compare_tile_buffers(const void * a,const void * b)4144 static int compare_tile_buffers(const void *a, const void *b) {
4145   const TileBufferDec *const buf1 = (const TileBufferDec *)a;
4146   const TileBufferDec *const buf2 = (const TileBufferDec *)b;
4147   return (int)(buf2->size - buf1->size);
4148 }
4149 
decode_tiles_mt(AV1Decoder * pbi,const uint8_t * data,const uint8_t * data_end)4150 static const uint8_t *decode_tiles_mt(AV1Decoder *pbi, const uint8_t *data,
4151                                       const uint8_t *data_end) {
4152   AV1_COMMON *const cm = &pbi->common;
4153   const AVxWorkerInterface *const winterface = aom_get_worker_interface();
4154   const int tile_cols = cm->tile_cols;
4155   const int tile_rows = cm->tile_rows;
4156   const int num_workers = AOMMIN(pbi->max_threads & ~1, tile_cols);
4157   TileBufferDec(*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
4158 #if CONFIG_EXT_TILE
4159   const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
4160   const int single_row = pbi->dec_tile_row >= 0;
4161   const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
4162   const int single_col = pbi->dec_tile_col >= 0;
4163 #endif  // CONFIG_EXT_TILE
4164   int tile_rows_start;
4165   int tile_rows_end;
4166   int tile_cols_start;
4167   int tile_cols_end;
4168   int tile_row, tile_col;
4169   int i;
4170 
4171 #if CONFIG_EXT_TILE
4172   if (cm->large_scale_tile) {
4173     tile_rows_start = single_row ? dec_tile_row : 0;
4174     tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
4175     tile_cols_start = single_col ? dec_tile_col : 0;
4176     tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
4177   } else {
4178 #endif  // CONFIG_EXT_TILE
4179     tile_rows_start = 0;
4180     tile_rows_end = tile_rows;
4181     tile_cols_start = 0;
4182     tile_cols_end = tile_cols;
4183 #if CONFIG_EXT_TILE
4184   }
4185 #endif  // CONFIG_EXT_TILE
4186 
4187 #if !CONFIG_ANS
4188   int final_worker = -1;
4189 #endif  // !CONFIG_ANS
4190 
4191   assert(tile_rows <= MAX_TILE_ROWS);
4192   assert(tile_cols <= MAX_TILE_COLS);
4193 
4194   assert(tile_cols * tile_rows > 1);
4195 
4196   // TODO(jzern): See if we can remove the restriction of passing in max
4197   // threads to the decoder.
4198   if (pbi->num_tile_workers == 0) {
4199     const int num_threads = pbi->max_threads & ~1;
4200     CHECK_MEM_ERROR(cm, pbi->tile_workers,
4201                     aom_malloc(num_threads * sizeof(*pbi->tile_workers)));
4202     // Ensure tile data offsets will be properly aligned. This may fail on
4203     // platforms without DECLARE_ALIGNED().
4204     assert((sizeof(*pbi->tile_worker_data) % 16) == 0);
4205     CHECK_MEM_ERROR(
4206         cm, pbi->tile_worker_data,
4207         aom_memalign(32, num_threads * sizeof(*pbi->tile_worker_data)));
4208     CHECK_MEM_ERROR(cm, pbi->tile_worker_info,
4209                     aom_malloc(num_threads * sizeof(*pbi->tile_worker_info)));
4210     for (i = 0; i < num_threads; ++i) {
4211       AVxWorker *const worker = &pbi->tile_workers[i];
4212       ++pbi->num_tile_workers;
4213 
4214       winterface->init(worker);
4215       if (i < num_threads - 1 && !winterface->reset(worker)) {
4216         aom_internal_error(&cm->error, AOM_CODEC_ERROR,
4217                            "Tile decoder thread creation failed");
4218       }
4219     }
4220   }
4221 
4222   // Reset tile decoding hook
4223   for (i = 0; i < num_workers; ++i) {
4224     AVxWorker *const worker = &pbi->tile_workers[i];
4225     winterface->sync(worker);
4226     worker->hook = (AVxWorkerHook)tile_worker_hook;
4227     worker->data1 = &pbi->tile_worker_data[i];
4228     worker->data2 = &pbi->tile_worker_info[i];
4229   }
4230 
4231   // Initialize thread frame counts.
4232   if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
4233     for (i = 0; i < num_workers; ++i) {
4234       TileWorkerData *const twd = (TileWorkerData *)pbi->tile_workers[i].data1;
4235       av1_zero(twd->counts);
4236     }
4237   }
4238 
4239 // Load tile data into tile_buffers
4240 #if CONFIG_EXT_TILE
4241   if (cm->large_scale_tile)
4242     get_ls_tile_buffers(pbi, data, data_end, tile_buffers);
4243   else
4244 #endif  // CONFIG_EXT_TILE
4245     get_tile_buffers(pbi, data, data_end, tile_buffers, 0,
4246                      cm->tile_rows * cm->tile_cols - 1);
4247 
4248   for (tile_row = tile_rows_start; tile_row < tile_rows_end; ++tile_row) {
4249     // Sort the buffers in this tile row based on size in descending order.
4250     qsort(&tile_buffers[tile_row][tile_cols_start],
4251           tile_cols_end - tile_cols_start, sizeof(tile_buffers[0][0]),
4252           compare_tile_buffers);
4253 
4254     // Rearrange the tile buffers in this tile row such that per-tile group
4255     // the largest, and presumably the most difficult tile will be decoded in
4256     // the main thread. This should help minimize the number of instances
4257     // where the main thread is waiting for a worker to complete.
4258     {
4259       int group_start;
4260       for (group_start = tile_cols_start; group_start < tile_cols_end;
4261            group_start += num_workers) {
4262         const int group_end = AOMMIN(group_start + num_workers, tile_cols);
4263         const TileBufferDec largest = tile_buffers[tile_row][group_start];
4264         memmove(&tile_buffers[tile_row][group_start],
4265                 &tile_buffers[tile_row][group_start + 1],
4266                 (group_end - group_start - 1) * sizeof(tile_buffers[0][0]));
4267         tile_buffers[tile_row][group_end - 1] = largest;
4268       }
4269     }
4270 
4271     for (tile_col = tile_cols_start; tile_col < tile_cols_end;) {
4272       // Launch workers for individual columns
4273       for (i = 0; i < num_workers && tile_col < tile_cols_end;
4274            ++i, ++tile_col) {
4275         TileBufferDec *const buf = &tile_buffers[tile_row][tile_col];
4276         AVxWorker *const worker = &pbi->tile_workers[i];
4277         TileWorkerData *const twd = (TileWorkerData *)worker->data1;
4278         TileInfo *const tile_info = (TileInfo *)worker->data2;
4279 
4280         twd->pbi = pbi;
4281         twd->xd = pbi->mb;
4282         twd->xd.corrupted = 0;
4283         twd->xd.counts =
4284             cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
4285                 ? &twd->counts
4286                 : NULL;
4287         av1_zero(twd->dqcoeff);
4288         av1_tile_init(tile_info, cm, tile_row, buf->col);
4289         av1_tile_init(&twd->xd.tile, cm, tile_row, buf->col);
4290 
4291 #if CONFIG_LOOPFILTERING_ACROSS_TILES
4292         dec_setup_across_tile_boundary_info(cm, tile_info);
4293 #endif  // CONFIG_LOOPFILTERING_ACROSS_TILES
4294 
4295         setup_bool_decoder(buf->data, data_end, buf->size, &cm->error,
4296                            &twd->bit_reader,
4297 #if CONFIG_ANS && ANS_MAX_SYMBOLS
4298                            1 << cm->ans_window_size_log2,
4299 #endif  // CONFIG_ANS && ANS_MAX_SYMBOLS
4300                            pbi->decrypt_cb, pbi->decrypt_state);
4301         av1_init_macroblockd(cm, &twd->xd,
4302 #if CONFIG_PVQ
4303                              twd->pvq_ref_coeff,
4304 #endif
4305 #if CONFIG_CFL
4306                              &twd->cfl,
4307 #endif
4308                              twd->dqcoeff);
4309 #if CONFIG_PVQ
4310         daala_dec_init(cm, &twd->xd.daala_dec, &twd->bit_reader);
4311         twd->xd.daala_dec.state.adapt = &twd->tctx.pvq_context;
4312 #endif
4313         // Initialise the tile context from the frame context
4314         twd->tctx = *cm->fc;
4315         twd->xd.tile_ctx = &twd->tctx;
4316         twd->xd.plane[0].color_index_map = twd->color_index_map[0];
4317         twd->xd.plane[1].color_index_map = twd->color_index_map[1];
4318 
4319         worker->had_error = 0;
4320         if (i == num_workers - 1 || tile_col == tile_cols_end - 1) {
4321           winterface->execute(worker);
4322         } else {
4323           winterface->launch(worker);
4324         }
4325 
4326 #if !CONFIG_ANS
4327         if (tile_row == tile_rows - 1 && buf->col == tile_cols - 1) {
4328           final_worker = i;
4329         }
4330 #endif  // !CONFIG_ANS
4331       }
4332 
4333       // Sync all workers
4334       for (; i > 0; --i) {
4335         AVxWorker *const worker = &pbi->tile_workers[i - 1];
4336         // TODO(jzern): The tile may have specific error data associated with
4337         // its aom_internal_error_info which could be propagated to the main
4338         // info in cm. Additionally once the threads have been synced and an
4339         // error is detected, there's no point in continuing to decode tiles.
4340         pbi->mb.corrupted |= !winterface->sync(worker);
4341       }
4342     }
4343   }
4344 
4345   // Accumulate thread frame counts.
4346   if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
4347     for (i = 0; i < num_workers; ++i) {
4348       TileWorkerData *const twd = (TileWorkerData *)pbi->tile_workers[i].data1;
4349       av1_accumulate_frame_counts(&cm->counts, &twd->counts);
4350     }
4351   }
4352 
4353 #if CONFIG_EXT_TILE
4354   if (cm->large_scale_tile) {
4355     // Return the end of the last tile buffer
4356     return tile_buffers[tile_rows - 1][tile_cols - 1].raw_data_end;
4357   } else {
4358 #endif  // CONFIG_EXT_TILE
4359 #if CONFIG_ANS
4360     return data_end;
4361 #else
4362   assert(final_worker != -1);
4363   {
4364     TileWorkerData *const twd =
4365         (TileWorkerData *)pbi->tile_workers[final_worker].data1;
4366     return aom_reader_find_end(&twd->bit_reader);
4367   }
4368 #endif  // CONFIG_ANS
4369 #if CONFIG_EXT_TILE
4370   }
4371 #endif  // CONFIG_EXT_TILE
4372 }
4373 
error_handler(void * data)4374 static void error_handler(void *data) {
4375   AV1_COMMON *const cm = (AV1_COMMON *)data;
4376   aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, "Truncated packet");
4377 }
4378 
read_bitdepth_colorspace_sampling(AV1_COMMON * cm,struct aom_read_bit_buffer * rb,int allow_lowbitdepth)4379 static void read_bitdepth_colorspace_sampling(AV1_COMMON *cm,
4380                                               struct aom_read_bit_buffer *rb,
4381                                               int allow_lowbitdepth) {
4382   if (cm->profile >= PROFILE_2) {
4383     cm->bit_depth = aom_rb_read_bit(rb) ? AOM_BITS_12 : AOM_BITS_10;
4384   } else {
4385     cm->bit_depth = AOM_BITS_8;
4386   }
4387 
4388 #if CONFIG_HIGHBITDEPTH
4389   cm->use_highbitdepth = cm->bit_depth > AOM_BITS_8 || !allow_lowbitdepth;
4390 #else
4391   (void)allow_lowbitdepth;
4392 #endif
4393 #if CONFIG_COLORSPACE_HEADERS
4394   cm->color_space = aom_rb_read_literal(rb, 5);
4395   cm->transfer_function = aom_rb_read_literal(rb, 5);
4396 #else
4397   cm->color_space = aom_rb_read_literal(rb, 3);
4398 #endif
4399   if (cm->color_space != AOM_CS_SRGB) {
4400     // [16,235] (including xvycc) vs [0,255] range
4401     cm->color_range = aom_rb_read_bit(rb);
4402     if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
4403       cm->subsampling_x = aom_rb_read_bit(rb);
4404       cm->subsampling_y = aom_rb_read_bit(rb);
4405       if (cm->subsampling_x == 1 && cm->subsampling_y == 1)
4406         aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
4407                            "4:2:0 color not supported in profile 1 or 3");
4408       if (aom_rb_read_bit(rb))
4409         aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
4410                            "Reserved bit set");
4411     } else {
4412       cm->subsampling_y = cm->subsampling_x = 1;
4413     }
4414 #if CONFIG_COLORSPACE_HEADERS
4415     if (cm->subsampling_x == 1 && cm->subsampling_y == 1) {
4416       cm->chroma_sample_position = aom_rb_read_literal(rb, 2);
4417     }
4418 #endif
4419   } else {
4420     if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
4421       // Note if colorspace is SRGB then 4:4:4 chroma sampling is assumed.
4422       // 4:2:2 or 4:4:0 chroma sampling is not allowed.
4423       cm->subsampling_y = cm->subsampling_x = 0;
4424       if (aom_rb_read_bit(rb))
4425         aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
4426                            "Reserved bit set");
4427     } else {
4428       aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
4429                          "4:4:4 color not supported in profile 0 or 2");
4430     }
4431   }
4432 }
4433 
4434 #if CONFIG_REFERENCE_BUFFER
read_sequence_header(SequenceHeader * seq_params,struct aom_read_bit_buffer * rb)4435 void read_sequence_header(SequenceHeader *seq_params,
4436                           struct aom_read_bit_buffer *rb) {
4437   /* Placeholder for actually reading from the bitstream */
4438   seq_params->frame_id_numbers_present_flag = aom_rb_read_bit(rb);
4439   if (seq_params->frame_id_numbers_present_flag) {
4440     seq_params->frame_id_length_minus7 = aom_rb_read_literal(rb, 4);
4441     seq_params->delta_frame_id_length_minus2 = aom_rb_read_literal(rb, 4);
4442   }
4443 }
4444 #endif  // CONFIG_REFERENCE_BUFFER
4445 
read_compound_tools(AV1_COMMON * cm,struct aom_read_bit_buffer * rb)4446 static void read_compound_tools(AV1_COMMON *cm,
4447                                 struct aom_read_bit_buffer *rb) {
4448   (void)cm;
4449   (void)rb;
4450 #if CONFIG_INTERINTRA
4451   if (!frame_is_intra_only(cm) && cm->reference_mode != COMPOUND_REFERENCE) {
4452     cm->allow_interintra_compound = aom_rb_read_bit(rb);
4453   } else {
4454     cm->allow_interintra_compound = 0;
4455   }
4456 #endif  // CONFIG_INTERINTRA
4457 #if CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT
4458 #if CONFIG_COMPOUND_SINGLEREF
4459   if (!frame_is_intra_only(cm)) {
4460 #else   // !CONFIG_COMPOUND_SINGLEREF
4461   if (!frame_is_intra_only(cm) && cm->reference_mode != SINGLE_REFERENCE) {
4462 #endif  // CONFIG_COMPOUND_SINGLEREF
4463     cm->allow_masked_compound = aom_rb_read_bit(rb);
4464   } else {
4465     cm->allow_masked_compound = 0;
4466   }
4467 #endif  // CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT
4468 }
4469 
4470 #if CONFIG_VAR_REFS
4471 static void check_valid_ref_frames(AV1_COMMON *cm) {
4472   MV_REFERENCE_FRAME ref_frame;
4473   // TODO(zoeliu): To handle ALTREF_FRAME the same way as do with other
4474   //               reference frames: Current encoder invalid ALTREF when ALTREF
4475   //               is the same as LAST, but invalid all the other references
4476   //               when they are the same as ALTREF.
4477   for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
4478     RefBuffer *const ref_buf = &cm->frame_refs[ref_frame - LAST_FRAME];
4479 
4480     if (ref_buf->idx != INVALID_IDX) {
4481       ref_buf->is_valid = 1;
4482 
4483       MV_REFERENCE_FRAME ref;
4484       for (ref = LAST_FRAME; ref < ref_frame; ++ref) {
4485         RefBuffer *const buf = &cm->frame_refs[ref - LAST_FRAME];
4486         if (buf->is_valid && buf->idx == ref_buf->idx) {
4487           if (ref_frame != ALTREF_FRAME || ref == LAST_FRAME) {
4488             ref_buf->is_valid = 0;
4489             break;
4490           } else {
4491             buf->is_valid = 0;
4492           }
4493         }
4494       }
4495     } else {
4496       ref_buf->is_valid = 0;
4497     }
4498   }
4499 }
4500 #endif  // CONFIG_VAR_REFS
4501 
4502 #if CONFIG_GLOBAL_MOTION
4503 static int read_global_motion_params(WarpedMotionParams *params,
4504                                      const WarpedMotionParams *ref_params,
4505                                      struct aom_read_bit_buffer *rb,
4506                                      int allow_hp) {
4507   TransformationType type = aom_rb_read_bit(rb);
4508   if (type != IDENTITY) {
4509 #if GLOBAL_TRANS_TYPES > 4
4510     type += aom_rb_read_literal(rb, GLOBAL_TYPE_BITS);
4511 #else
4512     if (aom_rb_read_bit(rb))
4513       type = ROTZOOM;
4514     else
4515       type = aom_rb_read_bit(rb) ? TRANSLATION : AFFINE;
4516 #endif  // GLOBAL_TRANS_TYPES > 4
4517   }
4518 
4519   int trans_bits;
4520   int trans_dec_factor;
4521   int trans_prec_diff;
4522   *params = default_warp_params;
4523   params->wmtype = type;
4524   switch (type) {
4525     case HOMOGRAPHY:
4526     case HORTRAPEZOID:
4527     case VERTRAPEZOID:
4528       if (type != HORTRAPEZOID)
4529         params->wmmat[6] =
4530             aom_rb_read_signed_primitive_refsubexpfin(
4531                 rb, GM_ROW3HOMO_MAX + 1, SUBEXPFIN_K,
4532                 (ref_params->wmmat[6] >> GM_ROW3HOMO_PREC_DIFF)) *
4533             GM_ROW3HOMO_DECODE_FACTOR;
4534       if (type != VERTRAPEZOID)
4535         params->wmmat[7] =
4536             aom_rb_read_signed_primitive_refsubexpfin(
4537                 rb, GM_ROW3HOMO_MAX + 1, SUBEXPFIN_K,
4538                 (ref_params->wmmat[7] >> GM_ROW3HOMO_PREC_DIFF)) *
4539             GM_ROW3HOMO_DECODE_FACTOR;
4540     case AFFINE:
4541     case ROTZOOM:
4542       params->wmmat[2] = aom_rb_read_signed_primitive_refsubexpfin(
4543                              rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
4544                              (ref_params->wmmat[2] >> GM_ALPHA_PREC_DIFF) -
4545                                  (1 << GM_ALPHA_PREC_BITS)) *
4546                              GM_ALPHA_DECODE_FACTOR +
4547                          (1 << WARPEDMODEL_PREC_BITS);
4548       if (type != VERTRAPEZOID)
4549         params->wmmat[3] = aom_rb_read_signed_primitive_refsubexpfin(
4550                                rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
4551                                (ref_params->wmmat[3] >> GM_ALPHA_PREC_DIFF)) *
4552                            GM_ALPHA_DECODE_FACTOR;
4553       if (type >= AFFINE) {
4554         if (type != HORTRAPEZOID)
4555           params->wmmat[4] = aom_rb_read_signed_primitive_refsubexpfin(
4556                                  rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
4557                                  (ref_params->wmmat[4] >> GM_ALPHA_PREC_DIFF)) *
4558                              GM_ALPHA_DECODE_FACTOR;
4559         params->wmmat[5] = aom_rb_read_signed_primitive_refsubexpfin(
4560                                rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
4561                                (ref_params->wmmat[5] >> GM_ALPHA_PREC_DIFF) -
4562                                    (1 << GM_ALPHA_PREC_BITS)) *
4563                                GM_ALPHA_DECODE_FACTOR +
4564                            (1 << WARPEDMODEL_PREC_BITS);
4565       } else {
4566         params->wmmat[4] = -params->wmmat[3];
4567         params->wmmat[5] = params->wmmat[2];
4568       }
4569     // fallthrough intended
4570     case TRANSLATION:
4571       trans_bits = (type == TRANSLATION) ? GM_ABS_TRANS_ONLY_BITS - !allow_hp
4572                                          : GM_ABS_TRANS_BITS;
4573       trans_dec_factor = (type == TRANSLATION)
4574                              ? GM_TRANS_ONLY_DECODE_FACTOR * (1 << !allow_hp)
4575                              : GM_TRANS_DECODE_FACTOR;
4576       trans_prec_diff = (type == TRANSLATION)
4577                             ? GM_TRANS_ONLY_PREC_DIFF + !allow_hp
4578                             : GM_TRANS_PREC_DIFF;
4579       params->wmmat[0] = aom_rb_read_signed_primitive_refsubexpfin(
4580                              rb, (1 << trans_bits) + 1, SUBEXPFIN_K,
4581                              (ref_params->wmmat[0] >> trans_prec_diff)) *
4582                          trans_dec_factor;
4583       params->wmmat[1] = aom_rb_read_signed_primitive_refsubexpfin(
4584                              rb, (1 << trans_bits) + 1, SUBEXPFIN_K,
4585                              (ref_params->wmmat[1] >> trans_prec_diff)) *
4586                          trans_dec_factor;
4587     case IDENTITY: break;
4588     default: assert(0);
4589   }
4590   if (params->wmtype <= AFFINE) {
4591     int good_shear_params = get_shear_params(params);
4592     if (!good_shear_params) return 0;
4593   }
4594 
4595   return 1;
4596 }
4597 
4598 static void read_global_motion(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
4599   int frame;
4600   for (frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
4601     const WarpedMotionParams *ref_params =
4602         cm->error_resilient_mode ? &default_warp_params
4603                                  : &cm->prev_frame->global_motion[frame];
4604     int good_params = read_global_motion_params(
4605         &cm->global_motion[frame], ref_params, rb, cm->allow_high_precision_mv);
4606     if (!good_params)
4607       aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
4608                          "Invalid shear parameters for global motion.");
4609 
4610     // TODO(sarahparker, debargha): The logic in the commented out code below
4611     // does not work currently and causes mismatches when resize is on. Fix it
4612     // before turning the optimization back on.
4613     /*
4614     YV12_BUFFER_CONFIG *ref_buf = get_ref_frame(cm, frame);
4615     if (cm->width == ref_buf->y_crop_width &&
4616         cm->height == ref_buf->y_crop_height) {
4617       read_global_motion_params(&cm->global_motion[frame],
4618                                 &cm->prev_frame->global_motion[frame], rb,
4619                                 cm->allow_high_precision_mv);
4620     } else {
4621       cm->global_motion[frame] = default_warp_params;
4622     }
4623     */
4624     /*
4625     printf("Dec Ref %d [%d/%d]: %d %d %d %d\n",
4626            frame, cm->current_video_frame, cm->show_frame,
4627            cm->global_motion[frame].wmmat[0],
4628            cm->global_motion[frame].wmmat[1],
4629            cm->global_motion[frame].wmmat[2],
4630            cm->global_motion[frame].wmmat[3]);
4631            */
4632   }
4633   memcpy(cm->cur_frame->global_motion, cm->global_motion,
4634          TOTAL_REFS_PER_FRAME * sizeof(WarpedMotionParams));
4635 }
4636 #endif  // CONFIG_GLOBAL_MOTION
4637 
4638 static size_t read_uncompressed_header(AV1Decoder *pbi,
4639                                        struct aom_read_bit_buffer *rb) {
4640   AV1_COMMON *const cm = &pbi->common;
4641   MACROBLOCKD *const xd = &pbi->mb;
4642   BufferPool *const pool = cm->buffer_pool;
4643   RefCntBuffer *const frame_bufs = pool->frame_bufs;
4644   int i, mask, ref_index = 0;
4645   size_t sz;
4646 
4647   cm->last_frame_type = cm->frame_type;
4648   cm->last_intra_only = cm->intra_only;
4649 
4650 #if CONFIG_EXT_REFS
4651   // NOTE: By default all coded frames to be used as a reference
4652   cm->is_reference_frame = 1;
4653 #endif  // CONFIG_EXT_REFS
4654 
4655 #if !CONFIG_OBU
4656   if (aom_rb_read_literal(rb, 2) != AOM_FRAME_MARKER)
4657     aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
4658                        "Invalid frame marker");
4659 
4660   cm->profile = av1_read_profile(rb);
4661 
4662   const BITSTREAM_PROFILE MAX_SUPPORTED_PROFILE =
4663       CONFIG_HIGHBITDEPTH ? MAX_PROFILES : PROFILE_2;
4664 
4665   if (cm->profile >= MAX_SUPPORTED_PROFILE)
4666     aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
4667                        "Unsupported bitstream profile");
4668 #endif
4669 
4670 #if CONFIG_EXT_TILE
4671   cm->large_scale_tile = aom_rb_read_literal(rb, 1);
4672 #if CONFIG_REFERENCE_BUFFER
4673   if (cm->large_scale_tile) cm->seq_params.frame_id_numbers_present_flag = 0;
4674 #endif  // CONFIG_REFERENCE_BUFFER
4675 #endif  // CONFIG_EXT_TILE
4676 
4677   cm->show_existing_frame = aom_rb_read_bit(rb);
4678 
4679   if (cm->show_existing_frame) {
4680     // Show an existing frame directly.
4681     const int existing_frame_idx = aom_rb_read_literal(rb, 3);
4682     const int frame_to_show = cm->ref_frame_map[existing_frame_idx];
4683 #if CONFIG_REFERENCE_BUFFER
4684     if (cm->seq_params.frame_id_numbers_present_flag) {
4685       int frame_id_length = cm->seq_params.frame_id_length_minus7 + 7;
4686       int display_frame_id = aom_rb_read_literal(rb, frame_id_length);
4687       /* Compare display_frame_id with ref_frame_id and check valid for
4688        * referencing */
4689       if (display_frame_id != cm->ref_frame_id[existing_frame_idx] ||
4690           cm->valid_for_referencing[existing_frame_idx] == 0)
4691         aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
4692                            "Reference buffer frame ID mismatch");
4693     }
4694 #endif
4695     lock_buffer_pool(pool);
4696     if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
4697       unlock_buffer_pool(pool);
4698       aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
4699                          "Buffer %d does not contain a decoded frame",
4700                          frame_to_show);
4701     }
4702     ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
4703     unlock_buffer_pool(pool);
4704 
4705 #if CONFIG_LOOPFILTER_LEVEL
4706     cm->lf.filter_level[0] = 0;
4707     cm->lf.filter_level[1] = 0;
4708 #else
4709     cm->lf.filter_level = 0;
4710 #endif
4711     cm->show_frame = 1;
4712     pbi->refresh_frame_flags = 0;
4713 
4714     if (cm->frame_parallel_decode) {
4715       for (i = 0; i < REF_FRAMES; ++i)
4716         cm->next_ref_frame_map[i] = cm->ref_frame_map[i];
4717     }
4718 
4719     return 0;
4720   }
4721 
4722 #if !CONFIG_OBU
4723   cm->frame_type = (FRAME_TYPE)aom_rb_read_bit(rb);
4724   cm->show_frame = aom_rb_read_bit(rb);
4725   if (cm->frame_type != KEY_FRAME)
4726     cm->intra_only = cm->show_frame ? 0 : aom_rb_read_bit(rb);
4727 #else
4728   cm->frame_type = (FRAME_TYPE)aom_rb_read_literal(rb, 2);  // 2 bits
4729   cm->show_frame = aom_rb_read_bit(rb);
4730   cm->intra_only = cm->frame_type == INTRA_ONLY_FRAME;
4731 #endif
4732   cm->error_resilient_mode = aom_rb_read_bit(rb);
4733 #if CONFIG_REFERENCE_BUFFER
4734 #if !CONFIG_OBU
4735   if (frame_is_intra_only(cm)) read_sequence_header(&cm->seq_params, rb);
4736 #endif  // !CONFIG_OBU
4737   if (cm->seq_params.frame_id_numbers_present_flag) {
4738     int frame_id_length = cm->seq_params.frame_id_length_minus7 + 7;
4739     int diff_len = cm->seq_params.delta_frame_id_length_minus2 + 2;
4740     int prev_frame_id = 0;
4741     if (cm->frame_type != KEY_FRAME) {
4742       prev_frame_id = cm->current_frame_id;
4743     }
4744     cm->current_frame_id = aom_rb_read_literal(rb, frame_id_length);
4745 
4746     if (cm->frame_type != KEY_FRAME) {
4747       int diff_frame_id;
4748       if (cm->current_frame_id > prev_frame_id) {
4749         diff_frame_id = cm->current_frame_id - prev_frame_id;
4750       } else {
4751         diff_frame_id =
4752             (1 << frame_id_length) + cm->current_frame_id - prev_frame_id;
4753       }
4754       /* Check current_frame_id for conformance */
4755       if (prev_frame_id == cm->current_frame_id ||
4756           diff_frame_id >= (1 << (frame_id_length - 1))) {
4757         aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
4758                            "Invalid value of current_frame_id");
4759       }
4760     }
4761     /* Check if some frames need to be marked as not valid for referencing */
4762     for (i = 0; i < REF_FRAMES; i++) {
4763       if (cm->frame_type == KEY_FRAME) {
4764         cm->valid_for_referencing[i] = 0;
4765       } else if (cm->current_frame_id - (1 << diff_len) > 0) {
4766         if (cm->ref_frame_id[i] > cm->current_frame_id ||
4767             cm->ref_frame_id[i] < cm->current_frame_id - (1 << diff_len))
4768           cm->valid_for_referencing[i] = 0;
4769       } else {
4770         if (cm->ref_frame_id[i] > cm->current_frame_id &&
4771             cm->ref_frame_id[i] <
4772                 (1 << frame_id_length) + cm->current_frame_id - (1 << diff_len))
4773           cm->valid_for_referencing[i] = 0;
4774       }
4775     }
4776   }
4777 #endif  // CONFIG_REFERENCE_BUFFER
4778   if (cm->frame_type == KEY_FRAME) {
4779 #if !CONFIG_OBU
4780     read_bitdepth_colorspace_sampling(cm, rb, pbi->allow_lowbitdepth);
4781 #endif
4782     pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1;
4783 
4784     for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
4785       cm->frame_refs[i].idx = INVALID_IDX;
4786       cm->frame_refs[i].buf = NULL;
4787 #if CONFIG_VAR_REFS
4788       cm->frame_refs[i].is_valid = 0;
4789 #endif  // CONFIG_VAR_REFS
4790     }
4791 
4792     setup_frame_size(cm, rb);
4793     setup_sb_size(cm, rb);
4794 
4795     if (pbi->need_resync) {
4796       memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
4797       pbi->need_resync = 0;
4798     }
4799 #if CONFIG_ANS && ANS_MAX_SYMBOLS
4800     cm->ans_window_size_log2 = aom_rb_read_literal(rb, 4) + 8;
4801 #endif  // CONFIG_ANS && ANS_MAX_SYMBOLS
4802     cm->allow_screen_content_tools = aom_rb_read_bit(rb);
4803 #if CONFIG_AMVR
4804     if (cm->allow_screen_content_tools) {
4805       if (aom_rb_read_bit(rb)) {
4806         cm->seq_mv_precision_level = 2;
4807       } else {
4808         cm->seq_mv_precision_level = aom_rb_read_bit(rb) ? 0 : 1;
4809       }
4810     } else {
4811       cm->seq_mv_precision_level = 0;
4812     }
4813 #endif
4814 #if CONFIG_TEMPMV_SIGNALING
4815     cm->use_prev_frame_mvs = 0;
4816 #endif
4817   } else {
4818     if (cm->intra_only) cm->allow_screen_content_tools = aom_rb_read_bit(rb);
4819 #if CONFIG_TEMPMV_SIGNALING
4820     if (cm->intra_only || cm->error_resilient_mode) cm->use_prev_frame_mvs = 0;
4821 #endif
4822 #if CONFIG_NO_FRAME_CONTEXT_SIGNALING
4823 // The only way to reset all frame contexts to their default values is with a
4824 // keyframe.
4825 #else
4826     if (cm->error_resilient_mode) {
4827       cm->reset_frame_context = RESET_FRAME_CONTEXT_ALL;
4828     } else {
4829       if (cm->intra_only) {
4830         cm->reset_frame_context = aom_rb_read_bit(rb)
4831                                       ? RESET_FRAME_CONTEXT_ALL
4832                                       : RESET_FRAME_CONTEXT_CURRENT;
4833       } else {
4834         cm->reset_frame_context = aom_rb_read_bit(rb)
4835                                       ? RESET_FRAME_CONTEXT_CURRENT
4836                                       : RESET_FRAME_CONTEXT_NONE;
4837         if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT)
4838           cm->reset_frame_context = aom_rb_read_bit(rb)
4839                                         ? RESET_FRAME_CONTEXT_ALL
4840                                         : RESET_FRAME_CONTEXT_CURRENT;
4841       }
4842     }
4843 #endif
4844 
4845     if (cm->intra_only) {
4846 #if !CONFIG_OBU
4847       read_bitdepth_colorspace_sampling(cm, rb, pbi->allow_lowbitdepth);
4848 #endif
4849 
4850       pbi->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
4851       setup_frame_size(cm, rb);
4852       setup_sb_size(cm, rb);
4853       if (pbi->need_resync) {
4854         memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
4855         pbi->need_resync = 0;
4856       }
4857 #if CONFIG_ANS && ANS_MAX_SYMBOLS
4858       cm->ans_window_size_log2 = aom_rb_read_literal(rb, 4) + 8;
4859 #endif
4860     } else if (pbi->need_resync != 1) { /* Skip if need resync */
4861 #if CONFIG_OBU
4862       pbi->refresh_frame_flags = (cm->frame_type == S_FRAME)
4863                                      ? ~(1 << REF_FRAMES)
4864                                      : aom_rb_read_literal(rb, REF_FRAMES);
4865 #else
4866       pbi->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
4867 #endif
4868 
4869 #if CONFIG_EXT_REFS
4870       if (!pbi->refresh_frame_flags) {
4871         // NOTE: "pbi->refresh_frame_flags == 0" indicates that the coded frame
4872         //       will not be used as a reference
4873         cm->is_reference_frame = 0;
4874       }
4875 #endif  // CONFIG_EXT_REFS
4876 
4877       for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
4878         const int ref = aom_rb_read_literal(rb, REF_FRAMES_LOG2);
4879         const int idx = cm->ref_frame_map[ref];
4880 
4881         // Most of the time, streams start with a keyframe. In that case,
4882         // ref_frame_map will have been filled in at that point and will not
4883         // contain any -1's. However, streams are explicitly allowed to start
4884         // with an intra-only frame, so long as they don't then signal a
4885         // reference to a slot that hasn't been set yet. That's what we are
4886         // checking here.
4887         if (idx == -1)
4888           aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
4889                              "Inter frame requests nonexistent reference");
4890 
4891         RefBuffer *const ref_frame = &cm->frame_refs[i];
4892         ref_frame->idx = idx;
4893         ref_frame->buf = &frame_bufs[idx].buf;
4894 #if CONFIG_FRAME_SIGN_BIAS
4895 #if CONFIG_OBU
4896         // NOTE: For the scenario of (cm->frame_type != S_FRAME),
4897         // ref_frame_sign_bias will be reset based on frame offsets.
4898         cm->ref_frame_sign_bias[LAST_FRAME + i] = 0;
4899 #endif  // CONFIG_OBU
4900 #else   // !CONFIG_FRAME_SIGN_BIAS
4901 #if CONFIG_OBU
4902         cm->ref_frame_sign_bias[LAST_FRAME + i] =
4903             (cm->frame_type == S_FRAME) ? 0 : aom_rb_read_bit(rb);
4904 #else   // !CONFIG_OBU
4905         cm->ref_frame_sign_bias[LAST_FRAME + i] = aom_rb_read_bit(rb);
4906 #endif  // CONFIG_OBU
4907 #endif  // CONFIG_FRAME_SIGN_BIAS
4908 #if CONFIG_REFERENCE_BUFFER
4909         if (cm->seq_params.frame_id_numbers_present_flag) {
4910           int frame_id_length = cm->seq_params.frame_id_length_minus7 + 7;
4911           int diff_len = cm->seq_params.delta_frame_id_length_minus2 + 2;
4912           int delta_frame_id_minus1 = aom_rb_read_literal(rb, diff_len);
4913           int ref_frame_id =
4914               ((cm->current_frame_id - (delta_frame_id_minus1 + 1) +
4915                 (1 << frame_id_length)) %
4916                (1 << frame_id_length));
4917           /* Compare values derived from delta_frame_id_minus1 and
4918            * refresh_frame_flags. Also, check valid for referencing */
4919           if (ref_frame_id != cm->ref_frame_id[ref] ||
4920               cm->valid_for_referencing[ref] == 0)
4921             aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
4922                                "Reference buffer frame ID mismatch");
4923         }
4924 #endif  // CONFIG_REFERENCE_BUFFER
4925       }
4926 
4927 #if CONFIG_VAR_REFS
4928       check_valid_ref_frames(cm);
4929 #endif  // CONFIG_VAR_REFS
4930 
4931 #if CONFIG_FRAME_SIZE
4932       if (cm->error_resilient_mode == 0) {
4933         setup_frame_size_with_refs(cm, rb);
4934       } else {
4935         setup_frame_size(cm, rb);
4936       }
4937 #else
4938       setup_frame_size_with_refs(cm, rb);
4939 #endif
4940 
4941 #if CONFIG_AMVR
4942       if (cm->seq_mv_precision_level == 2) {
4943         cm->cur_frame_mv_precision_level = aom_rb_read_bit(rb) ? 0 : 1;
4944       } else {
4945         cm->cur_frame_mv_precision_level = cm->seq_mv_precision_level;
4946       }
4947 #endif
4948       cm->allow_high_precision_mv = aom_rb_read_bit(rb);
4949       cm->interp_filter = read_frame_interp_filter(rb);
4950 #if CONFIG_TEMPMV_SIGNALING
4951       if (frame_might_use_prev_frame_mvs(cm))
4952         cm->use_prev_frame_mvs = aom_rb_read_bit(rb);
4953       else
4954         cm->use_prev_frame_mvs = 0;
4955 #endif
4956       for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
4957         RefBuffer *const ref_buf = &cm->frame_refs[i];
4958 #if CONFIG_HIGHBITDEPTH
4959         av1_setup_scale_factors_for_frame(
4960             &ref_buf->sf, ref_buf->buf->y_crop_width,
4961             ref_buf->buf->y_crop_height, cm->width, cm->height,
4962             cm->use_highbitdepth);
4963 #else
4964         av1_setup_scale_factors_for_frame(
4965             &ref_buf->sf, ref_buf->buf->y_crop_width,
4966             ref_buf->buf->y_crop_height, cm->width, cm->height);
4967 #endif
4968       }
4969     }
4970   }
4971 
4972 #if CONFIG_FRAME_MARKER
4973   if (cm->show_frame == 0) {
4974     cm->frame_offset = cm->current_video_frame + aom_rb_read_literal(rb, 4);
4975   } else {
4976     cm->frame_offset = cm->current_video_frame;
4977   }
4978   av1_setup_frame_buf_refs(cm);
4979 
4980 #if CONFIG_FRAME_SIGN_BIAS
4981 #if CONFIG_OBU
4982   if (cm->frame_type != S_FRAME)
4983 #endif  // CONFIG_OBU
4984     av1_setup_frame_sign_bias(cm);
4985 #define FRAME_SIGN_BIAS_DEBUG 0
4986 #if FRAME_SIGN_BIAS_DEBUG
4987   {
4988     printf("\n\nDECODER: Frame=%d, show_frame=%d:", cm->current_video_frame,
4989            cm->show_frame);
4990     MV_REFERENCE_FRAME ref_frame;
4991     for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
4992       printf(" sign_bias[%d]=%d", ref_frame,
4993              cm->ref_frame_sign_bias[ref_frame]);
4994     }
4995     printf("\n");
4996   }
4997 #endif  // FRAME_SIGN_BIAS_DEBUG
4998 #undef FRAME_SIGN_BIAS_DEBUG
4999 #endif  // CONFIG_FRAME_SIGN_BIAS
5000 #endif  // CONFIG_FRAME_MARKER
5001 
5002 #if CONFIG_TEMPMV_SIGNALING
5003   cm->cur_frame->intra_only = cm->frame_type == KEY_FRAME || cm->intra_only;
5004 #endif
5005 
5006 #if CONFIG_REFERENCE_BUFFER
5007   if (cm->seq_params.frame_id_numbers_present_flag) {
5008     /* If bitmask is set, update reference frame id values and
5009        mark frames as valid for reference */
5010     int refresh_frame_flags =
5011         cm->frame_type == KEY_FRAME ? 0xFF : pbi->refresh_frame_flags;
5012     for (i = 0; i < REF_FRAMES; i++) {
5013       if ((refresh_frame_flags >> i) & 1) {
5014         cm->ref_frame_id[i] = cm->current_frame_id;
5015         cm->valid_for_referencing[i] = 1;
5016       }
5017     }
5018   }
5019 #endif  // CONFIG_REFERENCE_BUFFER
5020 
5021   get_frame_new_buffer(cm)->bit_depth = cm->bit_depth;
5022   get_frame_new_buffer(cm)->color_space = cm->color_space;
5023 #if CONFIG_COLORSPACE_HEADERS
5024   get_frame_new_buffer(cm)->transfer_function = cm->transfer_function;
5025   get_frame_new_buffer(cm)->chroma_sample_position = cm->chroma_sample_position;
5026 #endif
5027   get_frame_new_buffer(cm)->color_range = cm->color_range;
5028   get_frame_new_buffer(cm)->render_width = cm->render_width;
5029   get_frame_new_buffer(cm)->render_height = cm->render_height;
5030 
5031   if (pbi->need_resync) {
5032     aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
5033                        "Keyframe / intra-only frame required to reset decoder"
5034                        " state");
5035   }
5036 
5037   if (!cm->error_resilient_mode) {
5038     cm->refresh_frame_context = aom_rb_read_bit(rb)
5039                                     ? REFRESH_FRAME_CONTEXT_FORWARD
5040                                     : REFRESH_FRAME_CONTEXT_BACKWARD;
5041   } else {
5042     cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_FORWARD;
5043   }
5044 #if !CONFIG_NO_FRAME_CONTEXT_SIGNALING
5045   // This flag will be overridden by the call to av1_setup_past_independence
5046   // below, forcing the use of context 0 for those frame types.
5047   cm->frame_context_idx = aom_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
5048 #endif
5049 
5050   // Generate next_ref_frame_map.
5051   lock_buffer_pool(pool);
5052   for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
5053     if (mask & 1) {
5054       cm->next_ref_frame_map[ref_index] = cm->new_fb_idx;
5055       ++frame_bufs[cm->new_fb_idx].ref_count;
5056     } else {
5057       cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
5058     }
5059     // Current thread holds the reference frame.
5060     if (cm->ref_frame_map[ref_index] >= 0)
5061       ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
5062     ++ref_index;
5063   }
5064 
5065   for (; ref_index < REF_FRAMES; ++ref_index) {
5066     cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
5067 
5068     // Current thread holds the reference frame.
5069     if (cm->ref_frame_map[ref_index] >= 0)
5070       ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
5071   }
5072   unlock_buffer_pool(pool);
5073   pbi->hold_ref_buf = 1;
5074 
5075   if (frame_is_intra_only(cm) || cm->error_resilient_mode)
5076     av1_setup_past_independence(cm);
5077 
5078   setup_loopfilter(cm, rb);
5079   setup_quantization(cm, rb);
5080   xd->bd = (int)cm->bit_depth;
5081 
5082 #if CONFIG_Q_ADAPT_PROBS
5083   av1_default_coef_probs(cm);
5084   if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode ||
5085       cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL) {
5086     for (i = 0; i < FRAME_CONTEXTS; ++i) cm->frame_contexts[i] = *cm->fc;
5087   } else if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT) {
5088 #if CONFIG_NO_FRAME_CONTEXT_SIGNALING
5089     if (cm->frame_refs[0].idx <= 0) {
5090       cm->frame_contexts[cm->frame_refs[0].idx] = *cm->fc;
5091     }
5092 #else
5093     cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
5094 #endif  // CONFIG_NO_FRAME_CONTEXT_SIGNALING
5095   }
5096 #endif  // CONFIG_Q_ADAPT_PROBS
5097 
5098   setup_segmentation(cm, rb);
5099 
5100   {
5101     struct segmentation *const seg = &cm->seg;
5102     int segment_quantizer_active = 0;
5103     for (i = 0; i < MAX_SEGMENTS; i++) {
5104       if (segfeature_active(seg, i, SEG_LVL_ALT_Q)) {
5105         segment_quantizer_active = 1;
5106       }
5107     }
5108 
5109     cm->delta_q_res = 1;
5110 #if CONFIG_EXT_DELTA_Q
5111     cm->delta_lf_res = 1;
5112     cm->delta_lf_present_flag = 0;
5113 #if CONFIG_LOOPFILTER_LEVEL
5114     cm->delta_lf_multi = 0;
5115 #endif  // CONFIG_LOOPFILTER_LEVEL
5116 #endif
5117     if (segment_quantizer_active == 0 && cm->base_qindex > 0) {
5118       cm->delta_q_present_flag = aom_rb_read_bit(rb);
5119     } else {
5120       cm->delta_q_present_flag = 0;
5121     }
5122     if (cm->delta_q_present_flag) {
5123       xd->prev_qindex = cm->base_qindex;
5124       cm->delta_q_res = 1 << aom_rb_read_literal(rb, 2);
5125 #if CONFIG_EXT_DELTA_Q
5126       assert(!segment_quantizer_active);
5127       cm->delta_lf_present_flag = aom_rb_read_bit(rb);
5128       if (cm->delta_lf_present_flag) {
5129         xd->prev_delta_lf_from_base = 0;
5130         cm->delta_lf_res = 1 << aom_rb_read_literal(rb, 2);
5131 #if CONFIG_LOOPFILTER_LEVEL
5132         cm->delta_lf_multi = aom_rb_read_bit(rb);
5133         for (int lf_id = 0; lf_id < FRAME_LF_COUNT; ++lf_id)
5134           xd->prev_delta_lf[lf_id] = 0;
5135 #endif  // CONFIG_LOOPFILTER_LEVEL
5136       }
5137 #endif  // CONFIG_EXT_DELTA_Q
5138     }
5139   }
5140 #if CONFIG_AMVR
5141   xd->cur_frame_mv_precision_level = cm->cur_frame_mv_precision_level;
5142 #endif
5143 
5144   for (i = 0; i < MAX_SEGMENTS; ++i) {
5145     const int qindex = cm->seg.enabled
5146                            ? av1_get_qindex(&cm->seg, i, cm->base_qindex)
5147                            : cm->base_qindex;
5148     xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
5149                       cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
5150     xd->qindex[i] = qindex;
5151   }
5152   cm->all_lossless = all_lossless(cm, xd);
5153   setup_segmentation_dequant(cm);
5154 #if CONFIG_CDEF
5155   if (!cm->all_lossless) {
5156     setup_cdef(cm, rb);
5157   }
5158 #endif
5159 #if CONFIG_LOOP_RESTORATION
5160   decode_restoration_mode(cm, rb);
5161 #endif  // CONFIG_LOOP_RESTORATION
5162   cm->tx_mode = read_tx_mode(cm, rb);
5163   cm->reference_mode = read_frame_reference_mode(cm, rb);
5164   if (cm->reference_mode != SINGLE_REFERENCE) setup_compound_reference_mode(cm);
5165   read_compound_tools(cm, rb);
5166 
5167 #if CONFIG_EXT_TX
5168   cm->reduced_tx_set_used = aom_rb_read_bit(rb);
5169 #endif  // CONFIG_EXT_TX
5170 
5171 #if CONFIG_ADAPT_SCAN
5172   cm->use_adapt_scan = aom_rb_read_bit(rb);
5173   // TODO(angiebird): call av1_init_scan_order only when use_adapt_scan
5174   // switches from 1 to 0
5175   if (cm->use_adapt_scan == 0) av1_init_scan_order(cm);
5176 #endif  // CONFIG_ADAPT_SCAN
5177 
5178 #if CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING
5179   // NOTE(zoeliu): As cm->prev_frame can take neither a frame of
5180   //               show_exisiting_frame=1, nor can it take a frame not used as
5181   //               a reference, it is probable that by the time it is being
5182   //               referred to, the frame buffer it originally points to may
5183   //               already get expired and have been reassigned to the current
5184   //               newly coded frame. Hence, we need to check whether this is
5185   //               the case, and if yes, we have 2 choices:
5186   //               (1) Simply disable the use of previous frame mvs; or
5187   //               (2) Have cm->prev_frame point to one reference frame buffer,
5188   //                   e.g. LAST_FRAME.
5189   if (!dec_is_ref_frame_buf(pbi, cm->prev_frame)) {
5190     // Reassign the LAST_FRAME buffer to cm->prev_frame.
5191     cm->prev_frame =
5192         cm->frame_refs[LAST_FRAME - LAST_FRAME].idx != INVALID_IDX
5193             ? &cm->buffer_pool
5194                    ->frame_bufs[cm->frame_refs[LAST_FRAME - LAST_FRAME].idx]
5195             : NULL;
5196   }
5197 #endif  // CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING
5198 
5199 #if CONFIG_TEMPMV_SIGNALING
5200   if (cm->use_prev_frame_mvs && !frame_can_use_prev_frame_mvs(cm)) {
5201     aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
5202                        "Frame wrongly requests previous frame MVs");
5203   }
5204 #else
5205   cm->use_prev_frame_mvs = !cm->error_resilient_mode && cm->prev_frame &&
5206 #if CONFIG_FRAME_SUPERRES
5207                            cm->width == cm->last_width &&
5208                            cm->height == cm->last_height &&
5209 #else
5210                            cm->width == cm->prev_frame->buf.y_crop_width &&
5211                            cm->height == cm->prev_frame->buf.y_crop_height &&
5212 #endif  // CONFIG_FRAME_SUPERRES
5213                            !cm->last_intra_only && cm->last_show_frame &&
5214                            (cm->last_frame_type != KEY_FRAME);
5215 #endif  // CONFIG_TEMPMV_SIGNALING
5216 
5217 #if CONFIG_GLOBAL_MOTION
5218   if (!frame_is_intra_only(cm)) read_global_motion(cm, rb);
5219 #endif
5220 
5221   read_tile_info(pbi, rb);
5222   if (use_compressed_header(cm)) {
5223     sz = aom_rb_read_literal(rb, 16);
5224     if (sz == 0)
5225       aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
5226                          "Invalid header size");
5227   } else {
5228     sz = 0;
5229   }
5230   return sz;
5231 }
5232 
5233 #if CONFIG_SUPERTX
5234 static void read_supertx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
5235   int i, j;
5236   if (aom_read(r, GROUP_DIFF_UPDATE_PROB, ACCT_STR)) {
5237     for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
5238       for (j = TX_8X8; j < TX_SIZES; ++j) {
5239         av1_diff_update_prob(r, &fc->supertx_prob[i][j], ACCT_STR);
5240       }
5241     }
5242   }
5243 }
5244 #endif  // CONFIG_SUPERTX
5245 
5246 static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
5247                                   size_t partition_size) {
5248 #if CONFIG_RESTRICT_COMPRESSED_HDR
5249   (void)pbi;
5250   (void)data;
5251   (void)partition_size;
5252   return 0;
5253 #else
5254   AV1_COMMON *const cm = &pbi->common;
5255 #if CONFIG_SUPERTX
5256   MACROBLOCKD *const xd = &pbi->mb;
5257 #endif
5258   aom_reader r;
5259 #if !CONFIG_NEW_MULTISYMBOL
5260   FRAME_CONTEXT *const fc = cm->fc;
5261   int i;
5262 #endif
5263 
5264 #if CONFIG_ANS && ANS_MAX_SYMBOLS
5265   r.window_size = 1 << cm->ans_window_size_log2;
5266 #endif
5267   if (aom_reader_init(&r, data, partition_size, pbi->decrypt_cb,
5268                       pbi->decrypt_state))
5269     aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
5270                        "Failed to allocate bool decoder 0");
5271 
5272 #if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
5273   if (cm->tx_mode == TX_MODE_SELECT)
5274     av1_diff_update_prob(&r, &fc->quarter_tx_size_prob, ACCT_STR);
5275 #endif
5276 
5277 #if CONFIG_LV_MAP && !LV_MAP_PROB
5278   av1_read_txb_probs(fc, cm->tx_mode, &r, &cm->counts);
5279 #endif  // CONFIG_LV_MAP && !LV_MAP_PROB
5280 
5281 #if !CONFIG_NEW_MULTISYMBOL
5282 #if CONFIG_VAR_TX
5283   if (cm->tx_mode == TX_MODE_SELECT)
5284     for (i = 0; i < TXFM_PARTITION_CONTEXTS; ++i)
5285       av1_diff_update_prob(&r, &fc->txfm_partition_prob[i], ACCT_STR);
5286 #endif  // CONFIG_VAR_TX
5287   for (i = 0; i < SKIP_CONTEXTS; ++i)
5288     av1_diff_update_prob(&r, &fc->skip_probs[i], ACCT_STR);
5289 #endif
5290 
5291   if (!frame_is_intra_only(cm)) {
5292 #if !CONFIG_NEW_MULTISYMBOL
5293     read_inter_mode_probs(fc, &r);
5294 #endif
5295 
5296 #if CONFIG_INTERINTRA
5297     if (cm->reference_mode != COMPOUND_REFERENCE &&
5298         cm->allow_interintra_compound) {
5299 #if !CONFIG_NEW_MULTISYMBOL
5300       for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
5301         if (is_interintra_allowed_bsize_group(i)) {
5302           av1_diff_update_prob(&r, &fc->interintra_prob[i], ACCT_STR);
5303         }
5304       }
5305 #endif
5306 #if CONFIG_WEDGE && !CONFIG_NEW_MULTISYMBOL
5307 #if CONFIG_EXT_PARTITION_TYPES
5308       int block_sizes_to_update = BLOCK_SIZES_ALL;
5309 #else
5310       int block_sizes_to_update = BLOCK_SIZES;
5311 #endif
5312       for (i = 0; i < block_sizes_to_update; i++) {
5313         if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i)) {
5314           av1_diff_update_prob(&r, &fc->wedge_interintra_prob[i], ACCT_STR);
5315         }
5316       }
5317 #endif  // CONFIG_WEDGE
5318     }
5319 #endif  // CONFIG_INTERINTRA
5320 
5321 #if !CONFIG_NEW_MULTISYMBOL
5322     for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
5323       av1_diff_update_prob(&r, &fc->intra_inter_prob[i], ACCT_STR);
5324 #endif
5325 
5326 #if !CONFIG_NEW_MULTISYMBOL
5327     read_frame_reference_mode_probs(cm, &r);
5328 #endif
5329 
5330 #if CONFIG_COMPOUND_SINGLEREF
5331     for (i = 0; i < COMP_INTER_MODE_CONTEXTS; i++)
5332       av1_diff_update_prob(&r, &fc->comp_inter_mode_prob[i], ACCT_STR);
5333 #endif  // CONFIG_COMPOUND_SINGLEREF
5334 
5335 #if !CONFIG_NEW_MULTISYMBOL
5336 #if CONFIG_AMVR
5337     if (cm->cur_frame_mv_precision_level == 0) {
5338 #endif
5339       for (i = 0; i < NMV_CONTEXTS; ++i)
5340         read_mv_probs(&fc->nmvc[i], cm->allow_high_precision_mv, &r);
5341 #if CONFIG_AMVR
5342     }
5343 #endif
5344 #endif
5345 #if CONFIG_SUPERTX
5346     if (!xd->lossless[0]) read_supertx_probs(fc, &r);
5347 #endif
5348   }
5349 
5350   return aom_reader_has_error(&r);
5351 #endif  // CONFIG_RESTRICT_COMPRESSED_HDR
5352 }
5353 
5354 #ifdef NDEBUG
5355 #define debug_check_frame_counts(cm) (void)0
5356 #else  // !NDEBUG
5357 // Counts should only be incremented when frame_parallel_decoding_mode and
5358 // error_resilient_mode are disabled.
5359 static void debug_check_frame_counts(const AV1_COMMON *const cm) {
5360   FRAME_COUNTS zero_counts;
5361   av1_zero(zero_counts);
5362   assert(cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD ||
5363          cm->error_resilient_mode);
5364   assert(!memcmp(cm->counts.partition, zero_counts.partition,
5365                  sizeof(cm->counts.partition)));
5366   assert(!memcmp(cm->counts.switchable_interp, zero_counts.switchable_interp,
5367                  sizeof(cm->counts.switchable_interp)));
5368   assert(!memcmp(cm->counts.inter_compound_mode,
5369                  zero_counts.inter_compound_mode,
5370                  sizeof(cm->counts.inter_compound_mode)));
5371 #if CONFIG_INTERINTRA
5372   assert(!memcmp(cm->counts.interintra, zero_counts.interintra,
5373                  sizeof(cm->counts.interintra)));
5374 #if CONFIG_WEDGE
5375   assert(!memcmp(cm->counts.wedge_interintra, zero_counts.wedge_interintra,
5376                  sizeof(cm->counts.wedge_interintra)));
5377 #endif  // CONFIG_WEDGE
5378 #endif  // CONFIG_INTERINTRA
5379   assert(!memcmp(cm->counts.compound_interinter,
5380                  zero_counts.compound_interinter,
5381                  sizeof(cm->counts.compound_interinter)));
5382 #if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
5383   assert(!memcmp(cm->counts.motion_mode, zero_counts.motion_mode,
5384                  sizeof(cm->counts.motion_mode)));
5385 #endif  // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
5386 #if CONFIG_NCOBMC_ADAPT_WEIGHT && CONFIG_MOTION_VAR
5387   assert(!memcmp(cm->counts.ncobmc_mode, zero_counts.ncobmc_mode,
5388                  sizeof(cm->counts.ncobmc_mode)));
5389 #endif
5390   assert(!memcmp(cm->counts.intra_inter, zero_counts.intra_inter,
5391                  sizeof(cm->counts.intra_inter)));
5392 #if CONFIG_COMPOUND_SINGLEREF
5393   assert(!memcmp(cm->counts.comp_inter_mode, zero_counts.comp_inter_mode,
5394                  sizeof(cm->counts.comp_inter_mode)));
5395 #endif  // CONFIG_COMPOUND_SINGLEREF
5396   assert(!memcmp(cm->counts.comp_inter, zero_counts.comp_inter,
5397                  sizeof(cm->counts.comp_inter)));
5398 #if CONFIG_EXT_COMP_REFS
5399   assert(!memcmp(cm->counts.comp_ref_type, zero_counts.comp_ref_type,
5400                  sizeof(cm->counts.comp_ref_type)));
5401   assert(!memcmp(cm->counts.uni_comp_ref, zero_counts.uni_comp_ref,
5402                  sizeof(cm->counts.uni_comp_ref)));
5403 #endif  // CONFIG_EXT_COMP_REFS
5404   assert(!memcmp(cm->counts.single_ref, zero_counts.single_ref,
5405                  sizeof(cm->counts.single_ref)));
5406   assert(!memcmp(cm->counts.comp_ref, zero_counts.comp_ref,
5407                  sizeof(cm->counts.comp_ref)));
5408 #if CONFIG_EXT_REFS
5409   assert(!memcmp(cm->counts.comp_bwdref, zero_counts.comp_bwdref,
5410                  sizeof(cm->counts.comp_bwdref)));
5411 #endif  // CONFIG_EXT_REFS
5412   assert(!memcmp(&cm->counts.tx_size, &zero_counts.tx_size,
5413                  sizeof(cm->counts.tx_size)));
5414   assert(!memcmp(cm->counts.skip, zero_counts.skip, sizeof(cm->counts.skip)));
5415   assert(
5416       !memcmp(&cm->counts.mv[0], &zero_counts.mv[0], sizeof(cm->counts.mv[0])));
5417   assert(
5418       !memcmp(&cm->counts.mv[1], &zero_counts.mv[1], sizeof(cm->counts.mv[0])));
5419 }
5420 #endif  // NDEBUG
5421 
5422 static struct aom_read_bit_buffer *init_read_bit_buffer(
5423     AV1Decoder *pbi, struct aom_read_bit_buffer *rb, const uint8_t *data,
5424     const uint8_t *data_end, uint8_t clear_data[MAX_AV1_HEADER_SIZE]) {
5425   rb->bit_offset = 0;
5426   rb->error_handler = error_handler;
5427   rb->error_handler_data = &pbi->common;
5428   if (pbi->decrypt_cb) {
5429     const int n = (int)AOMMIN(MAX_AV1_HEADER_SIZE, data_end - data);
5430     pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n);
5431     rb->bit_buffer = clear_data;
5432     rb->bit_buffer_end = clear_data + n;
5433   } else {
5434     rb->bit_buffer = data;
5435     rb->bit_buffer_end = data_end;
5436   }
5437   return rb;
5438 }
5439 
5440 //------------------------------------------------------------------------------
5441 
5442 void av1_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
5443                          int *height) {
5444   *width = aom_rb_read_literal(rb, 16) + 1;
5445   *height = aom_rb_read_literal(rb, 16) + 1;
5446 }
5447 
5448 BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb) {
5449   int profile = aom_rb_read_bit(rb);
5450   profile |= aom_rb_read_bit(rb) << 1;
5451   if (profile > 2) profile += aom_rb_read_bit(rb);
5452   return (BITSTREAM_PROFILE)profile;
5453 }
5454 
5455 static void make_update_tile_list_dec(AV1Decoder *pbi, int tile_rows,
5456                                       int tile_cols, FRAME_CONTEXT *ec_ctxs[]) {
5457   int i;
5458   for (i = 0; i < tile_rows * tile_cols; ++i)
5459     ec_ctxs[i] = &pbi->tile_data[i].tctx;
5460 }
5461 
5462 #if CONFIG_FRAME_SUPERRES
5463 void superres_post_decode(AV1Decoder *pbi) {
5464   AV1_COMMON *const cm = &pbi->common;
5465   BufferPool *const pool = cm->buffer_pool;
5466 
5467   if (av1_superres_unscaled(cm)) return;
5468 
5469   lock_buffer_pool(pool);
5470   av1_superres_upscale(cm, pool);
5471   unlock_buffer_pool(pool);
5472 }
5473 #endif  // CONFIG_FRAME_SUPERRES
5474 
5475 static void dec_setup_frame_boundary_info(AV1_COMMON *const cm) {
5476 // Note: When LOOPFILTERING_ACROSS_TILES is enabled, we need to clear the
5477 // boundary information every frame, since the tile boundaries may
5478 // change every frame (particularly when dependent-horztiles is also
5479 // enabled); when it is disabled, the only information stored is the frame
5480 // boundaries, which only depend on the frame size.
5481 #if !CONFIG_LOOPFILTERING_ACROSS_TILES
5482   if (cm->width != cm->last_width || cm->height != cm->last_height)
5483 #endif  // CONFIG_LOOPFILTERING_ACROSS_TILES
5484   {
5485     int row, col;
5486     for (row = 0; row < cm->mi_rows; ++row) {
5487       MODE_INFO *mi = cm->mi + row * cm->mi_stride;
5488       for (col = 0; col < cm->mi_cols; ++col) {
5489         mi->mbmi.boundary_info = 0;
5490         mi++;
5491       }
5492     }
5493     av1_setup_frame_boundary_info(cm);
5494   }
5495 }
5496 
5497 size_t av1_decode_frame_headers_and_setup(AV1Decoder *pbi, const uint8_t *data,
5498                                           const uint8_t *data_end,
5499                                           const uint8_t **p_data_end) {
5500   AV1_COMMON *const cm = &pbi->common;
5501   MACROBLOCKD *const xd = &pbi->mb;
5502   struct aom_read_bit_buffer rb;
5503   uint8_t clear_data[MAX_AV1_HEADER_SIZE];
5504   size_t first_partition_size;
5505   YV12_BUFFER_CONFIG *new_fb;
5506 #if CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING
5507   RefBuffer *last_fb_ref_buf = &cm->frame_refs[LAST_FRAME - LAST_FRAME];
5508 #endif  // CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING
5509 
5510 #if CONFIG_ADAPT_SCAN
5511   av1_deliver_eob_threshold(cm, xd);
5512 #endif
5513 #if CONFIG_BITSTREAM_DEBUG
5514   bitstream_queue_set_frame_read(cm->current_video_frame * 2 + cm->show_frame);
5515 #endif
5516 
5517 #if CONFIG_GLOBAL_MOTION
5518   int i;
5519   for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
5520     cm->global_motion[i] = default_warp_params;
5521     cm->cur_frame->global_motion[i] = default_warp_params;
5522   }
5523   xd->global_motion = cm->global_motion;
5524 #endif  // CONFIG_GLOBAL_MOTION
5525 
5526   first_partition_size = read_uncompressed_header(
5527       pbi, init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
5528 
5529 #if CONFIG_EXT_TILE
5530   // If cm->single_tile_decoding = 0, the independent decoding of a single tile
5531   // or a section of a frame is not allowed.
5532   if (!cm->single_tile_decoding &&
5533       (pbi->dec_tile_row >= 0 || pbi->dec_tile_col >= 0)) {
5534     pbi->dec_tile_row = -1;
5535     pbi->dec_tile_col = -1;
5536   }
5537 #endif  // CONFIG_EXT_TILE
5538 
5539   pbi->first_partition_size = first_partition_size;
5540   pbi->uncomp_hdr_size = aom_rb_bytes_read(&rb);
5541   new_fb = get_frame_new_buffer(cm);
5542   xd->cur_buf = new_fb;
5543 #if CONFIG_INTRABC
5544 #if CONFIG_HIGHBITDEPTH
5545   av1_setup_scale_factors_for_frame(
5546       &xd->sf_identity, xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height,
5547       xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height,
5548       cm->use_highbitdepth);
5549 #else
5550   av1_setup_scale_factors_for_frame(
5551       &xd->sf_identity, xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height,
5552       xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height);
5553 #endif  // CONFIG_HIGHBITDEPTH
5554 #endif  // CONFIG_INTRABC
5555 
5556   if (cm->show_existing_frame) {
5557     // showing a frame directly
5558     *p_data_end = data + aom_rb_bytes_read(&rb);
5559     return 0;
5560   }
5561 
5562   data += aom_rb_bytes_read(&rb);
5563   if (first_partition_size)
5564     if (!read_is_valid(data, first_partition_size, data_end))
5565       aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
5566                          "Truncated packet or corrupt header length");
5567 
5568   cm->setup_mi(cm);
5569 
5570 #if CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING
5571   // NOTE(zoeliu): As cm->prev_frame can take neither a frame of
5572   //               show_exisiting_frame=1, nor can it take a frame not used as
5573   //               a reference, it is probable that by the time it is being
5574   //               referred to, the frame buffer it originally points to may
5575   //               already get expired and have been reassigned to the current
5576   //               newly coded frame. Hence, we need to check whether this is
5577   //               the case, and if yes, we have 2 choices:
5578   //               (1) Simply disable the use of previous frame mvs; or
5579   //               (2) Have cm->prev_frame point to one reference frame buffer,
5580   //                   e.g. LAST_FRAME.
5581   if (!dec_is_ref_frame_buf(pbi, cm->prev_frame)) {
5582     // Reassign the LAST_FRAME buffer to cm->prev_frame.
5583     cm->prev_frame = last_fb_ref_buf->idx != INVALID_IDX
5584                          ? &cm->buffer_pool->frame_bufs[last_fb_ref_buf->idx]
5585                          : NULL;
5586   }
5587 #endif  // CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING
5588 
5589 #if CONFIG_TEMPMV_SIGNALING
5590   if (cm->use_prev_frame_mvs && !frame_can_use_prev_frame_mvs(cm)) {
5591     aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
5592                        "Frame wrongly requests previous frame MVs");
5593   }
5594 #else
5595   cm->use_prev_frame_mvs = !cm->error_resilient_mode && cm->prev_frame &&
5596 #if CONFIG_FRAME_SUPERRES
5597                            cm->width == cm->last_width &&
5598                            cm->height == cm->last_height &&
5599 #else
5600                            cm->width == cm->prev_frame->buf.y_crop_width &&
5601                            cm->height == cm->prev_frame->buf.y_crop_height &&
5602 #endif  // CONFIG_FRAME_SUPERRES
5603                            !cm->last_intra_only && cm->last_show_frame &&
5604                            (cm->last_frame_type != KEY_FRAME);
5605 #endif  // CONFIG_TEMPMV_SIGNALING
5606 
5607 #if CONFIG_MFMV
5608   av1_setup_motion_field(cm);
5609 #endif  // CONFIG_MFMV
5610 
5611   av1_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
5612 #if CONFIG_NO_FRAME_CONTEXT_SIGNALING
5613   if (cm->error_resilient_mode || frame_is_intra_only(cm)) {
5614     // use the default frame context values
5615     *cm->fc = cm->frame_contexts[FRAME_CONTEXT_DEFAULTS];
5616     cm->pre_fc = &cm->frame_contexts[FRAME_CONTEXT_DEFAULTS];
5617   } else {
5618     *cm->fc = cm->frame_contexts[cm->frame_refs[0].idx];
5619     cm->pre_fc = &cm->frame_contexts[cm->frame_refs[0].idx];
5620   }
5621 #else
5622   *cm->fc = cm->frame_contexts[cm->frame_context_idx];
5623   cm->pre_fc = &cm->frame_contexts[cm->frame_context_idx];
5624 #endif  // CONFIG_NO_FRAME_CONTEXT_SIGNALING
5625   if (!cm->fc->initialized)
5626     aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
5627                        "Uninitialized entropy context.");
5628 
5629   av1_zero(cm->counts);
5630 
5631   xd->corrupted = 0;
5632   if (first_partition_size) {
5633     new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
5634     if (new_fb->corrupted)
5635       aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
5636                          "Decode failed. Frame data header is corrupted.");
5637   }
5638   return first_partition_size;
5639 }
5640 
5641 void av1_decode_tg_tiles_and_wrapup(AV1Decoder *pbi, const uint8_t *data,
5642                                     const uint8_t *data_end,
5643                                     const uint8_t **p_data_end, int startTile,
5644                                     int endTile, int initialize_flag) {
5645   AV1_COMMON *const cm = &pbi->common;
5646   MACROBLOCKD *const xd = &pbi->mb;
5647   int context_updated = 0;
5648 
5649 #if CONFIG_LOOP_RESTORATION
5650   if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
5651       cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
5652       cm->rst_info[2].frame_restoration_type != RESTORE_NONE) {
5653     av1_alloc_restoration_buffers(cm);
5654   }
5655 #endif
5656 
5657 #if !CONFIG_LOOPFILTER_LEVEL
5658   if (cm->lf.filter_level && !cm->skip_loop_filter) {
5659     av1_loop_filter_frame_init(cm, cm->lf.filter_level, cm->lf.filter_level);
5660   }
5661 #endif
5662 
5663   // If encoded in frame parallel mode, frame context is ready after decoding
5664   // the frame header.
5665   if (cm->frame_parallel_decode && initialize_flag &&
5666       cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD) {
5667     AVxWorker *const worker = pbi->frame_worker_owner;
5668     FrameWorkerData *const frame_worker_data = worker->data1;
5669     if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD) {
5670       context_updated = 1;
5671 #if CONFIG_NO_FRAME_CONTEXT_SIGNALING
5672       cm->frame_contexts[cm->new_fb_idx] = *cm->fc;
5673 #else
5674       cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
5675 #endif  // CONFIG_NO_FRAME_CONTEXT_SIGNALING
5676     }
5677     av1_frameworker_lock_stats(worker);
5678     pbi->cur_buf->row = -1;
5679     pbi->cur_buf->col = -1;
5680     frame_worker_data->frame_context_ready = 1;
5681     // Signal the main thread that context is ready.
5682     av1_frameworker_signal_stats(worker);
5683     av1_frameworker_unlock_stats(worker);
5684   }
5685 
5686   dec_setup_frame_boundary_info(cm);
5687 
5688   if (pbi->max_threads > 1 && !CONFIG_CB4X4 &&
5689 #if CONFIG_EXT_TILE
5690       pbi->dec_tile_col < 0 &&  // Decoding all columns
5691 #endif                          // CONFIG_EXT_TILE
5692       cm->tile_cols > 1) {
5693     // Multi-threaded tile decoder
5694     *p_data_end =
5695         decode_tiles_mt(pbi, data + pbi->first_partition_size, data_end);
5696     if (!xd->corrupted) {
5697       if (!cm->skip_loop_filter) {
5698 // If multiple threads are used to decode tiles, then we use those
5699 // threads to do parallel loopfiltering.
5700 #if CONFIG_LOOPFILTER_LEVEL
5701         av1_loop_filter_frame_mt(
5702             (YV12_BUFFER_CONFIG *)xd->cur_buf, cm, pbi->mb.plane,
5703             cm->lf.filter_level[0], cm->lf.filter_level[1], 0, 0,
5704             pbi->tile_workers, pbi->num_tile_workers, &pbi->lf_row_sync);
5705 #else
5706         av1_loop_filter_frame_mt((YV12_BUFFER_CONFIG *)xd->cur_buf, cm,
5707                                  pbi->mb.plane, cm->lf.filter_level, 0, 0,
5708                                  pbi->tile_workers, pbi->num_tile_workers,
5709                                  &pbi->lf_row_sync);
5710 #endif  // CONFIG_LOOPFILTER_LEVEL
5711       }
5712     } else {
5713       aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
5714                          "Decode failed. Frame data is corrupted.");
5715     }
5716   } else {
5717 #if CONFIG_OBU
5718     *p_data_end = decode_tiles(pbi, data, data_end, startTile, endTile);
5719 #else
5720     *p_data_end = decode_tiles(
5721         pbi, data + pbi->uncomp_hdr_size + pbi->first_partition_size, data_end,
5722         startTile, endTile);
5723 #endif
5724   }
5725 
5726   if (endTile != cm->tile_rows * cm->tile_cols - 1) {
5727     return;
5728   }
5729 
5730 #if CONFIG_STRIPED_LOOP_RESTORATION
5731   if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
5732       cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
5733       cm->rst_info[2].frame_restoration_type != RESTORE_NONE) {
5734     av1_loop_restoration_save_boundary_lines(&pbi->cur_buf->buf, cm);
5735   }
5736 #endif
5737 
5738 #if CONFIG_CDEF
5739   if (!cm->skip_loop_filter && !cm->all_lossless) {
5740     av1_cdef_frame(&pbi->cur_buf->buf, cm, &pbi->mb);
5741   }
5742 #endif  // CONFIG_CDEF
5743 
5744 #if CONFIG_FRAME_SUPERRES
5745   superres_post_decode(pbi);
5746 #endif  // CONFIG_FRAME_SUPERRES
5747 
5748 #if CONFIG_LOOP_RESTORATION
5749   if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
5750       cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
5751       cm->rst_info[2].frame_restoration_type != RESTORE_NONE) {
5752     aom_extend_frame_borders((YV12_BUFFER_CONFIG *)xd->cur_buf);
5753     av1_loop_restoration_frame((YV12_BUFFER_CONFIG *)xd->cur_buf, cm,
5754                                cm->rst_info, 7, 0, NULL);
5755   }
5756 #endif  // CONFIG_LOOP_RESTORATION
5757 
5758   if (!xd->corrupted) {
5759     if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
5760       FRAME_CONTEXT **tile_ctxs = aom_malloc(cm->tile_rows * cm->tile_cols *
5761                                              sizeof(&pbi->tile_data[0].tctx));
5762       aom_cdf_prob **cdf_ptrs =
5763           aom_malloc(cm->tile_rows * cm->tile_cols *
5764                      sizeof(&pbi->tile_data[0].tctx.partition_cdf[0][0]));
5765       make_update_tile_list_dec(pbi, cm->tile_rows, cm->tile_cols, tile_ctxs);
5766 #if CONFIG_LV_MAP
5767       av1_adapt_coef_probs(cm);
5768 #endif  // CONFIG_LV_MAP
5769 #if CONFIG_SYMBOLRATE
5770       av1_dump_symbol_rate(cm);
5771 #endif
5772       av1_adapt_intra_frame_probs(cm);
5773       av1_average_tile_coef_cdfs(pbi->common.fc, tile_ctxs, cdf_ptrs,
5774                                  cm->tile_rows * cm->tile_cols);
5775       av1_average_tile_intra_cdfs(pbi->common.fc, tile_ctxs, cdf_ptrs,
5776                                   cm->tile_rows * cm->tile_cols);
5777 #if CONFIG_PVQ
5778       av1_average_tile_pvq_cdfs(pbi->common.fc, tile_ctxs,
5779                                 cm->tile_rows * cm->tile_cols);
5780 #endif  // CONFIG_PVQ
5781 #if CONFIG_ADAPT_SCAN
5782       av1_adapt_scan_order(cm);
5783 #endif  // CONFIG_ADAPT_SCAN
5784 
5785       if (!frame_is_intra_only(cm)) {
5786         av1_adapt_inter_frame_probs(cm);
5787 #if !CONFIG_NEW_MULTISYMBOL
5788         av1_adapt_mv_probs(cm, cm->allow_high_precision_mv);
5789 #endif
5790         av1_average_tile_inter_cdfs(&pbi->common, pbi->common.fc, tile_ctxs,
5791                                     cdf_ptrs, cm->tile_rows * cm->tile_cols);
5792         av1_average_tile_mv_cdfs(pbi->common.fc, tile_ctxs, cdf_ptrs,
5793                                  cm->tile_rows * cm->tile_cols);
5794       }
5795       aom_free(tile_ctxs);
5796       aom_free(cdf_ptrs);
5797     } else {
5798       debug_check_frame_counts(cm);
5799     }
5800   } else {
5801     aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
5802                        "Decode failed. Frame data is corrupted.");
5803   }
5804 
5805 #if CONFIG_INSPECTION
5806   if (pbi->inspect_cb != NULL) {
5807     (*pbi->inspect_cb)(pbi, pbi->inspect_ctx);
5808   }
5809 #endif
5810 
5811 // Non frame parallel update frame context here.
5812 #if CONFIG_NO_FRAME_CONTEXT_SIGNALING
5813   if (!context_updated) cm->frame_contexts[cm->new_fb_idx] = *cm->fc;
5814 #else
5815   if (!cm->error_resilient_mode && !context_updated)
5816     cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
5817 #endif
5818 }
5819 
5820 #if CONFIG_OBU
5821 
5822 static OBU_TYPE read_obu_header(struct aom_read_bit_buffer *rb,
5823                                 uint32_t *header_size) {
5824   OBU_TYPE obu_type;
5825   int obu_extension_flag;
5826 
5827   *header_size = 1;
5828 
5829   obu_type = (OBU_TYPE)aom_rb_read_literal(rb, 5);
5830   aom_rb_read_literal(rb, 2);  // reserved
5831   obu_extension_flag = aom_rb_read_bit(rb);
5832   if (obu_extension_flag) {
5833     *header_size += 1;
5834     aom_rb_read_literal(rb, 3);  // temporal_id
5835     aom_rb_read_literal(rb, 2);
5836     aom_rb_read_literal(rb, 2);
5837     aom_rb_read_literal(rb, 1);  // reserved
5838   }
5839 
5840   return obu_type;
5841 }
5842 
5843 static uint32_t read_temporal_delimiter_obu() { return 0; }
5844 
5845 static uint32_t read_sequence_header_obu(AV1Decoder *pbi,
5846                                          struct aom_read_bit_buffer *rb) {
5847   AV1_COMMON *const cm = &pbi->common;
5848   SequenceHeader *const seq_params = &cm->seq_params;
5849   uint32_t saved_bit_offset = rb->bit_offset;
5850 
5851   cm->profile = av1_read_profile(rb);
5852   aom_rb_read_literal(rb, 4);  // level
5853 
5854   seq_params->frame_id_numbers_present_flag = aom_rb_read_bit(rb);
5855   if (seq_params->frame_id_numbers_present_flag) {
5856     seq_params->frame_id_length_minus7 = aom_rb_read_literal(rb, 4);
5857     seq_params->delta_frame_id_length_minus2 = aom_rb_read_literal(rb, 4);
5858   }
5859 
5860   read_bitdepth_colorspace_sampling(cm, rb, pbi->allow_lowbitdepth);
5861 
5862   return ((rb->bit_offset - saved_bit_offset + 7) >> 3);
5863 }
5864 
5865 static uint32_t read_frame_header_obu(AV1Decoder *pbi, const uint8_t *data,
5866                                       const uint8_t *data_end,
5867                                       const uint8_t **p_data_end) {
5868   size_t header_size;
5869 
5870   header_size =
5871       av1_decode_frame_headers_and_setup(pbi, data, data_end, p_data_end);
5872   return (uint32_t)(pbi->uncomp_hdr_size + header_size);
5873 }
5874 
5875 static uint32_t read_tile_group_header(AV1Decoder *pbi,
5876                                        struct aom_read_bit_buffer *rb,
5877                                        int *startTile, int *endTile) {
5878   AV1_COMMON *const cm = &pbi->common;
5879   uint32_t saved_bit_offset = rb->bit_offset;
5880 
5881   *startTile = aom_rb_read_literal(rb, cm->log2_tile_rows + cm->log2_tile_cols);
5882   *endTile = aom_rb_read_literal(rb, cm->log2_tile_rows + cm->log2_tile_cols);
5883 
5884   return ((rb->bit_offset - saved_bit_offset + 7) >> 3);
5885 }
5886 
5887 static uint32_t read_one_tile_group_obu(AV1Decoder *pbi,
5888                                         struct aom_read_bit_buffer *rb,
5889                                         int is_first_tg, const uint8_t *data,
5890                                         const uint8_t *data_end,
5891                                         const uint8_t **p_data_end,
5892                                         int *is_last_tg) {
5893   AV1_COMMON *const cm = &pbi->common;
5894   int startTile, endTile;
5895   uint32_t header_size, tg_payload_size;
5896 
5897   header_size = read_tile_group_header(pbi, rb, &startTile, &endTile);
5898   data += header_size;
5899   av1_decode_tg_tiles_and_wrapup(pbi, data, data_end, p_data_end, startTile,
5900                                  endTile, is_first_tg);
5901   tg_payload_size = (uint32_t)(*p_data_end - data);
5902 
5903   // TODO(shan):  For now, assume all tile groups received in order
5904   *is_last_tg = endTile == cm->tile_rows * cm->tile_cols - 1;
5905 
5906   return header_size + tg_payload_size;
5907 }
5908 
5909 void av1_decode_frame_from_obus(struct AV1Decoder *pbi, const uint8_t *data,
5910                                 const uint8_t *data_end,
5911                                 const uint8_t **p_data_end) {
5912   AV1_COMMON *const cm = &pbi->common;
5913   int frame_decoding_finished = 0;
5914   int is_first_tg_obu_received = 1;
5915   int frame_header_received = 0;
5916   int frame_header_size = 0;
5917 
5918   // decode frame as a series of OBUs
5919   while (!frame_decoding_finished && !cm->error.error_code) {
5920     struct aom_read_bit_buffer rb;
5921     uint8_t clear_data[80];
5922     uint32_t obu_size, obu_header_size, obu_payload_size = 0;
5923     OBU_TYPE obu_type;
5924 
5925     init_read_bit_buffer(pbi, &rb, data + 4, data_end, clear_data);
5926 
5927     // every obu is preceded by 4-byte size of obu (obu header + payload size)
5928     // The obu size is only needed for tile group OBUs
5929     obu_size = mem_get_le32(data);
5930     obu_type = read_obu_header(&rb, &obu_header_size);
5931     data += (4 + obu_header_size);
5932 
5933     switch (obu_type) {
5934       case OBU_TD: obu_payload_size = read_temporal_delimiter_obu(); break;
5935       case OBU_SEQUENCE_HEADER:
5936         obu_payload_size = read_sequence_header_obu(pbi, &rb);
5937         break;
5938       case OBU_FRAME_HEADER:
5939         // Only decode first frame header received
5940         if (!frame_header_received) {
5941           frame_header_size = obu_payload_size =
5942               read_frame_header_obu(pbi, data, data_end, p_data_end);
5943           frame_header_received = 1;
5944         } else {
5945           obu_payload_size = frame_header_size;
5946         }
5947         if (cm->show_existing_frame) frame_decoding_finished = 1;
5948         break;
5949       case OBU_TILE_GROUP:
5950         obu_payload_size = read_one_tile_group_obu(
5951             pbi, &rb, is_first_tg_obu_received, data, data + obu_size - 1,
5952             p_data_end, &frame_decoding_finished);
5953         is_first_tg_obu_received = 0;
5954         break;
5955       default: break;
5956     }
5957     data += obu_payload_size;
5958   }
5959 }
5960 #endif
5961