1 /*
2 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 */
11
12 #include <assert.h>
13 #include <limits.h>
14 #include <stdio.h>
15
16 #include "aom/aom_encoder.h"
17 #include "aom_dsp/aom_dsp_common.h"
18 #include "aom_dsp/binary_codes_writer.h"
19 #include "aom_dsp/bitwriter_buffer.h"
20 #include "aom_mem/aom_mem.h"
21 #include "aom_ports/mem_ops.h"
22 #include "aom_ports/system_state.h"
23 #if CONFIG_BITSTREAM_DEBUG
24 #include "aom_util/debug_util.h"
25 #endif // CONFIG_BITSTREAM_DEBUG
26
27 #if CONFIG_CDEF
28 #include "av1/common/cdef.h"
29 #endif // CONFIG_CDEF
30 #include "av1/common/entropy.h"
31 #include "av1/common/entropymode.h"
32 #include "av1/common/entropymv.h"
33 #include "av1/common/mvref_common.h"
34 #include "av1/common/odintrin.h"
35 #include "av1/common/pred_common.h"
36 #include "av1/common/reconinter.h"
37 #if CONFIG_EXT_INTRA
38 #include "av1/common/reconintra.h"
39 #endif // CONFIG_EXT_INTRA
40 #include "av1/common/seg_common.h"
41 #include "av1/common/tile_common.h"
42
43 #if CONFIG_LV_MAP
44 #include "av1/encoder/encodetxb.h"
45 #endif // CONFIG_LV_MAP
46 #include "av1/encoder/bitstream.h"
47 #include "av1/encoder/cost.h"
48 #include "av1/encoder/encodemv.h"
49 #include "av1/encoder/mcomp.h"
50 #if CONFIG_PALETTE_DELTA_ENCODING
51 #include "av1/encoder/palette.h"
52 #endif // CONFIG_PALETTE_DELTA_ENCODING
53 #include "av1/encoder/segmentation.h"
54 #include "av1/encoder/subexp.h"
55 #include "av1/encoder/tokenize.h"
56 #if CONFIG_PVQ
57 #include "av1/encoder/pvq_encoder.h"
58 #endif
59
// When nonzero, emit extra diagnostics for encoder/decoder mismatch debugging.
#define ENC_MISMATCH_DEBUG 0

#if CONFIG_COMPOUND_SINGLEREF
// Token encodings for single-reference compound modes; filled in by
// av1_encode_token_init().
static struct av1_token
    inter_singleref_comp_mode_encodings[INTER_SINGLEREF_COMP_MODES];
#endif  // CONFIG_COMPOUND_SINGLEREF
66
// TODO(anybody) : remove this flag when PVQ supports palette coding tool
68 #if !CONFIG_PVQ || CONFIG_EXT_INTRA
write_uniform(aom_writer * w,int n,int v)69 static INLINE void write_uniform(aom_writer *w, int n, int v) {
70 const int l = get_unsigned_bits(n);
71 const int m = (1 << l) - n;
72 if (l == 0) return;
73 if (v < m) {
74 aom_write_literal(w, v, l - 1);
75 } else {
76 aom_write_literal(w, m + ((v - m) >> 1), l - 1);
77 aom_write_literal(w, (v - m) & 1, 1);
78 }
79 }
80 #endif // !CONFIG_PVQ || CONFIG_EXT_INTRA
81
#if CONFIG_EXT_INTRA
#if CONFIG_INTRA_INTERP
// Token encodings for the intra interpolation filters; filled in by
// av1_encode_token_init().
static struct av1_token intra_filter_encodings[INTRA_FILTERS];
#endif  // CONFIG_INTRA_INTERP
#endif  // CONFIG_EXT_INTRA
#if CONFIG_INTERINTRA
// Token encodings for the inter-intra prediction modes.
static struct av1_token interintra_mode_encodings[INTERINTRA_MODES];
#endif
#if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
// Token encodings for the compound prediction types.
static struct av1_token compound_type_encodings[COMPOUND_TYPES];
#endif  // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
#if CONFIG_LOOP_RESTORATION
// Token encodings for the switchable loop-restoration types.
static struct av1_token switchable_restore_encodings[RESTORE_SWITCHABLE_TYPES];
// Writes the loop-restoration filter coefficients for one restoration tile
// of the given plane.
static void loop_restoration_write_sb_coeffs(const AV1_COMMON *const cm,
                                             MACROBLOCKD *xd,
                                             aom_writer *const w, int plane,
                                             int rtile_idx);
#endif  // CONFIG_LOOP_RESTORATION
#if CONFIG_OBU
// Writes the uncompressed frame header in OBU syntax.
static void write_uncompressed_header_obu(AV1_COMP *cpi,
                                          struct aom_write_bit_buffer *wb);
#else
// Writes the uncompressed frame header in legacy frame syntax.
static void write_uncompressed_header_frame(AV1_COMP *cpi,
                                            struct aom_write_bit_buffer *wb);
#endif

// Writes the compressed (entropy-coded) header into `data`; returns its
// size in bytes.
static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data);

#if !CONFIG_OBU || CONFIG_EXT_TILE
// NOTE(review): appears to repack tile data in `dst` using minimal-width
// tile size fields, reporting the chosen field widths — confirm against the
// definition below.
static int remux_tiles(const AV1_COMMON *const cm, uint8_t *dst,
                       const uint32_t data_size, const uint32_t max_tile_size,
                       const uint32_t max_tile_col_size,
                       int *const tile_size_bytes,
                       int *const tile_col_size_bytes);
#endif
// One-time initialization of the token encodings derived from the static
// coding trees. Idempotent: the results depend only on constant trees.
void av1_encode_token_init(void) {
#if CONFIG_EXT_INTRA && CONFIG_INTRA_INTERP
  av1_tokens_from_tree(intra_filter_encodings, av1_intra_filter_tree);
#endif  // CONFIG_EXT_INTRA && CONFIG_INTRA_INTERP
#if CONFIG_INTERINTRA
  av1_tokens_from_tree(interintra_mode_encodings, av1_interintra_mode_tree);
#endif  // CONFIG_INTERINTRA
#if CONFIG_COMPOUND_SINGLEREF
  av1_tokens_from_tree(inter_singleref_comp_mode_encodings,
                       av1_inter_singleref_comp_mode_tree);
#endif  // CONFIG_COMPOUND_SINGLEREF
#if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
  av1_tokens_from_tree(compound_type_encodings, av1_compound_type_tree);
#endif  // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
#if CONFIG_LOOP_RESTORATION
  av1_tokens_from_tree(switchable_restore_encodings,
                       av1_switchable_restore_tree);
#endif  // CONFIG_LOOP_RESTORATION
}
136
// Write the luma intra prediction mode for a key-frame block, using a CDF
// conditioned on the above/left neighbor modes.
static void write_intra_mode_kf(const AV1_COMMON *cm, FRAME_CONTEXT *frame_ctx,
                                const MODE_INFO *mi, const MODE_INFO *above_mi,
                                const MODE_INFO *left_mi, int block,
                                PREDICTION_MODE mode, aom_writer *w) {
#if CONFIG_INTRABC
  // Intra block copy is signaled elsewhere; it must never reach this path.
  assert(!is_intrabc_block(&mi->mbmi));
#endif  // CONFIG_INTRABC
  aom_write_symbol(w, mode,
                   get_y_mode_cdf(frame_ctx, mi, above_mi, left_mi, block),
                   INTRA_MODES);
  (void)cm;
}
149
// Signal an inter prediction mode (NEWMV / ZEROMV / NEARESTMV / NEARMV) as
// a cascade of binary decisions, each with a context derived from mode_ctx.
static void write_inter_mode(aom_writer *w, PREDICTION_MODE mode,
                             FRAME_CONTEXT *ec_ctx, const int16_t mode_ctx) {
  const int16_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK;

#if CONFIG_NEW_MULTISYMBOL
  aom_write_symbol(w, mode != NEWMV, ec_ctx->newmv_cdf[newmv_ctx], 2);
#else
  aom_write(w, mode != NEWMV, ec_ctx->newmv_prob[newmv_ctx]);
#endif

  if (mode != NEWMV) {
    // When the context flags all candidates as zero, ZEROMV is implied and
    // nothing further needs to be coded.
    if (mode_ctx & (1 << ALL_ZERO_FLAG_OFFSET)) {
      assert(mode == ZEROMV);
      return;
    }

    const int16_t zeromv_ctx = (mode_ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
#if CONFIG_NEW_MULTISYMBOL
    aom_write_symbol(w, mode != ZEROMV, ec_ctx->zeromv_cdf[zeromv_ctx], 2);
#else
    aom_write(w, mode != ZEROMV, ec_ctx->zeromv_prob[zeromv_ctx]);
#endif

    if (mode != ZEROMV) {
      int16_t refmv_ctx = (mode_ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;

      // Dedicated contexts for cases where some candidates were skipped
      // during reference MV list construction.
      if (mode_ctx & (1 << SKIP_NEARESTMV_OFFSET)) refmv_ctx = 6;
      if (mode_ctx & (1 << SKIP_NEARMV_OFFSET)) refmv_ctx = 7;
      if (mode_ctx & (1 << SKIP_NEARESTMV_SUB8X8_OFFSET)) refmv_ctx = 8;
#if CONFIG_NEW_MULTISYMBOL
      aom_write_symbol(w, mode != NEARESTMV, ec_ctx->refmv_cdf[refmv_ctx], 2);
#else
      aom_write(w, mode != NEARESTMV, ec_ctx->refmv_prob[refmv_ctx]);
#endif
    }
  }
}
187
// Signal which entry of the dynamic reference MV list (DRL) is used. At most
// two binary decisions are coded; coding stops as soon as the chosen index
// is reached or the candidate list is exhausted.
static void write_drl_idx(FRAME_CONTEXT *ec_ctx, const MB_MODE_INFO *mbmi,
                          const MB_MODE_INFO_EXT *mbmi_ext, aom_writer *w) {
  uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);

  assert(mbmi->ref_mv_idx < 3);

#if CONFIG_COMPOUND_SINGLEREF
  if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV ||
      mbmi->mode == SR_NEW_NEWMV) {
#else   // !CONFIG_COMPOUND_SINGLEREF
  if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV) {
#endif  // CONFIG_COMPOUND_SINGLEREF
    // NEWMV family: ref_mv_idx indexes the DRL stack directly.
    int idx;
    for (idx = 0; idx < 2; ++idx) {
      if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
        uint8_t drl_ctx =
            av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);

#if CONFIG_NEW_MULTISYMBOL
        aom_write_symbol(w, mbmi->ref_mv_idx != idx, ec_ctx->drl_cdf[drl_ctx],
                         2);
#else
        aom_write(w, mbmi->ref_mv_idx != idx, ec_ctx->drl_prob[drl_ctx]);
#endif
        if (mbmi->ref_mv_idx == idx) return;
      }
    }
    return;
  }

  if (have_nearmv_in_inter_mode(mbmi->mode)) {
    int idx;
    // TODO(jingning): Temporary solution to compensate the NEARESTMV offset.
    for (idx = 1; idx < 3; ++idx) {
      if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
        uint8_t drl_ctx =
            av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
#if CONFIG_NEW_MULTISYMBOL
        aom_write_symbol(w, mbmi->ref_mv_idx != (idx - 1),
                         ec_ctx->drl_cdf[drl_ctx], 2);
#else
        aom_write(w, mbmi->ref_mv_idx != (idx - 1), ec_ctx->drl_prob[drl_ctx]);
#endif
        if (mbmi->ref_mv_idx == (idx - 1)) return;
      }
    }
    return;
  }
}
237
238 static void write_inter_compound_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
239 aom_writer *w, PREDICTION_MODE mode,
240 const int16_t mode_ctx) {
241 assert(is_inter_compound_mode(mode));
242 (void)cm;
243 aom_write_symbol(w, INTER_COMPOUND_OFFSET(mode),
244 xd->tile_ctx->inter_compound_mode_cdf[mode_ctx],
245 INTER_COMPOUND_MODES);
246 }
247
#if CONFIG_COMPOUND_SINGLEREF
// Signal a single-reference compound prediction mode as one symbol.
static void write_inter_singleref_comp_mode(MACROBLOCKD *xd, aom_writer *w,
                                            PREDICTION_MODE mode,
                                            const int16_t mode_ctx) {
  assert(is_inter_singleref_comp_mode(mode));
  aom_write_symbol(w, INTER_SINGLEREF_COMP_OFFSET(mode),
                   xd->tile_ctx->inter_singleref_comp_mode_cdf[mode_ctx],
                   INTER_SINGLEREF_COMP_MODES);
}
#endif  // CONFIG_COMPOUND_SINGLEREF
260
// Write `data` using exactly enough bits to represent any value in [0, max].
static void encode_unsigned_max(struct aom_write_bit_buffer *wb, int data,
                                int max) {
  const int nbits = get_unsigned_bits(max);
  aom_wb_write_literal(wb, data, nbits);
}
265
266 #if CONFIG_VAR_TX
// Recursively signal how an inter block's transform is partitioned,
// descending from `tx_size` until the coded inter_tx_size unit is reached
// or the maximum split depth is hit.
static void write_tx_size_vartx(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                const MB_MODE_INFO *mbmi, TX_SIZE tx_size,
                                int depth, int blk_row, int blk_col,
                                aom_writer *w) {
#if CONFIG_NEW_MULTISYMBOL
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
  (void)cm;
#endif
  const int tx_row = blk_row >> 1;
  const int tx_col = blk_col >> 1;
  const int max_blocks_high = max_block_high(xd, mbmi->sb_type, 0);
  const int max_blocks_wide = max_block_wide(xd, mbmi->sb_type, 0);

  int ctx = txfm_partition_context(xd->above_txfm_context + blk_col,
                                   xd->left_txfm_context + blk_row,
                                   mbmi->sb_type, tx_size);

  // Units beyond the visible block boundary are not coded.
  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;

  if (depth == MAX_VARTX_DEPTH) {
    // No further split is possible: nothing is coded, but the partition
    // context is still updated as if "no split" had been signaled.
    txfm_partition_update(xd->above_txfm_context + blk_col,
                          xd->left_txfm_context + blk_row, tx_size, tx_size);
    return;
  }

#if CONFIG_RECT_TX_EXT
  if (tx_size == mbmi->inter_tx_size[tx_row][tx_col] ||
      mbmi->tx_size == quarter_txsize_lookup[mbmi->sb_type]) {
#else
  if (tx_size == mbmi->inter_tx_size[tx_row][tx_col]) {
#endif
    // Leaf node: signal "no split" here.
#if CONFIG_NEW_MULTISYMBOL
    aom_write_symbol(w, 0, ec_ctx->txfm_partition_cdf[ctx], 2);
#else
    aom_write(w, 0, cm->fc->txfm_partition_prob[ctx]);
#endif

    txfm_partition_update(xd->above_txfm_context + blk_col,
                          xd->left_txfm_context + blk_row, tx_size, tx_size);
    // TODO(yuec): set correct txfm partition update for qttx
  } else {
    const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
    const int bsl = tx_size_wide_unit[sub_txs];
    int i;

    // Signal "split" and recurse into the four quadrants.
#if CONFIG_NEW_MULTISYMBOL
    aom_write_symbol(w, 1, ec_ctx->txfm_partition_cdf[ctx], 2);
#else
    aom_write(w, 1, cm->fc->txfm_partition_prob[ctx]);
#endif

    if (sub_txs == TX_4X4) {
      // 4x4 sub-transforms cannot split further; update context and stop.
      txfm_partition_update(xd->above_txfm_context + blk_col,
                            xd->left_txfm_context + blk_row, sub_txs, tx_size);
      return;
    }

    assert(bsl > 0);
    for (i = 0; i < 4; ++i) {
      int offsetr = blk_row + (i >> 1) * bsl;
      int offsetc = blk_col + (i & 0x01) * bsl;
      write_tx_size_vartx(cm, xd, mbmi, sub_txs, depth + 1, offsetr, offsetc,
                          w);
    }
  }
}
333
334 #if !CONFIG_NEW_MULTISYMBOL
335 static void update_txfm_partition_probs(AV1_COMMON *cm, aom_writer *w,
336 FRAME_COUNTS *counts, int probwt) {
337 int k;
338 for (k = 0; k < TXFM_PARTITION_CONTEXTS; ++k)
339 av1_cond_prob_diff_update(w, &cm->fc->txfm_partition_prob[k],
340 counts->txfm_partition[k], probwt);
341 }
342 #endif // CONFIG_NEW_MULTISYMBOL
343 #endif // CONFIG_VAR_TX
344
// Signal the block's transform size when the block size allows a choice;
// otherwise nothing is written and the decoder infers it.
static void write_selected_tx_size(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                                   aom_writer *w) {
  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
  (void)cm;
  if (block_signals_txsize(bsize)) {
    const TX_SIZE tx_size = mbmi->tx_size;
    const int is_inter = is_inter_block(mbmi);
    const int tx_size_ctx = get_tx_size_context(xd);
    const int32_t tx_size_cat = is_inter ? inter_tx_size_cat_lookup[bsize]
                                         : intra_tx_size_cat_lookup[bsize];
    // Rectangular sizes are coded via their square superset plus a depth.
    const TX_SIZE coded_tx_size = txsize_sqr_up_map[tx_size];
    const int depth = tx_size_to_depth(coded_tx_size);
#if CONFIG_EXT_TX && CONFIG_RECT_TX
    assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed(xd, mbmi)));
#endif  // CONFIG_EXT_TX && CONFIG_RECT_TX

    aom_write_symbol(w, depth, ec_ctx->tx_size_cdf[tx_size_cat][tx_size_ctx],
                     tx_size_cat + 2);
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
    // Extra flag distinguishing the quarter transform size when allowed.
    if (is_quarter_tx_allowed(xd, mbmi, is_inter) && tx_size != coded_tx_size)
#if CONFIG_NEW_MULTISYMBOL
      aom_write_symbol(w, tx_size == quarter_txsize_lookup[bsize],
                       cm->fc->quarter_tx_size_cdf, 2);
#else
      aom_write(w, tx_size == quarter_txsize_lookup[bsize],
                cm->fc->quarter_tx_size_prob);
#endif
#endif
  }
}
377
378 #if !CONFIG_NEW_MULTISYMBOL
379 static void update_inter_mode_probs(AV1_COMMON *cm, aom_writer *w,
380 FRAME_COUNTS *counts) {
381 int i;
382 const int probwt = cm->num_tg;
383 for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
384 av1_cond_prob_diff_update(w, &cm->fc->newmv_prob[i], counts->newmv_mode[i],
385 probwt);
386 for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
387 av1_cond_prob_diff_update(w, &cm->fc->zeromv_prob[i],
388 counts->zeromv_mode[i], probwt);
389 for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
390 av1_cond_prob_diff_update(w, &cm->fc->refmv_prob[i], counts->refmv_mode[i],
391 probwt);
392 for (i = 0; i < DRL_MODE_CONTEXTS; ++i)
393 av1_cond_prob_diff_update(w, &cm->fc->drl_prob[i], counts->drl_mode[i],
394 probwt);
395 }
396 #endif
397
398 static int write_skip(const AV1_COMMON *cm, const MACROBLOCKD *xd,
399 int segment_id, const MODE_INFO *mi, aom_writer *w) {
400 if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
401 return 1;
402 } else {
403 const int skip = mi->mbmi.skip;
404 #if CONFIG_NEW_MULTISYMBOL
405 FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
406 const int ctx = av1_get_skip_context(xd);
407 aom_write_symbol(w, skip, ec_ctx->skip_cdfs[ctx], 2);
408 #else
409 aom_write(w, skip, av1_get_skip_prob(cm, xd));
410 #endif
411 return skip;
412 }
413 }
414
415 static void write_is_inter(const AV1_COMMON *cm, const MACROBLOCKD *xd,
416 int segment_id, aom_writer *w, const int is_inter) {
417 if (!segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
418 #if CONFIG_NEW_MULTISYMBOL
419 FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
420 const int ctx = av1_get_intra_inter_context(xd);
421 aom_write_symbol(w, is_inter, ec_ctx->intra_inter_cdf[ctx], 2);
422 #else
423 aom_write(w, is_inter, av1_get_intra_inter_prob(cm, xd));
424 #endif
425 }
426 }
427
428 #if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
// Signal the block's motion mode (simple translation / OBMC / warped; with
// NCOBMC_ADAPT_WEIGHT, the OBMC-family modes) when more than simple
// translation is allowed. Note the braces below intentionally pair across
// preprocessor branches.
static void write_motion_mode(const AV1_COMMON *cm, MACROBLOCKD *xd,
                              const MODE_INFO *mi, aom_writer *w) {
  const MB_MODE_INFO *mbmi = &mi->mbmi;

#if !CONFIG_GLOBAL_MOTION
  // The cm parameter is only used with global_motion or with
  // motion_var and warped_motion. In other cases, explicitly ignore
  // it to avoid a compiler warning.
  (void)cm;
#endif
  MOTION_MODE last_motion_mode_allowed = motion_mode_allowed(
#if CONFIG_GLOBAL_MOTION
      0, cm->global_motion,
#endif  // CONFIG_GLOBAL_MOTION
#if CONFIG_WARPED_MOTION
      xd,
#endif
      mi);
  // Nothing to code when only simple translation is possible.
  if (last_motion_mode_allowed == SIMPLE_TRANSLATION) return;
#if CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
#if CONFIG_NCOBMC_ADAPT_WEIGHT
  if (last_motion_mode_allowed == NCOBMC_ADAPT_WEIGHT) {
    aom_write_symbol(w, mbmi->motion_mode,
                     xd->tile_ctx->ncobmc_cdf[mbmi->sb_type],
                     OBMC_FAMILY_MODES);
  } else if (last_motion_mode_allowed == OBMC_CAUSAL) {
    aom_write_symbol(w, mbmi->motion_mode == OBMC_CAUSAL,
                     xd->tile_ctx->obmc_cdf[mbmi->sb_type], 2);
  } else {
#else
  if (last_motion_mode_allowed == OBMC_CAUSAL) {
    // Binary choice between simple translation and OBMC.
#if CONFIG_NEW_MULTISYMBOL
    aom_write_symbol(w, mbmi->motion_mode == OBMC_CAUSAL,
                     xd->tile_ctx->obmc_cdf[mbmi->sb_type], 2);
#else
    aom_write(w, mbmi->motion_mode == OBMC_CAUSAL,
              cm->fc->obmc_prob[mbmi->sb_type]);
#endif
  } else {
#endif  // CONFIG_NCOBMC_ADAPT_WEIGHT
#endif  // CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
    // General case: the motion mode is coded as one multi-symbol.
    aom_write_symbol(w, mbmi->motion_mode,
                     xd->tile_ctx->motion_mode_cdf[mbmi->sb_type],
                     MOTION_MODES);
#if CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
  }
#endif  // CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
}
477
#if CONFIG_NCOBMC_ADAPT_WEIGHT
// Signal the adaptive-weight OBMC kernel choice(s): one symbol for square
// blocks, two (one per overlap direction) for non-square blocks.
static void write_ncobmc_mode(MACROBLOCKD *xd, const MODE_INFO *mi,
                              aom_writer *w) {
  const MB_MODE_INFO *mbmi = &mi->mbmi;
  if (mbmi->motion_mode != NCOBMC_ADAPT_WEIGHT) return;
  const ADAPT_OVERLAP_BLOCK ao_block =
      adapt_overlap_block_lookup[mbmi->sb_type];

  aom_write_symbol(w, mbmi->ncobmc_mode[0],
                   xd->tile_ctx->ncobmc_mode_cdf[ao_block], MAX_NCOBMC_MODES);
  if (mi_size_wide[mbmi->sb_type] != mi_size_high[mbmi->sb_type]) {
    aom_write_symbol(w, mbmi->ncobmc_mode[1],
                     xd->tile_ctx->ncobmc_mode_cdf[ao_block], MAX_NCOBMC_MODES);
  }
}
#endif
493 #endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
494
495 static void write_delta_qindex(const AV1_COMMON *cm, const MACROBLOCKD *xd,
496 int delta_qindex, aom_writer *w) {
497 int sign = delta_qindex < 0;
498 int abs = sign ? -delta_qindex : delta_qindex;
499 int rem_bits, thr;
500 int smallval = abs < DELTA_Q_SMALL ? 1 : 0;
501 FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
502 (void)cm;
503
504 aom_write_symbol(w, AOMMIN(abs, DELTA_Q_SMALL), ec_ctx->delta_q_cdf,
505 DELTA_Q_PROBS + 1);
506
507 if (!smallval) {
508 rem_bits = OD_ILOG_NZ(abs - 1) - 1;
509 thr = (1 << rem_bits) + 1;
510 aom_write_literal(w, rem_bits - 1, 3);
511 aom_write_literal(w, abs - thr, rem_bits);
512 }
513 if (abs > 0) {
514 aom_write_bit(w, sign);
515 }
516 }
517
#if CONFIG_EXT_DELTA_Q
// Signal a per-block loop-filter level delta: a small-magnitude symbol, an
// optional escape for large values, then a sign bit.
static void write_delta_lflevel(const AV1_COMMON *cm, const MACROBLOCKD *xd,
#if CONFIG_LOOPFILTER_LEVEL
                                int lf_id,
#endif
                                int delta_lflevel, aom_writer *w) {
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
  const int negative = delta_lflevel < 0;
  const int magnitude = negative ? -delta_lflevel : delta_lflevel;
  (void)cm;

#if CONFIG_LOOPFILTER_LEVEL
  if (cm->delta_lf_multi) {
    // One CDF per loop-filter component when multi-delta is enabled.
    assert(lf_id >= 0 && lf_id < FRAME_LF_COUNT);
    aom_write_symbol(w, AOMMIN(magnitude, DELTA_LF_SMALL),
                     ec_ctx->delta_lf_multi_cdf[lf_id], DELTA_LF_PROBS + 1);
  } else {
    aom_write_symbol(w, AOMMIN(magnitude, DELTA_LF_SMALL), ec_ctx->delta_lf_cdf,
                     DELTA_LF_PROBS + 1);
  }
#else
  aom_write_symbol(w, AOMMIN(magnitude, DELTA_LF_SMALL), ec_ctx->delta_lf_cdf,
                   DELTA_LF_PROBS + 1);
#endif  // CONFIG_LOOPFILTER_LEVEL

  if (magnitude >= DELTA_LF_SMALL) {
    // Escape: code the residual magnitude with a length-prefixed literal.
    const int rem_bits = OD_ILOG_NZ(magnitude - 1) - 1;
    const int thr = (1 << rem_bits) + 1;
    aom_write_literal(w, rem_bits - 1, 3);
    aom_write_literal(w, magnitude - thr, rem_bits);
  }
  if (magnitude) aom_write_bit(w, negative);
}
#endif  // CONFIG_EXT_DELTA_Q
556
557 #if !CONFIG_NEW_MULTISYMBOL
558 static void update_skip_probs(AV1_COMMON *cm, aom_writer *w,
559 FRAME_COUNTS *counts) {
560 int k;
561 const int probwt = cm->num_tg;
562 for (k = 0; k < SKIP_CONTEXTS; ++k) {
563 av1_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k],
564 probwt);
565 }
566 }
567 #endif
568
// TODO(anybody) : remove this flag when PVQ supports palette coding tool
570 #if !CONFIG_PVQ
571 static void pack_map_tokens(aom_writer *w, const TOKENEXTRA **tp, int n,
572 int num) {
573 const TOKENEXTRA *p = *tp;
574 write_uniform(w, n, p->token); // The first color index.
575 ++p;
576 --num;
577 for (int i = 0; i < num; ++i) {
578 aom_write_symbol(w, p->token, p->color_map_cdf, n);
579 ++p;
580 }
581 *tp = p;
582 }
583 #endif // !CONFIG_PVQ
584
585 #if !CONFIG_PVQ
#if CONFIG_SUPERTX
// First measure whether updating the supertx probabilities pays for the
// one-bit "do update" flag; only then send the conditional updates.
static void update_supertx_probs(AV1_COMMON *cm, int probwt, aom_writer *w) {
  const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
                             av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
  int savings = 0;
  for (int i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
    for (int j = TX_8X8; j < TX_SIZES; ++j) {
      savings += av1_cond_prob_diff_update_savings(
          &cm->fc->supertx_prob[i][j], cm->counts.supertx[i][j], probwt);
    }
  }
  const int do_update = savings > savings_thresh;
  aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
  if (!do_update) return;
  for (int i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
    for (int j = TX_8X8; j < TX_SIZES; ++j) {
      av1_cond_prob_diff_update(w, &cm->fc->supertx_prob[i][j],
                                cm->counts.supertx[i][j], probwt);
    }
  }
}
#endif  // CONFIG_SUPERTX
611
612 #if !CONFIG_LV_MAP
613 #if CONFIG_NEW_MULTISYMBOL
614 static INLINE void write_coeff_extra(const aom_cdf_prob *const *cdf, int val,
615 int n, aom_writer *w) {
616 // Code the extra bits from LSB to MSB in groups of 4
617 int i = 0;
618 int count = 0;
619 while (count < n) {
620 const int size = AOMMIN(n - count, 4);
621 const int mask = (1 << size) - 1;
622 aom_write_cdf(w, val & mask, cdf[i++], 1 << size);
623 val >>= size;
624 count += size;
625 }
626 }
627 #else
628 static INLINE void write_coeff_extra(const aom_prob *pb, int value,
629 int num_bits, int skip_bits, aom_writer *w,
630 TOKEN_STATS *token_stats) {
631 // Code the extra bits from MSB to LSB 1 bit at a time
632 int index;
633 for (index = skip_bits; index < num_bits; ++index) {
634 const int shift = num_bits - index - 1;
635 const int bb = (value >> shift) & 1;
636 aom_write_record(w, bb, pb[index], token_stats);
637 }
638 }
639 #endif // CONFIG_NEW_MULTISYMBOL
640
// Write one transform block's coefficient tokens, consuming entries from
// *tp until an end-of-superblock token, an early EOB, or `stop` is reached.
// On return *tp points just past the consumed tokens.
static void pack_mb_tokens(aom_writer *w, const TOKENEXTRA **tp,
                           const TOKENEXTRA *const stop,
                           aom_bit_depth_t bit_depth, const TX_SIZE tx_size,
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
                           TX_TYPE tx_type, int is_inter,
#endif  // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
                           TOKEN_STATS *token_stats) {
  const TOKENEXTRA *p = *tp;
#if CONFIG_VAR_TX
  int count = 0;
  const int seg_eob = tx_size_2d[tx_size];  // max coeff count for this tx size
#endif

#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
  // For MRC_DCT the binary mask is coded first, as a 2-color map.
  if (tx_type == MRC_DCT && ((is_inter && SIGNAL_MRC_MASK_INTER) ||
                             (!is_inter && SIGNAL_MRC_MASK_INTRA))) {
    int rows = tx_size_high[tx_size];
    int cols = tx_size_wide[tx_size];
    assert(tx_size == TX_32X32);
    assert(p < stop);
    pack_map_tokens(w, &p, 2, rows * cols);
  }
#endif  // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK

  while (p < stop && p->token != EOSB_TOKEN) {
    const int token = p->token;
    const int eob_val = p->eob_val;
    if (token == BLOCK_Z_TOKEN) {
      // All-zero block: a single head symbol covers the whole block.
      aom_write_symbol(w, 0, *p->head_cdf, HEAD_TOKENS + 1);
      p++;
#if CONFIG_VAR_TX
      break;
#endif
      continue;
    }

    const av1_extra_bit *const extra_bits = &av1_extra_bits[token];
    if (eob_val == LAST_EOB) {
      // Just code a flag indicating whether the value is >1 or 1.
      aom_write_bit(w, token != ONE_TOKEN);
    } else {
      // Head symbol combines token class, EOB status and first-coeff flag.
      int comb_symb = 2 * AOMMIN(token, TWO_TOKEN) - eob_val + p->first_val;
      aom_write_symbol(w, comb_symb, *p->head_cdf, HEAD_TOKENS + p->first_val);
    }
    if (token > ONE_TOKEN) {
      // Tail symbol refines the magnitude category beyond TWO_TOKEN.
      aom_write_symbol(w, token - TWO_TOKEN, *p->tail_cdf, TAIL_TOKENS);
    }

    if (extra_bits->base_val) {
      const int bit_string = p->extra;
      const int bit_string_length = extra_bits->len;  // Length of extra bits to
      const int is_cat6 = (extra_bits->base_val == CAT6_MIN_VAL);
      // be written excluding
      // the sign bit.
      // Category-6 tokens skip the high bits that this tx size / bit depth
      // combination cannot produce.
      int skip_bits = is_cat6
                          ? (int)sizeof(av1_cat6_prob) -
                                av1_get_cat6_extrabits_size(tx_size, bit_depth)
                          : 0;

      assert(!(bit_string >> (bit_string_length - skip_bits + 1)));
      if (bit_string_length > 0)
#if CONFIG_NEW_MULTISYMBOL
        write_coeff_extra(extra_bits->cdf, bit_string >> 1,
                          bit_string_length - skip_bits, w);
#else
        write_coeff_extra(extra_bits->prob, bit_string >> 1, bit_string_length,
                          skip_bits, w, token_stats);
#endif

      // The sign is the low bit of `extra`, coded last as a raw bit.
      aom_write_bit_record(w, bit_string & 1, token_stats);
    }
    ++p;

#if CONFIG_VAR_TX
    ++count;
    if (eob_val == EARLY_EOB || count == seg_eob) break;
#endif
  }

  *tp = p;
}
722 #endif // !CONFIG_LV_MAP
723 #else // !CONFIG_PVQ
724 static PVQ_INFO *get_pvq_block(PVQ_QUEUE *pvq_q) {
725 PVQ_INFO *pvq;
726
727 assert(pvq_q->curr_pos <= pvq_q->last_pos);
728 assert(pvq_q->curr_pos < pvq_q->buf_len);
729
730 pvq = pvq_q->buf + pvq_q->curr_pos;
731 ++pvq_q->curr_pos;
732
733 return pvq;
734 }
735
// Write all PVQ-coded transform blocks for one plane of a prediction block,
// consuming entries from the macroblock's PVQ queue in raster order.
static void pack_pvq_tokens(aom_writer *w, MACROBLOCK *const x,
                            MACROBLOCKD *const xd, int plane, BLOCK_SIZE bsize,
                            const TX_SIZE tx_size) {
  PVQ_INFO *pvq;
  int idx, idy;
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  od_adapt_ctx *adapt;
  int max_blocks_wide;
  int max_blocks_high;
  int step = (1 << tx_size);  // tx units per PVQ block in each dimension

#if CONFIG_CHROMA_SUB8X8
  const BLOCK_SIZE plane_bsize =
      AOMMAX(BLOCK_4X4, get_plane_block_size(bsize, pd));
#elif CONFIG_CB4X4
  const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
#else
  const BLOCK_SIZE plane_bsize =
      get_plane_block_size(AOMMAX(BLOCK_8X8, bsize), pd);
#endif

  adapt = x->daala_enc.state.adapt;

  max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
  max_blocks_high = max_block_high(xd, plane_bsize, plane);

  for (idy = 0; idy < max_blocks_high; idy += step) {
    for (idx = 0; idx < max_blocks_wide; idx += step) {
      const int is_keyframe = 0;
      const int encode_flip = 0;
      const int flip = 0;
      int i;
      const int has_dc_skip = 1;
      int *exg = &adapt->pvq.pvq_exg[plane][tx_size][0];
      int *ext = adapt->pvq.pvq_ext + tx_size * PVQ_MAX_PARTITIONS;
      generic_encoder *model = adapt->pvq.pvq_param_model;

      pvq = get_pvq_block(x->pvq_q);

      // encode block skip info
      aom_write_symbol(w, pvq->ac_dc_coded,
                       adapt->skip_cdf[2 * tx_size + (plane != 0)], 4);

      // AC coeffs coded?
      if (pvq->ac_dc_coded & AC_CODED) {
        assert(pvq->bs == tx_size);
        for (i = 0; i < pvq->nb_bands; i++) {
          // Band 0 is always coded; later bands only when their direction
          // group has not been skipped.
          if (i == 0 ||
              (!pvq->skip_rest && !(pvq->skip_dir & (1 << ((i - 1) % 3))))) {
            pvq_encode_partition(
                w, pvq->qg[i], pvq->theta[i], pvq->y + pvq->off[i],
                pvq->size[i], pvq->k[i], model, adapt, exg + i, ext + i,
                (plane != 0) * OD_TXSIZES * PVQ_MAX_PARTITIONS +
                    pvq->bs * PVQ_MAX_PARTITIONS + i,
                is_keyframe, i == 0 && (i < pvq->nb_bands - 1), pvq->skip_rest,
                encode_flip, flip);
          }
          // After band 0, signal which direction groups are skipped.
          if (i == 0 && !pvq->skip_rest && pvq->bs > 0) {
            aom_write_symbol(
                w, pvq->skip_dir,
                &adapt->pvq
                     .pvq_skip_dir_cdf[(plane != 0) + 2 * (pvq->bs - 1)][0],
                7);
          }
        }
      }
      // Encode residue of DC coeff, if exist.
      if (!has_dc_skip || (pvq->ac_dc_coded & DC_CODED)) {
        generic_encode(w, &adapt->model_dc[plane],
                       abs(pvq->dq_dc_residue) - has_dc_skip,
                       &adapt->ex_dc[plane][pvq->bs][0], 2);
      }
      if ((pvq->ac_dc_coded & DC_CODED)) {
        aom_write_bit(w, pvq->dq_dc_residue < 0);
      }
    }
  }  // for (idy = 0;
}
#endif  // !CONFIG_PVQ
815
816 #if CONFIG_VAR_TX && !CONFIG_COEF_INTERLEAVE
817 #if CONFIG_LV_MAP
// Recursively write coefficient data for one plane of a (possibly split)
// transform block, following the inter_tx_size partitioning. LV_MAP variant.
static void pack_txb_tokens(aom_writer *w,
#if CONFIG_LV_MAP
                            AV1_COMMON *cm,
#endif  // CONFIG_LV_MAP
                            const TOKENEXTRA **tp,
                            const TOKENEXTRA *const tok_end,
#if CONFIG_PVQ || CONFIG_LV_MAP
                            MACROBLOCK *const x,
#endif
                            MACROBLOCKD *xd, MB_MODE_INFO *mbmi, int plane,
                            BLOCK_SIZE plane_bsize, aom_bit_depth_t bit_depth,
                            int block, int blk_row, int blk_col,
                            TX_SIZE tx_size, TOKEN_STATS *token_stats) {
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
  const int tx_row = blk_row >> (1 - pd->subsampling_y);
  const int tx_col = blk_col >> (1 - pd->subsampling_x);
  TX_SIZE plane_tx_size;
  const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
  const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);

  // Units beyond the visible block boundary are not coded.
  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;

  // Chroma derives its transform size from the collocated luma unit.
  plane_tx_size =
      plane ? uv_txsize_lookup[bsize][mbmi->inter_tx_size[tx_row][tx_col]][0][0]
            : mbmi->inter_tx_size[tx_row][tx_col];

  if (tx_size == plane_tx_size) {
    // Leaf node: emit the coefficients for this transform block.
    TOKEN_STATS tmp_token_stats;
    init_token_stats(&tmp_token_stats);

#if !CONFIG_PVQ
    tran_low_t *tcoeff = BLOCK_OFFSET(x->mbmi_ext->tcoeff[plane], block);
    uint16_t eob = x->mbmi_ext->eobs[plane][block];
    TXB_CTX txb_ctx = { x->mbmi_ext->txb_skip_ctx[plane][block],
                        x->mbmi_ext->dc_sign_ctx[plane][block] };
    av1_write_coeffs_txb(cm, xd, w, blk_row, blk_col, block, plane, tx_size,
                         tcoeff, eob, &txb_ctx);
#else
    pack_pvq_tokens(w, x, xd, plane, bsize, tx_size);
#endif
#if CONFIG_RD_DEBUG
    token_stats->txb_coeff_cost_map[blk_row][blk_col] = tmp_token_stats.cost;
    token_stats->cost += tmp_token_stats.cost;
#endif
  } else {
    // Split node: recurse into the four sub-transforms.
    const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
    const int bsl = tx_size_wide_unit[sub_txs];
    int i;

    assert(bsl > 0);

    for (i = 0; i < 4; ++i) {
      const int offsetr = blk_row + (i >> 1) * bsl;
      const int offsetc = blk_col + (i & 0x01) * bsl;
      const int step = tx_size_wide_unit[sub_txs] * tx_size_high_unit[sub_txs];

      if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;

      pack_txb_tokens(w,
#if CONFIG_LV_MAP
                      cm,
#endif
                      tp, tok_end,
#if CONFIG_PVQ || CONFIG_LV_MAP
                      x,
#endif
                      xd, mbmi, plane, plane_bsize, bit_depth, block, offsetr,
                      offsetc, sub_txs, token_stats);
      block += step;
    }
  }
}
891 #else // CONFIG_LV_MAP
// Recursively write coefficient tokens for one plane of a (possibly split)
// transform block, following the inter_tx_size partitioning. Token-based
// (non-LV_MAP) variant.
static void pack_txb_tokens(aom_writer *w, const TOKENEXTRA **tp,
                            const TOKENEXTRA *const tok_end,
#if CONFIG_PVQ
                            MACROBLOCK *const x,
#endif
                            MACROBLOCKD *xd, MB_MODE_INFO *mbmi, int plane,
                            BLOCK_SIZE plane_bsize, aom_bit_depth_t bit_depth,
                            int block, int blk_row, int blk_col,
                            TX_SIZE tx_size, TOKEN_STATS *token_stats) {
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
  const int tx_row = blk_row >> (1 - pd->subsampling_y);
  const int tx_col = blk_col >> (1 - pd->subsampling_x);
  TX_SIZE plane_tx_size;
  const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
  const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
  TX_TYPE tx_type = av1_get_tx_type(plane ? PLANE_TYPE_UV : PLANE_TYPE_Y, xd,
                                    blk_row, blk_col, block, tx_size);
#endif  // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK

  // Units beyond the visible block boundary are not coded.
  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;

  // Chroma derives its transform size from the collocated luma unit.
  plane_tx_size =
      plane ? uv_txsize_lookup[bsize][mbmi->inter_tx_size[tx_row][tx_col]][0][0]
            : mbmi->inter_tx_size[tx_row][tx_col];

  if (tx_size == plane_tx_size) {
    // Leaf node: emit the tokens for this transform block.
    TOKEN_STATS tmp_token_stats;
    init_token_stats(&tmp_token_stats);
#if !CONFIG_PVQ
    pack_mb_tokens(w, tp, tok_end, bit_depth, tx_size,
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
                   tx_type, is_inter_block(mbmi),
#endif  // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
                   &tmp_token_stats);
#else
    pack_pvq_tokens(w, x, xd, plane, bsize, tx_size);
#endif
#if CONFIG_RD_DEBUG
    token_stats->txb_coeff_cost_map[blk_row][blk_col] = tmp_token_stats.cost;
    token_stats->cost += tmp_token_stats.cost;
#endif
  } else {
#if CONFIG_RECT_TX_EXT
    // Quarter-size transforms split along one dimension rather than 4-way.
    int is_qttx = plane_tx_size == quarter_txsize_lookup[plane_bsize];
    const TX_SIZE sub_txs = is_qttx ? plane_tx_size : sub_tx_size_map[tx_size];
#else
    const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
#endif
    const int bsl = tx_size_wide_unit[sub_txs];
    int i;

    assert(bsl > 0);

    for (i = 0; i < 4; ++i) {
#if CONFIG_RECT_TX_EXT
      int is_wide_tx = tx_size_wide_unit[sub_txs] > tx_size_high_unit[sub_txs];
      const int offsetr =
          is_qttx ? (is_wide_tx ? i * tx_size_high_unit[sub_txs] : 0)
                  : blk_row + (i >> 1) * bsl;
      const int offsetc =
          is_qttx ? (is_wide_tx ? 0 : i * tx_size_wide_unit[sub_txs])
                  : blk_col + (i & 0x01) * bsl;
#else
      const int offsetr = blk_row + (i >> 1) * bsl;
      const int offsetc = blk_col + (i & 0x01) * bsl;
#endif
      const int step = tx_size_wide_unit[sub_txs] * tx_size_high_unit[sub_txs];

      if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;

      pack_txb_tokens(w, tp, tok_end,
#if CONFIG_PVQ
                      x,
#endif
                      xd, mbmi, plane, plane_bsize, bit_depth, block, offsetr,
                      offsetc, sub_txs, token_stats);
      block += step;
    }
  }
}
974 #endif // CONFIG_LV_MAP
975 #endif // CONFIG_VAR_TX
976
977 static void write_segment_id(aom_writer *w, const struct segmentation *seg,
978 struct segmentation_probs *segp, int segment_id) {
979 if (seg->enabled && seg->update_map) {
980 aom_write_symbol(w, segment_id, segp->tree_cdf, MAX_SEGMENTS);
981 }
982 }
983
#if CONFIG_NEW_MULTISYMBOL
// Code one reference-frame decision as a binary symbol against its
// context-derived CDF. WRITE_REF_BIT2 is for predictors whose CDF lookup
// takes only the MACROBLOCKD context (no AV1_COMMON argument).
#define WRITE_REF_BIT(bname, pname) \
  aom_write_symbol(w, bname, av1_get_pred_cdf_##pname(cm, xd), 2)
#define WRITE_REF_BIT2(bname, pname) \
  aom_write_symbol(w, bname, av1_get_pred_cdf_##pname(xd), 2)
#else
// Non-multisymbol build: the same decisions are written as single bits
// against a context-derived probability; both variants take (cm, xd) here.
#define WRITE_REF_BIT(bname, pname) \
  aom_write(w, bname, av1_get_pred_prob_##pname(cm, xd))
#define WRITE_REF_BIT2(bname, pname) \
  aom_write(w, bname, av1_get_pred_prob_##pname(cm, xd))
#endif
995
// Encodes the reference frame selection for the current block as a walk down
// the reference decision tree: compound-vs-single (when the frame allows a
// choice), then — for compound — uni/bi-directional type and the specific
// reference pair, or — for single prediction — the specific reference frame.
// Under CONFIG_VAR_REFS each tree node is only coded when more than one of
// its branches is available in the current reference set (the L_*/BWD_*
// macros); otherwise the decoder infers the branch, so the matching arm here
// is an assert instead of a write. The dangling if/else chains around the
// #if CONFIG_VAR_REFS regions are deliberate — do not re-brace them.
static void write_ref_frames(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                             aom_writer *w) {
  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const int is_compound = has_second_ref(mbmi);
  const int segment_id = mbmi->segment_id;

  // If segment level coding of this signal is disabled...
  // or the segment allows multiple reference frame options
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    // The segment dictates the reference frame; nothing is coded.
    assert(!is_compound);
    assert(mbmi->ref_frame[0] ==
           get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
  } else {
    // does the feature use compound prediction or not
    // (if not specified at the frame/segment level)
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      if (is_comp_ref_allowed(mbmi->sb_type))
#if CONFIG_NEW_MULTISYMBOL
        aom_write_symbol(w, is_compound, av1_get_reference_mode_cdf(cm, xd), 2);
#else
        aom_write(w, is_compound, av1_get_reference_mode_prob(cm, xd));
#endif  // CONFIG_NEW_MULTISYMBOL
    } else {
      assert((!is_compound) == (cm->reference_mode == SINGLE_REFERENCE));
    }

    if (is_compound) {
#if CONFIG_EXT_COMP_REFS
      const COMP_REFERENCE_TYPE comp_ref_type = has_uni_comp_refs(mbmi)
                                                    ? UNIDIR_COMP_REFERENCE
                                                    : BIDIR_COMP_REFERENCE;
#if USE_UNI_COMP_REFS
#if CONFIG_VAR_REFS
      if ((L_OR_L2(cm) || L3_OR_G(cm)) && BWD_OR_ALT(cm))
        if (L_AND_L2(cm) || L_AND_L3(cm) || L_AND_G(cm) || BWD_AND_ALT(cm))
#endif  // CONFIG_VAR_REFS
#if CONFIG_NEW_MULTISYMBOL
          aom_write_symbol(w, comp_ref_type,
                           av1_get_comp_reference_type_cdf(xd), 2);
#else
      aom_write(w, comp_ref_type, av1_get_comp_reference_type_prob(cm, xd));
#endif
#if CONFIG_VAR_REFS
        else
          assert(comp_ref_type == BIDIR_COMP_REFERENCE);
      else
        assert(comp_ref_type == UNIDIR_COMP_REFERENCE);
#endif  // CONFIG_VAR_REFS
#else   // !USE_UNI_COMP_REFS
      // NOTE: uni-directional comp refs disabled
      assert(comp_ref_type == BIDIR_COMP_REFERENCE);
#endif  // USE_UNI_COMP_REFS

      if (comp_ref_type == UNIDIR_COMP_REFERENCE) {
        // Uni-directional pair: first bit selects {BWD,ALT} vs LAST-anchored
        // pairs, then the second reference of a LAST-anchored pair is refined.
        const int bit = mbmi->ref_frame[0] == BWDREF_FRAME;
#if CONFIG_VAR_REFS
        if ((L_AND_L2(cm) || L_AND_L3(cm) || L_AND_G(cm)) && BWD_AND_ALT(cm))
#endif  // CONFIG_VAR_REFS
          WRITE_REF_BIT2(bit, uni_comp_ref_p);

        if (!bit) {
          assert(mbmi->ref_frame[0] == LAST_FRAME);
#if CONFIG_VAR_REFS
          if (L_AND_L2(cm) && (L_AND_L3(cm) || L_AND_G(cm))) {
#endif  // CONFIG_VAR_REFS
            const int bit1 = mbmi->ref_frame[1] == LAST3_FRAME ||
                             mbmi->ref_frame[1] == GOLDEN_FRAME;
            WRITE_REF_BIT2(bit1, uni_comp_ref_p1);
            if (bit1) {
#if CONFIG_VAR_REFS
              if (L_AND_L3(cm) && L_AND_G(cm)) {
#endif  // CONFIG_VAR_REFS
                const int bit2 = mbmi->ref_frame[1] == GOLDEN_FRAME;
                WRITE_REF_BIT2(bit2, uni_comp_ref_p2);
#if CONFIG_VAR_REFS
              }
#endif  // CONFIG_VAR_REFS
            }
#if CONFIG_VAR_REFS
          }
#endif  // CONFIG_VAR_REFS
        } else {
          assert(mbmi->ref_frame[1] == ALTREF_FRAME);
        }

        // Uni-directional pairs are fully coded at this point.
        return;
      }

      assert(comp_ref_type == BIDIR_COMP_REFERENCE);
#endif  // CONFIG_EXT_COMP_REFS

#if CONFIG_EXT_REFS
      // Bi-directional compound: forward reference first, then backward.
      const int bit = (mbmi->ref_frame[0] == GOLDEN_FRAME ||
                       mbmi->ref_frame[0] == LAST3_FRAME);
#if CONFIG_VAR_REFS
      // Test need to explicitly code (L,L2) vs (L3,G) branch node in tree
      if (L_OR_L2(cm) && L3_OR_G(cm))
#endif  // CONFIG_VAR_REFS
        WRITE_REF_BIT(bit, comp_ref_p);

      if (!bit) {
#if CONFIG_VAR_REFS
        // Test need to explicitly code (L) vs (L2) branch node in tree
        if (L_AND_L2(cm)) {
#endif  // CONFIG_VAR_REFS
          const int bit1 = mbmi->ref_frame[0] == LAST_FRAME;
          WRITE_REF_BIT(bit1, comp_ref_p1);
#if CONFIG_VAR_REFS
        }
#endif  // CONFIG_VAR_REFS
      } else {
#if CONFIG_VAR_REFS
        // Test need to explicitly code (L3) vs (G) branch node in tree
        if (L3_AND_G(cm)) {
#endif  // CONFIG_VAR_REFS
          const int bit2 = mbmi->ref_frame[0] == GOLDEN_FRAME;
          WRITE_REF_BIT(bit2, comp_ref_p2);
#if CONFIG_VAR_REFS
        }
#endif  // CONFIG_VAR_REFS
      }

#if CONFIG_VAR_REFS
      // Test need to explicitly code (BWD,ALT2) vs (ALT) branch node in tree
      if (BWD_OR_ALT2(cm) && ALTREF_IS_VALID(cm)) {
#endif  // CONFIG_VAR_REFS
        const int bit_bwd = mbmi->ref_frame[1] == ALTREF_FRAME;
        WRITE_REF_BIT(bit_bwd, comp_bwdref_p);

        if (!bit_bwd) {
#if CONFIG_VAR_REFS
          // Test need to explicitly code (BWD,ALT2) vs (ALT) branch node in
          // tree
          if (BWD_AND_ALT2(cm))
#endif  // CONFIG_VAR_REFS
            WRITE_REF_BIT(mbmi->ref_frame[1] == ALTREF2_FRAME, comp_bwdref_p1);
        }
#if CONFIG_VAR_REFS
      }
#endif  // CONFIG_VAR_REFS

#else   // !CONFIG_EXT_REFS
      const int bit = mbmi->ref_frame[0] == GOLDEN_FRAME;
      WRITE_REF_BIT(bit, comp_ref_p);
#endif  // CONFIG_EXT_REFS
    } else {
#if CONFIG_EXT_REFS
      // Single prediction: bit0 splits backward refs {BWD,ALT2,ALT} from
      // forward refs {L,L2,L3,G}; later bits refine within each group.
      const int bit0 = (mbmi->ref_frame[0] <= ALTREF_FRAME &&
                        mbmi->ref_frame[0] >= BWDREF_FRAME);
#if CONFIG_VAR_REFS
      // Test need to explicitly code (L,L2,L3,G) vs (BWD,ALT2,ALT) branch node
      // in tree
      if ((L_OR_L2(cm) || L3_OR_G(cm)) &&
          (BWD_OR_ALT2(cm) || ALTREF_IS_VALID(cm)))
#endif  // CONFIG_VAR_REFS
        WRITE_REF_BIT(bit0, single_ref_p1);

      if (bit0) {
#if CONFIG_VAR_REFS
        // Test need to explicitly code (BWD,ALT2) vs (ALT) branch node in tree
        if (BWD_OR_ALT2(cm) && ALTREF_IS_VALID(cm)) {
#endif  // CONFIG_VAR_REFS
          const int bit1 = mbmi->ref_frame[0] == ALTREF_FRAME;
          WRITE_REF_BIT(bit1, single_ref_p2);

          if (!bit1) {
#if CONFIG_VAR_REFS
            // Test need to explicitly code (BWD) vs (ALT2) branch node in tree
            if (BWD_AND_ALT2(cm))
#endif  // CONFIG_VAR_REFS
              WRITE_REF_BIT(mbmi->ref_frame[0] == ALTREF2_FRAME, single_ref_p6);
          }
#if CONFIG_VAR_REFS
        }
#endif  // CONFIG_VAR_REFS
      } else {
        const int bit2 = (mbmi->ref_frame[0] == LAST3_FRAME ||
                          mbmi->ref_frame[0] == GOLDEN_FRAME);
#if CONFIG_VAR_REFS
        // Test need to explicitly code (L,L2) vs (L3,G) branch node in tree
        if (L_OR_L2(cm) && L3_OR_G(cm))
#endif  // CONFIG_VAR_REFS
          WRITE_REF_BIT(bit2, single_ref_p3);

        if (!bit2) {
#if CONFIG_VAR_REFS
          // Test need to explicitly code (L) vs (L2) branch node in tree
          if (L_AND_L2(cm)) {
#endif  // CONFIG_VAR_REFS
            const int bit3 = mbmi->ref_frame[0] != LAST_FRAME;
            WRITE_REF_BIT(bit3, single_ref_p4);
#if CONFIG_VAR_REFS
          }
#endif  // CONFIG_VAR_REFS
        } else {
#if CONFIG_VAR_REFS
          // Test need to explicitly code (L3) vs (G) branch node in tree
          if (L3_AND_G(cm)) {
#endif  // CONFIG_VAR_REFS
            const int bit4 = mbmi->ref_frame[0] != LAST3_FRAME;
            WRITE_REF_BIT(bit4, single_ref_p5);
#if CONFIG_VAR_REFS
          }
#endif  // CONFIG_VAR_REFS
        }
      }
#else   // !CONFIG_EXT_REFS
      const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
      WRITE_REF_BIT(bit0, single_ref_p1);

      if (bit0) {
        const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
        WRITE_REF_BIT(bit1, single_ref_p2);
      }
#endif  // CONFIG_EXT_REFS
    }
  }
}
1215
1216 #if CONFIG_FILTER_INTRA
1217 static void write_filter_intra_mode_info(const AV1_COMMON *const cm,
1218 const MACROBLOCKD *xd,
1219 const MB_MODE_INFO *const mbmi,
1220 int mi_row, int mi_col,
1221 aom_writer *w) {
1222 if (mbmi->mode == DC_PRED && mbmi->palette_mode_info.palette_size[0] == 0) {
1223 aom_write(w, mbmi->filter_intra_mode_info.use_filter_intra_mode[0],
1224 cm->fc->filter_intra_probs[0]);
1225 if (mbmi->filter_intra_mode_info.use_filter_intra_mode[0]) {
1226 const FILTER_INTRA_MODE mode =
1227 mbmi->filter_intra_mode_info.filter_intra_mode[0];
1228 write_uniform(w, FILTER_INTRA_MODES, mode);
1229 }
1230 }
1231
1232 #if CONFIG_CB4X4
1233 if (!is_chroma_reference(mi_row, mi_col, mbmi->sb_type,
1234 xd->plane[1].subsampling_x,
1235 xd->plane[1].subsampling_y))
1236 return;
1237 #else
1238 (void)xd;
1239 (void)mi_row;
1240 (void)mi_col;
1241 #endif // CONFIG_CB4X4
1242
1243 if (mbmi->uv_mode == UV_DC_PRED &&
1244 mbmi->palette_mode_info.palette_size[1] == 0) {
1245 aom_write(w, mbmi->filter_intra_mode_info.use_filter_intra_mode[1],
1246 cm->fc->filter_intra_probs[1]);
1247 if (mbmi->filter_intra_mode_info.use_filter_intra_mode[1]) {
1248 const FILTER_INTRA_MODE mode =
1249 mbmi->filter_intra_mode_info.filter_intra_mode[1];
1250 write_uniform(w, FILTER_INTRA_MODES, mode);
1251 }
1252 }
1253 }
1254 #endif // CONFIG_FILTER_INTRA
1255
1256 #if CONFIG_EXT_INTRA
1257 static void write_intra_angle_info(const MACROBLOCKD *xd,
1258 FRAME_CONTEXT *const ec_ctx, aom_writer *w) {
1259 const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
1260 const BLOCK_SIZE bsize = mbmi->sb_type;
1261 #if CONFIG_INTRA_INTERP
1262 const int intra_filter_ctx = av1_get_pred_context_intra_interp(xd);
1263 int p_angle;
1264 #endif // CONFIG_INTRA_INTERP
1265
1266 (void)ec_ctx;
1267 if (!av1_use_angle_delta(bsize)) return;
1268
1269 if (av1_is_directional_mode(mbmi->mode, bsize)) {
1270 write_uniform(w, 2 * MAX_ANGLE_DELTA + 1,
1271 MAX_ANGLE_DELTA + mbmi->angle_delta[0]);
1272 #if CONFIG_INTRA_INTERP
1273 p_angle = mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
1274 if (av1_is_intra_filter_switchable(p_angle)) {
1275 aom_write_symbol(w, mbmi->intra_filter,
1276 ec_ctx->intra_filter_cdf[intra_filter_ctx],
1277 INTRA_FILTERS);
1278 }
1279 #endif // CONFIG_INTRA_INTERP
1280 }
1281
1282 if (av1_is_directional_mode(get_uv_mode(mbmi->uv_mode), bsize)) {
1283 write_uniform(w, 2 * MAX_ANGLE_DELTA + 1,
1284 MAX_ANGLE_DELTA + mbmi->angle_delta[1]);
1285 }
1286 }
1287 #endif // CONFIG_EXT_INTRA
1288
// Writes the block's interpolation filter choice(s) when the frame-level
// filter is SWITCHABLE and the block actually needs interpolation
// (av1_is_interp_needed). Under CONFIG_DUAL_FILTER a separate filter is
// coded per direction (x/y) whenever either reference's MV has a subpel
// component in that direction. Also accumulates the encoder's
// filter-selection statistics.
static void write_mb_interp_filter(AV1_COMP *cpi, const MACROBLOCKD *xd,
                                   aom_writer *w) {
  AV1_COMMON *const cm = &cpi->common;
  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;

  if (!av1_is_interp_needed(xd)) {
    // No filter is coded; the decoder infers the unswitchable default, so
    // the encoder must have stored exactly that.
    assert(mbmi->interp_filters ==
           av1_broadcast_interp_filter(
               av1_unswitchable_filter(cm->interp_filter)));
    return;
  }
  if (cm->interp_filter == SWITCHABLE) {
#if CONFIG_DUAL_FILTER
    int dir;
    for (dir = 0; dir < 2; ++dir) {
      // dir and dir + 2 cover the same axis for ref 0 and ref 1 respectively.
      if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
          (mbmi->ref_frame[1] > INTRA_FRAME &&
           has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
        const int ctx = av1_get_pred_context_switchable_interp(xd, dir);
        InterpFilter filter =
            av1_extract_interp_filter(mbmi->interp_filters, dir);
        aom_write_symbol(w, filter, ec_ctx->switchable_interp_cdf[ctx],
                         SWITCHABLE_FILTERS);
        // NOTE(review): index 0 appears to be the current-frame stats bucket;
        // confirm against the cpi->interp_filter_selected definition.
        ++cpi->interp_filter_selected[0][filter];
      } else {
        // Fullpel in this direction: the filter is not coded and must be the
        // regular default.
        assert(av1_extract_interp_filter(mbmi->interp_filters, dir) ==
               EIGHTTAP_REGULAR);
      }
    }
#else
    {
      const int ctx = av1_get_pred_context_switchable_interp(xd);
      InterpFilter filter = av1_extract_interp_filter(mbmi->interp_filters, 0);
      aom_write_symbol(w, filter, ec_ctx->switchable_interp_cdf[ctx],
                       SWITCHABLE_FILTERS);
      ++cpi->interp_filter_selected[0][filter];
    }
#endif  // CONFIG_DUAL_FILTER
  }
}
1330
1331 #if CONFIG_PALETTE_DELTA_ENCODING
1332 // Transmit color values with delta encoding. Write the first value as
1333 // literal, and the deltas between each value and the previous one. "min_val" is
1334 // the smallest possible value of the deltas.
1335 static void delta_encode_palette_colors(const int *colors, int num,
1336 int bit_depth, int min_val,
1337 aom_writer *w) {
1338 if (num <= 0) return;
1339 assert(colors[0] < (1 << bit_depth));
1340 aom_write_literal(w, colors[0], bit_depth);
1341 if (num == 1) return;
1342 int max_delta = 0;
1343 int deltas[PALETTE_MAX_SIZE];
1344 memset(deltas, 0, sizeof(deltas));
1345 for (int i = 1; i < num; ++i) {
1346 assert(colors[i] < (1 << bit_depth));
1347 const int delta = colors[i] - colors[i - 1];
1348 deltas[i - 1] = delta;
1349 assert(delta >= min_val);
1350 if (delta > max_delta) max_delta = delta;
1351 }
1352 const int min_bits = bit_depth - 3;
1353 int bits = AOMMAX(av1_ceil_log2(max_delta + 1 - min_val), min_bits);
1354 assert(bits <= bit_depth);
1355 int range = (1 << bit_depth) - colors[0] - min_val;
1356 aom_write_literal(w, bits - min_bits, 2);
1357 for (int i = 0; i < num - 1; ++i) {
1358 aom_write_literal(w, deltas[i] - min_val, bits);
1359 range -= deltas[i];
1360 bits = AOMMIN(bits, av1_ceil_log2(range));
1361 }
1362 }
1363
1364 // Transmit luma palette color values. First signal if each color in the color
1365 // cache is used. Those colors that are not in the cache are transmitted with
1366 // delta encoding.
1367 static void write_palette_colors_y(const MACROBLOCKD *const xd,
1368 const PALETTE_MODE_INFO *const pmi,
1369 int bit_depth, aom_writer *w) {
1370 const int n = pmi->palette_size[0];
1371 uint16_t color_cache[2 * PALETTE_MAX_SIZE];
1372 const int n_cache = av1_get_palette_cache(xd, 0, color_cache);
1373 int out_cache_colors[PALETTE_MAX_SIZE];
1374 uint8_t cache_color_found[2 * PALETTE_MAX_SIZE];
1375 const int n_out_cache =
1376 av1_index_color_cache(color_cache, n_cache, pmi->palette_colors, n,
1377 cache_color_found, out_cache_colors);
1378 int n_in_cache = 0;
1379 for (int i = 0; i < n_cache && n_in_cache < n; ++i) {
1380 const int found = cache_color_found[i];
1381 aom_write_bit(w, found);
1382 n_in_cache += found;
1383 }
1384 assert(n_in_cache + n_out_cache == n);
1385 delta_encode_palette_colors(out_cache_colors, n_out_cache, bit_depth, 1, w);
1386 }
1387
// Write chroma palette color values. U channel is handled similarly to the
// luma channel: cache-hit bits followed by delta coding of the misses
// (minimum delta 0 — U colors may repeat). For the V channel, either use
// delta encoding or transmit raw values directly, whichever costs less; a
// one-bit flag tells the decoder which representation follows.
static void write_palette_colors_uv(const MACROBLOCKD *const xd,
                                    const PALETTE_MODE_INFO *const pmi,
                                    int bit_depth, aom_writer *w) {
  const int n = pmi->palette_size[1];
  // U and V colors live in fixed-offset slices of pmi->palette_colors.
  const uint16_t *colors_u = pmi->palette_colors + PALETTE_MAX_SIZE;
  const uint16_t *colors_v = pmi->palette_colors + 2 * PALETTE_MAX_SIZE;
  // U channel colors.
  uint16_t color_cache[2 * PALETTE_MAX_SIZE];
  const int n_cache = av1_get_palette_cache(xd, 1, color_cache);
  int out_cache_colors[PALETTE_MAX_SIZE];
  uint8_t cache_color_found[2 * PALETTE_MAX_SIZE];
  const int n_out_cache = av1_index_color_cache(
      color_cache, n_cache, colors_u, n, cache_color_found, out_cache_colors);
  int n_in_cache = 0;
  for (int i = 0; i < n_cache && n_in_cache < n; ++i) {
    const int found = cache_color_found[i];
    aom_write_bit(w, found);
    n_in_cache += found;
  }
  delta_encode_palette_colors(out_cache_colors, n_out_cache, bit_depth, 0, w);

  // V channel colors. Don't use color cache as the colors are not sorted.
  const int max_val = 1 << bit_depth;
  int zero_count = 0, min_bits_v = 0;
  int bits_v =
      av1_get_palette_delta_bits_v(pmi, bit_depth, &zero_count, &min_bits_v);
  // Estimated cost: 2-bit width field + first literal + (delta, sign) pairs,
  // where zero deltas skip the sign bit.
  const int rate_using_delta =
      2 + bit_depth + (bits_v + 1) * (n - 1) - zero_count;
  const int rate_using_raw = bit_depth * n;
  if (rate_using_delta < rate_using_raw) {  // delta encoding
    assert(colors_v[0] < (1 << bit_depth));
    aom_write_bit(w, 1);
    aom_write_literal(w, bits_v - min_bits_v, 2);
    aom_write_literal(w, colors_v[0], bit_depth);
    for (int i = 1; i < n; ++i) {
      assert(colors_v[i] < (1 << bit_depth));
      if (colors_v[i] == colors_v[i - 1]) {  // No need to signal sign bit.
        aom_write_literal(w, 0, bits_v);
        continue;
      }
      const int delta = abs((int)colors_v[i] - colors_v[i - 1]);
      const int sign_bit = colors_v[i] < colors_v[i - 1];
      // Code whichever of |delta| and its modular complement fits bits_v,
      // flipping the sign bit accordingly so the decoder can undo it.
      if (delta <= max_val - delta) {
        aom_write_literal(w, delta, bits_v);
        aom_write_bit(w, sign_bit);
      } else {
        aom_write_literal(w, max_val - delta, bits_v);
        aom_write_bit(w, !sign_bit);
      }
    }
  } else {  // Transmit raw values.
    aom_write_bit(w, 0);
    for (int i = 0; i < n; ++i) {
      assert(colors_v[i] < (1 << bit_depth));
      aom_write_literal(w, colors_v[i], bit_depth);
    }
  }
}
1449 #endif // CONFIG_PALETTE_DELTA_ENCODING
1450
// Signals whether the block uses palette coding on each plane and, when it
// does, the palette size and the palette colors. The luma enable flag is
// contexted on whether the above/left neighbors used a luma palette; the
// chroma flag on whether this block's luma palette is in use. Only DC_PRED
// planes can carry a palette.
static void write_palette_mode_info(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                                    const MODE_INFO *const mi, aom_writer *w) {
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const MODE_INFO *const above_mi = xd->above_mi;
  const MODE_INFO *const left_mi = xd->left_mi;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;

  assert(bsize >= BLOCK_8X8 && bsize <= BLOCK_LARGEST);
  // CDF tables are indexed by block size relative to the smallest
  // palette-capable size.
  const int block_palette_idx = bsize - BLOCK_8X8;

  if (mbmi->mode == DC_PRED) {
    const int n = pmi->palette_size[0];
    int palette_y_mode_ctx = 0;
    if (above_mi) {
      palette_y_mode_ctx +=
          (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
    }
    if (left_mi) {
      palette_y_mode_ctx +=
          (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
    }
#if CONFIG_NEW_MULTISYMBOL
    aom_write_symbol(
        w, n > 0,
        xd->tile_ctx->palette_y_mode_cdf[block_palette_idx][palette_y_mode_ctx],
        2);
#else
    aom_write(
        w, n > 0,
        av1_default_palette_y_mode_prob[block_palette_idx][palette_y_mode_ctx]);
#endif
    if (n > 0) {
      // Size is coded as an offset from the minimum palette size.
      aom_write_symbol(w, n - PALETTE_MIN_SIZE,
                       xd->tile_ctx->palette_y_size_cdf[block_palette_idx],
                       PALETTE_SIZES);
#if CONFIG_PALETTE_DELTA_ENCODING
      write_palette_colors_y(xd, pmi, cm->bit_depth, w);
#else
      for (int i = 0; i < n; ++i) {
        assert(pmi->palette_colors[i] < (1 << cm->bit_depth));
        aom_write_literal(w, pmi->palette_colors[i], cm->bit_depth);
      }
#endif  // CONFIG_PALETTE_DELTA_ENCODING
    }
  }

  if (mbmi->uv_mode == UV_DC_PRED) {
    const int n = pmi->palette_size[1];
    const int palette_uv_mode_ctx = (pmi->palette_size[0] > 0);
#if CONFIG_NEW_MULTISYMBOL
    aom_write_symbol(w, n > 0,
                     xd->tile_ctx->palette_uv_mode_cdf[palette_uv_mode_ctx], 2);
#else
    aom_write(w, n > 0, av1_default_palette_uv_mode_prob[palette_uv_mode_ctx]);
#endif
    if (n > 0) {
      aom_write_symbol(w, n - PALETTE_MIN_SIZE,
                       xd->tile_ctx->palette_uv_size_cdf[block_palette_idx],
                       PALETTE_SIZES);
#if CONFIG_PALETTE_DELTA_ENCODING
      write_palette_colors_uv(xd, pmi, cm->bit_depth, w);
#else
      // Without delta encoding, U and V colors are interleaved as raw
      // literals from their fixed slices of palette_colors.
      for (int i = 0; i < n; ++i) {
        assert(pmi->palette_colors[PALETTE_MAX_SIZE + i] <
               (1 << cm->bit_depth));
        assert(pmi->palette_colors[2 * PALETTE_MAX_SIZE + i] <
               (1 << cm->bit_depth));
        aom_write_literal(w, pmi->palette_colors[PALETTE_MAX_SIZE + i],
                          cm->bit_depth);
        aom_write_literal(w, pmi->palette_colors[2 * PALETTE_MAX_SIZE + i],
                          cm->bit_depth);
      }
#endif  // CONFIG_PALETTE_DELTA_ENCODING
    }
  }
}
1528
// Writes the block's transform type when it is signalled at all: the set of
// available transforms for (tx_size, block size, inter/intra) must contain
// more than one entry, the effective qindex must be nonzero (lossless blocks
// imply DCT), and the block must not be skipped (via skip flag, supertx, or
// segment-level skip). Under CONFIG_TXK_SEL the type is coded per transform
// block and only for the luma plane; otherwise it is a single per-mbmi value.
void av1_write_tx_type(const AV1_COMMON *const cm, const MACROBLOCKD *xd,
#if CONFIG_SUPERTX
                       const int supertx_enabled,
#endif
#if CONFIG_TXK_SEL
                       int blk_row, int blk_col, int block, int plane,
                       TX_SIZE tx_size,
#endif
                       aom_writer *w) {
  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
  const int is_inter = is_inter_block(mbmi);
#if !CONFIG_TXK_SEL
#if CONFIG_VAR_TX
  const TX_SIZE tx_size = is_inter ? mbmi->min_tx_size : mbmi->tx_size;
#else
  const TX_SIZE tx_size = mbmi->tx_size;
#endif  // CONFIG_VAR_TX
#endif  // !CONFIG_TXK_SEL
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;

#if !CONFIG_TXK_SEL
  TX_TYPE tx_type = mbmi->tx_type;
#else
  // Only y plane's tx_type is transmitted
  if (plane > 0) return;
  PLANE_TYPE plane_type = get_plane_type(plane);
  TX_TYPE tx_type =
      av1_get_tx_type(plane_type, xd, blk_row, blk_col, block, tx_size);
#endif

  if (!FIXED_TX_TYPE) {
#if CONFIG_EXT_TX
    const TX_SIZE square_tx_size = txsize_sqr_map[tx_size];
    const BLOCK_SIZE bsize = mbmi->sb_type;
    if (get_ext_tx_types(tx_size, bsize, is_inter, cm->reduced_tx_set_used) >
            1 &&
        ((!cm->seg.enabled && cm->base_qindex > 0) ||
         (cm->seg.enabled && xd->qindex[mbmi->segment_id] > 0)) &&
        !mbmi->skip &&
#if CONFIG_SUPERTX
        !supertx_enabled &&
#endif  // CONFIG_SUPERTX
        !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
#if CONFIG_MRC_TX
      if (tx_type == MRC_DCT)
        assert(mbmi->valid_mrc_mask && "Invalid MRC mask");
#endif  // CONFIG_MRC_TX
      const TxSetType tx_set_type = get_ext_tx_set_type(
          tx_size, bsize, is_inter, cm->reduced_tx_set_used);
      const int eset =
          get_ext_tx_set(tx_size, bsize, is_inter, cm->reduced_tx_set_used);
      // eset == 0 should correspond to a set with only DCT_DCT and there
      // is no need to send the tx_type
      assert(eset > 0);
      assert(av1_ext_tx_used[tx_set_type][tx_type]);
#if !CONFIG_LGT_FROM_PRED
      if (is_inter) {
        aom_write_symbol(w, av1_ext_tx_ind[tx_set_type][tx_type],
                         ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
                         av1_num_ext_tx_set[tx_set_type]);
      } else if (ALLOW_INTRA_EXT_TX) {
        // Intra CDFs are additionally conditioned on the prediction mode.
        aom_write_symbol(
            w, av1_ext_tx_ind[tx_set_type][tx_type],
            ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][mbmi->mode],
            av1_num_ext_tx_set[tx_set_type]);
      }
#else
      // only signal tx_type when lgt is not allowed or not selected
      if (is_inter) {
        if (LGT_FROM_PRED_INTER) {
          if (is_lgt_allowed(mbmi->mode, tx_size) && !cm->reduced_tx_set_used)
            aom_write(w, mbmi->use_lgt, ec_ctx->inter_lgt_prob[square_tx_size]);
          if (!mbmi->use_lgt)
            aom_write_symbol(w, av1_ext_tx_ind[tx_set_type][tx_type],
                             ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
                             av1_num_ext_tx_set[tx_set_type]);
        } else {
          aom_write_symbol(w, av1_ext_tx_ind[tx_set_type][tx_type],
                           ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
                           av1_num_ext_tx_set[tx_set_type]);
        }
      } else if (ALLOW_INTRA_EXT_TX) {
        if (LGT_FROM_PRED_INTRA) {
          if (is_lgt_allowed(mbmi->mode, tx_size) && !cm->reduced_tx_set_used)
            aom_write(w, mbmi->use_lgt,
                      ec_ctx->intra_lgt_prob[square_tx_size][mbmi->mode]);
          if (!mbmi->use_lgt)
            aom_write_symbol(
                w, av1_ext_tx_ind[tx_set_type][tx_type],
                ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][mbmi->mode],
                av1_num_ext_tx_set[tx_set_type]);
        } else {
          aom_write_symbol(
              w, av1_ext_tx_ind[tx_set_type][tx_type],
              ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][mbmi->mode],
              av1_num_ext_tx_set[tx_set_type]);
        }
      }
#endif  // CONFIG_LGT_FROM_PRED
    }
#else   // CONFIG_EXT_TX
    // Non-EXT_TX: the type is only coded for transforms below 32x32.
    if (tx_size < TX_32X32 &&
        ((!cm->seg.enabled && cm->base_qindex > 0) ||
         (cm->seg.enabled && xd->qindex[mbmi->segment_id] > 0)) &&
        !mbmi->skip &&
#if CONFIG_SUPERTX
        !supertx_enabled &&
#endif  // CONFIG_SUPERTX
        !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
      if (is_inter) {
        aom_write_symbol(w, av1_ext_tx_ind[tx_type],
                         ec_ctx->inter_ext_tx_cdf[tx_size], TX_TYPES);
      } else {
        aom_write_symbol(
            w, av1_ext_tx_ind[tx_type],
            ec_ctx->intra_ext_tx_cdf[tx_size]
                                    [intra_mode_to_tx_type_context[mbmi->mode]],
            TX_TYPES);
      }
    }
#endif  // CONFIG_EXT_TX
  }
}
1652
1653 static void write_intra_mode(FRAME_CONTEXT *frame_ctx, BLOCK_SIZE bsize,
1654 PREDICTION_MODE mode, aom_writer *w) {
1655 aom_write_symbol(w, mode, frame_ctx->y_mode_cdf[size_group_lookup[bsize]],
1656 INTRA_MODES);
1657 }
1658
1659 static void write_intra_uv_mode(FRAME_CONTEXT *frame_ctx,
1660 UV_PREDICTION_MODE uv_mode,
1661 PREDICTION_MODE y_mode, aom_writer *w) {
1662 #if !CONFIG_CFL
1663 uv_mode = get_uv_mode(uv_mode);
1664 #endif
1665 aom_write_symbol(w, uv_mode, frame_ctx->uv_mode_cdf[y_mode], UV_INTRA_MODES);
1666 }
1667
1668 #if CONFIG_CFL
1669 static void write_cfl_alphas(FRAME_CONTEXT *const ec_ctx, int idx,
1670 int joint_sign, aom_writer *w) {
1671 aom_write_symbol(w, joint_sign, ec_ctx->cfl_sign_cdf, CFL_JOINT_SIGNS);
1672 // Magnitudes are only signaled for nonzero codes.
1673 if (CFL_SIGN_U(joint_sign) != CFL_SIGN_ZERO) {
1674 aom_cdf_prob *cdf_u = ec_ctx->cfl_alpha_cdf[CFL_CONTEXT_U(joint_sign)];
1675 aom_write_symbol(w, CFL_IDX_U(idx), cdf_u, CFL_ALPHABET_SIZE);
1676 }
1677 if (CFL_SIGN_V(joint_sign) != CFL_SIGN_ZERO) {
1678 aom_cdf_prob *cdf_v = ec_ctx->cfl_alpha_cdf[CFL_CONTEXT_V(joint_sign)];
1679 aom_write_symbol(w, CFL_IDX_V(idx), cdf_v, CFL_ALPHABET_SIZE);
1680 }
1681 }
1682 #endif
1683
1684 static void pack_inter_mode_mvs(AV1_COMP *cpi, const int mi_row,
1685 const int mi_col,
1686 #if CONFIG_SUPERTX
1687 int supertx_enabled,
1688 #endif
1689 aom_writer *w) {
1690 AV1_COMMON *const cm = &cpi->common;
1691 MACROBLOCK *const x = &cpi->td.mb;
1692 MACROBLOCKD *const xd = &x->e_mbd;
1693 FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
1694 const MODE_INFO *mi = xd->mi[0];
1695
1696 const struct segmentation *const seg = &cm->seg;
1697 struct segmentation_probs *const segp = &ec_ctx->seg;
1698 const MB_MODE_INFO *const mbmi = &mi->mbmi;
1699 const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
1700 const PREDICTION_MODE mode = mbmi->mode;
1701 const int segment_id = mbmi->segment_id;
1702 const BLOCK_SIZE bsize = mbmi->sb_type;
1703 const int allow_hp = cm->allow_high_precision_mv;
1704 const int is_inter = is_inter_block(mbmi);
1705 const int is_compound = has_second_ref(mbmi);
1706 int skip, ref;
1707 #if CONFIG_CB4X4
1708 const int unify_bsize = 1;
1709 #else
1710 const int unify_bsize = 0;
1711 #endif
1712 (void)mi_row;
1713 (void)mi_col;
1714
1715 if (seg->update_map) {
1716 if (seg->temporal_update) {
1717 const int pred_flag = mbmi->seg_id_predicted;
1718 #if CONFIG_NEW_MULTISYMBOL
1719 aom_cdf_prob *pred_cdf = av1_get_pred_cdf_seg_id(segp, xd);
1720 aom_write_symbol(w, pred_flag, pred_cdf, 2);
1721 #else
1722 aom_prob pred_prob = av1_get_pred_prob_seg_id(segp, xd);
1723 aom_write(w, pred_flag, pred_prob);
1724 #endif
1725 if (!pred_flag) write_segment_id(w, seg, segp, segment_id);
1726 } else {
1727 write_segment_id(w, seg, segp, segment_id);
1728 }
1729 }
1730
1731 #if CONFIG_SUPERTX
1732 if (supertx_enabled)
1733 skip = mbmi->skip;
1734 else
1735 skip = write_skip(cm, xd, segment_id, mi, w);
1736 #else
1737 skip = write_skip(cm, xd, segment_id, mi, w);
1738 #endif // CONFIG_SUPERTX
1739 if (cm->delta_q_present_flag) {
1740 int super_block_upper_left =
1741 ((mi_row & MAX_MIB_MASK) == 0) && ((mi_col & MAX_MIB_MASK) == 0);
1742 if ((bsize != BLOCK_LARGEST || skip == 0) && super_block_upper_left) {
1743 assert(mbmi->current_q_index > 0);
1744 int reduced_delta_qindex =
1745 (mbmi->current_q_index - xd->prev_qindex) / cm->delta_q_res;
1746 write_delta_qindex(cm, xd, reduced_delta_qindex, w);
1747 xd->prev_qindex = mbmi->current_q_index;
1748 #if CONFIG_EXT_DELTA_Q
1749 #if CONFIG_LOOPFILTER_LEVEL
1750 if (cm->delta_lf_present_flag) {
1751 if (cm->delta_lf_multi) {
1752 for (int lf_id = 0; lf_id < FRAME_LF_COUNT; ++lf_id) {
1753 int reduced_delta_lflevel =
1754 (mbmi->curr_delta_lf[lf_id] - xd->prev_delta_lf[lf_id]) /
1755 cm->delta_lf_res;
1756 write_delta_lflevel(cm, xd, lf_id, reduced_delta_lflevel, w);
1757 xd->prev_delta_lf[lf_id] = mbmi->curr_delta_lf[lf_id];
1758 }
1759 } else {
1760 int reduced_delta_lflevel =
1761 (mbmi->current_delta_lf_from_base - xd->prev_delta_lf_from_base) /
1762 cm->delta_lf_res;
1763 write_delta_lflevel(cm, xd, -1, reduced_delta_lflevel, w);
1764 xd->prev_delta_lf_from_base = mbmi->current_delta_lf_from_base;
1765 }
1766 }
1767 #else
1768 if (cm->delta_lf_present_flag) {
1769 int reduced_delta_lflevel =
1770 (mbmi->current_delta_lf_from_base - xd->prev_delta_lf_from_base) /
1771 cm->delta_lf_res;
1772 write_delta_lflevel(cm, xd, reduced_delta_lflevel, w);
1773 xd->prev_delta_lf_from_base = mbmi->current_delta_lf_from_base;
1774 }
1775 #endif // CONFIG_LOOPFILTER_LEVEL
1776 #endif // CONFIG_EXT_DELTA_Q
1777 }
1778 }
1779
1780 #if CONFIG_SUPERTX
1781 if (!supertx_enabled)
1782 #endif // CONFIG_SUPERTX
1783 write_is_inter(cm, xd, mbmi->segment_id, w, is_inter);
1784
1785 if (cm->tx_mode == TX_MODE_SELECT &&
1786 #if CONFIG_CB4X4 && CONFIG_VAR_TX && !CONFIG_RECT_TX
1787 (bsize >= BLOCK_8X8 || (bsize > BLOCK_4X4 && is_inter)) &&
1788 #else
1789 block_signals_txsize(bsize) &&
1790 #endif
1791 #if CONFIG_SUPERTX
1792 !supertx_enabled &&
1793 #endif // CONFIG_SUPERTX
1794 !(is_inter && skip) && !xd->lossless[segment_id]) {
1795 #if CONFIG_VAR_TX
1796 if (is_inter) { // This implies skip flag is 0.
1797 const TX_SIZE max_tx_size = get_vartx_max_txsize(mbmi, bsize, 0);
1798 const int bh = tx_size_high_unit[max_tx_size];
1799 const int bw = tx_size_wide_unit[max_tx_size];
1800 const int width = block_size_wide[bsize] >> tx_size_wide_log2[0];
1801 const int height = block_size_high[bsize] >> tx_size_wide_log2[0];
1802 int init_depth =
1803 (height != width) ? RECT_VARTX_DEPTH_INIT : SQR_VARTX_DEPTH_INIT;
1804 int idx, idy;
1805 for (idy = 0; idy < height; idy += bh)
1806 for (idx = 0; idx < width; idx += bw)
1807 write_tx_size_vartx(cm, xd, mbmi, max_tx_size, init_depth, idy, idx,
1808 w);
1809 #if CONFIG_RECT_TX_EXT
1810 if (is_quarter_tx_allowed(xd, mbmi, is_inter_block(mbmi)) &&
1811 quarter_txsize_lookup[bsize] != max_tx_size &&
1812 (mbmi->tx_size == quarter_txsize_lookup[bsize] ||
1813 mbmi->tx_size == max_tx_size)) {
1814 #if CONFIG_NEW_MULTISYMBOL
1815 aom_write_symbol(w, mbmi->tx_size != max_tx_size,
1816 cm->fc->quarter_tx_size_cdf, 2);
1817 #else
1818 aom_write(w, mbmi->tx_size != max_tx_size,
1819 cm->fc->quarter_tx_size_prob);
1820 #endif
1821 }
1822 #endif
1823 } else {
1824 set_txfm_ctxs(mbmi->tx_size, xd->n8_w, xd->n8_h, skip, xd);
1825 write_selected_tx_size(cm, xd, w);
1826 }
1827 } else {
1828 set_txfm_ctxs(mbmi->tx_size, xd->n8_w, xd->n8_h, skip, xd);
1829 #else
1830 write_selected_tx_size(cm, xd, w);
1831 #endif
1832 }
1833
1834 if (!is_inter) {
1835 if (bsize >= BLOCK_8X8 || unify_bsize) {
1836 write_intra_mode(ec_ctx, bsize, mode, w);
1837 } else {
1838 int idx, idy;
1839 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
1840 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
1841 for (idy = 0; idy < 2; idy += num_4x4_h) {
1842 for (idx = 0; idx < 2; idx += num_4x4_w) {
1843 const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
1844 write_intra_mode(ec_ctx, bsize, b_mode, w);
1845 }
1846 }
1847 }
1848 #if CONFIG_CB4X4
1849 if (is_chroma_reference(mi_row, mi_col, bsize, xd->plane[1].subsampling_x,
1850 xd->plane[1].subsampling_y)) {
1851 write_intra_uv_mode(ec_ctx, mbmi->uv_mode, mode, w);
1852 #else // !CONFIG_CB4X4
1853 write_intra_uv_mode(ec_ctx, mbmi->uv_mode, mode, w);
1854 #endif // CONFIG_CB4X4
1855
1856 #if CONFIG_CFL
1857 if (mbmi->uv_mode == UV_CFL_PRED) {
1858 write_cfl_alphas(ec_ctx, mbmi->cfl_alpha_idx, mbmi->cfl_alpha_signs, w);
1859 }
1860 #endif
1861
1862 #if CONFIG_CB4X4
1863 }
1864 #endif
1865
1866 #if CONFIG_EXT_INTRA
1867 write_intra_angle_info(xd, ec_ctx, w);
1868 #endif // CONFIG_EXT_INTRA
1869 if (av1_allow_palette(cm->allow_screen_content_tools, bsize))
1870 write_palette_mode_info(cm, xd, mi, w);
1871 #if CONFIG_FILTER_INTRA
1872 if (bsize >= BLOCK_8X8 || unify_bsize)
1873 write_filter_intra_mode_info(cm, xd, mbmi, mi_row, mi_col, w);
1874 #endif // CONFIG_FILTER_INTRA
1875 } else {
1876 int16_t mode_ctx;
1877 write_ref_frames(cm, xd, w);
1878
1879 #if CONFIG_COMPOUND_SINGLEREF
1880 if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
1881 // NOTE: Handle single ref comp mode
1882 if (!is_compound)
1883 aom_write(w, is_inter_singleref_comp_mode(mode),
1884 av1_get_inter_mode_prob(cm, xd));
1885 }
1886 #endif // CONFIG_COMPOUND_SINGLEREF
1887
1888 #if CONFIG_COMPOUND_SINGLEREF
1889 if (is_compound || is_inter_singleref_comp_mode(mode))
1890 #else // !CONFIG_COMPOUND_SINGLEREF
1891 if (is_compound)
1892 #endif // CONFIG_COMPOUND_SINGLEREF
1893 mode_ctx = mbmi_ext->compound_mode_context[mbmi->ref_frame[0]];
1894 else
1895
1896 mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
1897 mbmi->ref_frame, bsize, -1);
1898
1899 // If segment skip is not enabled code the mode.
1900 if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
1901 if (bsize >= BLOCK_8X8 || unify_bsize) {
1902 if (is_inter_compound_mode(mode))
1903 write_inter_compound_mode(cm, xd, w, mode, mode_ctx);
1904 #if CONFIG_COMPOUND_SINGLEREF
1905 else if (is_inter_singleref_comp_mode(mode))
1906 write_inter_singleref_comp_mode(xd, w, mode, mode_ctx);
1907 #endif // CONFIG_COMPOUND_SINGLEREF
1908 else if (is_inter_singleref_mode(mode))
1909 write_inter_mode(w, mode, ec_ctx, mode_ctx);
1910
1911 if (mode == NEWMV || mode == NEW_NEWMV ||
1912 #if CONFIG_COMPOUND_SINGLEREF
1913 mbmi->mode == SR_NEW_NEWMV ||
1914 #endif // CONFIG_COMPOUND_SINGLEREF
1915 have_nearmv_in_inter_mode(mode))
1916 write_drl_idx(ec_ctx, mbmi, mbmi_ext, w);
1917 else
1918 assert(mbmi->ref_mv_idx == 0);
1919 }
1920 }
1921
1922 #if !CONFIG_DUAL_FILTER && !CONFIG_WARPED_MOTION && !CONFIG_GLOBAL_MOTION
1923 write_mb_interp_filter(cpi, xd, w);
#endif // !CONFIG_DUAL_FILTER && !CONFIG_WARPED_MOTION && !CONFIG_GLOBAL_MOTION
1925
1926 if (bsize < BLOCK_8X8 && !unify_bsize) {
1927 #if CONFIG_COMPOUND_SINGLEREF
    // NOTE: Single ref comp mode does not support sub8x8.
1929 assert(is_compound || !is_inter_singleref_comp_mode(mbmi->mode));
1930 #endif // CONFIG_COMPOUND_SINGLEREF
1931 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
1932 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
1933 int idx, idy;
1934 for (idy = 0; idy < 2; idy += num_4x4_h) {
1935 for (idx = 0; idx < 2; idx += num_4x4_w) {
1936 const int j = idy * 2 + idx;
1937 const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
1938 if (!is_compound)
1939 mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
1940 mbmi->ref_frame, bsize, j);
1941 if (is_inter_compound_mode(b_mode))
1942 write_inter_compound_mode(cm, xd, w, b_mode, mode_ctx);
1943 else if (is_inter_singleref_mode(b_mode))
1944 write_inter_mode(w, b_mode, ec_ctx, mode_ctx);
1945
1946 if (b_mode == NEWMV || b_mode == NEW_NEWMV) {
1947 for (ref = 0; ref < 1 + is_compound; ++ref) {
1948 int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
1949 int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
1950 mbmi_ext->ref_mv_stack[rf_type], ref,
1951 mbmi->ref_mv_idx);
1952 nmv_context *nmvc = &ec_ctx->nmvc[nmv_ctx];
1953 av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
1954 &mi->bmi[j].ref_mv[ref].as_mv, nmvc, allow_hp);
1955 }
1956 } else if (b_mode == NEAREST_NEWMV || b_mode == NEAR_NEWMV) {
1957 int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
1958 int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
1959 mbmi_ext->ref_mv_stack[rf_type], 1,
1960 mbmi->ref_mv_idx);
1961 nmv_context *nmvc = &ec_ctx->nmvc[nmv_ctx];
1962 av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[1].as_mv,
1963 &mi->bmi[j].ref_mv[1].as_mv, nmvc, allow_hp);
1964 } else if (b_mode == NEW_NEARESTMV || b_mode == NEW_NEARMV) {
1965 int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
1966 int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
1967 mbmi_ext->ref_mv_stack[rf_type], 0,
1968 mbmi->ref_mv_idx);
1969 nmv_context *nmvc = &ec_ctx->nmvc[nmv_ctx];
1970 av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[0].as_mv,
1971 &mi->bmi[j].ref_mv[0].as_mv, nmvc, allow_hp);
1972 }
1973 }
1974 }
1975 } else {
1976 if (mode == NEWMV || mode == NEW_NEWMV) {
1977 int_mv ref_mv;
1978 for (ref = 0; ref < 1 + is_compound; ++ref) {
1979 int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
1980 int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
1981 mbmi_ext->ref_mv_stack[rf_type], ref,
1982 mbmi->ref_mv_idx);
1983 nmv_context *nmvc = &ec_ctx->nmvc[nmv_ctx];
1984 ref_mv = mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0];
1985 av1_encode_mv(cpi, w, &mbmi->mv[ref].as_mv, &ref_mv.as_mv, nmvc,
1986 allow_hp);
1987 }
1988 } else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
1989 int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
1990 int nmv_ctx =
1991 av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
1992 mbmi_ext->ref_mv_stack[rf_type], 1, mbmi->ref_mv_idx);
1993 nmv_context *nmvc = &ec_ctx->nmvc[nmv_ctx];
1994 av1_encode_mv(cpi, w, &mbmi->mv[1].as_mv,
1995 &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv, nmvc,
1996 allow_hp);
1997 } else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
1998 int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
1999 int nmv_ctx =
2000 av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
2001 mbmi_ext->ref_mv_stack[rf_type], 0, mbmi->ref_mv_idx);
2002 nmv_context *nmvc = &ec_ctx->nmvc[nmv_ctx];
2003 av1_encode_mv(cpi, w, &mbmi->mv[0].as_mv,
2004 &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv, nmvc,
2005 allow_hp);
2006 #if CONFIG_COMPOUND_SINGLEREF
2007 } else if ( // mode == SR_NEAREST_NEWMV ||
2008 mode == SR_NEAR_NEWMV || mode == SR_ZERO_NEWMV ||
2009 mode == SR_NEW_NEWMV) {
2010 int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
2011 int nmv_ctx =
2012 av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
2013 mbmi_ext->ref_mv_stack[rf_type], 0, mbmi->ref_mv_idx);
2014 nmv_context *nmvc = &ec_ctx->nmvc[nmv_ctx];
2015 int_mv ref_mv = mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0];
2016 if (mode == SR_NEW_NEWMV)
2017 av1_encode_mv(cpi, w, &mbmi->mv[0].as_mv, &ref_mv.as_mv, nmvc,
2018 allow_hp);
2019 av1_encode_mv(cpi, w, &mbmi->mv[1].as_mv, &ref_mv.as_mv, nmvc,
2020 allow_hp);
2021 #endif // CONFIG_COMPOUND_SINGLEREF
2022 }
2023 }
2024
2025 #if CONFIG_INTERINTRA
2026 if (cpi->common.reference_mode != COMPOUND_REFERENCE &&
2027 #if CONFIG_SUPERTX
2028 !supertx_enabled &&
2029 #endif // CONFIG_SUPERTX
2030 cpi->common.allow_interintra_compound && is_interintra_allowed(mbmi)) {
2031 const int interintra = mbmi->ref_frame[1] == INTRA_FRAME;
2032 const int bsize_group = size_group_lookup[bsize];
2033 #if CONFIG_NEW_MULTISYMBOL
2034 aom_write_symbol(w, interintra, ec_ctx->interintra_cdf[bsize_group], 2);
2035 #else
2036 aom_write(w, interintra, cm->fc->interintra_prob[bsize_group]);
2037 #endif
2038 if (interintra) {
2039 aom_write_symbol(w, mbmi->interintra_mode,
2040 ec_ctx->interintra_mode_cdf[bsize_group],
2041 INTERINTRA_MODES);
2042 if (is_interintra_wedge_used(bsize)) {
2043 #if CONFIG_NEW_MULTISYMBOL
2044 aom_write_symbol(w, mbmi->use_wedge_interintra,
2045 ec_ctx->wedge_interintra_cdf[bsize], 2);
2046 #else
2047 aom_write(w, mbmi->use_wedge_interintra,
2048 cm->fc->wedge_interintra_prob[bsize]);
2049 #endif
2050 if (mbmi->use_wedge_interintra) {
2051 aom_write_literal(w, mbmi->interintra_wedge_index,
2052 get_wedge_bits_lookup(bsize));
2053 assert(mbmi->interintra_wedge_sign == 0);
2054 }
2055 }
2056 }
2057 }
2058 #endif // CONFIG_INTERINTRA
2059
2060 #if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
2061 #if CONFIG_SUPERTX
2062 if (!supertx_enabled)
2063 #endif // CONFIG_SUPERTX
2064 if (mbmi->ref_frame[1] != INTRA_FRAME) write_motion_mode(cm, xd, mi, w);
2065 #if CONFIG_NCOBMC_ADAPT_WEIGHT
2066 write_ncobmc_mode(xd, mi, w);
2067 #endif
2068 #endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
2069
2070 if (
2071 #if CONFIG_COMPOUND_SINGLEREF
2072 is_inter_anyref_comp_mode(mbmi->mode) &&
2073 #else // !CONFIG_COMPOUND_SINGLEREF
2074 cpi->common.reference_mode != SINGLE_REFERENCE &&
2075 is_inter_compound_mode(mbmi->mode) &&
2076 #endif // CONFIG_COMPOUND_SINGLEREF
2077 #if CONFIG_MOTION_VAR
2078 mbmi->motion_mode == SIMPLE_TRANSLATION &&
2079 #endif // CONFIG_MOTION_VAR
2080 is_any_masked_compound_used(bsize)) {
2081 #if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
2082 if (cm->allow_masked_compound) {
2083 #if CONFIG_WEDGE && CONFIG_COMPOUND_SEGMENT
2084 if (!is_interinter_compound_used(COMPOUND_WEDGE, bsize))
2085 aom_write_bit(w, mbmi->interinter_compound_type == COMPOUND_AVERAGE);
2086 else
2087 #endif // CONFIG_WEDGE && CONFIG_COMPOUND_SEGMENT
2088 aom_write_symbol(w, mbmi->interinter_compound_type,
2089 ec_ctx->compound_type_cdf[bsize], COMPOUND_TYPES);
2090 #if CONFIG_WEDGE
2091 if (is_interinter_compound_used(COMPOUND_WEDGE, bsize) &&
2092 mbmi->interinter_compound_type == COMPOUND_WEDGE) {
2093 aom_write_literal(w, mbmi->wedge_index, get_wedge_bits_lookup(bsize));
2094 aom_write_bit(w, mbmi->wedge_sign);
2095 }
2096 #endif // CONFIG_WEDGE
2097 #if CONFIG_COMPOUND_SEGMENT
2098 if (mbmi->interinter_compound_type == COMPOUND_SEG) {
2099 aom_write_literal(w, mbmi->mask_type, MAX_SEG_MASK_BITS);
2100 }
2101 #endif // CONFIG_COMPOUND_SEGMENT
2102 }
2103 #endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
2104 }
2105
2106 #if CONFIG_DUAL_FILTER || CONFIG_WARPED_MOTION || CONFIG_GLOBAL_MOTION
2107 write_mb_interp_filter(cpi, xd, w);
#endif // CONFIG_DUAL_FILTER || CONFIG_WARPED_MOTION || CONFIG_GLOBAL_MOTION
2109 }
2110
2111 #if !CONFIG_TXK_SEL
2112 av1_write_tx_type(cm, xd,
2113 #if CONFIG_SUPERTX
2114 supertx_enabled,
2115 #endif
2116 w);
2117 #endif // !CONFIG_TXK_SEL
2118 }
2119
// Writes the per-block mode info for a key (intra-only) frame: segment id,
// skip flag, delta-q/delta-lf updates at superblock granularity, intra
// block copy (when enabled), tx size, luma and chroma intra modes, and the
// remaining intra side information (CfL alphas, directional-angle info,
// palette, filter-intra, and finally the tx type).
static void write_mb_modes_kf(AV1_COMMON *cm, MACROBLOCKD *xd,
#if CONFIG_INTRABC
                              const MB_MODE_INFO_EXT *mbmi_ext,
#endif // CONFIG_INTRABC
                              const int mi_row, const int mi_col,
                              aom_writer *w) {
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
  const struct segmentation *const seg = &cm->seg;
  struct segmentation_probs *const segp = &ec_ctx->seg;
  const MODE_INFO *const mi = xd->mi[0];
  const MODE_INFO *const above_mi = xd->above_mi;
  const MODE_INFO *const left_mi = xd->left_mi;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;
#if CONFIG_CB4X4
  const int unify_bsize = 1;
#else
  const int unify_bsize = 0;
#endif
  // Silence unused-parameter warnings for config combinations in which
  // mi_row/mi_col are not referenced below.
  (void)mi_row;
  (void)mi_col;

  if (seg->update_map) write_segment_id(w, seg, segp, mbmi->segment_id);

  const int skip = write_skip(cm, xd, mbmi->segment_id, mi, w);
  if (cm->delta_q_present_flag) {
    // Delta-q (and optionally delta-lf) is signalled once per superblock,
    // coded as the difference from the previously signalled value scaled
    // down by the configured resolution (delta_q_res / delta_lf_res).
    int super_block_upper_left =
        ((mi_row & MAX_MIB_MASK) == 0) && ((mi_col & MAX_MIB_MASK) == 0);
    if ((bsize != BLOCK_LARGEST || skip == 0) && super_block_upper_left) {
      assert(mbmi->current_q_index > 0);
      int reduced_delta_qindex =
          (mbmi->current_q_index - xd->prev_qindex) / cm->delta_q_res;
      write_delta_qindex(cm, xd, reduced_delta_qindex, w);
      xd->prev_qindex = mbmi->current_q_index;
#if CONFIG_EXT_DELTA_Q
#if CONFIG_LOOPFILTER_LEVEL
      if (cm->delta_lf_present_flag) {
        if (cm->delta_lf_multi) {
          // Multi mode: one delta per loop-filter level.
          for (int lf_id = 0; lf_id < FRAME_LF_COUNT; ++lf_id) {
            int reduced_delta_lflevel =
                (mbmi->curr_delta_lf[lf_id] - xd->prev_delta_lf[lf_id]) /
                cm->delta_lf_res;
            write_delta_lflevel(cm, xd, lf_id, reduced_delta_lflevel, w);
            xd->prev_delta_lf[lf_id] = mbmi->curr_delta_lf[lf_id];
          }
        } else {
          // Single delta applied from the base filter level (lf_id == -1).
          int reduced_delta_lflevel =
              (mbmi->current_delta_lf_from_base - xd->prev_delta_lf_from_base) /
              cm->delta_lf_res;
          write_delta_lflevel(cm, xd, -1, reduced_delta_lflevel, w);
          xd->prev_delta_lf_from_base = mbmi->current_delta_lf_from_base;
        }
      }
#else
      if (cm->delta_lf_present_flag) {
        int reduced_delta_lflevel =
            (mbmi->current_delta_lf_from_base - xd->prev_delta_lf_from_base) /
            cm->delta_lf_res;
        write_delta_lflevel(cm, xd, reduced_delta_lflevel, w);
        xd->prev_delta_lf_from_base = mbmi->current_delta_lf_from_base;
      }
#endif // CONFIG_LOOPFILTER_LEVEL
#endif // CONFIG_EXT_DELTA_Q
    }
  }

  int enable_tx_size = cm->tx_mode == TX_MODE_SELECT &&
                       block_signals_txsize(bsize) &&
                       !xd->lossless[mbmi->segment_id];

#if CONFIG_INTRABC
  if (av1_allow_intrabc(bsize, cm)) {
    int use_intrabc = is_intrabc_block(mbmi);
    aom_write_symbol(w, use_intrabc, ec_ctx->intrabc_cdf, 2);
    if (use_intrabc) {
      // Intra block copy: only DC prediction plus a displacement vector
      // (and possibly tx size/type) are coded; the remaining intra mode
      // syntax below is skipped entirely via the early return.
      assert(mbmi->mode == DC_PRED);
      assert(mbmi->uv_mode == UV_DC_PRED);
      if (enable_tx_size && !mbmi->skip) write_selected_tx_size(cm, xd, w);
      int_mv dv_ref = mbmi_ext->ref_mvs[INTRA_FRAME][0];
      av1_encode_dv(w, &mbmi->mv[0].as_mv, &dv_ref.as_mv, &ec_ctx->ndvc);
#if CONFIG_EXT_TX && !CONFIG_TXK_SEL
      av1_write_tx_type(cm, xd,
#if CONFIG_SUPERTX
                        0,
#endif
                        w);
#endif // CONFIG_EXT_TX && !CONFIG_TXK_SEL
      return;
    }
  }
#endif // CONFIG_INTRABC
  if (enable_tx_size) write_selected_tx_size(cm, xd, w);

  if (bsize >= BLOCK_8X8 || unify_bsize) {
    write_intra_mode_kf(cm, ec_ctx, mi, above_mi, left_mi, 0, mbmi->mode, w);
  } else {
    // Sub-8x8 partitions: one intra mode per 4x4 sub-block.
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    int idx, idy;

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int block = idy * 2 + idx;
        write_intra_mode_kf(cm, ec_ctx, mi, above_mi, left_mi, block,
                            mi->bmi[block].as_mode, w);
      }
    }
  }

#if CONFIG_CB4X4
  // Chroma mode is only coded for blocks that carry chroma samples.
  if (is_chroma_reference(mi_row, mi_col, bsize, xd->plane[1].subsampling_x,
                          xd->plane[1].subsampling_y)) {
    write_intra_uv_mode(ec_ctx, mbmi->uv_mode, mbmi->mode, w);
#else // !CONFIG_CB4X4
  write_intra_uv_mode(ec_ctx, mbmi->uv_mode, mbmi->mode, w);
#endif // CONFIG_CB4X4

#if CONFIG_CFL
    if (mbmi->uv_mode == UV_CFL_PRED) {
      write_cfl_alphas(ec_ctx, mbmi->cfl_alpha_idx, mbmi->cfl_alpha_signs, w);
    }
#endif

#if CONFIG_CB4X4
  }
#endif
#if CONFIG_EXT_INTRA
  write_intra_angle_info(xd, ec_ctx, w);
#endif // CONFIG_EXT_INTRA
  if (av1_allow_palette(cm->allow_screen_content_tools, bsize))
    write_palette_mode_info(cm, xd, mi, w);
#if CONFIG_FILTER_INTRA
  if (bsize >= BLOCK_8X8 || unify_bsize)
    write_filter_intra_mode_info(cm, xd, mbmi, mi_row, mi_col, w);
#endif // CONFIG_FILTER_INTRA

#if !CONFIG_TXK_SEL
  av1_write_tx_type(cm, xd,
#if CONFIG_SUPERTX
                    0,
#endif
                    w);
#endif // !CONFIG_TXK_SEL
}
2264
// Forwards to write_modes_b, dropping the supertx_enabled argument when
// SUPERTX is compiled out, so call sites can pass it unconditionally.
#if CONFIG_SUPERTX
#define write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, \
                              mi_row, mi_col)                              \
  write_modes_b(cpi, tile, w, tok, tok_end, supertx_enabled, mi_row, mi_col)
#else
#define write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, \
                              mi_row, mi_col)                              \
  write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col)
#endif // CONFIG_SUPERTX
2274
2275 #if CONFIG_RD_DEBUG
2276 static void dump_mode_info(MODE_INFO *mi) {
2277 printf("\nmi->mbmi.mi_row == %d\n", mi->mbmi.mi_row);
2278 printf("&& mi->mbmi.mi_col == %d\n", mi->mbmi.mi_col);
2279 printf("&& mi->mbmi.sb_type == %d\n", mi->mbmi.sb_type);
2280 printf("&& mi->mbmi.tx_size == %d\n", mi->mbmi.tx_size);
2281 if (mi->mbmi.sb_type >= BLOCK_8X8) {
2282 printf("&& mi->mbmi.mode == %d\n", mi->mbmi.mode);
2283 } else {
2284 printf("&& mi->bmi[0].as_mode == %d\n", mi->bmi[0].as_mode);
2285 }
2286 }
2287 static int rd_token_stats_mismatch(RD_STATS *rd_stats, TOKEN_STATS *token_stats,
2288 int plane) {
2289 if (rd_stats->txb_coeff_cost[plane] != token_stats->cost) {
2290 #if CONFIG_VAR_TX
2291 int r, c;
2292 #endif
2293 printf("\nplane %d rd_stats->txb_coeff_cost %d token_stats->cost %d\n",
2294 plane, rd_stats->txb_coeff_cost[plane], token_stats->cost);
2295 #if CONFIG_VAR_TX
2296 printf("rd txb_coeff_cost_map\n");
2297 for (r = 0; r < TXB_COEFF_COST_MAP_SIZE; ++r) {
2298 for (c = 0; c < TXB_COEFF_COST_MAP_SIZE; ++c) {
2299 printf("%d ", rd_stats->txb_coeff_cost_map[plane][r][c]);
2300 }
2301 printf("\n");
2302 }
2303
2304 printf("pack txb_coeff_cost_map\n");
2305 for (r = 0; r < TXB_COEFF_COST_MAP_SIZE; ++r) {
2306 for (c = 0; c < TXB_COEFF_COST_MAP_SIZE; ++c) {
2307 printf("%d ", token_stats->txb_coeff_cost_map[r][c]);
2308 }
2309 printf("\n");
2310 }
2311 #endif
2312 return 1;
2313 }
2314 return 0;
2315 }
2316 #endif
2317
2318 #if ENC_MISMATCH_DEBUG
// Debug helper: prints the inter-mode decisions for the block at
// (mi_row, mi_col) on a chosen frame so the encoder-side log can be
// diffed against the decoder's when chasing bitstream mismatches.
static void enc_dump_logs(AV1_COMP *cpi, int mi_row, int mi_col) {
  AV1_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  MODE_INFO *m;
  xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
  m = xd->mi[0];
  if (is_inter_block(&m->mbmi)) {
// Only log the single frame under investigation.
#define FRAME_TO_CHECK 1
    if (cm->current_video_frame == FRAME_TO_CHECK && cm->show_frame == 1) {
      const MB_MODE_INFO *const mbmi = &m->mbmi;
      const BLOCK_SIZE bsize = mbmi->sb_type;

      int_mv mv[2];
      int is_comp_ref = has_second_ref(&m->mbmi);
      int ref;

      for (ref = 0; ref < 1 + is_comp_ref; ++ref)
        mv[ref].as_mv = m->mbmi.mv[ref].as_mv;

      if (!is_comp_ref) {
#if CONFIG_COMPOUND_SINGLEREF
        if (is_inter_singleref_comp_mode(m->mbmi.mode))
          mv[1].as_mv = m->mbmi.mv[1].as_mv;
        else
#endif // CONFIG_COMPOUND_SINGLEREF
          mv[1].as_int = 0;
      }

      MACROBLOCK *const x = &cpi->td.mb;
      const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
      const int16_t mode_ctx = av1_mode_context_analyzer(
          mbmi_ext->mode_context, mbmi->ref_frame, bsize, -1);
      // Decompose the packed mode context into its individual sub-contexts
      // so each can be printed and compared independently.
      const int16_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK;
      int16_t zeromv_ctx = -1;
      int16_t refmv_ctx = -1;
      if (mbmi->mode != NEWMV) {
        zeromv_ctx = (mode_ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
        if (mode_ctx & (1 << ALL_ZERO_FLAG_OFFSET)) {
          assert(mbmi->mode == ZEROMV);
        }
        if (mbmi->mode != ZEROMV) {
          refmv_ctx = (mode_ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
          if (mode_ctx & (1 << SKIP_NEARESTMV_OFFSET)) refmv_ctx = 6;
          if (mode_ctx & (1 << SKIP_NEARMV_OFFSET)) refmv_ctx = 7;
          if (mode_ctx & (1 << SKIP_NEARESTMV_SUB8X8_OFFSET)) refmv_ctx = 8;
        }
      }

      int8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
      printf(
          "=== ENCODER ===: "
          "Frame=%d, (mi_row,mi_col)=(%d,%d), mode=%d, bsize=%d, "
          "show_frame=%d, mv[0]=(%d,%d), mv[1]=(%d,%d), ref[0]=%d, "
          "ref[1]=%d, motion_mode=%d, inter_mode_ctx=%d, mode_ctx=%d, "
          "newmv_ctx=%d, zeromv_ctx=%d, refmv_ctx=%d\n",
          cm->current_video_frame, mi_row, mi_col, mbmi->mode, bsize,
          cm->show_frame, mv[0].as_mv.row, mv[0].as_mv.col, mv[1].as_mv.row,
          mv[1].as_mv.col, mbmi->ref_frame[0], mbmi->ref_frame[1],
          mbmi->motion_mode, mbmi_ext->mode_context[ref_frame_type], mode_ctx,
          newmv_ctx, zeromv_ctx, refmv_ctx);
    }
  }
}
2382 #endif // ENC_MISMATCH_DEBUG
2383
// Writes the mode information (everything except the residual tokens) for
// the block at (mi_row, mi_col): key-frame intra syntax for intra-only
// frames, otherwise the full inter/intra mode syntax via
// pack_inter_mode_mvs().
static void write_mbmi_b(AV1_COMP *cpi, const TileInfo *const tile,
                         aom_writer *w,
#if CONFIG_SUPERTX
                         int supertx_enabled,
#endif
                         int mi_row, int mi_col) {
  AV1_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  MODE_INFO *m;
  int bh, bw;
  xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
  m = xd->mi[0];

  assert(m->mbmi.sb_type <= cm->sb_size ||
         (m->mbmi.sb_type >= BLOCK_SIZES && m->mbmi.sb_type < BLOCK_SIZES_ALL));

  bh = mi_size_high[m->mbmi.sb_type];
  bw = mi_size_wide[m->mbmi.sb_type];

  cpi->td.mb.mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);

  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw,
#if CONFIG_DEPENDENT_HORZTILES
                 cm->dependent_horz_tiles,
#endif // CONFIG_DEPENDENT_HORZTILES
                 cm->mi_rows, cm->mi_cols);

  if (frame_is_intra_only(cm)) {
    write_mb_modes_kf(cm, xd,
#if CONFIG_INTRABC
                      cpi->td.mb.mbmi_ext,
#endif // CONFIG_INTRABC
                      mi_row, mi_col, w);
  } else {
#if CONFIG_VAR_TX
    // Point the tx partition contexts at this block's position so tx-size
    // signalling sees the correct above/left neighbor contexts.
    xd->above_txfm_context =
        cm->above_txfm_context + (mi_col << TX_UNIT_WIDE_LOG2);
    xd->left_txfm_context = xd->left_txfm_context_buffer +
                            ((mi_row & MAX_MIB_MASK) << TX_UNIT_HIGH_LOG2);
#endif
#if CONFIG_DUAL_FILTER || CONFIG_WARPED_MOTION
    // has_subpel_mv_component needs the ref frame buffers set up to look
    // up if they are scaled. has_subpel_mv_component is in turn needed by
    // write_switchable_interp_filter, which is called by pack_inter_mode_mvs.
    set_ref_ptrs(cm, xd, m->mbmi.ref_frame[0], m->mbmi.ref_frame[1]);
#if CONFIG_COMPOUND_SINGLEREF
    if (!has_second_ref(&m->mbmi) && is_inter_singleref_comp_mode(m->mbmi.mode))
      xd->block_refs[1] = xd->block_refs[0];
#endif // CONFIG_COMPOUND_SINGLEREF
#endif // CONFIG_DUAL_FILTER || CONFIG_WARPED_MOTION

#if ENC_MISMATCH_DEBUG
    enc_dump_logs(cpi, mi_row, mi_col);
#endif // ENC_MISMATCH_DEBUG

    pack_inter_mode_mvs(cpi, mi_row, mi_col,
#if CONFIG_SUPERTX
                        supertx_enabled,
#endif
                        w);
  }
}
2446
// Writes the residual coefficient tokens (and the palette color-index map,
// when present) for the block at (mi_row, mi_col). The token stream *tok
// is consumed, advancing toward tok_end, as tokens are packed into |w|.
static void write_tokens_b(AV1_COMP *cpi, const TileInfo *const tile,
                           aom_writer *w, const TOKENEXTRA **tok,
                           const TOKENEXTRA *const tok_end, int mi_row,
                           int mi_col) {
  AV1_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  const int mi_offset = mi_row * cm->mi_stride + mi_col;
  MODE_INFO *const m = *(cm->mi_grid_visible + mi_offset);
  MB_MODE_INFO *const mbmi = &m->mbmi;
  int plane;
  int bh, bw;
#if CONFIG_PVQ || CONFIG_LV_MAP
  MACROBLOCK *const x = &cpi->td.mb;
  (void)tok;
  (void)tok_end;
#endif
  xd->mi = cm->mi_grid_visible + mi_offset;

  assert(mbmi->sb_type <= cm->sb_size ||
         (mbmi->sb_type >= BLOCK_SIZES && mbmi->sb_type < BLOCK_SIZES_ALL));

  bh = mi_size_high[mbmi->sb_type];
  bw = mi_size_wide[mbmi->sb_type];
  cpi->td.mb.mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);

  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw,
#if CONFIG_DEPENDENT_HORZTILES
                 cm->dependent_horz_tiles,
#endif // CONFIG_DEPENDENT_HORZTILES
                 cm->mi_rows, cm->mi_cols);

  // Palette: pack the color-index map for each of planes 0 and 1 that
  // actually uses palette mode.
  // TODO(anybody) : remove this flag when PVQ supports palette coding tool
#if !CONFIG_PVQ
  for (plane = 0; plane <= 1; ++plane) {
    const uint8_t palette_size_plane =
        mbmi->palette_mode_info.palette_size[plane];
    if (palette_size_plane > 0) {
#if CONFIG_INTRABC
      assert(mbmi->use_intrabc == 0);
#endif
      int rows, cols;
      assert(mbmi->sb_type >= BLOCK_8X8);
      av1_get_block_dimensions(mbmi->sb_type, plane, xd, NULL, NULL, &rows,
                               &cols);
      assert(*tok < tok_end);
      pack_map_tokens(w, tok, palette_size_plane, rows * cols);
#if !CONFIG_LV_MAP
      assert(*tok < tok_end + mbmi->skip);
#endif // !CONFIG_LV_MAP
    }
  }
#endif // !CONFIG_PVQ

#if CONFIG_COEF_INTERLEAVE
  // Interleaved layout: luma and chroma transform units are packed in
  // alternation (one luma TU followed by a pair of chroma TUs).
  if (!mbmi->skip) {
    const struct macroblockd_plane *const pd_y = &xd->plane[0];
    const struct macroblockd_plane *const pd_c = &xd->plane[1];
    const TX_SIZE tx_log2_y = mbmi->tx_size;
    const TX_SIZE tx_log2_c = av1_get_uv_tx_size(mbmi, pd_c);
    const int tx_sz_y = (1 << tx_log2_y);
    const int tx_sz_c = (1 << tx_log2_c);

    const BLOCK_SIZE plane_bsize_y =
        get_plane_block_size(AOMMAX(mbmi->sb_type, 3), pd_y);
    const BLOCK_SIZE plane_bsize_c =
        get_plane_block_size(AOMMAX(mbmi->sb_type, 3), pd_c);

    const int num_4x4_w_y = num_4x4_blocks_wide_lookup[plane_bsize_y];
    const int num_4x4_w_c = num_4x4_blocks_wide_lookup[plane_bsize_c];
    const int num_4x4_h_y = num_4x4_blocks_high_lookup[plane_bsize_y];
    const int num_4x4_h_c = num_4x4_blocks_high_lookup[plane_bsize_c];

    const int max_4x4_w_y = get_max_4x4_size(num_4x4_w_y, xd->mb_to_right_edge,
                                             pd_y->subsampling_x);
    const int max_4x4_h_y = get_max_4x4_size(num_4x4_h_y, xd->mb_to_bottom_edge,
                                             pd_y->subsampling_y);
    const int max_4x4_w_c = get_max_4x4_size(num_4x4_w_c, xd->mb_to_right_edge,
                                             pd_c->subsampling_x);
    const int max_4x4_h_c = get_max_4x4_size(num_4x4_h_c, xd->mb_to_bottom_edge,
                                             pd_c->subsampling_y);

    // The max_4x4_w/h may be smaller than tx_sz under some corner cases,
    // i.e. when the SB is split by tile boundaries.
    const int tu_num_w_y = (max_4x4_w_y + tx_sz_y - 1) / tx_sz_y;
    const int tu_num_h_y = (max_4x4_h_y + tx_sz_y - 1) / tx_sz_y;
    const int tu_num_w_c = (max_4x4_w_c + tx_sz_c - 1) / tx_sz_c;
    const int tu_num_h_c = (max_4x4_h_c + tx_sz_c - 1) / tx_sz_c;
    const int tu_num_y = tu_num_w_y * tu_num_h_y;
    const int tu_num_c = tu_num_w_c * tu_num_h_c;

    int tu_idx_y = 0, tu_idx_c = 0;
    TOKEN_STATS token_stats;
    init_token_stats(&token_stats);

    assert(*tok < tok_end);

    while (tu_idx_y < tu_num_y) {
      pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx_log2_y, &token_stats);
      assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
      (*tok)++;
      tu_idx_y++;

      if (tu_idx_c < tu_num_c) {
        // Two chroma TUs (U then V) per interleave step.
        pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx_log2_c, &token_stats);
        assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
        (*tok)++;

        pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx_log2_c, &token_stats);
        assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
        (*tok)++;

        tu_idx_c++;
      }
    }

    // In 422 case, it's possible that Chroma has more TUs than Luma
    while (tu_idx_c < tu_num_c) {
      pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx_log2_c, &token_stats);
      assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
      (*tok)++;

      pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx_log2_c, &token_stats);
      assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
      (*tok)++;

      tu_idx_c++;
    }
  }
#else // CONFIG_COEF_INTERLEAVE
  if (!mbmi->skip) {
#if !CONFIG_PVQ && !CONFIG_LV_MAP
    assert(*tok < tok_end);
#endif
    // Pack coefficients one plane at a time.
    for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
#if CONFIG_CB4X4
      // Skip chroma planes for blocks that do not carry chroma samples.
      if (!is_chroma_reference(mi_row, mi_col, mbmi->sb_type,
                               xd->plane[plane].subsampling_x,
                               xd->plane[plane].subsampling_y)) {
#if !CONFIG_LV_MAP
        (*tok)++;
#endif // !CONFIG_LV_MAP
        continue;
      }
#endif
#if CONFIG_VAR_TX
      const struct macroblockd_plane *const pd = &xd->plane[plane];
      BLOCK_SIZE bsize = mbmi->sb_type;
#if CONFIG_CHROMA_SUB8X8
      const BLOCK_SIZE plane_bsize =
          AOMMAX(BLOCK_4X4, get_plane_block_size(bsize, pd));
#elif CONFIG_CB4X4
      const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
#else
      const BLOCK_SIZE plane_bsize =
          get_plane_block_size(AOMMAX(bsize, BLOCK_8X8), pd);
#endif

      const int num_4x4_w =
          block_size_wide[plane_bsize] >> tx_size_wide_log2[0];
      const int num_4x4_h =
          block_size_high[plane_bsize] >> tx_size_wide_log2[0];
      int row, col;
      TOKEN_STATS token_stats;
      init_token_stats(&token_stats);

      // Process the plane in 64x64-sized "max units" so the scan order
      // matches the decoder's.
      const BLOCK_SIZE max_unit_bsize = get_plane_block_size(BLOCK_64X64, pd);
      int mu_blocks_wide =
          block_size_wide[max_unit_bsize] >> tx_size_wide_log2[0];
      int mu_blocks_high =
          block_size_high[max_unit_bsize] >> tx_size_high_log2[0];

      mu_blocks_wide = AOMMIN(num_4x4_w, mu_blocks_wide);
      mu_blocks_high = AOMMIN(num_4x4_h, mu_blocks_high);

      if (is_inter_block(mbmi)) {
        // Inter blocks may use recursive tx partitioning; walk the grid of
        // maximum-size tx blocks and pack each (sub)tree.
        const TX_SIZE max_tx_size = get_vartx_max_txsize(
            mbmi, plane_bsize, pd->subsampling_x || pd->subsampling_y);
        int block = 0;
        const int step =
            tx_size_wide_unit[max_tx_size] * tx_size_high_unit[max_tx_size];
        const int bkw = tx_size_wide_unit[max_tx_size];
        const int bkh = tx_size_high_unit[max_tx_size];
        assert(bkw <= mu_blocks_wide);
        assert(bkh <= mu_blocks_high);
        for (row = 0; row < num_4x4_h; row += mu_blocks_high) {
          const int unit_height = AOMMIN(mu_blocks_high + row, num_4x4_h);
          for (col = 0; col < num_4x4_w; col += mu_blocks_wide) {
            int blk_row, blk_col;
            const int unit_width = AOMMIN(mu_blocks_wide + col, num_4x4_w);
            for (blk_row = row; blk_row < unit_height; blk_row += bkh) {
              for (blk_col = col; blk_col < unit_width; blk_col += bkw) {
                pack_txb_tokens(w,
#if CONFIG_LV_MAP
                                cm,
#endif
                                tok, tok_end,
#if CONFIG_PVQ || CONFIG_LV_MAP
                                x,
#endif
                                xd, mbmi, plane, plane_bsize, cm->bit_depth,
                                block, blk_row, blk_col, max_tx_size,
                                &token_stats);
                block += step;
              }
            }
          }
        }
#if CONFIG_RD_DEBUG
        if (mbmi->sb_type >= BLOCK_8X8 &&
            rd_token_stats_mismatch(&mbmi->rd_stats, &token_stats, plane)) {
          dump_mode_info(m);
          assert(0);
        }
#endif // CONFIG_RD_DEBUG
      } else {
#if CONFIG_LV_MAP
        av1_write_coeffs_mb(cm, x, w, plane);
#else
        // Intra blocks use a single uniform tx size for the plane.
        const TX_SIZE tx = av1_get_tx_size(plane, xd);
        const int bkw = tx_size_wide_unit[tx];
        const int bkh = tx_size_high_unit[tx];
        int blk_row, blk_col;

        for (row = 0; row < num_4x4_h; row += mu_blocks_high) {
          for (col = 0; col < num_4x4_w; col += mu_blocks_wide) {
            const int unit_height = AOMMIN(mu_blocks_high + row, num_4x4_h);
            const int unit_width = AOMMIN(mu_blocks_wide + col, num_4x4_w);

            for (blk_row = row; blk_row < unit_height; blk_row += bkh) {
              for (blk_col = col; blk_col < unit_width; blk_col += bkw) {
#if !CONFIG_PVQ
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
                TX_TYPE tx_type =
                    av1_get_tx_type(plane ? PLANE_TYPE_UV : PLANE_TYPE_Y, xd,
                                    blk_row, blk_col, 0, tx);
#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
                pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx,
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
                               tx_type, is_inter_block(mbmi),
#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
                               &token_stats);
#else
                pack_pvq_tokens(w, x, xd, plane, bsize, tx);
#endif
              }
            }
          }
        }
#endif // CONFIG_LV_MAP
      }
#else
      const TX_SIZE tx = av1_get_tx_size(plane, xd);
      TOKEN_STATS token_stats;
#if !CONFIG_PVQ
      init_token_stats(&token_stats);
#if CONFIG_LV_MAP
      (void)tx;
      av1_write_coeffs_mb(cm, x, w, plane);
#else // CONFIG_LV_MAP
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
      TX_TYPE tx_type = av1_get_tx_type(plane ? PLANE_TYPE_UV : PLANE_TYPE_Y,
                                        xd, blk_row, blk_col, 0, tx);
#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
      pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx,
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
                     tx_type, is_inter_block(mbmi),
#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
                     &token_stats);
#endif // CONFIG_LV_MAP

#else
      (void)token_stats;
      pack_pvq_tokens(w, x, xd, plane, mbmi->sb_type, tx);
#endif
#if CONFIG_RD_DEBUG
      if (is_inter_block(mbmi) && mbmi->sb_type >= BLOCK_8X8 &&
          rd_token_stats_mismatch(&mbmi->rd_stats, &token_stats, plane)) {
        dump_mode_info(m);
        assert(0);
      }
#endif // CONFIG_RD_DEBUG
#endif // CONFIG_VAR_TX

#if !CONFIG_PVQ && !CONFIG_LV_MAP
      assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
      (*tok)++;
#endif
    }
  }
#endif // CONFIG_COEF_INTERLEAVE
}
2738
2739 #if CONFIG_MOTION_VAR && NC_MODE_INFO
// Recursively walks the partition tree rooted at (mi_row, mi_col) and writes
// the residual tokens for every coded block.  The traversal order mirrors the
// order in which mode info was written, so the token stream stays aligned
// with the mode stream.  Only compiled when modes and tokens are packed in
// separate passes (CONFIG_MOTION_VAR && NC_MODE_INFO).
static void write_tokens_sb(AV1_COMP *cpi, const TileInfo *const tile,
                            aom_writer *w, const TOKENEXTRA **tok,
                            const TOKENEXTRA *const tok_end, int mi_row,
                            int mi_col, BLOCK_SIZE bsize) {
  const AV1_COMMON *const cm = &cpi->common;
  const int hbs = mi_size_wide[bsize] / 2;  // half block size, in MI units
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
#if CONFIG_CB4X4
  const int unify_bsize = 1;
#else
  const int unify_bsize = 0;
#endif

  // Blocks that start outside the visible frame are never coded.
  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  partition = get_partition(cm, mi_row, mi_col, bsize);
  subsize = get_subsize(bsize, partition);

  if (subsize < BLOCK_8X8 && !unify_bsize) {
    // Without CB4X4, sub-8x8 partitions share one token block.
    write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        break;
      case PARTITION_HORZ:
        write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        // The lower half may fall outside the frame and is then skipped.
        if (mi_row + hbs < cm->mi_rows)
          write_tokens_b(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col);
        break;
      case PARTITION_VERT:
        write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        // The right half may fall outside the frame and is then skipped.
        if (mi_col + hbs < cm->mi_cols)
          write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + hbs);
        break;
      case PARTITION_SPLIT:
        // Recurse into the four quadrants in raster order.
        write_tokens_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
        write_tokens_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + hbs,
                        subsize);
        write_tokens_sb(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col,
                        subsize);
        write_tokens_sb(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col + hbs,
                        subsize);
        break;
#if CONFIG_EXT_PARTITION_TYPES
#if CONFIG_EXT_PARTITION_TYPES_AB
#error NC_MODE_INFO+MOTION_VAR not yet supported for new HORZ/VERT_AB partitions
#endif
      // A/B partitions: two small blocks plus one half-size block.
      case PARTITION_HORZ_A:
        write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + hbs);
        write_tokens_b(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col);
        break;
      case PARTITION_HORZ_B:
        write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        write_tokens_b(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col);
        write_tokens_b(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col + hbs);
        break;
      case PARTITION_VERT_A:
        write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        write_tokens_b(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col);
        write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + hbs);
        break;
      case PARTITION_VERT_B:
        write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + hbs);
        write_tokens_b(cpi, tile, w, tok, tok_end, mi_row + hbs, mi_col + hbs);
        break;
#endif  // CONFIG_EXT_PARTITION_TYPES
      default: assert(0);
    }
  }
}
2814 #endif
2815
// Writes one coded block: first its mode info, then (unless tokens are
// packed in a separate pass, or this block is covered by a SUPERTX
// super-transform) its residual tokens.
static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile,
                          aom_writer *w, const TOKENEXTRA **tok,
                          const TOKENEXTRA *const tok_end,
#if CONFIG_SUPERTX
                          int supertx_enabled,
#endif
                          int mi_row, int mi_col) {
  write_mbmi_b(cpi, tile, w,
#if CONFIG_SUPERTX
               supertx_enabled,
#endif
               mi_row, mi_col);

#if CONFIG_MOTION_VAR && NC_MODE_INFO
  // Tokens are written later by write_tokens_sb(); the token arguments are
  // unused in this configuration.
  (void)tok;
  (void)tok_end;
#else
#if !CONFIG_PVQ && CONFIG_SUPERTX
  // Under SUPERTX the tokens for the whole super-transform are written once
  // at the top of the partition tree, not per block.
  if (!supertx_enabled)
#endif
    write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
#endif
}
2839
// Signals the partition type for a block at (mi_row, mi_col).  When the
// block extends past the right or bottom frame edge, only the subset of
// partition types consistent with the missing rows/columns is legal, so a
// reduced binary symbol (or nothing at all) is coded instead of the full
// partition symbol.
static void write_partition(const AV1_COMMON *const cm,
                            const MACROBLOCKD *const xd, int hbs, int mi_row,
                            int mi_col, PARTITION_TYPE p, BLOCK_SIZE bsize,
                            aom_writer *w) {
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;
  const int is_partition_point = bsize >= BLOCK_8X8;
  const int ctx = is_partition_point
                      ? partition_plane_context(xd, mi_row, mi_col,
#if CONFIG_UNPOISON_PARTITION_CTX
                                                has_rows, has_cols,
#endif
                                                bsize)
                      : 0;
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
  (void)cm;

  // Sub-8x8 block sizes carry no partition symbol.
  if (!is_partition_point) return;

  if (has_rows && has_cols) {
    // Fully inside the frame: code the full partition symbol.
#if CONFIG_EXT_PARTITION_TYPES
    const int num_partition_types =
        (mi_width_log2_lookup[bsize] > mi_width_log2_lookup[BLOCK_8X8])
            ? EXT_PARTITION_TYPES
            : PARTITION_TYPES;
#else
    const int num_partition_types = PARTITION_TYPES;
#endif
    aom_write_symbol(w, p, ec_ctx->partition_cdf[ctx], num_partition_types);
  } else if (!has_rows && has_cols) {
    // Bottom edge: only SPLIT or HORZ is possible; code one binary symbol
    // using a CDF gathered from the vertical-alike partition probabilities.
    assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
    assert(bsize > BLOCK_8X8);
    aom_cdf_prob cdf[2];
    partition_gather_vert_alike(cdf, ec_ctx->partition_cdf[ctx]);
    aom_write_cdf(w, p == PARTITION_SPLIT, cdf, 2);
  } else if (has_rows && !has_cols) {
    // Right edge: only SPLIT or VERT is possible.
    assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
    assert(bsize > BLOCK_8X8);
    aom_cdf_prob cdf[2];
    partition_gather_horz_alike(cdf, ec_ctx->partition_cdf[ctx]);
    aom_write_cdf(w, p == PARTITION_SPLIT, cdf, 2);
  } else {
    // Both edges missing: the partition is forced to SPLIT, nothing coded.
    assert(p == PARTITION_SPLIT);
  }
}
2885
// Call-site adapter: lets callers pass supertx_enabled unconditionally while
// write_modes_sb() only takes that argument when SUPERTX is compiled in.
#if CONFIG_SUPERTX
#define write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, \
                               mi_row, mi_col, bsize)                       \
  write_modes_sb(cpi, tile, w, tok, tok_end, supertx_enabled, mi_row, mi_col, \
                 bsize)
#else
#define write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, \
                               mi_row, mi_col, bsize)                       \
  write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, bsize)
#endif  // CONFIG_SUPERTX
2896
// Recursively writes the partition symbol, mode info and tokens for the
// partition tree rooted at (mi_row, mi_col).  At the superblock root it also
// writes the per-superblock side information attached to the tree:
// SUPERTX flags and super-transform coefficients, LPF_SB per-superblock
// loop-filter levels, CDEF strengths, and loop-restoration coefficients.
static void write_modes_sb(AV1_COMP *const cpi, const TileInfo *const tile,
                           aom_writer *const w, const TOKENEXTRA **tok,
                           const TOKENEXTRA *const tok_end,
#if CONFIG_SUPERTX
                           int supertx_enabled,
#endif
                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
  const AV1_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  const int hbs = mi_size_wide[bsize] / 2;  // half block size in MI units
#if CONFIG_EXT_PARTITION_TYPES
  const int quarter_step = mi_size_wide[bsize] / 4;
  int i;
#if CONFIG_EXT_PARTITION_TYPES_AB
  const int qbs = mi_size_wide[bsize] / 4;  // quarter block size in MI units
#endif  // CONFIG_EXT_PARTITION_TYPES_AB
#endif  // CONFIG_EXT_PARTITION_TYPES
  const PARTITION_TYPE partition = get_partition(cm, mi_row, mi_col, bsize);
  const BLOCK_SIZE subsize = get_subsize(bsize, partition);
#if CONFIG_CB4X4
  const int unify_bsize = 1;
#else
  const int unify_bsize = 0;
#endif

#if CONFIG_SUPERTX
  const int mi_offset = mi_row * cm->mi_stride + mi_col;
  MB_MODE_INFO *mbmi;
  // Tokens for a super-transform are packed at the level where the SUPERTX
  // flag was first signalled (i.e. where supertx was not already enabled).
  const int pack_token = !supertx_enabled;
  TX_SIZE supertx_size;
#endif

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;

  write_partition(cm, xd, hbs, mi_row, mi_col, partition, bsize, w);
#if CONFIG_SUPERTX
  mbmi = &cm->mi_grid_visible[mi_offset]->mbmi;
  xd->mi = cm->mi_grid_visible + mi_offset;
  set_mi_row_col(xd, tile, mi_row, mi_size_high[bsize], mi_col,
                 mi_size_wide[bsize],
#if CONFIG_DEPENDENT_HORZTILES
                 cm->dependent_horz_tiles,
#endif  // CONFIG_DEPENDENT_HORZTILES
                 cm->mi_rows, cm->mi_cols);
  // Signal whether this partitioned block is coded with one super-transform
  // spanning all sub-blocks.  Inferred from the stored tx_size.
  if (!supertx_enabled && !frame_is_intra_only(cm) &&
      partition != PARTITION_NONE && bsize <= MAX_SUPERTX_BLOCK_SIZE &&
      !xd->lossless[0]) {
    aom_prob prob;
    supertx_size = max_txsize_lookup[bsize];
    prob = cm->fc->supertx_prob[partition_supertx_context_lookup[partition]]
                               [supertx_size];
    supertx_enabled = (xd->mi[0]->mbmi.tx_size == supertx_size);
    aom_write(w, supertx_enabled, prob);
  }
#endif  // CONFIG_SUPERTX
  if (subsize < BLOCK_8X8 && !unify_bsize) {
    write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, mi_row,
                          mi_col);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col);
        break;
      case PARTITION_HORZ:
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col);
        // Second half only exists if it lies inside the frame.
        if (mi_row + hbs < cm->mi_rows)
          write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                                mi_row + hbs, mi_col);
        break;
      case PARTITION_VERT:
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col);
        if (mi_col + hbs < cm->mi_cols)
          write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                                mi_row, mi_col + hbs);
        break;
      case PARTITION_SPLIT:
        // Recurse into the four quadrants in raster order.
        write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                               mi_row, mi_col, subsize);
        write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                               mi_row, mi_col + hbs, subsize);
        write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                               mi_row + hbs, mi_col, subsize);
        write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                               mi_row + hbs, mi_col + hbs, subsize);
        break;
#if CONFIG_EXT_PARTITION_TYPES
#if CONFIG_EXT_PARTITION_TYPES_AB
      // AB variant: HORZ_A/B split one half into two quarter-height strips.
      case PARTITION_HORZ_A:
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col);
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row + qbs, mi_col);
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row + hbs, mi_col);
        break;
      case PARTITION_HORZ_B:
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col);
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row + hbs, mi_col);
        if (mi_row + 3 * qbs < cm->mi_rows)
          write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                                mi_row + 3 * qbs, mi_col);
        break;
      case PARTITION_VERT_A:
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col);
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col + qbs);
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col + hbs);
        break;
      case PARTITION_VERT_B:
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col);
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col + hbs);
        if (mi_col + 3 * qbs < cm->mi_cols)
          write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                                mi_row, mi_col + 3 * qbs);
        break;
#else
      // Classic A/B partitions: two quarter blocks plus one half block.
      case PARTITION_HORZ_A:
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col);
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col + hbs);
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row + hbs, mi_col);
        break;
      case PARTITION_HORZ_B:
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col);
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row + hbs, mi_col);
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row + hbs, mi_col + hbs);
        break;
      case PARTITION_VERT_A:
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col);
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row + hbs, mi_col);
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col + hbs);
        break;
      case PARTITION_VERT_B:
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col);
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row, mi_col + hbs);
        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                              mi_row + hbs, mi_col + hbs);
        break;
#endif
      case PARTITION_HORZ_4:
        // Four quarter-height strips; trailing strips may be clipped by the
        // bottom frame edge (the first strip is always coded).
        for (i = 0; i < 4; ++i) {
          int this_mi_row = mi_row + i * quarter_step;
          if (i > 0 && this_mi_row >= cm->mi_rows) break;

          write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                                this_mi_row, mi_col);
        }
        break;
      case PARTITION_VERT_4:
        // Four quarter-width strips, clipped by the right frame edge.
        for (i = 0; i < 4; ++i) {
          int this_mi_col = mi_col + i * quarter_step;
          if (i > 0 && this_mi_col >= cm->mi_cols) break;

          write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
                                mi_row, this_mi_col);
        }
        break;
#endif  // CONFIG_EXT_PARTITION_TYPES
      default: assert(0);
    }
  }
#if CONFIG_SUPERTX
  // At the level where SUPERTX was signalled, write the super-transform's
  // tx_type and the coefficients for all planes in one go.
  if (partition != PARTITION_NONE && supertx_enabled && pack_token) {
    int skip;
    const int bsw = mi_size_wide[bsize];
    const int bsh = mi_size_high[bsize];

    xd->mi = cm->mi_grid_visible + mi_offset;
    supertx_size = mbmi->tx_size;
    set_mi_row_col(xd, tile, mi_row, bsh, mi_col, bsw,
#if CONFIG_DEPENDENT_HORZTILES
                   cm->dependent_horz_tiles,
#endif  // CONFIG_DEPENDENT_HORZTILES
                   cm->mi_rows, cm->mi_cols);

    assert(IMPLIES(!cm->seg.enabled, mbmi->segment_id_supertx == 0));
    assert(mbmi->segment_id_supertx < MAX_SEGMENTS);

    skip = write_skip(cm, xd, mbmi->segment_id_supertx, xd->mi[0], w);

    FRAME_CONTEXT *ec_ctx = xd->tile_ctx;

#if CONFIG_EXT_TX
    if (get_ext_tx_types(supertx_size, bsize, 1, cm->reduced_tx_set_used) > 1 &&
        !skip) {
      const int eset =
          get_ext_tx_set(supertx_size, bsize, 1, cm->reduced_tx_set_used);
      const int tx_set_type =
          get_ext_tx_set_type(supertx_size, bsize, 1, cm->reduced_tx_set_used);
      if (eset > 0) {
        aom_write_symbol(w, av1_ext_tx_ind[tx_set_type][mbmi->tx_type],
                         ec_ctx->inter_ext_tx_cdf[eset][supertx_size],
                         av1_num_ext_tx_set[tx_set_type]);
      }
    }
#else
    if (supertx_size < TX_32X32 && !skip) {
      aom_write_symbol(w, mbmi->tx_type, ec_ctx->inter_ext_tx_cdf[supertx_size],
                       TX_TYPES);
    }
#endif  // CONFIG_EXT_TX

    if (!skip) {
      assert(*tok < tok_end);
      for (int plane = 0; plane < MAX_MB_PLANE; ++plane) {
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
        // NOTE(review): blk_row/blk_col/block/tx_size are not declared in
        // this scope, so this combination (MRC_TX + SIGNAL_ANY_MRC_MASK +
        // SUPERTX) looks like it cannot compile — presumably never built
        // together; confirm against the build configuration matrix.
        TX_TYPE tx_type = av1_get_tx_type(plane ? PLANE_TYPE_UV : PLANE_TYPE_Y,
                                          xd, blk_row, blk_col, block, tx_size);
#endif  // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
        const struct macroblockd_plane *const pd = &xd->plane[plane];
        const int mbmi_txb_size = txsize_to_bsize[mbmi->tx_size];
        const BLOCK_SIZE plane_bsize = get_plane_block_size(mbmi_txb_size, pd);

        const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
        const int max_blocks_high = max_block_high(xd, plane_bsize, plane);

        int row, col;
        const TX_SIZE tx = av1_get_tx_size(plane, xd);
        BLOCK_SIZE txb_size = txsize_to_bsize[tx];

        const int stepr = tx_size_high_unit[txb_size];
        const int stepc = tx_size_wide_unit[txb_size];

        TOKEN_STATS token_stats;
        token_stats.cost = 0;
        for (row = 0; row < max_blocks_high; row += stepr)
          for (col = 0; col < max_blocks_wide; col += stepc)
            pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx,
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
                           tx_type, is_inter_block(mbmi),
#endif  // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
                           &token_stats);
        // Each plane's token run is terminated by an EOSB token.
        assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
        (*tok)++;
      }
    }
#if CONFIG_VAR_TX
    // Keep the tx-size context buffers in sync for subsequent blocks.
    xd->above_txfm_context = cm->above_txfm_context + mi_col;
    xd->left_txfm_context =
        xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
    set_txfm_ctxs(xd->mi[0]->mbmi.tx_size, bsw, bsh, skip, xd);
#endif
  }
#endif  // CONFIG_SUPERTX

  // update partition context
#if CONFIG_EXT_PARTITION_TYPES
  update_ext_partition_context(xd, mi_row, mi_col, subsize, bsize, partition);
#else
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
#endif  // CONFIG_EXT_PARTITION_TYPES

#if CONFIG_LPF_SB
  // send filter level for each superblock (64x64)
  if (bsize == cm->sb_size) {
    if (mi_row == 0 && mi_col == 0) {
      // First superblock: send the level verbatim and reset the
      // reuse/delta/sign prediction state.
      aom_write_literal(w, cm->mi_grid_visible[0]->mbmi.filt_lvl, 6);
      cm->mi_grid_visible[0]->mbmi.reuse_sb_lvl = 0;
      cm->mi_grid_visible[0]->mbmi.delta = 0;
      cm->mi_grid_visible[0]->mbmi.sign = 0;
    } else {
      // Predict from the superblock above (first column) or to the left.
      int prev_mi_row, prev_mi_col;
      if (mi_col - MAX_MIB_SIZE < 0) {
        prev_mi_row = mi_row - MAX_MIB_SIZE;
        prev_mi_col = mi_col;
      } else {
        prev_mi_row = mi_row;
        prev_mi_col = mi_col - MAX_MIB_SIZE;
      }
      MB_MODE_INFO *curr_mbmi =
          &cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi;
      MB_MODE_INFO *prev_mbmi =
          &cm->mi_grid_visible[prev_mi_row * cm->mi_stride + prev_mi_col]->mbmi;

      const uint8_t curr_lvl = curr_mbmi->filt_lvl;
      const uint8_t prev_lvl = prev_mbmi->filt_lvl;

      // Signal reuse of the neighbour's level, else a delta magnitude in
      // LPF_STEP units plus a sign when the delta is nonzero.
      const int reuse_prev_lvl = curr_lvl == prev_lvl;
      const int reuse_ctx = prev_mbmi->reuse_sb_lvl;
      curr_mbmi->reuse_sb_lvl = reuse_prev_lvl;
      aom_write_symbol(w, reuse_prev_lvl,
                       xd->tile_ctx->lpf_reuse_cdf[reuse_ctx], 2);

      if (reuse_prev_lvl) {
        curr_mbmi->delta = 0;
        curr_mbmi->sign = 0;
      } else {
        const unsigned int delta = abs(curr_lvl - prev_lvl) / LPF_STEP;
        const int delta_ctx = prev_mbmi->delta;
        curr_mbmi->delta = delta;
        aom_write_symbol(w, delta, xd->tile_ctx->lpf_delta_cdf[delta_ctx],
                         DELTA_RANGE);

        if (delta) {
          const int sign = curr_lvl > prev_lvl;
          const int sign_ctx = prev_mbmi->sign;
          curr_mbmi->sign = sign;
          aom_write_symbol(w, sign,
                           xd->tile_ctx->lpf_sign_cdf[reuse_ctx][sign_ctx], 2);
        } else {
          curr_mbmi->sign = 0;
        }
      }
    }
  }
#endif

#if CONFIG_CDEF
  // At the superblock root, write one CDEF strength index per 64x64 unit
  // that is inside the frame and not entirely skipped.
  if (bsize == cm->sb_size && cm->cdef_bits != 0 && !cm->all_lossless) {
    int width_step = mi_size_wide[BLOCK_64X64];
    int height_step = mi_size_high[BLOCK_64X64];
    int width, height;
    for (height = 0; (height < mi_size_high[cm->sb_size]) &&
                     (mi_row + height < cm->mi_rows);
         height += height_step) {
      for (width = 0; (width < mi_size_wide[cm->sb_size]) &&
                      (mi_col + width < cm->mi_cols);
           width += width_step) {
        if (!sb_all_skip(cm, mi_row + height, mi_col + width))
          aom_write_literal(
              w,
              cm->mi_grid_visible[(mi_row + height) * cm->mi_stride +
                                  (mi_col + width)]
                  ->mbmi.cdef_strength,
              cm->cdef_bits);
      }
    }
  }
#endif
#if CONFIG_LOOP_RESTORATION
  // Write restoration coefficients for every restoration tile whose
  // top-left corner falls inside this superblock, per plane.
  for (int plane = 0; plane < MAX_MB_PLANE; ++plane) {
    int rcol0, rcol1, rrow0, rrow1, nhtiles;
    if (av1_loop_restoration_corners_in_sb(cm, plane, mi_row, mi_col, bsize,
                                           &rcol0, &rcol1, &rrow0, &rrow1,
                                           &nhtiles)) {
      for (int rrow = rrow0; rrow < rrow1; ++rrow) {
        for (int rcol = rcol0; rcol < rcol1; ++rcol) {
          int rtile_idx = rcol + rrow * nhtiles;
          loop_restoration_write_sb_coeffs(cm, xd, w, plane, rtile_idx);
        }
      }
    }
  }
#endif
}
3263
// Writes all superblocks of one tile: resets the above/left entropy contexts
// and delta-Q/delta-LF predictors, then scans superblocks in raster order,
// writing modes (and, with NC_MODE_INFO, tokens in a second pass per row).
static void write_modes(AV1_COMP *const cpi, const TileInfo *const tile,
                        aom_writer *const w, const TOKENEXTRA **tok,
                        const TOKENEXTRA *const tok_end) {
  AV1_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  const int mi_row_start = tile->mi_row_start;
  const int mi_row_end = tile->mi_row_end;
  const int mi_col_start = tile->mi_col_start;
  const int mi_col_end = tile->mi_col_end;
  int mi_row, mi_col;

#if CONFIG_DEPENDENT_HORZTILES
  // Dependent horizontal tiles inherit the above-context from the tile
  // above, except at the frame top or a tile-group boundary.
  if (!cm->dependent_horz_tiles || mi_row_start == 0 ||
      tile->tg_horz_boundary) {
    av1_zero_above_context(cm, mi_col_start, mi_col_end);
  }
#else
  av1_zero_above_context(cm, mi_col_start, mi_col_end);
#endif
#if CONFIG_PVQ
  assert(cpi->td.mb.pvq_q->curr_pos == 0);
#endif
  if (cpi->common.delta_q_present_flag) {
    // Delta-Q/delta-LF are coded predictively; seed the predictors.
    xd->prev_qindex = cpi->common.base_qindex;
#if CONFIG_EXT_DELTA_Q
    if (cpi->common.delta_lf_present_flag) {
#if CONFIG_LOOPFILTER_LEVEL
      for (int lf_id = 0; lf_id < FRAME_LF_COUNT; ++lf_id)
        xd->prev_delta_lf[lf_id] = 0;
#endif  // CONFIG_LOOPFILTER_LEVEL
      xd->prev_delta_lf_from_base = 0;
    }
#endif  // CONFIG_EXT_DELTA_Q
  }

  for (mi_row = mi_row_start; mi_row < mi_row_end; mi_row += cm->mib_size) {
    av1_zero_left_context(xd);

    for (mi_col = mi_col_start; mi_col < mi_col_end; mi_col += cm->mib_size) {
      write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, 0, mi_row, mi_col,
                             cm->sb_size);
#if CONFIG_MOTION_VAR && NC_MODE_INFO
      // Two-pass packing: tokens follow the modes of each superblock.
      write_tokens_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, cm->sb_size);
#endif
    }
  }
#if CONFIG_PVQ
  // Check that the number of PVQ blocks encoded and written to the bitstream
  // are the same
  assert(cpi->td.mb.pvq_q->curr_pos == cpi->td.mb.pvq_q->last_pos);
  // Reset curr_pos in case we repack the bitstream
  cpi->td.mb.pvq_q->curr_pos = 0;
#endif
}
3318
3319 #if CONFIG_LOOP_RESTORATION
// Writes the frame-level loop-restoration header: a 2-bit code for the luma
// frame restoration type, a 1- or 2-bit code per chroma plane, and (when any
// plane uses restoration) the restoration tile-size selections.
static void encode_restoration_mode(AV1_COMMON *cm,
                                    struct aom_write_bit_buffer *wb) {
  int p;
  RestorationInfo *rsi = &cm->rst_info[0];
  // Luma: two bits selecting among NONE/WIENER/SGRPROJ/SWITCHABLE.
  switch (rsi->frame_restoration_type) {
    case RESTORE_NONE:
      aom_wb_write_bit(wb, 0);
      aom_wb_write_bit(wb, 0);
      break;
    case RESTORE_WIENER:
      aom_wb_write_bit(wb, 1);
      aom_wb_write_bit(wb, 0);
      break;
    case RESTORE_SGRPROJ:
      aom_wb_write_bit(wb, 1);
      aom_wb_write_bit(wb, 1);
      break;
    case RESTORE_SWITCHABLE:
      aom_wb_write_bit(wb, 0);
      aom_wb_write_bit(wb, 1);
      break;
    default: assert(0);
  }
  // Chroma planes: one bit for NONE, otherwise a second bit picks
  // WIENER vs SGRPROJ (SWITCHABLE is luma-only).
  for (p = 1; p < MAX_MB_PLANE; ++p) {
    rsi = &cm->rst_info[p];
    switch (rsi->frame_restoration_type) {
      case RESTORE_NONE: aom_wb_write_bit(wb, 0); break;
      case RESTORE_WIENER:
        aom_wb_write_bit(wb, 1);
        aom_wb_write_bit(wb, 0);
        break;
      case RESTORE_SGRPROJ:
        aom_wb_write_bit(wb, 1);
        aom_wb_write_bit(wb, 1);
        break;
      default: assert(0);
    }
  }
  // If any plane uses restoration, signal the luma restoration tile size as
  // a downscaling (by 1 or 2 halvings) of RESTORATION_TILESIZE_MAX.
  if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
      cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
      cm->rst_info[2].frame_restoration_type != RESTORE_NONE) {
    rsi = &cm->rst_info[0];
    aom_wb_write_bit(wb, rsi->restoration_tilesize != RESTORATION_TILESIZE_MAX);
    if (rsi->restoration_tilesize != RESTORATION_TILESIZE_MAX) {
      aom_wb_write_bit(
          wb, rsi->restoration_tilesize != (RESTORATION_TILESIZE_MAX >> 1));
    }
  }
  // For subsampled chroma, one bit says whether chroma uses the luma tile
  // size or the subsampled one; both chroma planes must agree.
  int s = AOMMIN(cm->subsampling_x, cm->subsampling_y);
  if (s && (cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
            cm->rst_info[2].frame_restoration_type != RESTORE_NONE)) {
    aom_wb_write_bit(wb,
                     cm->rst_info[1].restoration_tilesize !=
                         cm->rst_info[0].restoration_tilesize);
    assert(cm->rst_info[1].restoration_tilesize ==
               cm->rst_info[0].restoration_tilesize ||
           cm->rst_info[1].restoration_tilesize ==
               (cm->rst_info[0].restoration_tilesize >> s));
    assert(cm->rst_info[2].restoration_tilesize ==
           cm->rst_info[1].restoration_tilesize);
  } else if (!s) {
    // No subsampling: chroma tile sizes must equal luma's, nothing coded.
    assert(cm->rst_info[1].restoration_tilesize ==
           cm->rst_info[0].restoration_tilesize);
    assert(cm->rst_info[2].restoration_tilesize ==
           cm->rst_info[1].restoration_tilesize);
  }
}
3387
// Writes one Wiener filter (vertical then horizontal taps 0..2) using
// reference-based subexponential coding against the previously-written
// filter, then updates that reference.  Tap 0 is only coded for the full
// WIENER_WIN window; the reduced (chroma) window implies tap 0 == 0.
// Taps 3..6 are not coded — presumably derived by symmetry/normalization
// on the decoder side (established elsewhere; not visible here).
static void write_wiener_filter(int wiener_win, WienerInfo *wiener_info,
                                WienerInfo *ref_wiener_info, aom_writer *wb) {
  if (wiener_win == WIENER_WIN)
    aom_write_primitive_refsubexpfin(
        wb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1,
        WIENER_FILT_TAP0_SUBEXP_K,
        ref_wiener_info->vfilter[0] - WIENER_FILT_TAP0_MINV,
        wiener_info->vfilter[0] - WIENER_FILT_TAP0_MINV);
  else
    assert(wiener_info->vfilter[0] == 0 &&
           wiener_info->vfilter[WIENER_WIN - 1] == 0);
  aom_write_primitive_refsubexpfin(
      wb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1,
      WIENER_FILT_TAP1_SUBEXP_K,
      ref_wiener_info->vfilter[1] - WIENER_FILT_TAP1_MINV,
      wiener_info->vfilter[1] - WIENER_FILT_TAP1_MINV);
  aom_write_primitive_refsubexpfin(
      wb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1,
      WIENER_FILT_TAP2_SUBEXP_K,
      ref_wiener_info->vfilter[2] - WIENER_FILT_TAP2_MINV,
      wiener_info->vfilter[2] - WIENER_FILT_TAP2_MINV);
  if (wiener_win == WIENER_WIN)
    aom_write_primitive_refsubexpfin(
        wb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1,
        WIENER_FILT_TAP0_SUBEXP_K,
        ref_wiener_info->hfilter[0] - WIENER_FILT_TAP0_MINV,
        wiener_info->hfilter[0] - WIENER_FILT_TAP0_MINV);
  else
    assert(wiener_info->hfilter[0] == 0 &&
           wiener_info->hfilter[WIENER_WIN - 1] == 0);
  aom_write_primitive_refsubexpfin(
      wb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1,
      WIENER_FILT_TAP1_SUBEXP_K,
      ref_wiener_info->hfilter[1] - WIENER_FILT_TAP1_MINV,
      wiener_info->hfilter[1] - WIENER_FILT_TAP1_MINV);
  aom_write_primitive_refsubexpfin(
      wb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1,
      WIENER_FILT_TAP2_SUBEXP_K,
      ref_wiener_info->hfilter[2] - WIENER_FILT_TAP2_MINV,
      wiener_info->hfilter[2] - WIENER_FILT_TAP2_MINV);
  // The just-written filter becomes the prediction reference for the next.
  memcpy(ref_wiener_info, wiener_info, sizeof(*wiener_info));
}
3430
// Writes one self-guided projection filter: the parameter-set index as a
// literal, then the two projection coefficients with reference-based
// subexponential coding against the previously-written filter, which is
// then updated to the current one.
static void write_sgrproj_filter(SgrprojInfo *sgrproj_info,
                                 SgrprojInfo *ref_sgrproj_info,
                                 aom_writer *wb) {
  aom_write_literal(wb, sgrproj_info->ep, SGRPROJ_PARAMS_BITS);
  aom_write_primitive_refsubexpfin(wb, SGRPROJ_PRJ_MAX0 - SGRPROJ_PRJ_MIN0 + 1,
                                   SGRPROJ_PRJ_SUBEXP_K,
                                   ref_sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0,
                                   sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0);
  aom_write_primitive_refsubexpfin(wb, SGRPROJ_PRJ_MAX1 - SGRPROJ_PRJ_MIN1 + 1,
                                   SGRPROJ_PRJ_SUBEXP_K,
                                   ref_sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1,
                                   sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1);
  // Update the prediction reference for the next filter in coding order.
  memcpy(ref_sgrproj_info, sgrproj_info, sizeof(*sgrproj_info));
}
3445
// Writes the restoration type and filter coefficients for one restoration
// tile of one plane.  Depending on the plane's frame-level restoration mode:
// SWITCHABLE (luma only) codes a per-tile type token plus the matching
// filter; WIENER/SGRPROJ code an on/off flag plus the filter when on;
// RESTORE_NONE codes nothing.
static void loop_restoration_write_sb_coeffs(const AV1_COMMON *const cm,
                                             MACROBLOCKD *xd,
                                             aom_writer *const w, int plane,
                                             int rtile_idx) {
  const RestorationInfo *rsi = cm->rst_info + plane;
  if (rsi->frame_restoration_type == RESTORE_NONE) return;

  // Chroma uses the reduced Wiener window.
  const int wiener_win = (plane > 0) ? WIENER_WIN_CHROMA : WIENER_WIN;
  // Per-plane references used for predictive filter coding.
  WienerInfo *wiener_info = xd->wiener_info + plane;
  SgrprojInfo *sgrproj_info = xd->sgrproj_info + plane;

  if (rsi->frame_restoration_type == RESTORE_SWITCHABLE) {
    assert(plane == 0);
    av1_write_token(
        w, av1_switchable_restore_tree, cm->fc->switchable_restore_prob,
        &switchable_restore_encodings[rsi->restoration_type[rtile_idx]]);
    if (rsi->restoration_type[rtile_idx] == RESTORE_WIENER) {
      write_wiener_filter(wiener_win, &rsi->wiener_info[rtile_idx], wiener_info,
                          w);
    } else if (rsi->restoration_type[rtile_idx] == RESTORE_SGRPROJ) {
      write_sgrproj_filter(&rsi->sgrproj_info[rtile_idx], sgrproj_info, w);
    }
  } else if (rsi->frame_restoration_type == RESTORE_WIENER) {
    aom_write(w, rsi->restoration_type[rtile_idx] != RESTORE_NONE,
              RESTORE_NONE_WIENER_PROB);
    if (rsi->restoration_type[rtile_idx] != RESTORE_NONE) {
      write_wiener_filter(wiener_win, &rsi->wiener_info[rtile_idx], wiener_info,
                          w);
    }
  } else if (rsi->frame_restoration_type == RESTORE_SGRPROJ) {
    aom_write(w, rsi->restoration_type[rtile_idx] != RESTORE_NONE,
              RESTORE_NONE_SGRPROJ_PROB);
    if (rsi->restoration_type[rtile_idx] != RESTORE_NONE) {
      write_sgrproj_filter(&rsi->sgrproj_info[rtile_idx], sgrproj_info, w);
    }
  }
}
3483
3484 #endif // CONFIG_LOOP_RESTORATION
3485
// Writes the frame-level loop-filter header: filter level(s) (unless they
// are coded per superblock by LPF_SB), the sharpness level, and the optional
// per-reference-frame / per-mode filter-level deltas, coded differentially
// against the last-transmitted deltas.
static void encode_loopfilter(AV1_COMMON *cm, struct aom_write_bit_buffer *wb) {
  int i;
  struct loopfilter *lf = &cm->lf;

  // Encode the loop filter level and type
#if !CONFIG_LPF_SB
#if CONFIG_LOOPFILTER_LEVEL
  // Separate vertical/horizontal luma levels; chroma levels only when
  // luma filtering is on.
  aom_wb_write_literal(wb, lf->filter_level[0], 6);
  aom_wb_write_literal(wb, lf->filter_level[1], 6);
  if (lf->filter_level[0] || lf->filter_level[1]) {
    aom_wb_write_literal(wb, lf->filter_level_u, 6);
    aom_wb_write_literal(wb, lf->filter_level_v, 6);
  }
#else
  aom_wb_write_literal(wb, lf->filter_level, 6);
#endif  // CONFIG_LOOPFILTER_LEVEL
#endif  // CONFIG_LPF_SB
  aom_wb_write_literal(wb, lf->sharpness_level, 3);

  // Write out loop filter deltas applied at the MB level based on mode or
  // ref frame (if they are enabled).
  aom_wb_write_bit(wb, lf->mode_ref_delta_enabled);

  if (lf->mode_ref_delta_enabled) {
    aom_wb_write_bit(wb, lf->mode_ref_delta_update);
    if (lf->mode_ref_delta_update) {
      // Each delta gets a changed-flag; only changed values are coded, and
      // the last-sent value is updated so later frames can predict from it.
      for (i = 0; i < TOTAL_REFS_PER_FRAME; i++) {
        const int delta = lf->ref_deltas[i];
        const int changed = delta != lf->last_ref_deltas[i];
        aom_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_ref_deltas[i] = delta;
          aom_wb_write_inv_signed_literal(wb, delta, 6);
        }
      }

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
        const int delta = lf->mode_deltas[i];
        const int changed = delta != lf->last_mode_deltas[i];
        aom_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_mode_deltas[i] = delta;
          aom_wb_write_inv_signed_literal(wb, delta, 6);
        }
      }
    }
  }
}
3534
3535 #if CONFIG_CDEF
// Writes the frame-level CDEF header: damping value(s), the number of bits
// used for per-64x64 strength indices, and each strength preset (luma, plus
// chroma when x/y subsampling match).
static void encode_cdef(const AV1_COMMON *cm, struct aom_write_bit_buffer *wb) {
  int i;
#if CONFIG_CDEF_SINGLEPASS
  // Single-pass CDEF uses one shared damping value in [3, 6].
  aom_wb_write_literal(wb, cm->cdef_pri_damping - 3, 2);
  assert(cm->cdef_pri_damping == cm->cdef_sec_damping);
#else
  aom_wb_write_literal(wb, cm->cdef_pri_damping - 5, 1);
  aom_wb_write_literal(wb, cm->cdef_sec_damping - 3, 2);
#endif
  aom_wb_write_literal(wb, cm->cdef_bits, 2);
  for (i = 0; i < cm->nb_cdef_strengths; i++) {
    aom_wb_write_literal(wb, cm->cdef_strengths[i], CDEF_STRENGTH_BITS);
    // Chroma strengths are only meaningful when subsampling is symmetric.
    if (cm->subsampling_x == cm->subsampling_y)
      aom_wb_write_literal(wb, cm->cdef_uv_strengths[i], CDEF_STRENGTH_BITS);
  }
}
3552 #endif
3553
// Codes a quantizer delta as a presence flag followed, when nonzero, by
// the value as a 6-bit inverse-signed literal. A zero delta costs only
// the single flag bit.
static void write_delta_q(struct aom_write_bit_buffer *wb, int delta_q) {
  const int present = (delta_q != 0);
  aom_wb_write_bit(wb, present);
  if (present) aom_wb_write_inv_signed_literal(wb, delta_q, 6);
}
3562
// Writes the frame quantization parameters: the base quantizer index,
// the three delta-q values (luma DC, chroma DC, chroma AC), and — when
// quantization matrices are compiled in and enabled — the min/max
// matrix level range.
static void encode_quantization(const AV1_COMMON *const cm,
                                struct aom_write_bit_buffer *wb) {
  aom_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
  write_delta_q(wb, cm->y_dc_delta_q);
  write_delta_q(wb, cm->uv_dc_delta_q);
  write_delta_q(wb, cm->uv_ac_delta_q);
#if CONFIG_AOM_QM
  aom_wb_write_bit(wb, cm->using_qmatrix);
  if (cm->using_qmatrix) {
    aom_wb_write_literal(wb, cm->min_qmlevel, QM_LEVEL_BITS);
    aom_wb_write_literal(wb, cm->max_qmlevel, QM_LEVEL_BITS);
  }
#endif
}
3577
// Writes the segmentation syntax: the enable flag, the segmentation-map
// update and coding-method bits, and (when update_data is set) the
// active-feature flag plus feature value for every segment/feature
// pair. Note: may mutate encoder state via
// av1_choose_segmap_coding_method().
static void encode_segmentation(AV1_COMMON *cm, MACROBLOCKD *xd,
                                struct aom_write_bit_buffer *wb) {
  int i, j;
  const struct segmentation *seg = &cm->seg;

  aom_wb_write_bit(wb, seg->enabled);
  if (!seg->enabled) return;

  // Segmentation map: the update flag is only coded for inter frames
  // that are not error resilient; otherwise a full map update is
  // implied (and asserted).
  if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
    aom_wb_write_bit(wb, seg->update_map);
  } else {
    assert(seg->update_map == 1);
  }
  if (seg->update_map) {
    // Select the coding strategy (temporal or spatial)
    av1_choose_segmap_coding_method(cm, xd);

    // Write out the chosen coding method; temporal prediction of the
    // map is not available for intra-only or error-resilient frames.
    if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
      aom_wb_write_bit(wb, seg->temporal_update);
    } else {
      assert(seg->temporal_update == 0);
    }
  }

  // Segmentation data
  aom_wb_write_bit(wb, seg->update_data);
  if (seg->update_data) {
    aom_wb_write_bit(wb, seg->abs_delta);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int active = segfeature_active(seg, i, j);
        aom_wb_write_bit(wb, active);
        if (active) {
          const int data = get_segdata(seg, i, j);
          const int data_max = av1_seg_feature_data_max(j);

          // Signed features are coded as bounded magnitude plus a sign
          // bit; unsigned features as a bounded magnitude only.
          if (av1_is_segfeature_signed(j)) {
            encode_unsigned_max(wb, abs(data), data_max);
            aom_wb_write_bit(wb, data < 0);
          } else {
            encode_unsigned_max(wb, data, data_max);
          }
        }
      }
    }
  }
}
3628
// Writes the frame-level transform mode. Side effect: *mode may be
// overwritten — lossless frames are forced to ONLY_4X4 (no bits coded),
// and with CONFIG_VAR_TX_NO_TX_MODE the mode is pinned to
// TX_MODE_SELECT, likewise without coding any bits.
static void write_tx_mode(AV1_COMMON *cm, TX_MODE *mode,
                          struct aom_write_bit_buffer *wb) {
  if (cm->all_lossless) {
    *mode = ONLY_4X4;
    return;
  }
#if CONFIG_VAR_TX_NO_TX_MODE
  (void)wb;
  *mode = TX_MODE_SELECT;
  return;
#else
#if CONFIG_TX64X64
  // One bit flags TX_MODE_SELECT; otherwise a 2-bit literal capped at
  // ALLOW_32X32 is coded, plus one extra bit to distinguish
  // ALLOW_64X64 from ALLOW_32X32.
  aom_wb_write_bit(wb, *mode == TX_MODE_SELECT);
  if (*mode != TX_MODE_SELECT) {
    aom_wb_write_literal(wb, AOMMIN(*mode, ALLOW_32X32), 2);
    if (*mode >= ALLOW_32X32) aom_wb_write_bit(wb, *mode == ALLOW_64X64);
  }
#else
  aom_wb_write_bit(wb, *mode == TX_MODE_SELECT);
  if (*mode != TX_MODE_SELECT) aom_wb_write_literal(wb, *mode, 2);
#endif // CONFIG_TX64X64
#endif // CONFIG_VAR_TX_NO_TX_MODE
}
3652
3653 static void write_frame_interp_filter(InterpFilter filter,
3654 struct aom_write_bit_buffer *wb) {
3655 aom_wb_write_bit(wb, filter == SWITCHABLE);
3656 if (filter != SWITCHABLE)
3657 aom_wb_write_literal(wb, filter, LOG_SWITCHABLE_FILTERS);
3658 }
3659
// If the frame is marked SWITCHABLE but the gathered counts show that
// exactly one interpolation filter was ever chosen, demote the frame to
// that fixed filter so per-block filter signalling can be skipped.
static void fix_interp_filter(AV1_COMMON *cm, FRAME_COUNTS *counts) {
  if (cm->interp_filter == SWITCHABLE) {
    // Check to see if only one of the filters is actually used
    int count[SWITCHABLE_FILTERS];
    int i, j, c = 0;
    for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
      count[i] = 0;
      // Sum usage of filter i over all switchable-filter contexts.
      for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
        count[i] += counts->switchable_interp[j][i];
      c += (count[i] > 0);
    }
    if (c == 1) {
      // Only one filter is used. So set the filter at frame level
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        if (count[i]) {
          // NOTE: in OBMC + warped/global-motion builds the braceless
          // #if'd condition below guards the assignment — the frame is
          // only demoted for EIGHTTAP_REGULAR unless the relevant
          // WARP_*_NEIGHBORS_WITH_OBMC build flag is set. The break
          // executes regardless.
#if CONFIG_MOTION_VAR && (CONFIG_WARPED_MOTION || CONFIG_GLOBAL_MOTION)
#if CONFIG_WARPED_MOTION
          if (i == EIGHTTAP_REGULAR || WARP_WM_NEIGHBORS_WITH_OBMC)
#else
          if (i == EIGHTTAP_REGULAR || WARP_GM_NEIGHBORS_WITH_OBMC)
#endif // CONFIG_WARPED_MOTION
#endif // CONFIG_MOTION_VAR && (CONFIG_WARPED_MOTION || CONFIG_GLOBAL_MOTION)
            cm->interp_filter = i;
          break;
        }
      }
    }
  }
}
3689
3690 #if CONFIG_MAX_TILE
3691
// Same as write_uniform() but targets the uncompressed-header bit
// buffer: encodes v in [0, n) with a near-uniform code in which the
// first (1 << bits) - n values use bits-1 bits and the rest use bits.
static void wb_write_uniform(struct aom_write_bit_buffer *wb, int n, int v) {
  const int bits = get_unsigned_bits(n);
  if (bits == 0) return;
  const int num_short_codes = (1 << bits) - n;
  if (v < num_short_codes) {
    aom_wb_write_literal(wb, v, bits - 1);
  } else {
    const int rem = v - num_short_codes;
    aom_wb_write_literal(wb, num_short_codes + (rem >> 1), bits - 1);
    aom_wb_write_literal(wb, rem & 1, 1);
  }
}
3704
// Writes the CONFIG_MAX_TILE tile-partition syntax. Uniform spacing is
// coded as unary offsets from the minimum log2 tile counts; explicit
// spacing codes each tile's width/height (in 64x64 superblock units,
// minus one) with a near-uniform code bounded by the space remaining.
static void write_tile_info_max_tile(const AV1_COMMON *const cm,
                                     struct aom_write_bit_buffer *wb) {
  // Frame dimensions rounded up to whole max-size superblocks.
  int width_mi = ALIGN_POWER_OF_TWO(cm->mi_cols, MAX_MIB_SIZE_LOG2);
  int height_mi = ALIGN_POWER_OF_TWO(cm->mi_rows, MAX_MIB_SIZE_LOG2);
  int width_sb = width_mi >> MAX_MIB_SIZE_LOG2;
  int height_sb = height_mi >> MAX_MIB_SIZE_LOG2;
  int size_sb, i;

  aom_wb_write_bit(wb, cm->uniform_tile_spacing_flag);

  if (cm->uniform_tile_spacing_flag) {
    // Uniform spaced tiles with power-of-two number of rows and columns
    // tile columns: unary code of log2_tile_cols above the minimum; the
    // terminating 0 is omitted when already at the maximum.
    int ones = cm->log2_tile_cols - cm->min_log2_tile_cols;
    while (ones--) {
      aom_wb_write_bit(wb, 1);
    }
    if (cm->log2_tile_cols < cm->max_log2_tile_cols) {
      aom_wb_write_bit(wb, 0);
    }

    // rows
    ones = cm->log2_tile_rows - cm->min_log2_tile_rows;
    while (ones--) {
      aom_wb_write_bit(wb, 1);
    }
    if (cm->log2_tile_rows < cm->max_log2_tile_rows) {
      aom_wb_write_bit(wb, 0);
    }
  } else {
    // Explicit tiles with configurable tile widths and heights
    // columns
    for (i = 0; i < cm->tile_cols; i++) {
      size_sb = cm->tile_col_start_sb[i + 1] - cm->tile_col_start_sb[i];
      wb_write_uniform(wb, AOMMIN(width_sb, MAX_TILE_WIDTH_SB), size_sb - 1);
      width_sb -= size_sb;
    }
    // All superblock columns must be consumed exactly.
    assert(width_sb == 0);

    // rows
    for (i = 0; i < cm->tile_rows; i++) {
      size_sb = cm->tile_row_start_sb[i + 1] - cm->tile_row_start_sb[i];
      wb_write_uniform(wb, AOMMIN(height_sb, cm->max_tile_height_sb),
                       size_sb - 1);
      height_sb -= size_sb;
    }
    assert(height_sb == 0);
  }
}
3754 #endif
3755
// Writes the tile partitioning syntax. Large-scale (CONFIG_EXT_TILE)
// streams code explicit tile width/height in superblock units; other
// streams use either the CONFIG_MAX_TILE syntax or the legacy
// unary-coded log2 tile counts. Also codes the dependent-horizontal-
// tiles and loop-filter-across-tiles flags where those experiments are
// enabled.
static void write_tile_info(const AV1_COMMON *const cm,
                            struct aom_write_bit_buffer *wb) {
#if CONFIG_EXT_TILE
  if (cm->large_scale_tile) {
    // Tile dimensions rounded up to whole superblocks, in SB units.
    const int tile_width =
        ALIGN_POWER_OF_TWO(cm->tile_width, cm->mib_size_log2) >>
        cm->mib_size_log2;
    const int tile_height =
        ALIGN_POWER_OF_TWO(cm->tile_height, cm->mib_size_log2) >>
        cm->mib_size_log2;

    assert(tile_width > 0);
    assert(tile_height > 0);

// Write the tile sizes (minus one); 5 bits each for 128x128
// superblocks, 6 bits each for 64x64.
#if CONFIG_EXT_PARTITION
    if (cm->sb_size == BLOCK_128X128) {
      assert(tile_width <= 32);
      assert(tile_height <= 32);
      aom_wb_write_literal(wb, tile_width - 1, 5);
      aom_wb_write_literal(wb, tile_height - 1, 5);
    } else {
#endif // CONFIG_EXT_PARTITION
      assert(tile_width <= 64);
      assert(tile_height <= 64);
      aom_wb_write_literal(wb, tile_width - 1, 6);
      aom_wb_write_literal(wb, tile_height - 1, 6);
#if CONFIG_EXT_PARTITION
    }
#endif // CONFIG_EXT_PARTITION
  } else {
#endif // CONFIG_EXT_TILE

#if CONFIG_MAX_TILE
    write_tile_info_max_tile(cm, wb);
#else
  int min_log2_tile_cols, max_log2_tile_cols, ones;
  av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns: unary offset of log2_tile_cols above the minimum; the
  // terminating 0 is omitted at the maximum.
  ones = cm->log2_tile_cols - min_log2_tile_cols;
  while (ones--) aom_wb_write_bit(wb, 1);

  if (cm->log2_tile_cols < max_log2_tile_cols) aom_wb_write_bit(wb, 0);

  // rows: log2_tile_rows in {0,1,2} coded with up to two bits.
  aom_wb_write_bit(wb, cm->log2_tile_rows != 0);
  if (cm->log2_tile_rows != 0) aom_wb_write_bit(wb, cm->log2_tile_rows != 1);
#endif
#if CONFIG_DEPENDENT_HORZTILES
  if (cm->tile_rows > 1) aom_wb_write_bit(wb, cm->dependent_horz_tiles);
#endif
#if CONFIG_EXT_TILE
  }
#endif // CONFIG_EXT_TILE

#if CONFIG_LOOPFILTERING_ACROSS_TILES
  aom_wb_write_bit(wb, cm->loop_filter_across_tiles_enabled);
#endif // CONFIG_LOOPFILTERING_ACROSS_TILES
}
3816
3817 #if CONFIG_EXT_REFS
3818 #if USE_GF16_MULTI_LAYER
3819 static int get_refresh_mask_gf16(AV1_COMP *cpi) {
3820 int refresh_mask = 0;
3821
3822 if (cpi->refresh_last_frame || cpi->refresh_golden_frame ||
3823 cpi->refresh_bwd_ref_frame || cpi->refresh_alt2_ref_frame ||
3824 cpi->refresh_alt_ref_frame) {
3825 assert(cpi->refresh_fb_idx >= 0 && cpi->refresh_fb_idx < REF_FRAMES);
3826 refresh_mask |= (1 << cpi->refresh_fb_idx);
3827 }
3828
3829 return refresh_mask;
3830 }
3831 #endif // USE_GF16_MULTI_LAYER
3832 #endif // CONFIG_EXT_REFS
3833
// Builds the bitmask of reference buffer slots that the decoder should
// refresh with the current coded frame. With CONFIG_EXT_REFS a LAST
// refresh is routed through the LAST3 virtual index (see the note
// below), and the golden/ARF slots may be swapped when an existing
// golden frame is being preserved as the new ARF.
static int get_refresh_mask(AV1_COMP *cpi) {
  int refresh_mask = 0;
#if CONFIG_EXT_REFS
#if USE_GF16_MULTI_LAYER
  // The 16-frame GF group uses its own single-slot refresh scheme.
  if (cpi->rc.baseline_gf_interval == 16) return get_refresh_mask_gf16(cpi);
#endif // USE_GF16_MULTI_LAYER

  // NOTE(zoeliu): When LAST_FRAME is to get refreshed, the decoder will be
  // notified to get LAST3_FRAME refreshed and then the virtual indexes for all
  // the 3 LAST reference frames will be updated accordingly, i.e.:
  // (1) The original virtual index for LAST3_FRAME will become the new virtual
  //     index for LAST_FRAME; and
  // (2) The original virtual indexes for LAST_FRAME and LAST2_FRAME will be
  //     shifted and become the new virtual indexes for LAST2_FRAME and
  //     LAST3_FRAME.
  refresh_mask |=
      (cpi->refresh_last_frame << cpi->lst_fb_idxes[LAST_REF_FRAMES - 1]);

  refresh_mask |= (cpi->refresh_bwd_ref_frame << cpi->bwd_fb_idx);
  refresh_mask |= (cpi->refresh_alt2_ref_frame << cpi->alt2_fb_idx);
#else // !CONFIG_EXT_REFS
  refresh_mask |= (cpi->refresh_last_frame << cpi->lst_fb_idx);
#endif // CONFIG_EXT_REFS

  if (av1_preserve_existing_gf(cpi)) {
    // We have decided to preserve the previously existing golden frame as our
    // new ARF frame. However, in the short term we leave it in the GF slot and,
    // if we're updating the GF with the current decoded frame, we save it
    // instead to the ARF slot.
    // Later, in the function av1_encoder.c:av1_update_reference_frames() we
    // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
    // there so that it can be done outside of the recode loop.
    // Note: This is highly specific to the use of ARF as a forward reference,
    // and this needs to be generalized as other uses are implemented
    // (like RTC/temporal scalability).
    return refresh_mask | (cpi->refresh_golden_frame << cpi->alt_fb_idx);
  } else {
#if CONFIG_EXT_REFS
    const int arf_idx = cpi->alt_fb_idx;
#else // !CONFIG_EXT_REFS
    // Two-pass multi-ARF encoding may target a different ARF slot.
    int arf_idx = cpi->alt_fb_idx;
    if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
      const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
      arf_idx = gf_group->arf_update_idx[gf_group->index];
    }
#endif // CONFIG_EXT_REFS
    return refresh_mask | (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
           (cpi->refresh_alt_ref_frame << arf_idx);
  }
}
3884
3885 #if CONFIG_EXT_TILE
3886 static INLINE int find_identical_tile(
3887 const int tile_row, const int tile_col,
3888 TileBufferEnc (*const tile_buffers)[1024]) {
3889 const MV32 candidate_offset[1] = { { 1, 0 } };
3890 const uint8_t *const cur_tile_data =
3891 tile_buffers[tile_row][tile_col].data + 4;
3892 const size_t cur_tile_size = tile_buffers[tile_row][tile_col].size;
3893
3894 int i;
3895
3896 if (tile_row == 0) return 0;
3897
3898 // (TODO: yunqingwang) For now, only above tile is checked and used.
3899 // More candidates such as left tile can be added later.
3900 for (i = 0; i < 1; i++) {
3901 int row_offset = candidate_offset[0].row;
3902 int col_offset = candidate_offset[0].col;
3903 int row = tile_row - row_offset;
3904 int col = tile_col - col_offset;
3905 uint8_t tile_hdr;
3906 const uint8_t *tile_data;
3907 TileBufferEnc *candidate;
3908
3909 if (row < 0 || col < 0) continue;
3910
3911 tile_hdr = *(tile_buffers[row][col].data);
3912
3913 // Read out tcm bit
3914 if ((tile_hdr >> 7) == 1) {
3915 // The candidate is a copy tile itself
3916 row_offset += tile_hdr & 0x7f;
3917 row = tile_row - row_offset;
3918 }
3919
3920 candidate = &tile_buffers[row][col];
3921
3922 if (row_offset >= 128 || candidate->size != cur_tile_size) continue;
3923
3924 tile_data = candidate->data + 4;
3925
3926 if (memcmp(tile_data, cur_tile_data, cur_tile_size) != 0) continue;
3927
3928 // Identical tile found
3929 assert(row_offset > 0);
3930 return row_offset;
3931 }
3932
3933 // No identical tile found
3934 return 0;
3935 }
3936 #endif // CONFIG_EXT_TILE
3937
3938 #if !CONFIG_OBU || CONFIG_EXT_TILE
// Entropy-codes every tile into dst and assembles the frame payload.
// Large-scale (CONFIG_EXT_TILE) streams emit column-major tiles with
// per-column and per-tile size headers and optional tile copying; all
// other streams emit the uncompressed (and optionally compressed)
// frame header followed by tile-group data, inserting extra tile-group
// headers when the configured group size or MTU limit is exceeded.
// Returns the total byte count; *max_tile_size and *max_tile_col_size
// report the largest tile / tile-column payloads so size fields can be
// compacted afterwards (see remux_tiles).
static uint32_t write_tiles(AV1_COMP *const cpi, uint8_t *const dst,
                            unsigned int *max_tile_size,
                            unsigned int *max_tile_col_size) {
  const AV1_COMMON *const cm = &cpi->common;
  aom_writer mode_bc;
  int tile_row, tile_col;
  TOKENEXTRA *(*const tok_buffers)[MAX_TILE_COLS] = cpi->tile_tok;
  TileBufferEnc(*const tile_buffers)[MAX_TILE_COLS] = cpi->tile_buffers;
  uint32_t total_size = 0;
  const int tile_cols = cm->tile_cols;
  const int tile_rows = cm->tile_rows;
  unsigned int tile_size = 0;
  const int have_tiles = tile_cols * tile_rows > 1;
  struct aom_write_bit_buffer wb = { dst, 0 };
  const int n_log2_tiles = cm->log2_tile_rows + cm->log2_tile_cols;
  uint32_t compressed_hdr_size;
  // Fixed size tile groups for the moment
  const int num_tg_hdrs = cm->num_tg;
  const int tg_size =
#if CONFIG_EXT_TILE
      (cm->large_scale_tile)
          ? 1
          :
#endif // CONFIG_EXT_TILE
          (tile_rows * tile_cols + num_tg_hdrs - 1) / num_tg_hdrs;
  int tile_count = 0;
  int tg_count = 1;
  int tile_size_bytes = 4;
  int tile_col_size_bytes;
  uint32_t uncompressed_hdr_size = 0;
  struct aom_write_bit_buffer tg_params_wb;
  struct aom_write_bit_buffer tile_size_bytes_wb;
  uint32_t saved_offset;
  int mtu_size = cpi->oxcf.mtu;
  int curr_tg_data_size = 0;
  int hdr_size;

  *max_tile_size = 0;
  *max_tile_col_size = 0;

  // All tile size fields are output on 4 bytes. A call to remux_tiles will
  // later compact the data if smaller headers are adequate.

#if CONFIG_EXT_TILE
  if (cm->large_scale_tile) {
    // Large-scale path: tiles are written column-major with a 4-byte
    // size header per column (except the last) and per tile.
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      TileInfo tile_info;
      const int is_last_col = (tile_col == tile_cols - 1);
      const uint32_t col_offset = total_size;

      av1_tile_set_col(&tile_info, cm, tile_col);

      // The last column does not have a column header
      if (!is_last_col) total_size += 4;

      for (tile_row = 0; tile_row < tile_rows; tile_row++) {
        TileBufferEnc *const buf = &tile_buffers[tile_row][tile_col];
        const TOKENEXTRA *tok = tok_buffers[tile_row][tile_col];
        const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
        const int data_offset = have_tiles ? 4 : 0;
        const int tile_idx = tile_row * tile_cols + tile_col;
        TileDataEnc *this_tile = &cpi->tile_data[tile_idx];
        av1_tile_set_row(&tile_info, cm, tile_row);

        buf->data = dst + total_size;

        // If CONFIG_EXT_TILE = 1, every tile in the row has a header,
        // even for the last one, unless no tiling is used at all.
        total_size += data_offset;
        // Initialise tile context from the frame context
        this_tile->tctx = *cm->fc;
        cpi->td.mb.e_mbd.tile_ctx = &this_tile->tctx;
#if CONFIG_PVQ
        cpi->td.mb.pvq_q = &this_tile->pvq_q;
        cpi->td.mb.daala_enc.state.adapt = &this_tile->tctx.pvq_context;
#endif // CONFIG_PVQ
#if CONFIG_ANS
        mode_bc.size = 1 << cpi->common.ans_window_size_log2;
#endif
        aom_start_encode(&mode_bc, buf->data + data_offset);
        write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
        assert(tok == tok_end);
        aom_stop_encode(&mode_bc);
        tile_size = mode_bc.pos;
#if CONFIG_PVQ
        cpi->td.mb.pvq_q = NULL;
#endif
        buf->size = tile_size;

        // Record the maximum tile size we see, so we can compact headers later.
        *max_tile_size = AOMMAX(*max_tile_size, tile_size);

        if (have_tiles) {
          // tile header: size of this tile, or copy offset
          uint32_t tile_header = tile_size;
          // Tile copying is only attempted for small tiles (<= 256
          // pixels in the larger dimension).
          const int tile_copy_mode =
              ((AOMMAX(cm->tile_width, cm->tile_height) << MI_SIZE_LOG2) <= 256)
                  ? 1
                  : 0;

          // If tile_copy_mode = 1, check if this tile is a copy tile.
          // Very low chances to have copy tiles on the key frames, so don't
          // search on key frames to reduce unnecessary search.
          if (cm->frame_type != KEY_FRAME && tile_copy_mode) {
            const int idendical_tile_offset =
                find_identical_tile(tile_row, tile_col, tile_buffers);

            if (idendical_tile_offset > 0) {
              // Copy tile: no payload; the header carries the offset
              // with the 0x80 flag in its top byte.
              tile_size = 0;
              tile_header = idendical_tile_offset | 0x80;
              tile_header <<= 24;
            }
          }

          mem_put_le32(buf->data, tile_header);
        }

        total_size += tile_size;
      }

      if (!is_last_col) {
        // Back-fill the column size header now that the column is done.
        uint32_t col_size = total_size - col_offset - 4;
        mem_put_le32(dst + col_offset, col_size);

        // If it is not final packing, record the maximum tile column size we
        // see, otherwise, check if the tile size is out of the range.
        *max_tile_col_size = AOMMAX(*max_tile_col_size, col_size);
      }
    }
  } else {
#endif // CONFIG_EXT_TILE
    write_uncompressed_header_frame(cpi, &wb);

#if CONFIG_EXT_REFS
    // A show-existing-frame header carries no tile data at all.
    if (cm->show_existing_frame) {
      total_size = aom_wb_bytes_written(&wb);
      return (uint32_t)total_size;
    }
#endif // CONFIG_EXT_REFS

    // Write the tile length code (placeholder 3 = 4-byte size fields;
    // overwritten after remux_tiles below).
    tile_size_bytes_wb = wb;
    aom_wb_write_literal(&wb, 3, 2);

    /* Write a placeholder for the number of tiles in each tile group */
    tg_params_wb = wb;
    saved_offset = wb.bit_offset;
    if (have_tiles) {
      aom_wb_overwrite_literal(&wb, 3, n_log2_tiles);
      aom_wb_overwrite_literal(&wb, (1 << n_log2_tiles) - 1, n_log2_tiles);
    }

    if (!use_compressed_header(cm)) {
      uncompressed_hdr_size = aom_wb_bytes_written(&wb);
      compressed_hdr_size = 0;
    } else {
      /* Write a placeholder for the compressed header length */
      struct aom_write_bit_buffer comp_hdr_len_wb = wb;
      aom_wb_write_literal(&wb, 0, 16);

      uncompressed_hdr_size = aom_wb_bytes_written(&wb);
      compressed_hdr_size =
          write_compressed_header(cpi, dst + uncompressed_hdr_size);
      aom_wb_overwrite_literal(&comp_hdr_len_wb, (int)(compressed_hdr_size),
                               16);
    }

    hdr_size = uncompressed_hdr_size + compressed_hdr_size;
    total_size += hdr_size;

    for (tile_row = 0; tile_row < tile_rows; tile_row++) {
      TileInfo tile_info;
      const int is_last_row = (tile_row == tile_rows - 1);
      av1_tile_set_row(&tile_info, cm, tile_row);

      for (tile_col = 0; tile_col < tile_cols; tile_col++) {
        const int tile_idx = tile_row * tile_cols + tile_col;
        TileBufferEnc *const buf = &tile_buffers[tile_row][tile_col];
        TileDataEnc *this_tile = &cpi->tile_data[tile_idx];
        const TOKENEXTRA *tok = tok_buffers[tile_row][tile_col];
        const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
        const int is_last_col = (tile_col == tile_cols - 1);
        const int is_last_tile = is_last_col && is_last_row;

        // Start a new tile group when either the fixed group size or
        // the MTU byte budget has been exceeded by the previous tile.
        if ((!mtu_size && tile_count > tg_size) ||
            (mtu_size && tile_count && curr_tg_data_size >= mtu_size)) {
          // New tile group
          tg_count++;
          // We've exceeded the packet size
          if (tile_count > 1) {
            /* The last tile exceeded the packet size. The tile group size
               should therefore be tile_count-1.
               Move the last tile and insert headers before it
             */
            uint32_t old_total_size = total_size - tile_size - 4;
            memmove(dst + old_total_size + hdr_size, dst + old_total_size,
                    (tile_size + 4) * sizeof(uint8_t));
            // Copy uncompressed header
            memmove(dst + old_total_size, dst,
                    uncompressed_hdr_size * sizeof(uint8_t));
            // Write the number of tiles in the group into the last uncompressed
            // header before the one we've just inserted
            aom_wb_overwrite_literal(&tg_params_wb, tile_idx - tile_count,
                                     n_log2_tiles);
            aom_wb_overwrite_literal(&tg_params_wb, tile_count - 2,
                                     n_log2_tiles);
            // Update the pointer to the last TG params
            tg_params_wb.bit_offset = saved_offset + 8 * old_total_size;
            // Copy compressed header
            memmove(dst + old_total_size + uncompressed_hdr_size,
                    dst + uncompressed_hdr_size,
                    compressed_hdr_size * sizeof(uint8_t));
            total_size += hdr_size;
            tile_count = 1;
            curr_tg_data_size = hdr_size + tile_size + 4;
          } else {
            // We exceeded the packet size in just one tile
            // Copy uncompressed header
            memmove(dst + total_size, dst,
                    uncompressed_hdr_size * sizeof(uint8_t));
            // Write the number of tiles in the group into the last uncompressed
            // header
            aom_wb_overwrite_literal(&tg_params_wb, tile_idx - tile_count,
                                     n_log2_tiles);
            aom_wb_overwrite_literal(&tg_params_wb, tile_count - 1,
                                     n_log2_tiles);
            tg_params_wb.bit_offset = saved_offset + 8 * total_size;
            // Copy compressed header
            memmove(dst + total_size + uncompressed_hdr_size,
                    dst + uncompressed_hdr_size,
                    compressed_hdr_size * sizeof(uint8_t));
            total_size += hdr_size;
            tile_count = 0;
            curr_tg_data_size = hdr_size;
          }
        }
        tile_count++;
        av1_tile_set_col(&tile_info, cm, tile_col);

#if CONFIG_DEPENDENT_HORZTILES
        av1_tile_set_tg_boundary(&tile_info, cm, tile_row, tile_col);
#endif
        buf->data = dst + total_size;

        // The last tile does not have a header.
        if (!is_last_tile) total_size += 4;

        // Initialise tile context from the frame context
        this_tile->tctx = *cm->fc;
        cpi->td.mb.e_mbd.tile_ctx = &this_tile->tctx;
#if CONFIG_PVQ
        cpi->td.mb.pvq_q = &this_tile->pvq_q;
        cpi->td.mb.daala_enc.state.adapt = &this_tile->tctx.pvq_context;
#endif // CONFIG_PVQ
#if CONFIG_ANS
        mode_bc.size = 1 << cpi->common.ans_window_size_log2;
#endif // CONFIG_ANS
#if CONFIG_LOOP_RESTORATION
        // Reset per-plane restoration predictors at tile start.
        for (int p = 0; p < MAX_MB_PLANE; ++p) {
          set_default_wiener(cpi->td.mb.e_mbd.wiener_info + p);
          set_default_sgrproj(cpi->td.mb.e_mbd.sgrproj_info + p);
        }
#endif // CONFIG_LOOP_RESTORATION

        aom_start_encode(&mode_bc, dst + total_size);
        write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
#if !CONFIG_LV_MAP
#if !CONFIG_PVQ
        assert(tok == tok_end);
#endif // !CONFIG_PVQ
#endif // !CONFIG_LV_MAP
        aom_stop_encode(&mode_bc);
        tile_size = mode_bc.pos;
#if CONFIG_PVQ
        cpi->td.mb.pvq_q = NULL;
#endif

        assert(tile_size > 0);

        curr_tg_data_size += tile_size + 4;
        buf->size = tile_size;

        if (!is_last_tile) {
          *max_tile_size = AOMMAX(*max_tile_size, tile_size);
          // size of this tile
          mem_put_le32(buf->data, tile_size);
        }

        total_size += tile_size;
      }
    }
    // Write the final tile group size
    if (n_log2_tiles) {
      aom_wb_overwrite_literal(
          &tg_params_wb, (tile_cols * tile_rows) - tile_count, n_log2_tiles);
      aom_wb_overwrite_literal(&tg_params_wb, tile_count - 1, n_log2_tiles);
    }
    // Remux if possible. TODO (Thomas Davies): do this for more than one tile
    // group
    if (have_tiles && tg_count == 1) {
      int data_size =
          total_size - (uncompressed_hdr_size + compressed_hdr_size);
      data_size =
          remux_tiles(cm, dst + uncompressed_hdr_size + compressed_hdr_size,
                      data_size, *max_tile_size, *max_tile_col_size,
                      &tile_size_bytes, &tile_col_size_bytes);
      total_size = data_size + uncompressed_hdr_size + compressed_hdr_size;
      aom_wb_overwrite_literal(&tile_size_bytes_wb, tile_size_bytes - 1, 2);
    }

#if CONFIG_EXT_TILE
  }
#endif // CONFIG_EXT_TILE
  return (uint32_t)total_size;
}
4254 #endif
4255
4256 static void write_render_size(const AV1_COMMON *cm,
4257 struct aom_write_bit_buffer *wb) {
4258 const int scaling_active = !av1_resize_unscaled(cm);
4259 aom_wb_write_bit(wb, scaling_active);
4260 if (scaling_active) {
4261 aom_wb_write_literal(wb, cm->render_width - 1, 16);
4262 aom_wb_write_literal(wb, cm->render_height - 1, 16);
4263 }
4264 }
4265
4266 #if CONFIG_FRAME_SUPERRES
4267 static void write_superres_scale(const AV1_COMMON *const cm,
4268 struct aom_write_bit_buffer *wb) {
4269 // First bit is whether to to scale or not
4270 if (cm->superres_scale_denominator == SCALE_NUMERATOR) {
4271 aom_wb_write_bit(wb, 0); // no scaling
4272 } else {
4273 aom_wb_write_bit(wb, 1); // scaling, write scale factor
4274 aom_wb_write_literal(
4275 wb, cm->superres_scale_denominator - SUPERRES_SCALE_DENOMINATOR_MIN,
4276 SUPERRES_SCALE_BITS);
4277 }
4278 }
4279 #endif // CONFIG_FRAME_SUPERRES
4280
// Writes the frame dimensions (minus one, 16 bits each) followed by the
// render size. With CONFIG_FRAME_SUPERRES the pre-downscaling
// (upscaled) dimensions are coded together with the superres
// denominator.
static void write_frame_size(const AV1_COMMON *cm,
                             struct aom_write_bit_buffer *wb) {
#if CONFIG_FRAME_SUPERRES
  aom_wb_write_literal(wb, cm->superres_upscaled_width - 1, 16);
  aom_wb_write_literal(wb, cm->superres_upscaled_height - 1, 16);
  write_superres_scale(cm, wb);
#else
  aom_wb_write_literal(wb, cm->width - 1, 16);
  aom_wb_write_literal(wb, cm->height - 1, 16);
#endif // CONFIG_FRAME_SUPERRES
  write_render_size(cm, wb);
}
4293
// Signals the frame size by reference: one bit per active reference
// frame indicating whether this frame's (upscaled) coded size and
// render size match that reference. Stops at the first match (coding
// only the superres scale, when compiled in); if no reference matches,
// the size is coded explicitly via write_frame_size().
static void write_frame_size_with_refs(AV1_COMP *cpi,
                                       struct aom_write_bit_buffer *wb) {
  AV1_COMMON *const cm = &cpi->common;
  int found = 0;

  MV_REFERENCE_FRAME ref_frame;
  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
    YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame);

    if (cfg != NULL) {
#if CONFIG_FRAME_SUPERRES
      found = cm->superres_upscaled_width == cfg->y_crop_width &&
              cm->superres_upscaled_height == cfg->y_crop_height;
#else
      found =
          cm->width == cfg->y_crop_width && cm->height == cfg->y_crop_height;
#endif
      // The render size must match as well for the reference to count.
      found &= cm->render_width == cfg->render_width &&
               cm->render_height == cfg->render_height;
    }
    aom_wb_write_bit(wb, found);
    if (found) {
#if CONFIG_FRAME_SUPERRES
      write_superres_scale(cm, wb);
#endif // CONFIG_FRAME_SUPERRES
      break;
    }
  }

  if (!found) write_frame_size(cm, wb);
}
4325
4326 static void write_profile(BITSTREAM_PROFILE profile,
4327 struct aom_write_bit_buffer *wb) {
4328 switch (profile) {
4329 case PROFILE_0: aom_wb_write_literal(wb, 0, 2); break;
4330 case PROFILE_1: aom_wb_write_literal(wb, 2, 2); break;
4331 case PROFILE_2: aom_wb_write_literal(wb, 1, 2); break;
4332 case PROFILE_3: aom_wb_write_literal(wb, 6, 3); break;
4333 default: assert(0);
4334 }
4335 }
4336
// Writes bit depth (only for profiles >= 2, which are high-bit-depth),
// color space (and transfer function when CONFIG_COLORSPACE_HEADERS),
// color range and chroma subsampling. Profiles 1 and 3 carry explicit
// subsampling bits and must not be 4:2:0; profiles 0 and 2 imply 4:2:0.
// SRGB content requires profile 1 or 3 and carries no range/subsampling
// bits.
static void write_bitdepth_colorspace_sampling(
    AV1_COMMON *const cm, struct aom_write_bit_buffer *wb) {
  if (cm->profile >= PROFILE_2) {
    assert(cm->bit_depth > AOM_BITS_8);
    // 0 = 10-bit, 1 = 12-bit.
    aom_wb_write_bit(wb, cm->bit_depth == AOM_BITS_10 ? 0 : 1);
  }
#if CONFIG_COLORSPACE_HEADERS
  aom_wb_write_literal(wb, cm->color_space, 5);
  aom_wb_write_literal(wb, cm->transfer_function, 5);
#else
  aom_wb_write_literal(wb, cm->color_space, 3);
#endif
  if (cm->color_space != AOM_CS_SRGB) {
    // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
    aom_wb_write_bit(wb, cm->color_range);
    if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
      assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
      aom_wb_write_bit(wb, cm->subsampling_x);
      aom_wb_write_bit(wb, cm->subsampling_y);
      aom_wb_write_bit(wb, 0); // unused
    } else {
      assert(cm->subsampling_x == 1 && cm->subsampling_y == 1);
    }
#if CONFIG_COLORSPACE_HEADERS
    // Chroma sample position is only meaningful for 4:2:0.
    if (cm->subsampling_x == 1 && cm->subsampling_y == 1) {
      aom_wb_write_literal(wb, cm->chroma_sample_position, 2);
    }
#endif
  } else {
    assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
    aom_wb_write_bit(wb, 0); // unused
  }
}
4370
4371 #if CONFIG_REFERENCE_BUFFER
// Populates the sequence-level frame-id parameters (currently from
// compile-time constants rather than real configuration — see the
// placeholder comment) and writes them: a presence flag plus, when
// present, the two frame-id length fields.
void write_sequence_header(AV1_COMMON *const cm,
                           struct aom_write_bit_buffer *wb) {
  SequenceHeader *seq_params = &cm->seq_params;
  /* Placeholder for actually writing to the bitstream */
  // Large-scale-tile streams disable frame id numbers.
  seq_params->frame_id_numbers_present_flag =
#if CONFIG_EXT_TILE
      cm->large_scale_tile ? 0 :
#endif // CONFIG_EXT_TILE
                           FRAME_ID_NUMBERS_PRESENT_FLAG;
  seq_params->frame_id_length_minus7 = FRAME_ID_LENGTH_MINUS7;
  seq_params->delta_frame_id_length_minus2 = DELTA_FRAME_ID_LENGTH_MINUS2;

  aom_wb_write_bit(wb, seq_params->frame_id_numbers_present_flag);
  if (seq_params->frame_id_numbers_present_flag) {
    aom_wb_write_literal(wb, seq_params->frame_id_length_minus7, 4);
    aom_wb_write_literal(wb, seq_params->delta_frame_id_length_minus2, 4);
  }
}
4390 #endif // CONFIG_REFERENCE_BUFFER
4391
// Signals the superblock size. A bit is only actually coded with
// CONFIG_EXT_PARTITION (1 = 128x128, 0 = 64x64); otherwise the size is
// fixed at 64x64 and this function merely sanity-checks the derived
// mib_size fields.
static void write_sb_size(const AV1_COMMON *cm,
                          struct aom_write_bit_buffer *wb) {
  (void)cm;
  (void)wb;
  assert(cm->mib_size == mi_size_wide[cm->sb_size]);
  assert(cm->mib_size == 1 << cm->mib_size_log2);
#if CONFIG_EXT_PARTITION
  assert(cm->sb_size == BLOCK_128X128 || cm->sb_size == BLOCK_64X64);
  aom_wb_write_bit(wb, cm->sb_size == BLOCK_128X128 ? 1 : 0);
#else
  assert(cm->sb_size == BLOCK_64X64);
#endif // CONFIG_EXT_PARTITION
}
4405
// Writes the frame-level enable flags for compound prediction tools:
// interintra compound and masked (wedge / segment) compound. In frame
// configurations where a tool cannot be used, no bit is coded and the
// corresponding flag is asserted to be zero. Note the two alternative
// if-headers under CONFIG_COMPOUND_SINGLEREF share one body/else.
static void write_compound_tools(const AV1_COMMON *cm,
                                 struct aom_write_bit_buffer *wb) {
  (void)cm;
  (void)wb;
#if CONFIG_INTERINTRA
  if (!frame_is_intra_only(cm) && cm->reference_mode != COMPOUND_REFERENCE) {
    aom_wb_write_bit(wb, cm->allow_interintra_compound);
  } else {
    assert(cm->allow_interintra_compound == 0);
  }
#endif // CONFIG_INTERINTRA
#if CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT
#if CONFIG_COMPOUND_SINGLEREF
  if (!frame_is_intra_only(cm)) {
#else // !CONFIG_COMPOUND_SINGLEREF
  if (!frame_is_intra_only(cm) && cm->reference_mode != SINGLE_REFERENCE) {
#endif // CONFIG_COMPOUND_SINGLEREF
    aom_wb_write_bit(wb, cm->allow_masked_compound);
  } else {
    assert(cm->allow_masked_compound == 0);
  }
#endif // CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT
}
4429
4430 #if CONFIG_GLOBAL_MOTION
// Writes one global motion model. The transformation type is signalled
// first; each active wmmat coefficient is then coded as a
// subexponential-refined difference from the corresponding coefficient
// of the reference model (ref_params), after shifting to the coding
// precision. The switch falls through deliberately so higher-order
// models also code all lower-order parameters.
static void write_global_motion_params(const WarpedMotionParams *params,
                                       const WarpedMotionParams *ref_params,
                                       struct aom_write_bit_buffer *wb,
                                       int allow_hp) {
  TransformationType type = params->wmtype;
  int trans_bits;
  int trans_prec_diff;

  // Type signalling: a non-IDENTITY flag, then either a literal
  // (GLOBAL_TRANS_TYPES > 4) or two bits narrowing ROTZOOM /
  // TRANSLATION / AFFINE.
  aom_wb_write_bit(wb, type != IDENTITY);
  if (type != IDENTITY) {
#if GLOBAL_TRANS_TYPES > 4
    aom_wb_write_literal(wb, type - 1, GLOBAL_TYPE_BITS);
#else
    aom_wb_write_bit(wb, type == ROTZOOM);
    if (type != ROTZOOM) aom_wb_write_bit(wb, type == TRANSLATION);
#endif // GLOBAL_TRANS_TYPES > 4
  }

  switch (type) {
    case HOMOGRAPHY:
    case HORTRAPEZOID:
    case VERTRAPEZOID:
      // Row-3 (perspective) terms; trapezoids each omit one of them.
      if (type != HORTRAPEZOID)
        aom_wb_write_signed_primitive_refsubexpfin(
            wb, GM_ROW3HOMO_MAX + 1, SUBEXPFIN_K,
            (ref_params->wmmat[6] >> GM_ROW3HOMO_PREC_DIFF),
            (params->wmmat[6] >> GM_ROW3HOMO_PREC_DIFF));
      if (type != VERTRAPEZOID)
        aom_wb_write_signed_primitive_refsubexpfin(
            wb, GM_ROW3HOMO_MAX + 1, SUBEXPFIN_K,
            (ref_params->wmmat[7] >> GM_ROW3HOMO_PREC_DIFF),
            (params->wmmat[7] >> GM_ROW3HOMO_PREC_DIFF));
      // fallthrough intended
    case AFFINE:
    case ROTZOOM:
      // Alpha (rotation/zoom) terms; the diagonal entries are coded
      // relative to 1 (i.e. minus 1 << GM_ALPHA_PREC_BITS).
      aom_wb_write_signed_primitive_refsubexpfin(
          wb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
          (ref_params->wmmat[2] >> GM_ALPHA_PREC_DIFF) -
              (1 << GM_ALPHA_PREC_BITS),
          (params->wmmat[2] >> GM_ALPHA_PREC_DIFF) - (1 << GM_ALPHA_PREC_BITS));
      if (type != VERTRAPEZOID)
        aom_wb_write_signed_primitive_refsubexpfin(
            wb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
            (ref_params->wmmat[3] >> GM_ALPHA_PREC_DIFF),
            (params->wmmat[3] >> GM_ALPHA_PREC_DIFF));
      if (type >= AFFINE) {
        if (type != HORTRAPEZOID)
          aom_wb_write_signed_primitive_refsubexpfin(
              wb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
              (ref_params->wmmat[4] >> GM_ALPHA_PREC_DIFF),
              (params->wmmat[4] >> GM_ALPHA_PREC_DIFF));
        aom_wb_write_signed_primitive_refsubexpfin(
            wb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
            (ref_params->wmmat[5] >> GM_ALPHA_PREC_DIFF) -
                (1 << GM_ALPHA_PREC_BITS),
            (params->wmmat[5] >> GM_ALPHA_PREC_DIFF) -
                (1 << GM_ALPHA_PREC_BITS));
      }
      // fallthrough intended
    case TRANSLATION:
      // Translation terms; a pure translation model uses a reduced
      // precision that also depends on allow_hp.
      trans_bits = (type == TRANSLATION) ? GM_ABS_TRANS_ONLY_BITS - !allow_hp
                                         : GM_ABS_TRANS_BITS;
      trans_prec_diff = (type == TRANSLATION)
                            ? GM_TRANS_ONLY_PREC_DIFF + !allow_hp
                            : GM_TRANS_PREC_DIFF;
      aom_wb_write_signed_primitive_refsubexpfin(
          wb, (1 << trans_bits) + 1, SUBEXPFIN_K,
          (ref_params->wmmat[0] >> trans_prec_diff),
          (params->wmmat[0] >> trans_prec_diff));
      aom_wb_write_signed_primitive_refsubexpfin(
          wb, (1 << trans_bits) + 1, SUBEXPFIN_K,
          (ref_params->wmmat[1] >> trans_prec_diff),
          (params->wmmat[1] >> trans_prec_diff));
      break;
    case IDENTITY: break;
    default: assert(0);
  }
}
4509
// Writes the global motion parameters for every inter reference frame
// (LAST_FRAME..ALTREF_FRAME). Each model is predicted from the previous
// frame's model for the same reference, except in error-resilient mode where
// the defaults are used as the prediction so decoding does not depend on the
// previous frame.
static void write_global_motion(AV1_COMP *cpi,
                                struct aom_write_bit_buffer *wb) {
  AV1_COMMON *const cm = &cpi->common;
  int frame;
  for (frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
    const WarpedMotionParams *ref_params =
        cm->error_resilient_mode ? &default_warp_params
                                 : &cm->prev_frame->global_motion[frame];
    write_global_motion_params(&cm->global_motion[frame], ref_params, wb,
                               cm->allow_high_precision_mv);
    // TODO(sarahparker, debargha): The logic in the commented out code below
    // does not work currently and causes mismatches when resize is on.
    // Fix it before turning the optimization back on.
    /*
    YV12_BUFFER_CONFIG *ref_buf = get_ref_frame_buffer(cpi, frame);
    if (cpi->source->y_crop_width == ref_buf->y_crop_width &&
        cpi->source->y_crop_height == ref_buf->y_crop_height) {
      write_global_motion_params(&cm->global_motion[frame],
                                 &cm->prev_frame->global_motion[frame], wb,
                                 cm->allow_high_precision_mv);
    } else {
      assert(cm->global_motion[frame].wmtype == IDENTITY &&
             "Invalid warp type for frames of different resolutions");
    }
    */
    /*
    printf("Frame %d/%d: Enc Ref %d: %d %d %d %d\n",
           cm->current_video_frame, cm->show_frame, frame,
           cm->global_motion[frame].wmmat[0],
           cm->global_motion[frame].wmmat[1], cm->global_motion[frame].wmmat[2],
           cm->global_motion[frame].wmmat[3]);
    */
  }
}
4544 #endif
4545
4546 #if !CONFIG_OBU
// Writes the complete uncompressed frame header for the non-OBU bitstream
// layout: frame marker, profile, frame type/flags, frame id, size, reference
// configuration, MV precision, interp filter, loopfilter/quantizer/
// segmentation/delta-q parameters, tool flags, and tile info.
// Bit order here must exactly match the decoder's header parsing.
static void write_uncompressed_header_frame(AV1_COMP *cpi,
                                            struct aom_write_bit_buffer *wb) {
  AV1_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;

  aom_wb_write_literal(wb, AOM_FRAME_MARKER, 2);

  write_profile(cm->profile, wb);

#if CONFIG_EXT_TILE
  aom_wb_write_literal(wb, cm->large_scale_tile, 1);
#endif  // CONFIG_EXT_TILE

#if CONFIG_EXT_REFS
  // NOTE: By default all coded frames to be used as a reference
  cm->is_reference_frame = 1;

  // show_existing_frame short-circuits the rest of the header: only the
  // buffer index (and optionally the display frame id) is coded.
  if (cm->show_existing_frame) {
    RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
    const int frame_to_show = cm->ref_frame_map[cpi->existing_fb_idx_to_show];

    if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
      aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
                         "Buffer %d does not contain a reconstructed frame",
                         frame_to_show);
    }
    ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);

    aom_wb_write_bit(wb, 1);  // show_existing_frame
    aom_wb_write_literal(wb, cpi->existing_fb_idx_to_show, 3);

#if CONFIG_REFERENCE_BUFFER
    if (cm->seq_params.frame_id_numbers_present_flag) {
      int frame_id_len = cm->seq_params.frame_id_length_minus7 + 7;
      int display_frame_id = cm->ref_frame_id[cpi->existing_fb_idx_to_show];
      aom_wb_write_literal(wb, display_frame_id, frame_id_len);
      /* Add a zero byte to prevent emulation of superframe marker */
      /* Same logic as when terminating the entropy coder */
      /* Consider to have this logic only one place */
      aom_wb_write_literal(wb, 0, 8);
    }
#endif  // CONFIG_REFERENCE_BUFFER

    return;
  } else {
#endif  // CONFIG_EXT_REFS
    aom_wb_write_bit(wb, 0);  // show_existing_frame
#if CONFIG_EXT_REFS
  }
#endif  // CONFIG_EXT_REFS

  aom_wb_write_bit(wb, cm->frame_type);
  aom_wb_write_bit(wb, cm->show_frame);
  // intra_only is only signaled for non-key, non-shown frames; shown inter
  // frames are never intra-only.
  if (cm->frame_type != KEY_FRAME)
    if (!cm->show_frame) aom_wb_write_bit(wb, cm->intra_only);
  aom_wb_write_bit(wb, cm->error_resilient_mode);

  if (frame_is_intra_only(cm)) {
#if CONFIG_REFERENCE_BUFFER
    write_sequence_header(cm, wb);
#endif  // CONFIG_REFERENCE_BUFFER
  }
#if CONFIG_REFERENCE_BUFFER
  cm->invalid_delta_frame_id_minus1 = 0;
  if (cm->seq_params.frame_id_numbers_present_flag) {
    int frame_id_len = cm->seq_params.frame_id_length_minus7 + 7;
    aom_wb_write_literal(wb, cm->current_frame_id, frame_id_len);
  }
#endif  // CONFIG_REFERENCE_BUFFER
  if (cm->frame_type == KEY_FRAME) {
    // Key frame: colorspace, size and superblock size are always coded.
    write_bitdepth_colorspace_sampling(cm, wb);
    write_frame_size(cm, wb);
    write_sb_size(cm, wb);

#if CONFIG_ANS && ANS_MAX_SYMBOLS
    assert(cpi->common.ans_window_size_log2 >= 8);
    assert(cpi->common.ans_window_size_log2 < 24);
    aom_wb_write_literal(wb, cpi->common.ans_window_size_log2 - 8, 4);
#endif  // CONFIG_ANS && ANS_MAX_SYMBOLS
    aom_wb_write_bit(wb, cm->allow_screen_content_tools);
#if CONFIG_AMVR
    // seq_mv_precision_level: 2 is coded as a single 1-bit; otherwise a 0-bit
    // followed by a flag distinguishing level 0 from level 1.
    if (cm->allow_screen_content_tools) {
      if (cm->seq_mv_precision_level == 2) {
        aom_wb_write_bit(wb, 1);
      } else {
        aom_wb_write_bit(wb, 0);
        aom_wb_write_bit(wb, cm->seq_mv_precision_level == 0);
      }
    }
#endif
  } else {
    if (cm->intra_only) aom_wb_write_bit(wb, cm->allow_screen_content_tools);
#if !CONFIG_NO_FRAME_CONTEXT_SIGNALING
    if (!cm->error_resilient_mode) {
      if (cm->intra_only) {
        aom_wb_write_bit(wb,
                         cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
      } else {
        aom_wb_write_bit(wb,
                         cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE);
        if (cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE)
          aom_wb_write_bit(wb,
                          cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
      }
    }
#endif
#if CONFIG_EXT_REFS
    cpi->refresh_frame_mask = get_refresh_mask(cpi);
#endif  // CONFIG_EXT_REFS

    if (cm->intra_only) {
      // Intra-only frame: colorspace plus refresh mask and size, but no
      // reference frame list.
      write_bitdepth_colorspace_sampling(cm, wb);

#if CONFIG_EXT_REFS
      aom_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
#else
      aom_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
#endif  // CONFIG_EXT_REFS
      write_frame_size(cm, wb);

#if CONFIG_ANS && ANS_MAX_SYMBOLS
      assert(cpi->common.ans_window_size_log2 >= 8);
      assert(cpi->common.ans_window_size_log2 < 24);
      aom_wb_write_literal(wb, cpi->common.ans_window_size_log2 - 8, 4);
#endif  // CONFIG_ANS && ANS_MAX_SYMBOLS
    } else {
      MV_REFERENCE_FRAME ref_frame;

#if CONFIG_EXT_REFS
      aom_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
#else
      aom_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
#endif  // CONFIG_EXT_REFS

#if CONFIG_EXT_REFS
      if (!cpi->refresh_frame_mask) {
        // NOTE: "cpi->refresh_frame_mask == 0" indicates that the coded frame
        // will not be used as a reference
        cm->is_reference_frame = 0;
      }
#endif  // CONFIG_EXT_REFS

      // Code the buffer index (and optionally sign bias / delta frame id)
      // for each active reference frame.
      for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
        assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
        aom_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
                             REF_FRAMES_LOG2);
#if !CONFIG_FRAME_SIGN_BIAS
        aom_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
#endif  // !CONFIG_FRAME_SIGN_BIAS
#if CONFIG_REFERENCE_BUFFER
        if (cm->seq_params.frame_id_numbers_present_flag) {
          int i = get_ref_frame_map_idx(cpi, ref_frame);
          int frame_id_len = cm->seq_params.frame_id_length_minus7 + 7;
          int diff_len = cm->seq_params.delta_frame_id_length_minus2 + 2;
          // Wrap-around (modular) difference between current and reference
          // frame ids, coded minus one.
          int delta_frame_id_minus1 =
              ((cm->current_frame_id - cm->ref_frame_id[i] +
                (1 << frame_id_len)) %
               (1 << frame_id_len)) -
              1;
          if (delta_frame_id_minus1 < 0 ||
              delta_frame_id_minus1 >= (1 << diff_len))
            cm->invalid_delta_frame_id_minus1 = 1;
          aom_wb_write_literal(wb, delta_frame_id_minus1, diff_len);
        }
#endif  // CONFIG_REFERENCE_BUFFER
      }

#if CONFIG_FRAME_SIGN_BIAS
#define FRAME_SIGN_BIAS_DEBUG 0
#if FRAME_SIGN_BIAS_DEBUG
      {
        printf("\n\nENCODER: Frame=%d, show_frame=%d:", cm->current_video_frame,
               cm->show_frame);
        for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
          printf(" sign_bias[%d]=%d", ref_frame,
                 cm->ref_frame_sign_bias[ref_frame]);
        }
        printf("\n");
      }
#endif  // FRAME_SIGN_BIAS_DEBUG
#undef FRAME_SIGN_BIAS_DEBUG
#endif  // CONFIG_FRAME_SIGN_BIAS

#if CONFIG_FRAME_SIZE
      if (cm->error_resilient_mode == 0) {
        write_frame_size_with_refs(cpi, wb);
      } else {
        write_frame_size(cm, wb);
      }
#else
      write_frame_size_with_refs(cpi, wb);
#endif

#if CONFIG_AMVR
      if (cm->seq_mv_precision_level == 2) {
        aom_wb_write_bit(wb, cm->cur_frame_mv_precision_level == 0);
      }
#endif
      aom_wb_write_bit(wb, cm->allow_high_precision_mv);

      fix_interp_filter(cm, cpi->td.counts);
      write_frame_interp_filter(cm->interp_filter, wb);
#if CONFIG_TEMPMV_SIGNALING
      if (frame_might_use_prev_frame_mvs(cm)) {
        aom_wb_write_bit(wb, cm->use_prev_frame_mvs);
      }
#endif
    }
  }

#if CONFIG_FRAME_MARKER
  // For non-shown frames, code the (clamped) ARF source offset so the
  // decoder can order frames.
  if (cm->show_frame == 0) {
    int arf_offset = AOMMIN(
        (MAX_GF_INTERVAL - 1),
        cpi->twopass.gf_group.arf_src_offset[cpi->twopass.gf_group.index]);
#if CONFIG_EXT_REFS
    int brf_offset =
        cpi->twopass.gf_group.brf_src_offset[cpi->twopass.gf_group.index];

    arf_offset = AOMMIN((MAX_GF_INTERVAL - 1), arf_offset + brf_offset);
#endif
    aom_wb_write_literal(wb, arf_offset, 4);
  }
#endif

#if CONFIG_REFERENCE_BUFFER
  if (cm->seq_params.frame_id_numbers_present_flag) {
    cm->refresh_mask =
        cm->frame_type == KEY_FRAME ? 0xFF : get_refresh_mask(cpi);
  }
#endif  // CONFIG_REFERENCE_BUFFER

  if (!cm->error_resilient_mode) {
    aom_wb_write_bit(
        wb, cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD);
  }
#if !CONFIG_NO_FRAME_CONTEXT_SIGNALING
  aom_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
#endif
  encode_loopfilter(cm, wb);
  encode_quantization(cm, wb);
  encode_segmentation(cm, xd, wb);
  {
    // Delta-q may only be enabled when no segment-level quantizer is active
    // and the base quantizer is non-lossless.
    int i;
    struct segmentation *const seg = &cm->seg;
    int segment_quantizer_active = 0;
    for (i = 0; i < MAX_SEGMENTS; i++) {
      if (segfeature_active(seg, i, SEG_LVL_ALT_Q)) {
        segment_quantizer_active = 1;
      }
    }

    if (cm->delta_q_present_flag)
      assert(segment_quantizer_active == 0 && cm->base_qindex > 0);
    if (segment_quantizer_active == 0 && cm->base_qindex > 0) {
      aom_wb_write_bit(wb, cm->delta_q_present_flag);
      if (cm->delta_q_present_flag) {
        aom_wb_write_literal(wb, OD_ILOG_NZ(cm->delta_q_res) - 1, 2);
        xd->prev_qindex = cm->base_qindex;
#if CONFIG_EXT_DELTA_Q
        assert(seg->abs_delta == SEGMENT_DELTADATA);
        aom_wb_write_bit(wb, cm->delta_lf_present_flag);
        if (cm->delta_lf_present_flag) {
          aom_wb_write_literal(wb, OD_ILOG_NZ(cm->delta_lf_res) - 1, 2);
          xd->prev_delta_lf_from_base = 0;
#if CONFIG_LOOPFILTER_LEVEL
          aom_wb_write_bit(wb, cm->delta_lf_multi);
          for (int lf_id = 0; lf_id < FRAME_LF_COUNT; ++lf_id)
            xd->prev_delta_lf[lf_id] = 0;
#endif  // CONFIG_LOOPFILTER_LEVEL
        }
#endif  // CONFIG_EXT_DELTA_Q
      }
    }
  }
#if CONFIG_CDEF
  if (!cm->all_lossless) {
    encode_cdef(cm, wb);
  }
#endif
#if CONFIG_LOOP_RESTORATION
  encode_restoration_mode(cm, wb);
#endif  // CONFIG_LOOP_RESTORATION
  write_tx_mode(cm, &cm->tx_mode, wb);

  if (cpi->allow_comp_inter_inter) {
    const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
#if !CONFIG_REF_ADAPT
    const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
#endif  // !CONFIG_REF_ADAPT

    aom_wb_write_bit(wb, use_hybrid_pred);
#if !CONFIG_REF_ADAPT
    if (!use_hybrid_pred) aom_wb_write_bit(wb, use_compound_pred);
#endif  // !CONFIG_REF_ADAPT
  }
  write_compound_tools(cm, wb);

#if CONFIG_EXT_TX
  aom_wb_write_bit(wb, cm->reduced_tx_set_used);
#endif  // CONFIG_EXT_TX

#if CONFIG_ADAPT_SCAN
  aom_wb_write_bit(wb, cm->use_adapt_scan);
#endif

#if CONFIG_GLOBAL_MOTION
  if (!frame_is_intra_only(cm)) write_global_motion(cpi, wb);
#endif  // CONFIG_GLOBAL_MOTION

  write_tile_info(cm, wb);
}
4859
4860 #else
4861 // New function based on HLS R18
4862 static void write_uncompressed_header_obu(AV1_COMP *cpi,
4863 struct aom_write_bit_buffer *wb) {
4864 AV1_COMMON *const cm = &cpi->common;
4865 MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
4866
4867 #if CONFIG_EXT_TILE
4868 aom_wb_write_literal(wb, cm->large_scale_tile, 1);
4869 #endif // CONFIG_EXT_TILE
4870
4871 #if CONFIG_EXT_REFS
4872 // NOTE: By default all coded frames to be used as a reference
4873 cm->is_reference_frame = 1;
4874
4875 if (cm->show_existing_frame) {
4876 RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
4877 const int frame_to_show = cm->ref_frame_map[cpi->existing_fb_idx_to_show];
4878
4879 if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
4880 aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
4881 "Buffer %d does not contain a reconstructed frame",
4882 frame_to_show);
4883 }
4884 ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
4885
4886 aom_wb_write_bit(wb, 1); // show_existing_frame
4887 aom_wb_write_literal(wb, cpi->existing_fb_idx_to_show, 3);
4888
4889 #if CONFIG_REFERENCE_BUFFER
4890 if (cm->seq_params.frame_id_numbers_present_flag) {
4891 int frame_id_len = cm->seq_params.frame_id_length_minus7 + 7;
4892 int display_frame_id = cm->ref_frame_id[cpi->existing_fb_idx_to_show];
4893 aom_wb_write_literal(wb, display_frame_id, frame_id_len);
4894 /* Add a zero byte to prevent emulation of superframe marker */
4895 /* Same logic as when when terminating the entropy coder */
4896 /* Consider to have this logic only one place */
4897 aom_wb_write_literal(wb, 0, 8);
4898 }
4899 #endif // CONFIG_REFERENCE_BUFFER
4900
4901 return;
4902 } else {
4903 #endif // CONFIG_EXT_REFS
4904 aom_wb_write_bit(wb, 0); // show_existing_frame
4905 #if CONFIG_EXT_REFS
4906 }
4907 #endif // CONFIG_EXT_REFS
4908
4909 cm->frame_type = cm->intra_only ? INTRA_ONLY_FRAME : cm->frame_type;
4910 aom_wb_write_literal(wb, cm->frame_type, 2);
4911
4912 if (cm->intra_only) cm->frame_type = INTRA_ONLY_FRAME;
4913
4914 aom_wb_write_bit(wb, cm->show_frame);
4915 aom_wb_write_bit(wb, cm->error_resilient_mode);
4916
4917 #if CONFIG_REFERENCE_BUFFER
4918 cm->invalid_delta_frame_id_minus1 = 0;
4919 if (cm->seq_params.frame_id_numbers_present_flag) {
4920 int frame_id_len = cm->seq_params.frame_id_length_minus7 + 7;
4921 aom_wb_write_literal(wb, cm->current_frame_id, frame_id_len);
4922 }
4923 #endif // CONFIG_REFERENCE_BUFFER
4924 if (cm->frame_type == KEY_FRAME) {
4925 write_frame_size(cm, wb);
4926 write_sb_size(cm, wb);
4927
4928 #if CONFIG_ANS && ANS_MAX_SYMBOLS
4929 assert(cpi->common.ans_window_size_log2 >= 8);
4930 assert(cpi->common.ans_window_size_log2 < 24);
4931 aom_wb_write_literal(wb, cpi->common.ans_window_size_log2 - 8, 4);
4932 #endif // CONFIG_ANS && ANS_MAX_SYMBOLS
4933 aom_wb_write_bit(wb, cm->allow_screen_content_tools);
4934 #if CONFIG_AMVR
4935 if (cm->allow_screen_content_tools) {
4936 if (cm->seq_mv_precision_level == 2) {
4937 aom_wb_write_bit(wb, 1);
4938 } else {
4939 aom_wb_write_bit(wb, 0);
4940 aom_wb_write_bit(wb, cm->seq_mv_precision_level == 0);
4941 }
4942 }
4943 #endif
4944 } else if (cm->frame_type == INTRA_ONLY_FRAME) {
4945 if (cm->intra_only) aom_wb_write_bit(wb, cm->allow_screen_content_tools);
4946 #if !CONFIG_NO_FRAME_CONTEXT_SIGNALING
4947 if (!cm->error_resilient_mode) {
4948 if (cm->intra_only) {
4949 aom_wb_write_bit(wb,
4950 cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
4951 }
4952 }
4953 #endif
4954 #if CONFIG_EXT_REFS
4955 cpi->refresh_frame_mask = get_refresh_mask(cpi);
4956 #endif // CONFIG_EXT_REFS
4957
4958 if (cm->intra_only) {
4959 #if CONFIG_EXT_REFS
4960 aom_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
4961 #else
4962 aom_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
4963 #endif // CONFIG_EXT_REFS
4964 write_frame_size(cm, wb);
4965
4966 #if CONFIG_ANS && ANS_MAX_SYMBOLS
4967 assert(cpi->common.ans_window_size_log2 >= 8);
4968 assert(cpi->common.ans_window_size_log2 < 24);
4969 aom_wb_write_literal(wb, cpi->common.ans_window_size_log2 - 8, 4);
4970 #endif // CONFIG_ANS && ANS_MAX_SYMBOLS
4971 }
4972 } else if (cm->frame_type == INTER_FRAME) {
4973 MV_REFERENCE_FRAME ref_frame;
4974 #if !CONFIG_NO_FRAME_CONTEXT_SIGNALING
4975 if (!cm->error_resilient_mode) {
4976 aom_wb_write_bit(wb, cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE);
4977 if (cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE)
4978 aom_wb_write_bit(wb,
4979 cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
4980 }
4981 #endif
4982
4983 #if CONFIG_EXT_REFS
4984 cpi->refresh_frame_mask = get_refresh_mask(cpi);
4985 aom_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
4986 #else
4987 aom_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
4988 #endif // CONFIG_EXT_REFS
4989
4990 #if CONFIG_EXT_REFS
4991 if (!cpi->refresh_frame_mask) {
4992 // NOTE: "cpi->refresh_frame_mask == 0" indicates that the coded frame
4993 // will not be used as a reference
4994 cm->is_reference_frame = 0;
4995 }
4996 #endif // CONFIG_EXT_REFS
4997
4998 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
4999 assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
5000 aom_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
5001 REF_FRAMES_LOG2);
5002 #if !CONFIG_FRAME_SIGN_BIAS
5003 aom_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
5004 #endif // !CONFIG_FRAME_SIGN_BIAS
5005 #if CONFIG_REFERENCE_BUFFER
5006 if (cm->seq_params.frame_id_numbers_present_flag) {
5007 int i = get_ref_frame_map_idx(cpi, ref_frame);
5008 int frame_id_len = cm->seq_params.frame_id_length_minus7 + 7;
5009 int diff_len = cm->seq_params.delta_frame_id_length_minus2 + 2;
5010 int delta_frame_id_minus1 =
5011 ((cm->current_frame_id - cm->ref_frame_id[i] +
5012 (1 << frame_id_len)) %
5013 (1 << frame_id_len)) -
5014 1;
5015 if (delta_frame_id_minus1 < 0 ||
5016 delta_frame_id_minus1 >= (1 << diff_len))
5017 cm->invalid_delta_frame_id_minus1 = 1;
5018 aom_wb_write_literal(wb, delta_frame_id_minus1, diff_len);
5019 }
5020 #endif // CONFIG_REFERENCE_BUFFER
5021 }
5022
5023 #if CONFIG_FRAME_SIZE
5024 if (cm->error_resilient_mode == 0) {
5025 write_frame_size_with_refs(cpi, wb);
5026 } else {
5027 write_frame_size(cm, wb);
5028 }
5029 #else
5030 write_frame_size_with_refs(cpi, wb);
5031 #endif
5032
5033 #if CONFIG_AMVR
5034 if (cm->seq_mv_precision_level == 2) {
5035 aom_wb_write_bit(wb, cm->cur_frame_mv_precision_level == 0);
5036 }
5037 #endif
5038 aom_wb_write_bit(wb, cm->allow_high_precision_mv);
5039
5040 fix_interp_filter(cm, cpi->td.counts);
5041 write_frame_interp_filter(cm->interp_filter, wb);
5042 #if CONFIG_TEMPMV_SIGNALING
5043 if (frame_might_use_prev_frame_mvs(cm)) {
5044 aom_wb_write_bit(wb, cm->use_prev_frame_mvs);
5045 }
5046 #endif
5047 } else if (cm->frame_type == S_FRAME) {
5048 MV_REFERENCE_FRAME ref_frame;
5049
5050 #if !CONFIG_NO_FRAME_CONTEXT_SIGNALING
5051 if (!cm->error_resilient_mode) {
5052 aom_wb_write_bit(wb, cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE);
5053 if (cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE)
5054 aom_wb_write_bit(wb,
5055 cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
5056 }
5057 #endif
5058
5059 #if CONFIG_EXT_REFS
5060 if (!cpi->refresh_frame_mask) {
5061 // NOTE: "cpi->refresh_frame_mask == 0" indicates that the coded frame
5062 // will not be used as a reference
5063 cm->is_reference_frame = 0;
5064 }
5065 #endif // CONFIG_EXT_REFS
5066
5067 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
5068 assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
5069 aom_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
5070 REF_FRAMES_LOG2);
5071 assert(cm->ref_frame_sign_bias[ref_frame] == 0);
5072 #if CONFIG_REFERENCE_BUFFER
5073 if (cm->seq_params.frame_id_numbers_present_flag) {
5074 int i = get_ref_frame_map_idx(cpi, ref_frame);
5075 int frame_id_len = cm->seq_params.frame_id_length_minus7 + 7;
5076 int diff_len = cm->seq_params.delta_frame_id_length_minus2 + 2;
5077 int delta_frame_id_minus1 =
5078 ((cm->current_frame_id - cm->ref_frame_id[i] +
5079 (1 << frame_id_len)) %
5080 (1 << frame_id_len)) -
5081 1;
5082 if (delta_frame_id_minus1 < 0 ||
5083 delta_frame_id_minus1 >= (1 << diff_len))
5084 cm->invalid_delta_frame_id_minus1 = 1;
5085 aom_wb_write_literal(wb, delta_frame_id_minus1, diff_len);
5086 }
5087 #endif // CONFIG_REFERENCE_BUFFER
5088 }
5089
5090 #if CONFIG_FRAME_SIZE
5091 if (cm->error_resilient_mode == 0) {
5092 write_frame_size_with_refs(cpi, wb);
5093 } else {
5094 write_frame_size(cm, wb);
5095 }
5096 #else
5097 write_frame_size_with_refs(cpi, wb);
5098 #endif
5099
5100 aom_wb_write_bit(wb, cm->allow_high_precision_mv);
5101
5102 fix_interp_filter(cm, cpi->td.counts);
5103 write_frame_interp_filter(cm->interp_filter, wb);
5104 #if CONFIG_TEMPMV_SIGNALING
5105 if (frame_might_use_prev_frame_mvs(cm)) {
5106 aom_wb_write_bit(wb, cm->use_prev_frame_mvs);
5107 }
5108 #endif
5109 }
5110
5111 #if CONFIG_MFMV
5112 if (cm->show_frame == 0) {
5113 int arf_offset = AOMMIN(
5114 (MAX_GF_INTERVAL - 1),
5115 cpi->twopass.gf_group.arf_src_offset[cpi->twopass.gf_group.index]);
5116 #if CONFIG_EXT_REFS
5117 int brf_offset =
5118 cpi->twopass.gf_group.brf_src_offset[cpi->twopass.gf_group.index];
5119
5120 arf_offset = AOMMIN((MAX_GF_INTERVAL - 1), arf_offset + brf_offset);
5121 #endif
5122 aom_wb_write_literal(wb, arf_offset, 4);
5123 }
5124 #endif
5125
5126 #if CONFIG_REFERENCE_BUFFER
5127 if (cm->seq_params.frame_id_numbers_present_flag) {
5128 cm->refresh_mask =
5129 cm->frame_type == KEY_FRAME ? 0xFF : get_refresh_mask(cpi);
5130 }
5131 #endif // CONFIG_REFERENCE_BUFFER
5132
5133 if (!cm->error_resilient_mode) {
5134 aom_wb_write_bit(
5135 wb, cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD);
5136 }
5137 #if !CONFIG_NO_FRAME_CONTEXT_SIGNALING
5138 aom_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
5139 #endif
5140 encode_loopfilter(cm, wb);
5141 encode_quantization(cm, wb);
5142 encode_segmentation(cm, xd, wb);
5143 {
5144 int i;
5145 struct segmentation *const seg = &cm->seg;
5146 int segment_quantizer_active = 0;
5147 for (i = 0; i < MAX_SEGMENTS; i++) {
5148 if (segfeature_active(seg, i, SEG_LVL_ALT_Q)) {
5149 segment_quantizer_active = 1;
5150 }
5151 }
5152
5153 if (cm->delta_q_present_flag)
5154 assert(segment_quantizer_active == 0 && cm->base_qindex > 0);
5155 if (segment_quantizer_active == 0 && cm->base_qindex > 0) {
5156 aom_wb_write_bit(wb, cm->delta_q_present_flag);
5157 if (cm->delta_q_present_flag) {
5158 aom_wb_write_literal(wb, OD_ILOG_NZ(cm->delta_q_res) - 1, 2);
5159 xd->prev_qindex = cm->base_qindex;
5160 #if CONFIG_EXT_DELTA_Q
5161 assert(seg->abs_delta == SEGMENT_DELTADATA);
5162 aom_wb_write_bit(wb, cm->delta_lf_present_flag);
5163 if (cm->delta_lf_present_flag) {
5164 aom_wb_write_literal(wb, OD_ILOG_NZ(cm->delta_lf_res) - 1, 2);
5165 #if CONFIG_LOOPFILTER_LEVEL
5166 for (int lf_id = 0; lf_id < FRAME_LF_COUNT; ++lf_id)
5167 xd->prev_delta_lf[lf_id] = 0;
5168 #endif // CONFIG_LOOPFILTER_LEVEL
5169 xd->prev_delta_lf_from_base = 0;
5170 }
5171 #endif // CONFIG_EXT_DELTA_Q
5172 }
5173 }
5174 }
5175 #if CONFIG_CDEF
5176 if (!cm->all_lossless) {
5177 encode_cdef(cm, wb);
5178 }
5179 #endif
5180 #if CONFIG_LOOP_RESTORATION
5181 encode_restoration_mode(cm, wb);
5182 #endif // CONFIG_LOOP_RESTORATION
5183 write_tx_mode(cm, &cm->tx_mode, wb);
5184
5185 if (cpi->allow_comp_inter_inter) {
5186 const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
5187 #if !CONFIG_REF_ADAPT
5188 const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
5189 #endif // !CONFIG_REF_ADAPT
5190
5191 aom_wb_write_bit(wb, use_hybrid_pred);
5192 #if !CONFIG_REF_ADAPT
5193 if (!use_hybrid_pred) aom_wb_write_bit(wb, use_compound_pred);
5194 #endif // !CONFIG_REF_ADAPT
5195 }
5196 write_compound_tools(cm, wb);
5197
5198 #if CONFIG_EXT_TX
5199 aom_wb_write_bit(wb, cm->reduced_tx_set_used);
5200 #endif // CONFIG_EXT_TX
5201
5202 #if CONFIG_GLOBAL_MOTION
5203 if (!frame_is_intra_only(cm)) write_global_motion(cpi, wb);
5204 #endif // CONFIG_GLOBAL_MOTION
5205
5206 write_tile_info(cm, wb);
5207 }
5208 #endif // CONFIG_OBU
5209
// Writes the compressed header: the entropy-coded probability updates for
// the frame (tx size, skip, inter mode, inter-intra/wedge, reference frame,
// MV and supertx probabilities, depending on configuration). Returns the
// number of bytes written, which must fit in 16 bits.
static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
  AV1_COMMON *const cm = &cpi->common;
#if CONFIG_SUPERTX
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
#endif  // CONFIG_SUPERTX
  FRAME_CONTEXT *const fc = cm->fc;
  aom_writer *header_bc;
  int i;
#if !CONFIG_NEW_MULTISYMBOL
  FRAME_COUNTS *counts = cpi->td.counts;
  int j;
#endif

  const int probwt = cm->num_tg;
  (void)probwt;
  (void)i;
  (void)fc;

  aom_writer real_header_bc;
  header_bc = &real_header_bc;
#if CONFIG_ANS
  header_bc->size = 1 << cpi->common.ans_window_size_log2;
#endif
  aom_start_encode(header_bc, data);

#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
  if (cm->tx_mode == TX_MODE_SELECT)
    av1_cond_prob_diff_update(header_bc, &cm->fc->quarter_tx_size_prob,
                              cm->counts.quarter_tx_size, probwt);
#endif
#if CONFIG_LV_MAP
  av1_write_txb_probs(cpi, header_bc);
#endif  // CONFIG_LV_MAP

#if CONFIG_VAR_TX && !CONFIG_NEW_MULTISYMBOL
  if (cm->tx_mode == TX_MODE_SELECT)
    update_txfm_partition_probs(cm, header_bc, counts, probwt);
#endif

#if !CONFIG_NEW_MULTISYMBOL
  update_skip_probs(cm, header_bc, counts);
#endif

  // Inter-frame-only probability updates.
  if (!frame_is_intra_only(cm)) {
#if !CONFIG_NEW_MULTISYMBOL
    update_inter_mode_probs(cm, header_bc, counts);
#endif
#if CONFIG_INTERINTRA
    if (cm->reference_mode != COMPOUND_REFERENCE &&
        cm->allow_interintra_compound) {
#if !CONFIG_NEW_MULTISYMBOL
      for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
        if (is_interintra_allowed_bsize_group(i)) {
          av1_cond_prob_diff_update(header_bc, &fc->interintra_prob[i],
                                    cm->counts.interintra[i], probwt);
        }
      }
#endif
#if CONFIG_WEDGE && !CONFIG_NEW_MULTISYMBOL
#if CONFIG_EXT_PARTITION_TYPES
      int block_sizes_to_update = BLOCK_SIZES_ALL;
#else
      int block_sizes_to_update = BLOCK_SIZES;
#endif
      for (i = 0; i < block_sizes_to_update; i++) {
        if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i))
          av1_cond_prob_diff_update(header_bc, &fc->wedge_interintra_prob[i],
                                    cm->counts.wedge_interintra[i], probwt);
      }
#endif  // CONFIG_WEDGE && !CONFIG_NEW_MULTISYMBOL
    }
#endif  // CONFIG_INTERINTRA

#if !CONFIG_NEW_MULTISYMBOL
    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      av1_cond_prob_diff_update(header_bc, &fc->intra_inter_prob[i],
                                counts->intra_inter[i], probwt);
#endif

#if !CONFIG_NEW_MULTISYMBOL
    // Reference frame probability updates: compound-vs-single selection,
    // then single-reference, then compound-reference probabilities.
    if (cpi->allow_comp_inter_inter) {
      const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
      if (use_hybrid_pred)
        for (i = 0; i < COMP_INTER_CONTEXTS; i++)
          av1_cond_prob_diff_update(header_bc, &fc->comp_inter_prob[i],
                                    counts->comp_inter[i], probwt);
    }

    if (cm->reference_mode != COMPOUND_REFERENCE) {
      for (i = 0; i < REF_CONTEXTS; i++) {
        for (j = 0; j < (SINGLE_REFS - 1); j++) {
          av1_cond_prob_diff_update(header_bc, &fc->single_ref_prob[i][j],
                                    counts->single_ref[i][j], probwt);
        }
      }
    }

    if (cm->reference_mode != SINGLE_REFERENCE) {
#if CONFIG_EXT_COMP_REFS
      for (i = 0; i < COMP_REF_TYPE_CONTEXTS; i++)
        av1_cond_prob_diff_update(header_bc, &fc->comp_ref_type_prob[i],
                                  counts->comp_ref_type[i], probwt);

      for (i = 0; i < UNI_COMP_REF_CONTEXTS; i++)
        for (j = 0; j < (UNIDIR_COMP_REFS - 1); j++)
          av1_cond_prob_diff_update(header_bc, &fc->uni_comp_ref_prob[i][j],
                                    counts->uni_comp_ref[i][j], probwt);
#endif  // CONFIG_EXT_COMP_REFS

      for (i = 0; i < REF_CONTEXTS; i++) {
#if CONFIG_EXT_REFS
        for (j = 0; j < (FWD_REFS - 1); j++) {
          av1_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
                                    counts->comp_ref[i][j], probwt);
        }
        for (j = 0; j < (BWD_REFS - 1); j++) {
          av1_cond_prob_diff_update(header_bc, &fc->comp_bwdref_prob[i][j],
                                    counts->comp_bwdref[i][j], probwt);
        }
#else
        for (j = 0; j < (COMP_REFS - 1); j++) {
          av1_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
                                    counts->comp_ref[i][j], probwt);
        }
#endif  // CONFIG_EXT_REFS
      }
    }
#endif  // CONFIG_NEW_MULTISYMBOL

#if CONFIG_COMPOUND_SINGLEREF
    for (i = 0; i < COMP_INTER_MODE_CONTEXTS; i++)
      av1_cond_prob_diff_update(header_bc, &fc->comp_inter_mode_prob[i],
                                counts->comp_inter_mode[i], probwt);
#endif  // CONFIG_COMPOUND_SINGLEREF

#if !CONFIG_NEW_MULTISYMBOL
    av1_write_nmv_probs(cm, cm->allow_high_precision_mv, header_bc, counts->mv);
#endif
#if CONFIG_SUPERTX
    if (!xd->lossless[0]) update_supertx_probs(cm, probwt, header_bc);
#endif  // CONFIG_SUPERTX
  }
  aom_stop_encode(header_bc);
  // Callers store the compressed header size in a 16-bit field.
  assert(header_bc->pos <= 0xffff);
  return header_bc->pos;
}
5356
5357 #if !CONFIG_OBU || CONFIG_EXT_TILE
// Returns the minimum number of bytes (1-4) needed to represent 'size' after
// reserving 'spare_msbs' bits at the top of the field (i.e. after shifting
// 'size' left by 'spare_msbs'), or -1 if the shifted value would not fit in
// 32 bits.
static int choose_size_bytes(uint32_t size, int spare_msbs) {
  // Reject values whose reserved top bits are already occupied. (The guard
  // on spare_msbs also avoids an out-of-range shift when no bits are spare.)
  if (spare_msbs > 0 && (size >> (32 - spare_msbs)) != 0) return -1;

  // Normalise to the full 32-bit field, then test byte by byte from the top.
  const uint32_t shifted = size << spare_msbs;
  if (shifted & 0xff000000u) return 4;
  if (shifted & 0x00ff0000u) return 3;
  if (shifted & 0x0000ff00u) return 2;
  return 1;
}
5377
// Stores 'val' at 'dst' as a little-endian integer occupying exactly 'sz'
// bytes; 'sz' must be in [1, 4].
static void mem_put_varsize(uint8_t *const dst, const int sz, const int val) {
  if (sz == 1) {
    dst[0] = (uint8_t)(val & 0xff);
  } else if (sz == 2) {
    mem_put_le16(dst, val);
  } else if (sz == 3) {
    mem_put_le24(dst, val);
  } else if (sz == 4) {
    mem_put_le32(dst, val);
  } else {
    assert(0 && "Invalid size");
  }
}
5387
// Compact the tile data at 'dst' in place: the encoder always emits 4-byte
// tile (and tile column) size fields, and this pass shrinks them to the
// minimum width needed for the largest sizes observed.  Returns the new total
// data size and reports the chosen widths through 'tile_size_bytes' and
// 'tile_col_size_bytes'.
static int remux_tiles(const AV1_COMMON *const cm, uint8_t *dst,
                       const uint32_t data_size, const uint32_t max_tile_size,
                       const uint32_t max_tile_col_size,
                       int *const tile_size_bytes,
                       int *const tile_col_size_bytes) {
  // Choose the tile size bytes (tsb) and tile column size bytes (tcsb)
  int tsb;
  int tcsb;

#if CONFIG_EXT_TILE
  if (cm->large_scale_tile) {
    // The top bit in the tile size field indicates tile copy mode, so we
    // have 1 less bit to code the tile size
    tsb = choose_size_bytes(max_tile_size, 1);
    tcsb = choose_size_bytes(max_tile_col_size, 0);
  } else {
#endif  // CONFIG_EXT_TILE
    tsb = choose_size_bytes(max_tile_size, 0);
    tcsb = 4;  // This is ignored
    (void)max_tile_col_size;
#if CONFIG_EXT_TILE
  }
#endif  // CONFIG_EXT_TILE

  assert(tsb > 0);
  assert(tcsb > 0);

  *tile_size_bytes = tsb;
  *tile_col_size_bytes = tcsb;

  if (tsb == 4 && tcsb == 4) {
    // Fields are already at full width; nothing to repack.
    return data_size;
  } else {
    uint32_t wpos = 0;  // write cursor into the compacted stream
    uint32_t rpos = 0;  // read cursor into the original stream

#if CONFIG_EXT_TILE
    if (cm->large_scale_tile) {
      int tile_row;
      int tile_col;

      for (tile_col = 0; tile_col < cm->tile_cols; tile_col++) {
        // All but the last column has a column header
        if (tile_col < cm->tile_cols - 1) {
          uint32_t tile_col_size = mem_get_le32(dst + rpos);
          rpos += 4;

          // Adjust the tile column size by the number of bytes removed
          // from the tile size fields.
          tile_col_size -= (4 - tsb) * cm->tile_rows;

          mem_put_varsize(dst + wpos, tcsb, tile_col_size);
          wpos += tcsb;
        }

        for (tile_row = 0; tile_row < cm->tile_rows; tile_row++) {
          // All, including the last row has a header
          uint32_t tile_header = mem_get_le32(dst + rpos);
          rpos += 4;

          // If this is a copy tile, we need to shift the MSB to the
          // top bit of the new width, and there is no data to copy.
          if (tile_header >> 31 != 0) {
            if (tsb < 4) tile_header >>= 32 - 8 * tsb;
            mem_put_varsize(dst + wpos, tsb, tile_header);
            wpos += tsb;
          } else {
            // Regular tile: narrow the size field, then slide the payload
            // down to close the gap it leaves.
            mem_put_varsize(dst + wpos, tsb, tile_header);
            wpos += tsb;

            memmove(dst + wpos, dst + rpos, tile_header);
            rpos += tile_header;
            wpos += tile_header;
          }
        }
      }
    } else {
#endif  // CONFIG_EXT_TILE
      const int n_tiles = cm->tile_cols * cm->tile_rows;
      int n;

      for (n = 0; n < n_tiles; n++) {
        int tile_size;

        if (n == n_tiles - 1) {
          // The last tile carries no size field; its size is whatever
          // data remains.
          tile_size = data_size - rpos;
        } else {
          tile_size = mem_get_le32(dst + rpos);
          rpos += 4;
          mem_put_varsize(dst + wpos, tsb, tile_size);
          wpos += tsb;
        }

        memmove(dst + wpos, dst + rpos, tile_size);

        rpos += tile_size;
        wpos += tile_size;
      }
#if CONFIG_EXT_TILE
    }
#endif  // CONFIG_EXT_TILE

    // The stream can only have shrunk, and we must have consumed it all.
    assert(rpos > wpos);
    assert(rpos == data_size);

    return wpos;
  }
}
5496 #endif
5497
5498 #if CONFIG_OBU
5499 static uint32_t write_obu_header(OBU_TYPE obu_type, int obu_extension,
5500 uint8_t *const dst) {
5501 struct aom_write_bit_buffer wb = { dst, 0 };
5502 uint32_t size = 0;
5503
5504 aom_wb_write_literal(&wb, (int)obu_type, 5);
5505 aom_wb_write_literal(&wb, 0, 2);
5506 aom_wb_write_literal(&wb, obu_extension ? 1 : 0, 1);
5507 if (obu_extension) {
5508 aom_wb_write_literal(&wb, obu_extension & 0xFF, 8);
5509 }
5510
5511 size = aom_wb_bytes_written(&wb);
5512 return size;
5513 }
5514
5515 static uint32_t write_temporal_delimiter_obu() { return 0; }
5516
// Write the sequence header OBU payload at 'dst': profile, a 4-bit zero
// field, the frame-id signalling fields, and the color configuration.
// Side effect: fills in cm->seq_params' frame-id fields from the
// compile-time defaults before coding them.  Returns bytes written.
static uint32_t write_sequence_header_obu(AV1_COMP *cpi, uint8_t *const dst) {
  AV1_COMMON *const cm = &cpi->common;
  SequenceHeader *const seq_params = &cm->seq_params;
  struct aom_write_bit_buffer wb = { dst, 0 };
  uint32_t size = 0;

  write_profile(cm->profile, &wb);

  // 4-bit field, always coded as zero here.
  aom_wb_write_literal(&wb, 0, 4);

  seq_params->frame_id_numbers_present_flag = FRAME_ID_NUMBERS_PRESENT_FLAG;
  aom_wb_write_literal(&wb, seq_params->frame_id_numbers_present_flag, 1);
  if (seq_params->frame_id_numbers_present_flag) {
    seq_params->frame_id_length_minus7 = FRAME_ID_LENGTH_MINUS7;
    seq_params->delta_frame_id_length_minus2 = DELTA_FRAME_ID_LENGTH_MINUS2;
    aom_wb_write_literal(&wb, seq_params->frame_id_length_minus7, 4);
    aom_wb_write_literal(&wb, seq_params->delta_frame_id_length_minus2, 4);
  }

  // color_config
  write_bitdepth_colorspace_sampling(cm, &wb);

  size = aom_wb_bytes_written(&wb);
  return size;
}
5542
// Write the frame header OBU payload at 'dst': the uncompressed header and,
// unless this is a show_existing_frame, the tile-size length code plus the
// optional compressed header (whose 16-bit length is back-patched once
// known).  Returns the total number of bytes written.
static uint32_t write_frame_header_obu(AV1_COMP *cpi, uint8_t *const dst) {
  AV1_COMMON *const cm = &cpi->common;
  struct aom_write_bit_buffer wb = { dst, 0 };
  uint32_t total_size = 0;
  uint32_t compressed_hdr_size, uncompressed_hdr_size;

  write_uncompressed_header_obu(cpi, &wb);

  // A show_existing_frame header carries no further data.
  if (cm->show_existing_frame) {
    total_size = aom_wb_bytes_written(&wb);
    return total_size;
  }

  // write the tile length code (Always 4 bytes for now)
  aom_wb_write_literal(&wb, 3, 2);

  if (!use_compressed_header(cm)) {
    uncompressed_hdr_size = aom_wb_bytes_written(&wb);
    compressed_hdr_size = 0;
  } else {
    // placeholder for the compressed header length
    struct aom_write_bit_buffer compr_hdr_len_wb = wb;
    aom_wb_write_literal(&wb, 0, 16);

    uncompressed_hdr_size = aom_wb_bytes_written(&wb);
    compressed_hdr_size =
        write_compressed_header(cpi, dst + uncompressed_hdr_size);
    // Go back and overwrite the placeholder with the real length.
    aom_wb_overwrite_literal(&compr_hdr_len_wb, (int)(compressed_hdr_size), 16);
  }

  total_size = uncompressed_hdr_size + compressed_hdr_size;
  return total_size;
}
5576
5577 static uint32_t write_tile_group_header(uint8_t *const dst, int startTile,
5578 int endTile, int tiles_log2) {
5579 struct aom_write_bit_buffer wb = { dst, 0 };
5580 uint32_t size = 0;
5581
5582 aom_wb_write_literal(&wb, startTile, tiles_log2);
5583 aom_wb_write_literal(&wb, endTile, tiles_log2);
5584
5585 size = aom_wb_bytes_written(&wb);
5586 return size;
5587 }
5588
// Encode all tiles into tile-group OBUs at 'dst'.  In the normal path, tiles
// are grouped into cm->num_tg fixed-size groups; each group gets an OBU
// header (preceded by a 4-byte size) and, optionally, a re-inserted copy of
// the frame header OBU ('insert_frame_header_obu_flag').  In the
// CONFIG_EXT_TILE large-scale path, tiles are written column-major with
// per-column and per-tile size headers instead.  Returns total bytes
// written; also reports the largest tile (and tile column) size seen so the
// size fields can be compacted later.
static uint32_t write_tiles_in_tg_obus(AV1_COMP *const cpi, uint8_t *const dst,
                                       unsigned int *max_tile_size,
                                       unsigned int *max_tile_col_size,
                                       uint8_t *const frame_header_obu_location,
                                       uint32_t frame_header_obu_size,
                                       int insert_frame_header_obu_flag) {
  const AV1_COMMON *const cm = &cpi->common;
  aom_writer mode_bc;
  int tile_row, tile_col;
  TOKENEXTRA *(*const tok_buffers)[MAX_TILE_COLS] = cpi->tile_tok;
  TileBufferEnc(*const tile_buffers)[MAX_TILE_COLS] = cpi->tile_buffers;
  uint32_t total_size = 0;
  const int tile_cols = cm->tile_cols;
  const int tile_rows = cm->tile_rows;
  unsigned int tile_size = 0;
  const int n_log2_tiles = cm->log2_tile_rows + cm->log2_tile_cols;
  // Fixed size tile groups for the moment
  const int num_tg_hdrs = cm->num_tg;
  const int tg_size =
#if CONFIG_EXT_TILE
      (cm->large_scale_tile)
          ? 1
          :
#endif  // CONFIG_EXT_TILE
          (tile_rows * tile_cols + num_tg_hdrs - 1) / num_tg_hdrs;
  int tile_count = 0;
  int curr_tg_data_size = 0;
  uint8_t *data = dst;
  int new_tg = 1;
#if CONFIG_EXT_TILE
  const int have_tiles = tile_cols * tile_rows > 1;
#endif

  *max_tile_size = 0;
  *max_tile_col_size = 0;

#if CONFIG_EXT_TILE
  if (cm->large_scale_tile) {
    // Large-scale tile path: tiles written column-major, each column (except
    // the last) prefixed with a 4-byte column size.
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      TileInfo tile_info;
      const int is_last_col = (tile_col == tile_cols - 1);
      const uint32_t col_offset = total_size;

      av1_tile_set_col(&tile_info, cm, tile_col);

      // The last column does not have a column header
      if (!is_last_col) total_size += 4;

      for (tile_row = 0; tile_row < tile_rows; tile_row++) {
        TileBufferEnc *const buf = &tile_buffers[tile_row][tile_col];
        const TOKENEXTRA *tok = tok_buffers[tile_row][tile_col];
        const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
        const int data_offset = have_tiles ? 4 : 0;
        const int tile_idx = tile_row * tile_cols + tile_col;
        TileDataEnc *this_tile = &cpi->tile_data[tile_idx];
        av1_tile_set_row(&tile_info, cm, tile_row);

        buf->data = dst + total_size;

        // If CONFIG_EXT_TILE = 1, every tile in the row has a header,
        // even for the last one, unless no tiling is used at all.
        total_size += data_offset;
        // Initialise tile context from the frame context
        this_tile->tctx = *cm->fc;
        cpi->td.mb.e_mbd.tile_ctx = &this_tile->tctx;
#if CONFIG_PVQ
        cpi->td.mb.pvq_q = &this_tile->pvq_q;
        cpi->td.mb.daala_enc.state.adapt = &this_tile->tctx.pvq_context;
#endif  // CONFIG_PVQ
#if CONFIG_ANS
        mode_bc.size = 1 << cpi->common.ans_window_size_log2;
#endif
        aom_start_encode(&mode_bc, buf->data + data_offset);
        write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
        assert(tok == tok_end);
        aom_stop_encode(&mode_bc);
        tile_size = mode_bc.pos;
#if CONFIG_PVQ
        cpi->td.mb.pvq_q = NULL;
#endif
        buf->size = tile_size;

        // Record the maximum tile size we see, so we can compact headers later.
        *max_tile_size = AOMMAX(*max_tile_size, tile_size);

        if (have_tiles) {
          // tile header: size of this tile, or copy offset
          uint32_t tile_header = tile_size;
          const int tile_copy_mode =
              ((AOMMAX(cm->tile_width, cm->tile_height) << MI_SIZE_LOG2) <= 256)
                  ? 1
                  : 0;

          // If tile_copy_mode = 1, check if this tile is a copy tile.
          // Very low chances to have copy tiles on the key frames, so don't
          // search on key frames to reduce unnecessary search.
          if (cm->frame_type != KEY_FRAME && tile_copy_mode) {
            const int idendical_tile_offset =
                find_identical_tile(tile_row, tile_col, tile_buffers);

            if (idendical_tile_offset > 0) {
              // Copy tile: MSB set, offset in the top byte, no payload.
              tile_size = 0;
              tile_header = idendical_tile_offset | 0x80;
              tile_header <<= 24;
            }
          }

          mem_put_le32(buf->data, tile_header);
        }

        total_size += tile_size;
      }

      if (!is_last_col) {
        // Back-patch this column's total size.
        uint32_t col_size = total_size - col_offset - 4;
        mem_put_le32(dst + col_offset, col_size);

        // If it is not final packing, record the maximum tile column size we
        // see, otherwise, check if the tile size is out of the range.
        *max_tile_col_size = AOMMAX(*max_tile_col_size, col_size);
      }
    }
  } else {
#endif  // CONFIG_EXT_TILE

    for (tile_row = 0; tile_row < tile_rows; tile_row++) {
      TileInfo tile_info;
      const int is_last_row = (tile_row == tile_rows - 1);
      av1_tile_set_row(&tile_info, cm, tile_row);

      for (tile_col = 0; tile_col < tile_cols; tile_col++) {
        const int tile_idx = tile_row * tile_cols + tile_col;
        TileBufferEnc *const buf = &tile_buffers[tile_row][tile_col];
        TileDataEnc *this_tile = &cpi->tile_data[tile_idx];
        const TOKENEXTRA *tok = tok_buffers[tile_row][tile_col];
        const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
        const int is_last_col = (tile_col == tile_cols - 1);
        const int is_last_tile = is_last_col && is_last_row;
        int is_last_tile_in_tg = 0;

        if (new_tg) {
          if (insert_frame_header_obu_flag && tile_idx) {
            // insert a copy of frame header OBU (including 4-byte size),
            // except before the first tile group
            data = dst + total_size;
            memmove(data, frame_header_obu_location, frame_header_obu_size);
            total_size += frame_header_obu_size;
          }
          data = dst + total_size;
          // A new tile group begins at this tile. Write the obu header and
          // tile group header
          curr_tg_data_size = write_obu_header(OBU_TILE_GROUP, 0, data + 4);
          if (n_log2_tiles)
            curr_tg_data_size += write_tile_group_header(
                data + curr_tg_data_size + 4, tile_idx,
                AOMMIN(tile_idx + tg_size - 1, tile_cols * tile_rows - 1),
                n_log2_tiles);
          total_size += curr_tg_data_size + 4;
          new_tg = 0;
          tile_count = 0;
        }
        tile_count++;
        av1_tile_set_col(&tile_info, cm, tile_col);

        // The next tile starts a new group once this group is full or this
        // is the final tile of the frame.
        if (tile_count == tg_size || tile_idx == (tile_cols * tile_rows - 1)) {
          is_last_tile_in_tg = 1;
          new_tg = 1;
        } else {
          is_last_tile_in_tg = 0;
        }

#if CONFIG_DEPENDENT_HORZTILES
        av1_tile_set_tg_boundary(&tile_info, cm, tile_row, tile_col);
#endif
        buf->data = dst + total_size;

        // The last tile of the tile group does not have a header.
        if (!is_last_tile_in_tg) total_size += 4;

        // Initialise tile context from the frame context
        this_tile->tctx = *cm->fc;
        cpi->td.mb.e_mbd.tile_ctx = &this_tile->tctx;
#if CONFIG_PVQ
        cpi->td.mb.pvq_q = &this_tile->pvq_q;
        cpi->td.mb.daala_enc.state.adapt = &this_tile->tctx.pvq_context;
#endif  // CONFIG_PVQ
#if CONFIG_ANS
        mode_bc.size = 1 << cpi->common.ans_window_size_log2;
#endif  // CONFIG_ANS
        aom_start_encode(&mode_bc, dst + total_size);
        write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
#if !CONFIG_LV_MAP
#if !CONFIG_PVQ
        assert(tok == tok_end);
#endif  // !CONFIG_PVQ
#endif  // !CONFIG_LV_MAP
        aom_stop_encode(&mode_bc);
        tile_size = mode_bc.pos;
#if CONFIG_PVQ
        cpi->td.mb.pvq_q = NULL;
#endif
        assert(tile_size > 0);

        curr_tg_data_size += (tile_size + (is_last_tile_in_tg ? 0 : 4));
        buf->size = tile_size;

        if (!is_last_tile) {
          *max_tile_size = AOMMAX(*max_tile_size, tile_size);
        }
        if (!is_last_tile_in_tg) {
          // size of this tile
          mem_put_le32(buf->data, tile_size);
        } else {
          // write current tile group size
          mem_put_le32(data, curr_tg_data_size);
        }

        total_size += tile_size;
      }
    }
#if CONFIG_EXT_TILE
  }
#endif  // CONFIG_EXT_TILE
  return (uint32_t)total_size;
}
5814
5815 #endif
5816
// Top-level bitstream packer: assembles the complete coded frame at 'dst'
// and returns its length through 'size'.  Depending on configuration this
// emits OBUs (temporal delimiter, optional sequence header on key frames,
// frame header, tile groups) and/or the legacy header + tile layout, then
// remuxes the large-scale-tile size fields down to their minimal width.
void av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dst, size_t *size) {
  uint8_t *data = dst;
  uint32_t data_size;
#if CONFIG_EXT_TILE
  AV1_COMMON *const cm = &cpi->common;
  uint32_t compressed_hdr_size = 0;
  uint32_t uncompressed_hdr_size;
  struct aom_write_bit_buffer saved_wb;
  struct aom_write_bit_buffer wb = { data, 0 };
  const int have_tiles = cm->tile_cols * cm->tile_rows > 1;
  int tile_size_bytes;
  int tile_col_size_bytes;
#endif  // CONFIG_EXT_TILE
  unsigned int max_tile_size;
  unsigned int max_tile_col_size;
#if CONFIG_OBU
#if !CONFIG_EXT_TILE
  AV1_COMMON *const cm = &cpi->common;
#endif
  uint32_t obu_size;
  uint8_t *frame_header_location;
  uint32_t frame_header_size;
#endif

#if CONFIG_BITSTREAM_DEBUG
  bitstream_queue_reset_write();
#endif

#if CONFIG_OBU
  // write temporal delimiter obu, preceded by 4-byte size
  obu_size = write_obu_header(OBU_TD, 0, data + 4);
  obu_size += write_temporal_delimiter_obu(/*data + 4 + obu_size*/);
  mem_put_le32(data, obu_size);
  data += obu_size + 4;

  // write sequence header obu if KEY_FRAME, preceded by 4-byte size
  if (cm->frame_type == KEY_FRAME) {
    obu_size = write_obu_header(OBU_SEQUENCE_HEADER, 0, data + 4);
    obu_size += write_sequence_header_obu(cpi, data + 4 + obu_size);
    mem_put_le32(data, obu_size);
    data += obu_size + 4;
  }

  // write frame header obu, preceded by 4-byte size
  frame_header_location = data + 4;
  obu_size = write_obu_header(OBU_FRAME_HEADER, 0, frame_header_location);
  frame_header_size = write_frame_header_obu(cpi, data + 4 + obu_size);
  obu_size += frame_header_size;
  mem_put_le32(data, obu_size);
  data += obu_size + 4;

  if (cm->show_existing_frame) {
    // No tile data follows a show_existing_frame header.
    data_size = 0;
  } else {
    // Each tile group obu will be preceded by 4-byte size of the tile group
    // obu
    data_size =
        write_tiles_in_tg_obus(cpi, data, &max_tile_size, &max_tile_col_size,
                               frame_header_location - 4, obu_size + 4,
                               1 /* cm->error_resilient_mode */);
  }

#endif

#if CONFIG_EXT_TILE
  if (cm->large_scale_tile) {
    // Write the uncompressed header
    write_uncompressed_header_frame(cpi, &wb);

#if CONFIG_EXT_REFS
    if (cm->show_existing_frame) {
      *size = aom_wb_bytes_written(&wb);
      return;
    }
#endif  // CONFIG_EXT_REFS

    // We do not know these in advance. Output placeholder bit.
    saved_wb = wb;
    // Write tile size magnitudes
    if (have_tiles) {
      // Note that the last item in the uncompressed header is the data
      // describing tile configuration.
      // Number of bytes in tile column size - 1
      aom_wb_write_literal(&wb, 0, 2);

      // Number of bytes in tile size - 1
      aom_wb_write_literal(&wb, 0, 2);
    }

    if (!use_compressed_header(cm)) {
      uncompressed_hdr_size = (uint32_t)aom_wb_bytes_written(&wb);
      aom_clear_system_state();
      compressed_hdr_size = 0;
    } else {
      // Size of compressed header
      aom_wb_write_literal(&wb, 0, 16);
      uncompressed_hdr_size = (uint32_t)aom_wb_bytes_written(&wb);
      aom_clear_system_state();
      // Write the compressed header
      compressed_hdr_size =
          write_compressed_header(cpi, data + uncompressed_hdr_size);
    }
    data += uncompressed_hdr_size + compressed_hdr_size;

    // Write the encoded tile data
    data_size = write_tiles(cpi, data, &max_tile_size, &max_tile_col_size);
  } else {
#endif  // CONFIG_EXT_TILE
#if !CONFIG_OBU
    data_size = write_tiles(cpi, data, &max_tile_size, &max_tile_col_size);
#endif
#if CONFIG_EXT_TILE
  }
#endif  // CONFIG_EXT_TILE
#if CONFIG_EXT_TILE
  if (cm->large_scale_tile) {
    // Shrink the 4-byte tile size fields to the minimal widths, then
    // back-patch those widths (and the compressed header size) into the
    // placeholder bits saved in 'saved_wb'.
    if (have_tiles) {
      data_size =
          remux_tiles(cm, data, data_size, max_tile_size, max_tile_col_size,
                      &tile_size_bytes, &tile_col_size_bytes);
    }

    data += data_size;

    // Now fill in the gaps in the uncompressed header.
    if (have_tiles) {
      assert(tile_col_size_bytes >= 1 && tile_col_size_bytes <= 4);
      aom_wb_write_literal(&saved_wb, tile_col_size_bytes - 1, 2);

      assert(tile_size_bytes >= 1 && tile_size_bytes <= 4);
      aom_wb_write_literal(&saved_wb, tile_size_bytes - 1, 2);
    }
    // TODO(jbb): Figure out what to do if compressed_hdr_size > 16 bits.
    assert(compressed_hdr_size <= 0xffff);
    aom_wb_write_literal(&saved_wb, compressed_hdr_size, 16);
  } else {
#endif  // CONFIG_EXT_TILE
    data += data_size;
#if CONFIG_EXT_TILE
  }
#endif  // CONFIG_EXT_TILE
#if CONFIG_ANS && ANS_REVERSE
  // Avoid aliasing the superframe index
  *data++ = 0;
#endif
  *size = data - dst;
}
5964