1 /*
2 * Copyright © 2018-2021, VideoLAN and dav1d authors
3 * Copyright © 2018, Two Orioles, LLC
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright notice, this
10 * list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include "config.h"
29
30 #include <string.h>
31 #include <stdio.h>
32
33 #include "common/attributes.h"
34 #include "common/bitdepth.h"
35 #include "common/dump.h"
36 #include "common/frame.h"
37 #include "common/intops.h"
38
39 #include "src/cdef_apply.h"
40 #include "src/ctx.h"
41 #include "src/ipred_prepare.h"
42 #include "src/lf_apply.h"
43 #include "src/lr_apply.h"
44 #include "src/recon.h"
45 #include "src/scan.h"
46 #include "src/tables.h"
47 #include "src/wedge.h"
48
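/* A short note on the decoder below (a summary of what the code does, not part
 * of the upstream comments): this is an Exp-Golomb-style value. Count
 * equiprobable "0" bits (capped at 32) to get a length, then read that many
 * literal bits. E.g. a leading "1" decodes to 0, "011" to 2, "00111" to 6. */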
49 static inline unsigned read_golomb(MsacContext *const msac) {
50 int len = 0;
51 unsigned val = 1;
52
53 while (!dav1d_msac_decode_bool_equi(msac) && len < 32) len++;
54 while (len--) val = (val << 1) + dav1d_msac_decode_bool_equi(msac);
55
56 return val - 1;
57 }
58
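/* The per-4x4 coefficient context bytes in a[] / l[] (produced by decode_coefs()
 * below via res_ctx) pack two things: bits 0-5 hold the cumulative coefficient
 * level capped at 63, and the top two bits encode the DC coefficient as
 * 0x00 = negative, 0x40 = zero (or block skipped), 0x80 = positive.
 * get_skip_ctx() and get_dc_sign_ctx() read several of these bytes at once. */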
59 static inline unsigned get_skip_ctx(const TxfmInfo *const t_dim,
60 const enum BlockSize bs,
61 const uint8_t *const a,
62 const uint8_t *const l,
63 const int chroma,
64 const enum Dav1dPixelLayout layout)
65 {
66 const uint8_t *const b_dim = dav1d_block_dimensions[bs];
67
68 if (chroma) {
69 const int ss_ver = layout == DAV1D_PIXEL_LAYOUT_I420;
70 const int ss_hor = layout != DAV1D_PIXEL_LAYOUT_I444;
71 const int not_one_blk = b_dim[2] - (!!b_dim[2] && ss_hor) > t_dim->lw ||
72 b_dim[3] - (!!b_dim[3] && ss_ver) > t_dim->lh;
73 unsigned ca, cl;
74
75 #define MERGE_CTX(dir, type, no_val) \
76 c##dir = *(const type *) dir != no_val; \
77 break
78
79 switch (t_dim->lw) {
80 /* For some reason the MSVC CRT _wassert() function is not flagged as
81 * __declspec(noreturn), so when using those headers the compiler will
82 * expect execution to continue after an assertion has been triggered
83 * and will therefore complain about the use of uninitialized variables
84 * when compiled in debug mode if we put the default case at the end. */
85 default: assert(0); /* fall-through */
86 case TX_4X4: MERGE_CTX(a, uint8_t, 0x40);
87 case TX_8X8: MERGE_CTX(a, uint16_t, 0x4040);
88 case TX_16X16: MERGE_CTX(a, uint32_t, 0x40404040U);
89 case TX_32X32: MERGE_CTX(a, uint64_t, 0x4040404040404040ULL);
90 }
91 switch (t_dim->lh) {
92 default: assert(0); /* fall-through */
93 case TX_4X4: MERGE_CTX(l, uint8_t, 0x40);
94 case TX_8X8: MERGE_CTX(l, uint16_t, 0x4040);
95 case TX_16X16: MERGE_CTX(l, uint32_t, 0x40404040U);
96 case TX_32X32: MERGE_CTX(l, uint64_t, 0x4040404040404040ULL);
97 }
98 #undef MERGE_CTX
99
100 return 7 + not_one_blk * 3 + ca + cl;
101 } else if (b_dim[2] == t_dim->lw && b_dim[3] == t_dim->lh) {
102 return 0;
103 } else {
104 unsigned la, ll;
105
106 #define MERGE_CTX(dir, type, tx) \
107 if (tx == TX_64X64) { \
108 uint64_t tmp = *(const uint64_t *) dir; \
109 tmp |= *(const uint64_t *) &dir[8]; \
110 l##dir = (unsigned) (tmp >> 32) | (unsigned) tmp; \
111 } else \
112 l##dir = *(const type *) dir; \
113 if (tx == TX_32X32) l##dir |= *(const type *) &dir[sizeof(type)]; \
114 if (tx >= TX_16X16) l##dir |= l##dir >> 16; \
115 if (tx >= TX_8X8) l##dir |= l##dir >> 8; \
116 break
117
118 switch (t_dim->lw) {
119 default: assert(0); /* fall-through */
120 case TX_4X4: MERGE_CTX(a, uint8_t, TX_4X4);
121 case TX_8X8: MERGE_CTX(a, uint16_t, TX_8X8);
122 case TX_16X16: MERGE_CTX(a, uint32_t, TX_16X16);
123 case TX_32X32: MERGE_CTX(a, uint32_t, TX_32X32);
124 case TX_64X64: MERGE_CTX(a, uint32_t, TX_64X64);
125 }
126 switch (t_dim->lh) {
127 default: assert(0); /* fall-through */
128 case TX_4X4: MERGE_CTX(l, uint8_t, TX_4X4);
129 case TX_8X8: MERGE_CTX(l, uint16_t, TX_8X8);
130 case TX_16X16: MERGE_CTX(l, uint32_t, TX_16X16);
131 case TX_32X32: MERGE_CTX(l, uint32_t, TX_32X32);
132 case TX_64X64: MERGE_CTX(l, uint32_t, TX_64X64);
133 }
134 #undef MERGE_CTX
135
136 return dav1d_skip_ctx[umin(la & 0x3F, 4)][umin(ll & 0x3F, 4)];
137 }
138 }
139
140 static inline unsigned get_dc_sign_ctx(const int /*enum RectTxfmSize*/ tx,
141 const uint8_t *const a,
142 const uint8_t *const l)
143 {
144 uint64_t mask = 0xC0C0C0C0C0C0C0C0ULL, mul = 0x0101010101010101ULL;
145 int s;
146
147 #if ARCH_X86_64 && defined(__GNUC__)
148 /* Coerce compilers into producing better code. For some reason
149 * every x86-64 compiler is awful at handling 64-bit constants. */
150 __asm__("" : "+r"(mask), "+r"(mul));
151 #endif
152
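/* Roughly, each case below extracts the 2-bit DC-sign codes (mask 0xC0 per
 * byte) of the above/left 4x4 units and horizontally sums them with a multiply
 * (a SWAR-style byte sum into the top byte). Subtracting one per unit gives a
 * signed balance s, mapped to ctx 1 (mostly negative DCs), 0 (balanced) or
 * 2 (mostly positive). */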
153 switch(tx) {
154 default: assert(0); /* fall-through */
155 case TX_4X4: {
156 int t = *(const uint8_t *) a >> 6;
157 t += *(const uint8_t *) l >> 6;
158 s = t - 1 - 1;
159 break;
160 }
161 case TX_8X8: {
162 uint32_t t = *(const uint16_t *) a & (uint32_t) mask;
163 t += *(const uint16_t *) l & (uint32_t) mask;
164 t *= 0x04040404U;
165 s = (int) (t >> 24) - 2 - 2;
166 break;
167 }
168 case TX_16X16: {
169 uint32_t t = (*(const uint32_t *) a & (uint32_t) mask) >> 6;
170 t += (*(const uint32_t *) l & (uint32_t) mask) >> 6;
171 t *= (uint32_t) mul;
172 s = (int) (t >> 24) - 4 - 4;
173 break;
174 }
175 case TX_32X32: {
176 uint64_t t = (*(const uint64_t *) a & mask) >> 6;
177 t += (*(const uint64_t *) l & mask) >> 6;
178 t *= mul;
179 s = (int) (t >> 56) - 8 - 8;
180 break;
181 }
182 case TX_64X64: {
183 uint64_t t = (*(const uint64_t *) &a[0] & mask) >> 6;
184 t += (*(const uint64_t *) &a[8] & mask) >> 6;
185 t += (*(const uint64_t *) &l[0] & mask) >> 6;
186 t += (*(const uint64_t *) &l[8] & mask) >> 6;
187 t *= mul;
188 s = (int) (t >> 56) - 16 - 16;
189 break;
190 }
191 case RTX_4X8: {
192 uint32_t t = *(const uint8_t *) a & (uint32_t) mask;
193 t += *(const uint16_t *) l & (uint32_t) mask;
194 t *= 0x04040404U;
195 s = (int) (t >> 24) - 1 - 2;
196 break;
197 }
198 case RTX_8X4: {
199 uint32_t t = *(const uint16_t *) a & (uint32_t) mask;
200 t += *(const uint8_t *) l & (uint32_t) mask;
201 t *= 0x04040404U;
202 s = (int) (t >> 24) - 2 - 1;
203 break;
204 }
205 case RTX_8X16: {
206 uint32_t t = *(const uint16_t *) a & (uint32_t) mask;
207 t += *(const uint32_t *) l & (uint32_t) mask;
208 t = (t >> 6) * (uint32_t) mul;
209 s = (int) (t >> 24) - 2 - 4;
210 break;
211 }
212 case RTX_16X8: {
213 uint32_t t = *(const uint32_t *) a & (uint32_t) mask;
214 t += *(const uint16_t *) l & (uint32_t) mask;
215 t = (t >> 6) * (uint32_t) mul;
216 s = (int) (t >> 24) - 4 - 2;
217 break;
218 }
219 case RTX_16X32: {
220 uint64_t t = *(const uint32_t *) a & (uint32_t) mask;
221 t += *(const uint64_t *) l & mask;
222 t = (t >> 6) * mul;
223 s = (int) (t >> 56) - 4 - 8;
224 break;
225 }
226 case RTX_32X16: {
227 uint64_t t = *(const uint64_t *) a & mask;
228 t += *(const uint32_t *) l & (uint32_t) mask;
229 t = (t >> 6) * mul;
230 s = (int) (t >> 56) - 8 - 4;
231 break;
232 }
233 case RTX_32X64: {
234 uint64_t t = (*(const uint64_t *) &a[0] & mask) >> 6;
235 t += (*(const uint64_t *) &l[0] & mask) >> 6;
236 t += (*(const uint64_t *) &l[8] & mask) >> 6;
237 t *= mul;
238 s = (int) (t >> 56) - 8 - 16;
239 break;
240 }
241 case RTX_64X32: {
242 uint64_t t = (*(const uint64_t *) &a[0] & mask) >> 6;
243 t += (*(const uint64_t *) &a[8] & mask) >> 6;
244 t += (*(const uint64_t *) &l[0] & mask) >> 6;
245 t *= mul;
246 s = (int) (t >> 56) - 16 - 8;
247 break;
248 }
249 case RTX_4X16: {
250 uint32_t t = *(const uint8_t *) a & (uint32_t) mask;
251 t += *(const uint32_t *) l & (uint32_t) mask;
252 t = (t >> 6) * (uint32_t) mul;
253 s = (int) (t >> 24) - 1 - 4;
254 break;
255 }
256 case RTX_16X4: {
257 uint32_t t = *(const uint32_t *) a & (uint32_t) mask;
258 t += *(const uint8_t *) l & (uint32_t) mask;
259 t = (t >> 6) * (uint32_t) mul;
260 s = (int) (t >> 24) - 4 - 1;
261 break;
262 }
263 case RTX_8X32: {
264 uint64_t t = *(const uint16_t *) a & (uint32_t) mask;
265 t += *(const uint64_t *) l & mask;
266 t = (t >> 6) * mul;
267 s = (int) (t >> 56) - 2 - 8;
268 break;
269 }
270 case RTX_32X8: {
271 uint64_t t = *(const uint64_t *) a & mask;
272 t += *(const uint16_t *) l & (uint32_t) mask;
273 t = (t >> 6) * mul;
274 s = (int) (t >> 56) - 8 - 2;
275 break;
276 }
277 case RTX_16X64: {
278 uint64_t t = *(const uint32_t *) a & (uint32_t) mask;
279 t += *(const uint64_t *) &l[0] & mask;
280 t = (t >> 6) + ((*(const uint64_t *) &l[8] & mask) >> 6);
281 t *= mul;
282 s = (int) (t >> 56) - 4 - 16;
283 break;
284 }
285 case RTX_64X16: {
286 uint64_t t = *(const uint64_t *) &a[0] & mask;
287 t += *(const uint32_t *) l & (uint32_t) mask;
288 t = (t >> 6) + ((*(const uint64_t *) &a[8] & mask) >> 6);
289 t *= mul;
290 s = (int) (t >> 56) - 16 - 4;
291 break;
292 }
293 }
294
295 return (s != 0) + (s > 0);
296 }
297
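/* Context for the low coefficient token (0-3), derived from the magnitudes of
 * already-coded neighbours in the levels[] plane; the partial sum is also
 * returned via hi_mag for reuse by the high-token context. */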
298 static inline unsigned get_lo_ctx(const uint8_t *const levels,
299 const enum TxClass tx_class,
300 unsigned *const hi_mag,
301 const uint8_t (*const ctx_offsets)[5],
302 const unsigned x, const unsigned y,
303 const ptrdiff_t stride)
304 {
305 unsigned mag = levels[0 * stride + 1] + levels[1 * stride + 0];
306 unsigned offset;
307 if (tx_class == TX_CLASS_2D) {
308 mag += levels[1 * stride + 1];
309 *hi_mag = mag;
310 mag += levels[0 * stride + 2] + levels[2 * stride + 0];
311 offset = ctx_offsets[umin(y, 4)][umin(x, 4)];
312 } else {
313 mag += levels[0 * stride + 2];
314 *hi_mag = mag;
315 mag += levels[0 * stride + 3] + levels[0 * stride + 4];
316 offset = 26 + (y > 1 ? 10 : y * 5);
317 }
318 return offset + (mag > 512 ? 4 : (mag + 64) >> 7);
319 }
320
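/* Decode all coefficients of one transform block: skip flag, transform type,
 * end-of-block position, per-coefficient tokens (plus golomb-coded residuals
 * and signs), and dequantization into cf[]. Returns the eob (-1 if the block
 * is all-zero) and writes the new coefficient context byte to *res_ctx. */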
321 static int decode_coefs(Dav1dTileContext *const t,
322 uint8_t *const a, uint8_t *const l,
323 const enum RectTxfmSize tx, const enum BlockSize bs,
324 const Av1Block *const b, const int intra,
325 const int plane, coef *cf,
326 enum TxfmType *const txtp, uint8_t *res_ctx)
327 {
328 Dav1dTileState *const ts = t->ts;
329 const int chroma = !!plane;
330 const Dav1dFrameContext *const f = t->f;
331 const int lossless = f->frame_hdr->segmentation.lossless[b->seg_id];
332 const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[tx];
333 const int dbg = DEBUG_BLOCK_INFO && plane && 0;
334
335 if (dbg)
336 printf("Start: r=%d\n", ts->msac.rng);
337
338 // does this block have any non-zero coefficients
339 const int sctx = get_skip_ctx(t_dim, bs, a, l, chroma, f->cur.p.layout);
340 const int all_skip = dav1d_msac_decode_bool_adapt(&ts->msac,
341 ts->cdf.coef.skip[t_dim->ctx][sctx]);
342 if (dbg)
343 printf("Post-non-zero[%d][%d][%d]: r=%d\n",
344 t_dim->ctx, sctx, all_skip, ts->msac.rng);
345 if (all_skip) {
346 *res_ctx = 0x40;
347 *txtp = lossless * WHT_WHT; /* lossless ? WHT_WHT : DCT_DCT */
348 return -1;
349 }
350
351 // transform type (chroma: derived, luma: explicitly coded)
352 if (lossless) {
353 assert(t_dim->max == TX_4X4);
354 *txtp = WHT_WHT;
355 } else if (t_dim->max + intra >= TX_64X64) {
356 *txtp = DCT_DCT;
357 } else if (chroma) {
358 // inferred from either the luma txtp (inter) or a LUT (intra)
359 *txtp = intra ? dav1d_txtp_from_uvmode[b->uv_mode] :
360 get_uv_inter_txtp(t_dim, *txtp);
361 } else if (!f->frame_hdr->segmentation.qidx[b->seg_id]) {
362 // In libaom, lossless is checked by a literal qidx == 0, but not all
363 // such blocks are actually lossless. The remainder gets an implicit
364 // transform type (for luma)
365 *txtp = DCT_DCT;
366 } else {
367 unsigned idx;
368 if (intra) {
369 const enum IntraPredMode y_mode_nofilt = b->y_mode == FILTER_PRED ?
370 dav1d_filter_mode_to_y_mode[b->y_angle] : b->y_mode;
371 if (f->frame_hdr->reduced_txtp_set || t_dim->min == TX_16X16) {
372 idx = dav1d_msac_decode_symbol_adapt4(&ts->msac,
373 ts->cdf.m.txtp_intra2[t_dim->min][y_mode_nofilt], 4);
374 *txtp = dav1d_tx_types_per_set[idx + 0];
375 } else {
376 idx = dav1d_msac_decode_symbol_adapt8(&ts->msac,
377 ts->cdf.m.txtp_intra1[t_dim->min][y_mode_nofilt], 6);
378 *txtp = dav1d_tx_types_per_set[idx + 5];
379 }
380 if (dbg)
381 printf("Post-txtp-intra[%d->%d][%d][%d->%d]: r=%d\n",
382 tx, t_dim->min, y_mode_nofilt, idx, *txtp, ts->msac.rng);
383 } else {
384 if (f->frame_hdr->reduced_txtp_set || t_dim->max == TX_32X32) {
385 idx = dav1d_msac_decode_bool_adapt(&ts->msac,
386 ts->cdf.m.txtp_inter3[t_dim->min]);
387 *txtp = (idx - 1) & IDTX; /* idx ? DCT_DCT : IDTX */
388 } else if (t_dim->min == TX_16X16) {
389 idx = dav1d_msac_decode_symbol_adapt16(&ts->msac,
390 ts->cdf.m.txtp_inter2, 11);
391 *txtp = dav1d_tx_types_per_set[idx + 12];
392 } else {
393 idx = dav1d_msac_decode_symbol_adapt16(&ts->msac,
394 ts->cdf.m.txtp_inter1[t_dim->min], 15);
395 *txtp = dav1d_tx_types_per_set[idx + 24];
396 }
397 if (dbg)
398 printf("Post-txtp-inter[%d->%d][%d->%d]: r=%d\n",
399 tx, t_dim->min, idx, *txtp, ts->msac.rng);
400 }
401 }
402
403 // find end-of-block (eob)
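// eob_bin is a magnitude class: values 0 and 1 are the eob itself; a class
// k > 1 covers eob values in [2^(k-1), 2^k - 1], refined below with one
// adaptively coded high bit plus k-2 raw bits.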
404 int eob_bin;
405 const int tx2dszctx = imin(t_dim->lw, TX_32X32) + imin(t_dim->lh, TX_32X32);
406 const enum TxClass tx_class = dav1d_tx_type_class[*txtp];
407 const int is_1d = tx_class != TX_CLASS_2D;
408 switch (tx2dszctx) {
409 #define case_sz(sz, bin, ns, is_1d) \
410 case sz: { \
411 uint16_t *const eob_bin_cdf = ts->cdf.coef.eob_bin_##bin[chroma]is_1d; \
412 eob_bin = dav1d_msac_decode_symbol_adapt##ns(&ts->msac, eob_bin_cdf, 4 + sz); \
413 break; \
414 }
415 case_sz(0, 16, 4, [is_1d]);
416 case_sz(1, 32, 8, [is_1d]);
417 case_sz(2, 64, 8, [is_1d]);
418 case_sz(3, 128, 8, [is_1d]);
419 case_sz(4, 256, 16, [is_1d]);
420 case_sz(5, 512, 16, );
421 case_sz(6, 1024, 16, );
422 #undef case_sz
423 }
424 if (dbg)
425 printf("Post-eob_bin_%d[%d][%d][%d]: r=%d\n",
426 16 << tx2dszctx, chroma, is_1d, eob_bin, ts->msac.rng);
427 int eob;
428 if (eob_bin > 1) {
429 uint16_t *const eob_hi_bit_cdf =
430 ts->cdf.coef.eob_hi_bit[t_dim->ctx][chroma][eob_bin];
431 const int eob_hi_bit = dav1d_msac_decode_bool_adapt(&ts->msac, eob_hi_bit_cdf);
432 if (dbg)
433 printf("Post-eob_hi_bit[%d][%d][%d][%d]: r=%d\n",
434 t_dim->ctx, chroma, eob_bin, eob_hi_bit, ts->msac.rng);
435 eob = ((eob_hi_bit | 2) << (eob_bin - 2)) |
436 dav1d_msac_decode_bools(&ts->msac, eob_bin - 2);
437 if (dbg)
438 printf("Post-eob[%d]: r=%d\n", eob, ts->msac.rng);
439 } else {
440 eob = eob_bin;
441 }
442 assert(eob >= 0);
443
444 // base tokens
445 uint16_t (*const eob_cdf)[4] = ts->cdf.coef.eob_base_tok[t_dim->ctx][chroma];
446 uint16_t (*const hi_cdf)[4] = ts->cdf.coef.br_tok[imin(t_dim->ctx, 3)][chroma];
447 unsigned rc, dc_tok;
448
449 if (eob) {
450 uint16_t (*const lo_cdf)[4] = ts->cdf.coef.base_tok[t_dim->ctx][chroma];
451 uint8_t *const levels = t->scratch.levels; // bits 0-5: tok, 6-7: lo_tok
452 const int sw = imin(t_dim->w, 8), sh = imin(t_dim->h, 8);
453
454 /* eob */
455 unsigned ctx = 1 + (eob > sw * sh * 2) + (eob > sw * sh * 4);
456 int eob_tok = dav1d_msac_decode_symbol_adapt4(&ts->msac, eob_cdf[ctx], 2);
457 int tok = eob_tok + 1;
458 int level_tok = tok * 0x41;
459 unsigned mag;
460
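/* While scanning backwards from the eob, cf[] doubles as scratch: bits 11+
 * hold the decoded token and the low bits hold the position (rc) of the
 * previously parsed non-zero coefficient, i.e. the next one in forward scan
 * order (0 terminates at the eob coefficient). The dequant loops further down
 * walk this implicit linked list starting from the first non-zero position. */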
461 #define DECODE_COEFS_CLASS(tx_class) \
462 unsigned x, y; \
463 if (tx_class == TX_CLASS_2D) \
464 rc = scan[eob], x = rc >> shift, y = rc & mask; \
465 else if (tx_class == TX_CLASS_H) \
466 /* Transposing reduces the stride and padding requirements */ \
467 x = eob & mask, y = eob >> shift, rc = eob; \
468 else /* tx_class == TX_CLASS_V */ \
469 x = eob & mask, y = eob >> shift, rc = (x << shift2) | y; \
470 if (dbg) \
471 printf("Post-lo_tok[%d][%d][%d][%d=%d=%d]: r=%d\n", \
472 t_dim->ctx, chroma, ctx, eob, rc, tok, ts->msac.rng); \
473 if (eob_tok == 2) { \
474 ctx = (tx_class == TX_CLASS_2D ? (x | y) > 1 : y != 0) ? 14 : 7; \
475 tok = dav1d_msac_decode_hi_tok(&ts->msac, hi_cdf[ctx]); \
476 level_tok = tok + (3 << 6); \
477 if (dbg) \
478 printf("Post-hi_tok[%d][%d][%d][%d=%d=%d]: r=%d\n", \
479 imin(t_dim->ctx, 3), chroma, ctx, eob, rc, tok, \
480 ts->msac.rng); \
481 } \
482 cf[rc] = tok << 11; \
483 levels[x * stride + y] = (uint8_t) level_tok; \
484 for (int i = eob - 1; i > 0; i--) { /* ac */ \
485 unsigned rc_i; \
486 if (tx_class == TX_CLASS_2D) \
487 rc_i = scan[i], x = rc_i >> shift, y = rc_i & mask; \
488 else if (tx_class == TX_CLASS_H) \
489 x = i & mask, y = i >> shift, rc_i = i; \
490 else /* tx_class == TX_CLASS_V */ \
491 x = i & mask, y = i >> shift, rc_i = (x << shift2) | y; \
492 assert(x < 32 && y < 32); \
493 uint8_t *const level = levels + x * stride + y; \
494 ctx = get_lo_ctx(level, tx_class, &mag, lo_ctx_offsets, x, y, stride); \
495 if (tx_class == TX_CLASS_2D) \
496 y |= x; \
497 tok = dav1d_msac_decode_symbol_adapt4(&ts->msac, lo_cdf[ctx], 3); \
498 if (dbg) \
499 printf("Post-lo_tok[%d][%d][%d][%d=%d=%d]: r=%d\n", \
500 t_dim->ctx, chroma, ctx, i, rc_i, tok, ts->msac.rng); \
501 if (tok == 3) { \
502 mag &= 63; \
503 ctx = (y > (tx_class == TX_CLASS_2D) ? 14 : 7) + \
504 (mag > 12 ? 6 : (mag + 1) >> 1); \
505 tok = dav1d_msac_decode_hi_tok(&ts->msac, hi_cdf[ctx]); \
506 if (dbg) \
507 printf("Post-hi_tok[%d][%d][%d][%d=%d=%d]: r=%d\n", \
508 imin(t_dim->ctx, 3), chroma, ctx, i, rc_i, tok, \
509 ts->msac.rng); \
510 *level = (uint8_t) (tok + (3 << 6)); \
511 cf[rc_i] = (tok << 11) | rc; \
512 rc = rc_i; \
513 } else { \
514 /* 0x1 for tok, 0x7ff as bitmask for rc, 0x41 for level_tok */ \
515 tok *= 0x17ff41; \
516 *level = (uint8_t) tok; \
517 /* tok ? (tok << 11) | rc : 0 */ \
518 tok = (tok >> 9) & (rc + ~0x7ffu); \
519 if (tok) rc = rc_i; \
520 cf[rc_i] = tok; \
521 } \
522 } \
523 /* dc */ \
524 ctx = (tx_class == TX_CLASS_2D) ? 0 : \
525 get_lo_ctx(levels, tx_class, &mag, lo_ctx_offsets, 0, 0, stride); \
526 dc_tok = dav1d_msac_decode_symbol_adapt4(&ts->msac, lo_cdf[ctx], 3); \
527 if (dbg) \
528 printf("Post-dc_lo_tok[%d][%d][%d][%d]: r=%d\n", \
529 t_dim->ctx, chroma, ctx, dc_tok, ts->msac.rng); \
530 if (dc_tok == 3) { \
531 if (tx_class == TX_CLASS_2D) \
532 mag = levels[0 * stride + 1] + levels[1 * stride + 0] + \
533 levels[1 * stride + 1]; \
534 mag &= 63; \
535 ctx = mag > 12 ? 6 : (mag + 1) >> 1; \
536 dc_tok = dav1d_msac_decode_hi_tok(&ts->msac, hi_cdf[ctx]); \
537 if (dbg) \
538 printf("Post-dc_hi_tok[%d][%d][0][%d]: r=%d\n", \
539 imin(t_dim->ctx, 3), chroma, dc_tok, ts->msac.rng); \
540 } \
541 break
542
543 const uint16_t *scan;
544 switch (tx_class) {
545 case TX_CLASS_2D: {
546 const unsigned nonsquare_tx = tx >= RTX_4X8;
547 const uint8_t (*const lo_ctx_offsets)[5] =
548 dav1d_lo_ctx_offsets[nonsquare_tx + (tx & nonsquare_tx)];
549 scan = dav1d_scans[tx];
550 const ptrdiff_t stride = 4 * sh;
551 const unsigned shift = t_dim->lh < 4 ? t_dim->lh + 2 : 5, shift2 = 0;
552 const unsigned mask = 4 * sh - 1;
553 memset(levels, 0, stride * (4 * sw + 2));
554 DECODE_COEFS_CLASS(TX_CLASS_2D);
555 }
556 case TX_CLASS_H: {
557 const uint8_t (*const lo_ctx_offsets)[5] = NULL;
558 const ptrdiff_t stride = 16;
559 const unsigned shift = t_dim->lh + 2, shift2 = 0;
560 const unsigned mask = 4 * sh - 1;
561 memset(levels, 0, stride * (4 * sh + 2));
562 DECODE_COEFS_CLASS(TX_CLASS_H);
563 }
564 case TX_CLASS_V: {
565 const uint8_t (*const lo_ctx_offsets)[5] = NULL;
566 const ptrdiff_t stride = 16;
567 const unsigned shift = t_dim->lw + 2, shift2 = t_dim->lh + 2;
568 const unsigned mask = 4 * sw - 1;
569 memset(levels, 0, stride * (4 * sw + 2));
570 DECODE_COEFS_CLASS(TX_CLASS_V);
571 }
572 #undef DECODE_COEFS_CLASS
573 default: assert(0);
574 }
575 } else { // dc-only
576 int tok_br = dav1d_msac_decode_symbol_adapt4(&ts->msac, eob_cdf[0], 2);
577 dc_tok = 1 + tok_br;
578 if (dbg)
579 printf("Post-dc_lo_tok[%d][%d][%d][%d]: r=%d\n",
580 t_dim->ctx, chroma, 0, dc_tok, ts->msac.rng);
581 if (tok_br == 2) {
582 dc_tok = dav1d_msac_decode_hi_tok(&ts->msac, hi_cdf[0]);
583 if (dbg)
584 printf("Post-dc_hi_tok[%d][%d][0][%d]: r=%d\n",
585 imin(t_dim->ctx, 3), chroma, dc_tok, ts->msac.rng);
586 }
587 rc = 0;
588 }
589
590 // residual and sign
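/* dq_shift accounts for the extra right shift applied when dequantizing larger
 * transform sizes; cf_max is the coefficient clamp, 2^(bitdepth + 7) - 1. */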
591 const uint16_t *const dq_tbl = ts->dq[b->seg_id][plane];
592 const uint8_t *const qm_tbl = *txtp < IDTX ? f->qm[tx][plane] : NULL;
593 const int dq_shift = imax(0, t_dim->ctx - 2);
594 const unsigned cf_max = ~(~127U << (BITDEPTH == 8 ? 8 : f->cur.p.bpc));
595 unsigned cul_level, dc_sign_level;
596
597 if (!dc_tok) {
598 cul_level = 0;
599 dc_sign_level = 1 << 6;
600 if (qm_tbl) goto ac_qm;
601 goto ac_noqm;
602 }
603
604 const int dc_sign_ctx = get_dc_sign_ctx(tx, a, l);
605 uint16_t *const dc_sign_cdf = ts->cdf.coef.dc_sign[chroma][dc_sign_ctx];
606 const int dc_sign = dav1d_msac_decode_bool_adapt(&ts->msac, dc_sign_cdf);
607 if (dbg)
608 printf("Post-dc_sign[%d][%d][%d]: r=%d\n",
609 chroma, dc_sign_ctx, dc_sign, ts->msac.rng);
610
611 unsigned dc_dq = dq_tbl[0];
612 dc_sign_level = (dc_sign - 1) & (2 << 6);
613
614 if (qm_tbl) {
615 dc_dq = (dc_dq * qm_tbl[0] + 16) >> 5;
616
617 if (dc_tok == 15) {
618 dc_tok = read_golomb(&ts->msac) + 15;
619 if (dbg)
620 printf("Post-dc_residual[%d->%d]: r=%d\n",
621 dc_tok - 15, dc_tok, ts->msac.rng);
622
623 dc_tok &= 0xfffff;
624 dc_dq = (dc_dq * dc_tok) & 0xffffff;
625 } else {
626 dc_dq *= dc_tok;
627 assert(dc_dq <= 0xffffff);
628 }
629 cul_level = dc_tok;
630 dc_dq >>= dq_shift;
631 cf[0] = (coef) (umin(dc_dq - dc_sign, cf_max) ^ -dc_sign);
632
633 if (rc) ac_qm: {
634 const unsigned ac_dq = dq_tbl[1];
635 do {
636 const int sign = dav1d_msac_decode_bool_equi(&ts->msac);
637 if (dbg)
638 printf("Post-sign[%d=%d]: r=%d\n", rc, sign, ts->msac.rng);
639 const unsigned rc_tok = cf[rc];
640 unsigned tok, dq = (ac_dq * qm_tbl[rc] + 16) >> 5;
641
642 if (rc_tok >= (15 << 11)) {
643 tok = read_golomb(&ts->msac) + 15;
644 if (dbg)
645 printf("Post-residual[%d=%d->%d]: r=%d\n",
646 rc, tok - 15, tok, ts->msac.rng);
647
648 tok &= 0xfffff;
649 dq = (dq * tok) & 0xffffff;
650 } else {
651 tok = rc_tok >> 11;
652 dq *= tok;
653 assert(dq <= 0xffffff);
654 }
655 cul_level += tok;
656 dq >>= dq_shift;
657 cf[rc] = (coef) (umin(dq - sign, cf_max) ^ -sign);
658
659 rc = rc_tok & 0x3ff;
660 } while (rc);
661 }
662 } else {
663 // non-qmatrix is the common case and allows for additional optimizations
664 if (dc_tok == 15) {
665 dc_tok = read_golomb(&ts->msac) + 15;
666 if (dbg)
667 printf("Post-dc_residual[%d->%d]: r=%d\n",
668 dc_tok - 15, dc_tok, ts->msac.rng);
669
670 dc_tok &= 0xfffff;
671 dc_dq = ((dc_dq * dc_tok) & 0xffffff) >> dq_shift;
672 dc_dq = umin(dc_dq - dc_sign, cf_max);
673 } else {
674 dc_dq = ((dc_dq * dc_tok) >> dq_shift) - dc_sign;
675 assert(dc_dq <= cf_max);
676 }
677 cul_level = dc_tok;
678 cf[0] = (coef) (dc_dq ^ -dc_sign);
679
680 if (rc) ac_noqm: {
681 const unsigned ac_dq = dq_tbl[1];
682 do {
683 const int sign = dav1d_msac_decode_bool_equi(&ts->msac);
684 if (dbg)
685 printf("Post-sign[%d=%d]: r=%d\n", rc, sign, ts->msac.rng);
686 const unsigned rc_tok = cf[rc];
687 unsigned tok, dq;
688
689 // residual
690 if (rc_tok >= (15 << 11)) {
691 tok = read_golomb(&ts->msac) + 15;
692 if (dbg)
693 printf("Post-residual[%d=%d->%d]: r=%d\n",
694 rc, tok - 15, tok, ts->msac.rng);
695
696 // coefficient parsing, see 5.11.39
697 tok &= 0xfffff;
698
699 // dequant, see 7.12.3
700 dq = ((ac_dq * tok) & 0xffffff) >> dq_shift;
701 dq = umin(dq - sign, cf_max);
702 } else {
703 // cannot exceed cf_max, so we can avoid the clipping
704 tok = rc_tok >> 11;
705 dq = ((ac_dq * tok) >> dq_shift) - sign;
706 assert(dq <= cf_max);
707 }
708 cul_level += tok;
709 cf[rc] = (coef) (dq ^ -sign);
710
711 rc = rc_tok & 0x3ff; // next non-zero rc, zero if eob
712 } while (rc);
713 }
714 }
715
716 // context
717 *res_ctx = umin(cul_level, 63) | dc_sign_level;
718
719 return eob;
720 }
721
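/* Recursively walk the transform split tree of an inter luma block, decoding
 * coefficients at the leaves and, unless this is the coefficient-only frame
 * threading pass (pass 1), immediately adding the inverse transform to dst. */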
722 static void read_coef_tree(Dav1dTileContext *const t,
723 const enum BlockSize bs, const Av1Block *const b,
724 const enum RectTxfmSize ytx, const int depth,
725 const uint16_t *const tx_split,
726 const int x_off, const int y_off, pixel *dst)
727 {
728 const Dav1dFrameContext *const f = t->f;
729 Dav1dTileState *const ts = t->ts;
730 const Dav1dDSPContext *const dsp = f->dsp;
731 const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[ytx];
732 const int txw = t_dim->w, txh = t_dim->h;
733
734 /* y_off can be larger than 3 since lossless blocks use TX_4X4 but can't
735 * be split. Avoids an undefined left shift. */
736 if (depth < 2 && tx_split[depth] &&
737 tx_split[depth] & (1 << (y_off * 4 + x_off)))
738 {
739 const enum RectTxfmSize sub = t_dim->sub;
740 const TxfmInfo *const sub_t_dim = &dav1d_txfm_dimensions[sub];
741 const int txsw = sub_t_dim->w, txsh = sub_t_dim->h;
742
743 read_coef_tree(t, bs, b, sub, depth + 1, tx_split,
744 x_off * 2 + 0, y_off * 2 + 0, dst);
745 t->bx += txsw;
746 if (txw >= txh && t->bx < f->bw)
747 read_coef_tree(t, bs, b, sub, depth + 1, tx_split, x_off * 2 + 1,
748 y_off * 2 + 0, dst ? &dst[4 * txsw] : NULL);
749 t->bx -= txsw;
750 t->by += txsh;
751 if (txh >= txw && t->by < f->bh) {
752 if (dst)
753 dst += 4 * txsh * PXSTRIDE(f->cur.stride[0]);
754 read_coef_tree(t, bs, b, sub, depth + 1, tx_split,
755 x_off * 2 + 0, y_off * 2 + 1, dst);
756 t->bx += txsw;
757 if (txw >= txh && t->bx < f->bw)
758 read_coef_tree(t, bs, b, sub, depth + 1, tx_split, x_off * 2 + 1,
759 y_off * 2 + 1, dst ? &dst[4 * txsw] : NULL);
760 t->bx -= txsw;
761 }
762 t->by -= txsh;
763 } else {
764 const int bx4 = t->bx & 31, by4 = t->by & 31;
765 enum TxfmType txtp;
766 uint8_t cf_ctx;
767 int eob;
768 coef *cf;
769 struct CodedBlockInfo *cbi;
770
771 if (f->frame_thread.pass) {
772 assert(ts->frame_thread.cf);
773 cf = ts->frame_thread.cf;
774 ts->frame_thread.cf += imin(t_dim->w, 8) * imin(t_dim->h, 8) * 16;
775 cbi = &f->frame_thread.cbi[t->by * f->b4_stride + t->bx];
776 } else {
777 cf = bitfn(t->cf);
778 }
779 if (f->frame_thread.pass != 2) {
780 eob = decode_coefs(t, &t->a->lcoef[bx4], &t->l.lcoef[by4],
781 ytx, bs, b, 0, 0, cf, &txtp, &cf_ctx);
782 if (DEBUG_BLOCK_INFO)
783 printf("Post-y-cf-blk[tx=%d,txtp=%d,eob=%d]: r=%d\n",
784 ytx, txtp, eob, ts->msac.rng);
785 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
786 rep_macro(type, t->dir lcoef, off, mul * cf_ctx)
787 #define default_memset(dir, diridx, off, sz) \
788 memset(&t->dir lcoef[off], cf_ctx, sz)
789 case_set_upto16_with_default(imin(txh, f->bh - t->by), l., 1, by4);
790 case_set_upto16_with_default(imin(txw, f->bw - t->bx), a->, 0, bx4);
791 #undef default_memset
792 #undef set_ctx
793 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
794 for (int y = 0; y < txh; y++) { \
795 rep_macro(type, txtp_map, 0, mul * txtp); \
796 txtp_map += 32; \
797 }
798 uint8_t *txtp_map = &t->txtp_map[by4 * 32 + bx4];
799 case_set_upto16(txw,,,);
800 #undef set_ctx
801 if (f->frame_thread.pass == 1) {
802 cbi->eob[0] = eob;
803 cbi->txtp[0] = txtp;
804 }
805 } else {
806 eob = cbi->eob[0];
807 txtp = cbi->txtp[0];
808 }
809 if (!(f->frame_thread.pass & 1)) {
810 assert(dst);
811 if (eob >= 0) {
812 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
813 coef_dump(cf, imin(t_dim->h, 8) * 4, imin(t_dim->w, 8) * 4, 3, "dq");
814 dsp->itx.itxfm_add[ytx][txtp](dst, f->cur.stride[0], cf, eob
815 HIGHBD_CALL_SUFFIX);
816 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
817 hex_dump(dst, f->cur.stride[0], t_dim->w * 4, t_dim->h * 4, "recon");
818 }
819 }
820 }
821 }
822
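/* Frame-threading pass 1: parse all coefficients of a block ahead of
 * reconstruction, storing eob and transform type in the per-block
 * CodedBlockInfo and the coefficients in the tile's frame_thread.cf buffer
 * for the later reconstruction pass. */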
823 void bytefn(dav1d_read_coef_blocks)(Dav1dTileContext *const t,
824 const enum BlockSize bs, const Av1Block *const b)
825 {
826 const Dav1dFrameContext *const f = t->f;
827 const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
828 const int ss_hor = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
829 const int bx4 = t->bx & 31, by4 = t->by & 31;
830 const int cbx4 = bx4 >> ss_hor, cby4 = by4 >> ss_ver;
831 const uint8_t *const b_dim = dav1d_block_dimensions[bs];
832 const int bw4 = b_dim[0], bh4 = b_dim[1];
833 const int cbw4 = (bw4 + ss_hor) >> ss_hor, cbh4 = (bh4 + ss_ver) >> ss_ver;
834 const int has_chroma = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I400 &&
835 (bw4 > ss_hor || t->bx & 1) &&
836 (bh4 > ss_ver || t->by & 1);
837
838 if (b->skip) {
839 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
840 rep_macro(type, t->dir lcoef, off, mul * 0x40)
841 case_set(bh4, l., 1, by4);
842 case_set(bw4, a->, 0, bx4);
843 #undef set_ctx
844 if (has_chroma) {
845 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
846 rep_macro(type, t->dir ccoef[0], off, mul * 0x40); \
847 rep_macro(type, t->dir ccoef[1], off, mul * 0x40)
848 case_set(cbh4, l., 1, cby4);
849 case_set(cbw4, a->, 0, cbx4);
850 #undef set_ctx
851 }
852 return;
853 }
854
855 Dav1dTileState *const ts = t->ts;
856 const int w4 = imin(bw4, f->bw - t->bx), h4 = imin(bh4, f->bh - t->by);
857 const int cw4 = (w4 + ss_hor) >> ss_hor, ch4 = (h4 + ss_ver) >> ss_ver;
858 assert(f->frame_thread.pass == 1);
859 assert(!b->skip);
860 const TxfmInfo *const uv_t_dim = &dav1d_txfm_dimensions[b->uvtx];
861 const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[b->intra ? b->tx : b->max_ytx];
862 const uint16_t tx_split[2] = { b->tx_split0, b->tx_split1 };
863
864 for (int init_y = 0; init_y < h4; init_y += 16) {
865 const int sub_h4 = imin(h4, 16 + init_y);
866 for (int init_x = 0; init_x < w4; init_x += 16) {
867 const int sub_w4 = imin(w4, init_x + 16);
868 int y_off = !!init_y, y, x;
869 for (y = init_y, t->by += init_y; y < sub_h4;
870 y += t_dim->h, t->by += t_dim->h, y_off++)
871 {
872 struct CodedBlockInfo *const cbi =
873 &f->frame_thread.cbi[t->by * f->b4_stride];
874 int x_off = !!init_x;
875 for (x = init_x, t->bx += init_x; x < sub_w4;
876 x += t_dim->w, t->bx += t_dim->w, x_off++)
877 {
878 if (!b->intra) {
879 read_coef_tree(t, bs, b, b->max_ytx, 0, tx_split,
880 x_off, y_off, NULL);
881 } else {
882 uint8_t cf_ctx = 0x40;
883 enum TxfmType txtp;
884 const int eob = cbi[t->bx].eob[0] =
885 decode_coefs(t, &t->a->lcoef[bx4 + x],
886 &t->l.lcoef[by4 + y], b->tx, bs, b, 1,
887 0, ts->frame_thread.cf, &txtp, &cf_ctx);
888 if (DEBUG_BLOCK_INFO)
889 printf("Post-y-cf-blk[tx=%d,txtp=%d,eob=%d]: r=%d\n",
890 b->tx, txtp, eob, ts->msac.rng);
891 cbi[t->bx].txtp[0] = txtp;
892 ts->frame_thread.cf += imin(t_dim->w, 8) * imin(t_dim->h, 8) * 16;
893 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
894 rep_macro(type, t->dir lcoef, off, mul * cf_ctx)
895 #define default_memset(dir, diridx, off, sz) \
896 memset(&t->dir lcoef[off], cf_ctx, sz)
897 case_set_upto16_with_default(imin(t_dim->h, f->bh - t->by),
898 l., 1, by4 + y);
899 case_set_upto16_with_default(imin(t_dim->w, f->bw - t->bx),
900 a->, 0, bx4 + x);
901 #undef default_memset
902 #undef set_ctx
903 }
904 }
905 t->bx -= x;
906 }
907 t->by -= y;
908
909 if (!has_chroma) continue;
910
911 const int sub_ch4 = imin(ch4, (init_y + 16) >> ss_ver);
912 const int sub_cw4 = imin(cw4, (init_x + 16) >> ss_hor);
913 for (int pl = 0; pl < 2; pl++) {
914 for (y = init_y >> ss_ver, t->by += init_y; y < sub_ch4;
915 y += uv_t_dim->h, t->by += uv_t_dim->h << ss_ver)
916 {
917 struct CodedBlockInfo *const cbi =
918 &f->frame_thread.cbi[t->by * f->b4_stride];
919 for (x = init_x >> ss_hor, t->bx += init_x; x < sub_cw4;
920 x += uv_t_dim->w, t->bx += uv_t_dim->w << ss_hor)
921 {
922 uint8_t cf_ctx = 0x40;
923 enum TxfmType txtp;
924 if (!b->intra)
925 txtp = t->txtp_map[(by4 + (y << ss_ver)) * 32 +
926 bx4 + (x << ss_hor)];
927 const int eob = cbi[t->bx].eob[1 + pl] =
928 decode_coefs(t, &t->a->ccoef[pl][cbx4 + x],
929 &t->l.ccoef[pl][cby4 + y], b->uvtx, bs,
930 b, b->intra, 1 + pl, ts->frame_thread.cf,
931 &txtp, &cf_ctx);
932 if (DEBUG_BLOCK_INFO)
933 printf("Post-uv-cf-blk[pl=%d,tx=%d,"
934 "txtp=%d,eob=%d]: r=%d\n",
935 pl, b->uvtx, txtp, eob, ts->msac.rng);
936 cbi[t->bx].txtp[1 + pl] = txtp;
937 ts->frame_thread.cf += uv_t_dim->w * uv_t_dim->h * 16;
938 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
939 rep_macro(type, t->dir ccoef[pl], off, mul * cf_ctx)
940 #define default_memset(dir, diridx, off, sz) \
941 memset(&t->dir ccoef[pl][off], cf_ctx, sz)
942 case_set_upto16_with_default( \
943 imin(uv_t_dim->h, (f->bh - t->by + ss_ver) >> ss_ver),
944 l., 1, cby4 + y);
945 case_set_upto16_with_default( \
946 imin(uv_t_dim->w, (f->bw - t->bx + ss_hor) >> ss_hor),
947 a->, 0, cbx4 + x);
948 #undef default_memset
949 #undef set_ctx
950 }
951 t->bx -= x << ss_hor;
952 }
953 t->by -= y << ss_ver;
954 }
955 }
956 }
957 }
958
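/* Motion compensation into either a pixel buffer (dst8) or a 16-bit
 * intermediate used for compound prediction (dst16). Handles emulated edge
 * extension when the reference block crosses picture borders, and the scaled
 * path when the reference has a different resolution than the current frame. */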
959 static int mc(Dav1dTileContext *const t,
960 pixel *const dst8, int16_t *const dst16, const ptrdiff_t dst_stride,
961 const int bw4, const int bh4,
962 const int bx, const int by, const int pl,
963 const mv mv, const Dav1dThreadPicture *const refp, const int refidx,
964 const enum Filter2d filter_2d)
965 {
966 assert((dst8 != NULL) ^ (dst16 != NULL));
967 const Dav1dFrameContext *const f = t->f;
968 const int ss_ver = !!pl && f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
969 const int ss_hor = !!pl && f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
970 const int h_mul = 4 >> ss_hor, v_mul = 4 >> ss_ver;
971 const int mvx = mv.x, mvy = mv.y;
972 const int mx = mvx & (15 >> !ss_hor), my = mvy & (15 >> !ss_ver);
973 ptrdiff_t ref_stride = refp->p.stride[!!pl];
974 const pixel *ref;
975
976 if (refp->p.p.w == f->cur.p.w && refp->p.p.h == f->cur.p.h) {
977 const int dx = bx * h_mul + (mvx >> (3 + ss_hor));
978 const int dy = by * v_mul + (mvy >> (3 + ss_ver));
979 int w, h;
980
981 if (refp->p.data[0] != f->cur.data[0]) { // i.e. not for intrabc
982 if (dav1d_thread_picture_wait(refp, dy + bh4 * v_mul + !!my * 4,
983 PLANE_TYPE_Y + !!pl))
984 {
985 return -1;
986 }
987 w = (f->cur.p.w + ss_hor) >> ss_hor;
988 h = (f->cur.p.h + ss_ver) >> ss_ver;
989 } else {
990 w = f->bw * 4 >> ss_hor;
991 h = f->bh * 4 >> ss_ver;
992 }
993 if (dx < !!mx * 3 || dy < !!my * 3 ||
994 dx + bw4 * h_mul + !!mx * 4 > w ||
995 dy + bh4 * v_mul + !!my * 4 > h)
996 {
997 pixel *const emu_edge_buf = bitfn(t->scratch.emu_edge);
998 f->dsp->mc.emu_edge(bw4 * h_mul + !!mx * 7, bh4 * v_mul + !!my * 7,
999 w, h, dx - !!mx * 3, dy - !!my * 3,
1000 emu_edge_buf, 192 * sizeof(pixel),
1001 refp->p.data[pl], ref_stride);
1002 ref = &emu_edge_buf[192 * !!my * 3 + !!mx * 3];
1003 ref_stride = 192 * sizeof(pixel);
1004 } else {
1005 ref = ((pixel *) refp->p.data[pl]) + PXSTRIDE(ref_stride) * dy + dx;
1006 }
1007
1008 if (dst8 != NULL) {
1009 f->dsp->mc.mc[filter_2d](dst8, dst_stride, ref, ref_stride, bw4 * h_mul,
1010 bh4 * v_mul, mx << !ss_hor, my << !ss_ver
1011 HIGHBD_CALL_SUFFIX);
1012 } else {
1013 f->dsp->mc.mct[filter_2d](dst16, ref, ref_stride, bw4 * h_mul,
1014 bh4 * v_mul, mx << !ss_hor, my << !ss_ver
1015 HIGHBD_CALL_SUFFIX);
1016 }
1017 } else {
1018 assert(refp != &f->sr_cur);
1019
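/* Scaled reference (a rough summary): positions are computed in 1/1024-pel
 * units of the reference grid using the scale factors (0x4000 = unity); the
 * integer part selects the source block, the low 10 bits are the subpel phase
 * passed to the scaled MC functions together with the per-pixel step. */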
1020 const int orig_pos_y = (by * v_mul << 4) + mvy * (1 << !ss_ver);
1021 const int orig_pos_x = (bx * h_mul << 4) + mvx * (1 << !ss_hor);
1022 #define scale_mv(res, val, scale) do { \
1023 const int64_t tmp = (int64_t)(val) * scale + (scale - 0x4000) * 8; \
1024 res = apply_sign64((int) ((llabs(tmp) + 128) >> 8), tmp) + 32; \
1025 } while (0)
1026 int pos_y, pos_x;
1027 scale_mv(pos_x, orig_pos_x, f->svc[refidx][0].scale);
1028 scale_mv(pos_y, orig_pos_y, f->svc[refidx][1].scale);
1029 #undef scale_mv
1030 const int left = pos_x >> 10;
1031 const int top = pos_y >> 10;
1032 const int right =
1033 ((pos_x + (bw4 * h_mul - 1) * f->svc[refidx][0].step) >> 10) + 1;
1034 const int bottom =
1035 ((pos_y + (bh4 * v_mul - 1) * f->svc[refidx][1].step) >> 10) + 1;
1036
1037 if (dav1d_thread_picture_wait(refp, bottom + 4, PLANE_TYPE_Y + !!pl))
1038 return -1;
1039 if (DEBUG_BLOCK_INFO)
1040 printf("Off %dx%d [%d,%d,%d], size %dx%d [%d,%d]\n",
1041 left, top, orig_pos_x, f->svc[refidx][0].scale, refidx,
1042 right-left, bottom-top,
1043 f->svc[refidx][0].step, f->svc[refidx][1].step);
1044
1045 const int w = (refp->p.p.w + ss_hor) >> ss_hor;
1046 const int h = (refp->p.p.h + ss_ver) >> ss_ver;
1047 if (left < 3 || top < 3 || right + 4 > w || bottom + 4 > h) {
1048 pixel *const emu_edge_buf = bitfn(t->scratch.emu_edge);
1049 f->dsp->mc.emu_edge(right - left + 7, bottom - top + 7,
1050 w, h, left - 3, top - 3,
1051 emu_edge_buf, 320 * sizeof(pixel),
1052 refp->p.data[pl], ref_stride);
1053 ref = &emu_edge_buf[320 * 3 + 3];
1054 ref_stride = 320 * sizeof(pixel);
1055 if (DEBUG_BLOCK_INFO) printf("Emu\n");
1056 } else {
1057 ref = ((pixel *) refp->p.data[pl]) + PXSTRIDE(ref_stride) * top + left;
1058 }
1059
1060 if (dst8 != NULL) {
1061 f->dsp->mc.mc_scaled[filter_2d](dst8, dst_stride, ref, ref_stride,
1062 bw4 * h_mul, bh4 * v_mul,
1063 pos_x & 0x3ff, pos_y & 0x3ff,
1064 f->svc[refidx][0].step,
1065 f->svc[refidx][1].step
1066 HIGHBD_CALL_SUFFIX);
1067 } else {
1068 f->dsp->mc.mct_scaled[filter_2d](dst16, ref, ref_stride,
1069 bw4 * h_mul, bh4 * v_mul,
1070 pos_x & 0x3ff, pos_y & 0x3ff,
1071 f->svc[refidx][0].step,
1072 f->svc[refidx][1].step
1073 HIGHBD_CALL_SUFFIX);
1074 }
1075 }
1076
1077 return 0;
1078 }
1079
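/* Overlapped block motion compensation: re-predict narrow strips along the top
 * and left block edges from up to 4 inter-coded neighbours above and to the
 * left, and blend them into the current prediction (blend_h / blend_v). */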
1080 static int obmc(Dav1dTileContext *const t,
1081 pixel *const dst, const ptrdiff_t dst_stride,
1082 const uint8_t *const b_dim, const int pl,
1083 const int bx4, const int by4, const int w4, const int h4)
1084 {
1085 assert(!(t->bx & 1) && !(t->by & 1));
1086 const Dav1dFrameContext *const f = t->f;
1087 /*const*/ refmvs_block **r = &t->rt.r[(t->by & 31) + 5];
1088 pixel *const lap = bitfn(t->scratch.lap);
1089 const int ss_ver = !!pl && f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
1090 const int ss_hor = !!pl && f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
1091 const int h_mul = 4 >> ss_hor, v_mul = 4 >> ss_ver;
1092 int res;
1093
1094 if (t->by > t->ts->tiling.row_start &&
1095 (!pl || b_dim[0] * h_mul + b_dim[1] * v_mul >= 16))
1096 {
1097 for (int i = 0, x = 0; x < w4 && i < imin(b_dim[2], 4); ) {
1098 // only odd blocks are considered for overlap handling, hence +1
1099 const refmvs_block *const a_r = &r[-1][t->bx + x + 1];
1100 const uint8_t *const a_b_dim = dav1d_block_dimensions[a_r->bs];
1101
1102 if (a_r->ref.ref[0] > 0) {
1103 const int ow4 = iclip(a_b_dim[0], 2, b_dim[0]);
1104 const int oh4 = imin(b_dim[1], 16) >> 1;
1105 res = mc(t, lap, NULL, ow4 * h_mul * sizeof(pixel), ow4, (oh4 * 3 + 3) >> 2,
1106 t->bx + x, t->by, pl, a_r->mv.mv[0],
1107 &f->refp[a_r->ref.ref[0] - 1], a_r->ref.ref[0] - 1,
1108 dav1d_filter_2d[t->a->filter[1][bx4 + x + 1]][t->a->filter[0][bx4 + x + 1]]);
1109 if (res) return res;
1110 f->dsp->mc.blend_h(&dst[x * h_mul], dst_stride, lap,
1111 h_mul * ow4, v_mul * oh4);
1112 i++;
1113 }
1114 x += imax(a_b_dim[0], 2);
1115 }
1116 }
1117
1118 if (t->bx > t->ts->tiling.col_start)
1119 for (int i = 0, y = 0; y < h4 && i < imin(b_dim[3], 4); ) {
1120 // only odd blocks are considered for overlap handling, hence +1
1121 const refmvs_block *const l_r = &r[y + 1][t->bx - 1];
1122 const uint8_t *const l_b_dim = dav1d_block_dimensions[l_r->bs];
1123
1124 if (l_r->ref.ref[0] > 0) {
1125 const int ow4 = imin(b_dim[0], 16) >> 1;
1126 const int oh4 = iclip(l_b_dim[1], 2, b_dim[1]);
1127 res = mc(t, lap, NULL, h_mul * ow4 * sizeof(pixel), ow4, oh4,
1128 t->bx, t->by + y, pl, l_r->mv.mv[0],
1129 &f->refp[l_r->ref.ref[0] - 1], l_r->ref.ref[0] - 1,
1130 dav1d_filter_2d[t->l.filter[1][by4 + y + 1]][t->l.filter[0][by4 + y + 1]]);
1131 if (res) return res;
1132 f->dsp->mc.blend_v(&dst[y * v_mul * PXSTRIDE(dst_stride)],
1133 dst_stride, lap, h_mul * ow4, v_mul * oh4);
1134 i++;
1135 }
1136 y += imax(l_b_dim[1], 2);
1137 }
1138 return 0;
1139 }
1140
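/* Warped (affine) motion compensation, processed in 8x8 output blocks: the
 * affine matrix maps each block's centre to a reference position (dx/dy
 * integer part, mx/my subpel phase adjusted by the alpha/beta/gamma/delta
 * shear parameters of the warp filter), with edge emulation for the 15x15
 * source region when it crosses the picture border. */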
1141 static int warp_affine(Dav1dTileContext *const t,
1142 pixel *dst8, int16_t *dst16, const ptrdiff_t dstride,
1143 const uint8_t *const b_dim, const int pl,
1144 const Dav1dThreadPicture *const refp,
1145 const Dav1dWarpedMotionParams *const wmp)
1146 {
1147 assert((dst8 != NULL) ^ (dst16 != NULL));
1148 const Dav1dFrameContext *const f = t->f;
1149 const Dav1dDSPContext *const dsp = f->dsp;
1150 const int ss_ver = !!pl && f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
1151 const int ss_hor = !!pl && f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
1152 const int h_mul = 4 >> ss_hor, v_mul = 4 >> ss_ver;
1153 assert(!((b_dim[0] * h_mul) & 7) && !((b_dim[1] * v_mul) & 7));
1154 const int32_t *const mat = wmp->matrix;
1155 const int width = (refp->p.p.w + ss_hor) >> ss_hor;
1156 const int height = (refp->p.p.h + ss_ver) >> ss_ver;
1157
1158 for (int y = 0; y < b_dim[1] * v_mul; y += 8) {
1159 const int src_y = t->by * 4 + ((y + 4) << ss_ver);
1160 const int64_t mat3_y = (int64_t) mat[3] * src_y + mat[0];
1161 const int64_t mat5_y = (int64_t) mat[5] * src_y + mat[1];
1162 for (int x = 0; x < b_dim[0] * h_mul; x += 8) {
1163 // calculate transformation relative to center of 8x8 block in
1164 // luma pixel units
1165 const int src_x = t->bx * 4 + ((x + 4) << ss_hor);
1166 const int64_t mvx = ((int64_t) mat[2] * src_x + mat3_y) >> ss_hor;
1167 const int64_t mvy = ((int64_t) mat[4] * src_x + mat5_y) >> ss_ver;
1168
1169 const int dx = (int) (mvx >> 16) - 4;
1170 const int mx = (((int) mvx & 0xffff) - wmp->u.p.alpha * 4 -
1171 wmp->u.p.beta * 7) & ~0x3f;
1172 const int dy = (int) (mvy >> 16) - 4;
1173 const int my = (((int) mvy & 0xffff) - wmp->u.p.gamma * 4 -
1174 wmp->u.p.delta * 4) & ~0x3f;
1175
1176 const pixel *ref_ptr;
1177 ptrdiff_t ref_stride = refp->p.stride[!!pl];
1178
1179 if (dav1d_thread_picture_wait(refp, dy + 4 + 8,
1180 PLANE_TYPE_Y + !!pl))
1181 {
1182 return -1;
1183 }
1184 if (dx < 3 || dx + 8 + 4 > width || dy < 3 || dy + 8 + 4 > height) {
1185 pixel *const emu_edge_buf = bitfn(t->scratch.emu_edge);
1186 f->dsp->mc.emu_edge(15, 15, width, height, dx - 3, dy - 3,
1187 emu_edge_buf, 32 * sizeof(pixel),
1188 refp->p.data[pl], ref_stride);
1189 ref_ptr = &emu_edge_buf[32 * 3 + 3];
1190 ref_stride = 32 * sizeof(pixel);
1191 } else {
1192 ref_ptr = ((pixel *) refp->p.data[pl]) + PXSTRIDE(ref_stride) * dy + dx;
1193 }
1194 if (dst16 != NULL)
1195 dsp->mc.warp8x8t(&dst16[x], dstride, ref_ptr, ref_stride,
1196 wmp->u.abcd, mx, my HIGHBD_CALL_SUFFIX);
1197 else
1198 dsp->mc.warp8x8(&dst8[x], dstride, ref_ptr, ref_stride,
1199 wmp->u.abcd, mx, my HIGHBD_CALL_SUFFIX);
1200 }
1201 if (dst8) dst8 += 8 * PXSTRIDE(dstride);
1202 else dst16 += 8 * dstride;
1203 }
1204 return 0;
1205 }
1206
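/* Intra block reconstruction: for each 64x64 chunk, run palette / directional /
 * DC / CfL prediction for luma and chroma, decode the coefficients (or re-load
 * them from the frame-threading pass), and add the inverse transform on top of
 * the prediction. */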
1207 void bytefn(dav1d_recon_b_intra)(Dav1dTileContext *const t, const enum BlockSize bs,
1208 const enum EdgeFlags intra_edge_flags,
1209 const Av1Block *const b)
1210 {
1211 Dav1dTileState *const ts = t->ts;
1212 const Dav1dFrameContext *const f = t->f;
1213 const Dav1dDSPContext *const dsp = f->dsp;
1214 const int bx4 = t->bx & 31, by4 = t->by & 31;
1215 const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
1216 const int ss_hor = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
1217 const int cbx4 = bx4 >> ss_hor, cby4 = by4 >> ss_ver;
1218 const uint8_t *const b_dim = dav1d_block_dimensions[bs];
1219 const int bw4 = b_dim[0], bh4 = b_dim[1];
1220 const int w4 = imin(bw4, f->bw - t->bx), h4 = imin(bh4, f->bh - t->by);
1221 const int cw4 = (w4 + ss_hor) >> ss_hor, ch4 = (h4 + ss_ver) >> ss_ver;
1222 const int has_chroma = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I400 &&
1223 (bw4 > ss_hor || t->bx & 1) &&
1224 (bh4 > ss_ver || t->by & 1);
1225 const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[b->tx];
1226 const TxfmInfo *const uv_t_dim = &dav1d_txfm_dimensions[b->uvtx];
1227
1228 // coefficient coding
1229 pixel *const edge = bitfn(t->scratch.edge) + 128;
1230 const int cbw4 = (bw4 + ss_hor) >> ss_hor, cbh4 = (bh4 + ss_ver) >> ss_ver;
1231
1232 const int intra_edge_filter_flag = f->seq_hdr->intra_edge_filter << 10;
1233
1234 for (int init_y = 0; init_y < h4; init_y += 16) {
1235 const int sub_h4 = imin(h4, 16 + init_y);
1236 const int sub_ch4 = imin(ch4, (init_y + 16) >> ss_ver);
1237 for (int init_x = 0; init_x < w4; init_x += 16) {
1238 if (b->pal_sz[0]) {
1239 pixel *dst = ((pixel *) f->cur.data[0]) +
1240 4 * (t->by * PXSTRIDE(f->cur.stride[0]) + t->bx);
1241 const uint8_t *pal_idx;
1242 if (f->frame_thread.pass) {
1243 assert(ts->frame_thread.pal_idx);
1244 pal_idx = ts->frame_thread.pal_idx;
1245 ts->frame_thread.pal_idx += bw4 * bh4 * 16;
1246 } else {
1247 pal_idx = t->scratch.pal_idx;
1248 }
1249 const uint16_t *const pal = f->frame_thread.pass ?
1250 f->frame_thread.pal[((t->by >> 1) + (t->bx & 1)) * (f->b4_stride >> 1) +
1251 ((t->bx >> 1) + (t->by & 1))][0] : t->scratch.pal[0];
1252 f->dsp->ipred.pal_pred(dst, f->cur.stride[0], pal,
1253 pal_idx, bw4 * 4, bh4 * 4);
1254 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
1255 hex_dump(dst, PXSTRIDE(f->cur.stride[0]),
1256 bw4 * 4, bh4 * 4, "y-pal-pred");
1257 }
1258
1259 const int intra_flags = (sm_flag(t->a, bx4) |
1260 sm_flag(&t->l, by4) |
1261 intra_edge_filter_flag);
1262 const int sb_has_tr = init_x + 16 < w4 ? 1 : init_y ? 0 :
1263 intra_edge_flags & EDGE_I444_TOP_HAS_RIGHT;
1264 const int sb_has_bl = init_x ? 0 : init_y + 16 < h4 ? 1 :
1265 intra_edge_flags & EDGE_I444_LEFT_HAS_BOTTOM;
1266 int y, x;
1267 const int sub_w4 = imin(w4, init_x + 16);
1268 for (y = init_y, t->by += init_y; y < sub_h4;
1269 y += t_dim->h, t->by += t_dim->h)
1270 {
1271 pixel *dst = ((pixel *) f->cur.data[0]) +
1272 4 * (t->by * PXSTRIDE(f->cur.stride[0]) +
1273 t->bx + init_x);
1274 for (x = init_x, t->bx += init_x; x < sub_w4;
1275 x += t_dim->w, t->bx += t_dim->w)
1276 {
1277 if (b->pal_sz[0]) goto skip_y_pred;
1278
1279 int angle = b->y_angle;
1280 const enum EdgeFlags edge_flags =
1281 (((y > init_y || !sb_has_tr) && (x + t_dim->w >= sub_w4)) ?
1282 0 : EDGE_I444_TOP_HAS_RIGHT) |
1283 ((x > init_x || (!sb_has_bl && y + t_dim->h >= sub_h4)) ?
1284 0 : EDGE_I444_LEFT_HAS_BOTTOM);
1285 const pixel *top_sb_edge = NULL;
1286 if (!(t->by & (f->sb_step - 1))) {
1287 top_sb_edge = f->ipred_edge[0];
1288 const int sby = t->by >> f->sb_shift;
1289 top_sb_edge += f->sb128w * 128 * (sby - 1);
1290 }
1291 const enum IntraPredMode m =
1292 bytefn(dav1d_prepare_intra_edges)(t->bx,
1293 t->bx > ts->tiling.col_start,
1294 t->by,
1295 t->by > ts->tiling.row_start,
1296 ts->tiling.col_end,
1297 ts->tiling.row_end,
1298 edge_flags, dst,
1299 f->cur.stride[0], top_sb_edge,
1300 b->y_mode, &angle,
1301 t_dim->w, t_dim->h,
1302 f->seq_hdr->intra_edge_filter,
1303 edge HIGHBD_CALL_SUFFIX);
1304 dsp->ipred.intra_pred[m](dst, f->cur.stride[0], edge,
1305 t_dim->w * 4, t_dim->h * 4,
1306 angle | intra_flags,
1307 4 * f->bw - 4 * t->bx,
1308 4 * f->bh - 4 * t->by
1309 HIGHBD_CALL_SUFFIX);
1310
1311 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS) {
1312 hex_dump(edge - t_dim->h * 4, t_dim->h * 4,
1313 t_dim->h * 4, 2, "l");
1314 hex_dump(edge, 0, 1, 1, "tl");
1315 hex_dump(edge + 1, t_dim->w * 4,
1316 t_dim->w * 4, 2, "t");
1317 hex_dump(dst, f->cur.stride[0],
1318 t_dim->w * 4, t_dim->h * 4, "y-intra-pred");
1319 }
1320
1321 skip_y_pred: {}
1322 if (!b->skip) {
1323 coef *cf;
1324 int eob;
1325 enum TxfmType txtp;
1326 if (f->frame_thread.pass) {
1327 cf = ts->frame_thread.cf;
1328 ts->frame_thread.cf += imin(t_dim->w, 8) * imin(t_dim->h, 8) * 16;
1329 const struct CodedBlockInfo *const cbi =
1330 &f->frame_thread.cbi[t->by * f->b4_stride + t->bx];
1331 eob = cbi->eob[0];
1332 txtp = cbi->txtp[0];
1333 } else {
1334 uint8_t cf_ctx;
1335 cf = bitfn(t->cf);
1336 eob = decode_coefs(t, &t->a->lcoef[bx4 + x],
1337 &t->l.lcoef[by4 + y], b->tx, bs,
1338 b, 1, 0, cf, &txtp, &cf_ctx);
1339 if (DEBUG_BLOCK_INFO)
1340 printf("Post-y-cf-blk[tx=%d,txtp=%d,eob=%d]: r=%d\n",
1341 b->tx, txtp, eob, ts->msac.rng);
1342 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
1343 rep_macro(type, t->dir lcoef, off, mul * cf_ctx)
1344 #define default_memset(dir, diridx, off, sz) \
1345 memset(&t->dir lcoef[off], cf_ctx, sz)
1346 case_set_upto16_with_default(imin(t_dim->h, f->bh - t->by), \
1347 l., 1, by4 + y);
1348 case_set_upto16_with_default(imin(t_dim->w, f->bw - t->bx), \
1349 a->, 0, bx4 + x);
1350 #undef default_memset
1351 #undef set_ctx
1352 }
1353 if (eob >= 0) {
1354 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
1355 coef_dump(cf, imin(t_dim->h, 8) * 4,
1356 imin(t_dim->w, 8) * 4, 3, "dq");
1357 dsp->itx.itxfm_add[b->tx]
1358 [txtp](dst,
1359 f->cur.stride[0],
1360 cf, eob HIGHBD_CALL_SUFFIX);
1361 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
1362 hex_dump(dst, f->cur.stride[0],
1363 t_dim->w * 4, t_dim->h * 4, "recon");
1364 }
1365 } else if (!f->frame_thread.pass) {
1366 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
1367 rep_macro(type, t->dir lcoef, off, mul * 0x40)
1368 case_set_upto16(t_dim->h, l., 1, by4 + y);
1369 case_set_upto16(t_dim->w, a->, 0, bx4 + x);
1370 #undef set_ctx
1371 }
1372 dst += 4 * t_dim->w;
1373 }
1374 t->bx -= x;
1375 }
1376 t->by -= y;
1377
1378 if (!has_chroma) continue;
1379
1380 const ptrdiff_t stride = f->cur.stride[1];
1381
1382 if (b->uv_mode == CFL_PRED) {
1383 assert(!init_x && !init_y);
1384
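/* Chroma-from-luma: cfl_ac subsamples the just-reconstructed luma block to
 * chroma resolution and removes its average, then cfl_pred forms a DC
 * prediction plus the cfl_alpha-scaled AC contribution for each chroma plane. */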
1385 int16_t *const ac = t->scratch.ac;
1386 pixel *y_src = ((pixel *) f->cur.data[0]) + 4 * (t->bx & ~ss_hor) +
1387 4 * (t->by & ~ss_ver) * PXSTRIDE(f->cur.stride[0]);
1388 const ptrdiff_t uv_off = 4 * ((t->bx >> ss_hor) +
1389 (t->by >> ss_ver) * PXSTRIDE(stride));
1390 pixel *const uv_dst[2] = { ((pixel *) f->cur.data[1]) + uv_off,
1391 ((pixel *) f->cur.data[2]) + uv_off };
1392
1393 const int furthest_r =
1394 ((cw4 << ss_hor) + t_dim->w - 1) & ~(t_dim->w - 1);
1395 const int furthest_b =
1396 ((ch4 << ss_ver) + t_dim->h - 1) & ~(t_dim->h - 1);
1397 dsp->ipred.cfl_ac[f->cur.p.layout - 1](ac, y_src, f->cur.stride[0],
1398 cbw4 - (furthest_r >> ss_hor),
1399 cbh4 - (furthest_b >> ss_ver),
1400 cbw4 * 4, cbh4 * 4);
1401 for (int pl = 0; pl < 2; pl++) {
1402 if (!b->cfl_alpha[pl]) continue;
1403 int angle = 0;
1404 const pixel *top_sb_edge = NULL;
1405 if (!((t->by & ~ss_ver) & (f->sb_step - 1))) {
1406 top_sb_edge = f->ipred_edge[pl + 1];
1407 const int sby = t->by >> f->sb_shift;
1408 top_sb_edge += f->sb128w * 128 * (sby - 1);
1409 }
1410 const int xpos = t->bx >> ss_hor, ypos = t->by >> ss_ver;
1411 const int xstart = ts->tiling.col_start >> ss_hor;
1412 const int ystart = ts->tiling.row_start >> ss_ver;
1413 const enum IntraPredMode m =
1414 bytefn(dav1d_prepare_intra_edges)(xpos, xpos > xstart,
1415 ypos, ypos > ystart,
1416 ts->tiling.col_end >> ss_hor,
1417 ts->tiling.row_end >> ss_ver,
1418 0, uv_dst[pl], stride,
1419 top_sb_edge, DC_PRED, &angle,
1420 uv_t_dim->w, uv_t_dim->h, 0,
1421 edge HIGHBD_CALL_SUFFIX);
1422 dsp->ipred.cfl_pred[m](uv_dst[pl], stride, edge,
1423 uv_t_dim->w * 4,
1424 uv_t_dim->h * 4,
1425 ac, b->cfl_alpha[pl]
1426 HIGHBD_CALL_SUFFIX);
1427 }
1428 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS) {
1429 ac_dump(ac, 4*cbw4, 4*cbh4, "ac");
1430 hex_dump(uv_dst[0], stride, cbw4 * 4, cbh4 * 4, "u-cfl-pred");
1431 hex_dump(uv_dst[1], stride, cbw4 * 4, cbh4 * 4, "v-cfl-pred");
1432 }
1433 } else if (b->pal_sz[1]) {
1434 const ptrdiff_t uv_dstoff = 4 * ((t->bx >> ss_hor) +
1435 (t->by >> ss_ver) * PXSTRIDE(f->cur.stride[1]));
1436 const uint16_t (*pal)[8];
1437 const uint8_t *pal_idx;
1438 if (f->frame_thread.pass) {
1439 assert(ts->frame_thread.pal_idx);
1440 pal = f->frame_thread.pal[((t->by >> 1) + (t->bx & 1)) * (f->b4_stride >> 1) +
1441 ((t->bx >> 1) + (t->by & 1))];
1442 pal_idx = ts->frame_thread.pal_idx;
1443 ts->frame_thread.pal_idx += cbw4 * cbh4 * 16;
1444 } else {
1445 pal = t->scratch.pal;
1446 pal_idx = &t->scratch.pal_idx[bw4 * bh4 * 16];
1447 }
1448
1449 f->dsp->ipred.pal_pred(((pixel *) f->cur.data[1]) + uv_dstoff,
1450 f->cur.stride[1], pal[1],
1451 pal_idx, cbw4 * 4, cbh4 * 4);
1452 f->dsp->ipred.pal_pred(((pixel *) f->cur.data[2]) + uv_dstoff,
1453 f->cur.stride[1], pal[2],
1454 pal_idx, cbw4 * 4, cbh4 * 4);
1455 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS) {
1456 hex_dump(((pixel *) f->cur.data[1]) + uv_dstoff,
1457 PXSTRIDE(f->cur.stride[1]),
1458 cbw4 * 4, cbh4 * 4, "u-pal-pred");
1459 hex_dump(((pixel *) f->cur.data[2]) + uv_dstoff,
1460 PXSTRIDE(f->cur.stride[1]),
1461 cbw4 * 4, cbh4 * 4, "v-pal-pred");
1462 }
1463 }
1464
1465 const int sm_uv_fl = sm_uv_flag(t->a, cbx4) |
1466 sm_uv_flag(&t->l, cby4);
1467 const int uv_sb_has_tr =
1468 ((init_x + 16) >> ss_hor) < cw4 ? 1 : init_y ? 0 :
1469 intra_edge_flags & (EDGE_I420_TOP_HAS_RIGHT >> (f->cur.p.layout - 1));
1470 const int uv_sb_has_bl =
1471 init_x ? 0 : ((init_y + 16) >> ss_ver) < ch4 ? 1 :
1472 intra_edge_flags & (EDGE_I420_LEFT_HAS_BOTTOM >> (f->cur.p.layout - 1));
1473 const int sub_cw4 = imin(cw4, (init_x + 16) >> ss_hor);
1474 for (int pl = 0; pl < 2; pl++) {
1475 for (y = init_y >> ss_ver, t->by += init_y; y < sub_ch4;
1476 y += uv_t_dim->h, t->by += uv_t_dim->h << ss_ver)
1477 {
1478 pixel *dst = ((pixel *) f->cur.data[1 + pl]) +
1479 4 * ((t->by >> ss_ver) * PXSTRIDE(stride) +
1480 ((t->bx + init_x) >> ss_hor));
1481 for (x = init_x >> ss_hor, t->bx += init_x; x < sub_cw4;
1482 x += uv_t_dim->w, t->bx += uv_t_dim->w << ss_hor)
1483 {
1484 if ((b->uv_mode == CFL_PRED && b->cfl_alpha[pl]) ||
1485 b->pal_sz[1])
1486 {
1487 goto skip_uv_pred;
1488 }
1489
1490 int angle = b->uv_angle;
1491 // this probably looks weird because we're using
1492 // luma flags in a chroma loop, but that's because
1493 // prepare_intra_edges() expects luma flags as input
1494 const enum EdgeFlags edge_flags =
1495 (((y > (init_y >> ss_ver) || !uv_sb_has_tr) &&
1496 (x + uv_t_dim->w >= sub_cw4)) ?
1497 0 : EDGE_I444_TOP_HAS_RIGHT) |
1498 ((x > (init_x >> ss_hor) ||
1499 (!uv_sb_has_bl && y + uv_t_dim->h >= sub_ch4)) ?
1500 0 : EDGE_I444_LEFT_HAS_BOTTOM);
1501 const pixel *top_sb_edge = NULL;
1502 if (!((t->by & ~ss_ver) & (f->sb_step - 1))) {
1503 top_sb_edge = f->ipred_edge[1 + pl];
1504 const int sby = t->by >> f->sb_shift;
1505 top_sb_edge += f->sb128w * 128 * (sby - 1);
1506 }
1507 const enum IntraPredMode uv_mode =
1508 b->uv_mode == CFL_PRED ? DC_PRED : b->uv_mode;
1509 const int xpos = t->bx >> ss_hor, ypos = t->by >> ss_ver;
1510 const int xstart = ts->tiling.col_start >> ss_hor;
1511 const int ystart = ts->tiling.row_start >> ss_ver;
1512 const enum IntraPredMode m =
1513 bytefn(dav1d_prepare_intra_edges)(xpos, xpos > xstart,
1514 ypos, ypos > ystart,
1515 ts->tiling.col_end >> ss_hor,
1516 ts->tiling.row_end >> ss_ver,
1517 edge_flags, dst, stride,
1518 top_sb_edge, uv_mode,
1519 &angle, uv_t_dim->w,
1520 uv_t_dim->h,
1521 f->seq_hdr->intra_edge_filter,
1522 edge HIGHBD_CALL_SUFFIX);
1523 angle |= intra_edge_filter_flag;
1524 dsp->ipred.intra_pred[m](dst, stride, edge,
1525 uv_t_dim->w * 4,
1526 uv_t_dim->h * 4,
1527 angle | sm_uv_fl,
1528 (4 * f->bw + ss_hor -
1529 4 * (t->bx & ~ss_hor)) >> ss_hor,
1530 (4 * f->bh + ss_ver -
1531 4 * (t->by & ~ss_ver)) >> ss_ver
1532 HIGHBD_CALL_SUFFIX);
1533 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS) {
1534 hex_dump(edge - uv_t_dim->h * 4, uv_t_dim->h * 4,
1535 uv_t_dim->h * 4, 2, "l");
1536 hex_dump(edge, 0, 1, 1, "tl");
1537 hex_dump(edge + 1, uv_t_dim->w * 4,
1538 uv_t_dim->w * 4, 2, "t");
1539 hex_dump(dst, stride, uv_t_dim->w * 4,
1540 uv_t_dim->h * 4, pl ? "v-intra-pred" : "u-intra-pred");
1541 }
1542
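                    // CfL- and palette-predicted blocks jump here: their
                    // prediction was already produced above, but the residual
                    // coefficients still have to be decoded and added.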
1543 skip_uv_pred: {}
1544 if (!b->skip) {
1545 enum TxfmType txtp;
1546 int eob;
1547 coef *cf;
1548 if (f->frame_thread.pass) {
1549 cf = ts->frame_thread.cf;
1550 ts->frame_thread.cf += uv_t_dim->w * uv_t_dim->h * 16;
1551 const struct CodedBlockInfo *const cbi =
1552 &f->frame_thread.cbi[t->by * f->b4_stride + t->bx];
1553 eob = cbi->eob[pl + 1];
1554 txtp = cbi->txtp[pl + 1];
1555 } else {
1556 uint8_t cf_ctx;
1557 cf = bitfn(t->cf);
1558 eob = decode_coefs(t, &t->a->ccoef[pl][cbx4 + x],
1559 &t->l.ccoef[pl][cby4 + y],
1560 b->uvtx, bs, b, 1, 1 + pl, cf,
1561 &txtp, &cf_ctx);
1562 if (DEBUG_BLOCK_INFO)
1563 printf("Post-uv-cf-blk[pl=%d,tx=%d,"
1564 "txtp=%d,eob=%d]: r=%d [x=%d,cbx4=%d]\n",
1565 pl, b->uvtx, txtp, eob, ts->msac.rng, x, cbx4);
1566 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
1567 rep_macro(type, t->dir ccoef[pl], off, mul * cf_ctx)
1568 #define default_memset(dir, diridx, off, sz) \
1569 memset(&t->dir ccoef[pl][off], cf_ctx, sz)
1570 case_set_upto16_with_default( \
1571 imin(uv_t_dim->h, (f->bh - t->by + ss_ver) >> ss_ver),
1572 l., 1, cby4 + y);
1573 case_set_upto16_with_default( \
1574 imin(uv_t_dim->w, (f->bw - t->bx + ss_hor) >> ss_hor),
1575 a->, 0, cbx4 + x);
1576 #undef default_memset
1577 #undef set_ctx
1578 }
1579 if (eob >= 0) {
1580 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
1581 coef_dump(cf, uv_t_dim->h * 4,
1582 uv_t_dim->w * 4, 3, "dq");
1583 dsp->itx.itxfm_add[b->uvtx]
1584 [txtp](dst, stride,
1585 cf, eob HIGHBD_CALL_SUFFIX);
1586 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
1587 hex_dump(dst, stride, uv_t_dim->w * 4,
1588 uv_t_dim->h * 4, "recon");
1589 }
1590 } else if (!f->frame_thread.pass) {
1591 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
1592 rep_macro(type, t->dir ccoef[pl], off, mul * 0x40)
1593 case_set_upto16(uv_t_dim->h, l., 1, cby4 + y);
1594 case_set_upto16(uv_t_dim->w, a->, 0, cbx4 + x);
1595 #undef set_ctx
1596 }
1597 dst += uv_t_dim->w * 4;
1598 }
1599 t->bx -= x << ss_hor;
1600 }
1601 t->by -= y << ss_ver;
1602 }
1603 }
1604 }
1605 }
1606
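/* Reconstruct one inter-coded block: motion compensation (intra block copy,
 * translational or warped MC, OBMC, inter-intra blending and compound
 * prediction), followed by residual coefficient decoding and the inverse
 * transforms for luma and chroma. */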
1607 int bytefn(dav1d_recon_b_inter)(Dav1dTileContext *const t, const enum BlockSize bs,
1608 const Av1Block *const b)
1609 {
1610 Dav1dTileState *const ts = t->ts;
1611 const Dav1dFrameContext *const f = t->f;
1612 const Dav1dDSPContext *const dsp = f->dsp;
1613 const int bx4 = t->bx & 31, by4 = t->by & 31;
1614 const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
1615 const int ss_hor = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
1616 const int cbx4 = bx4 >> ss_hor, cby4 = by4 >> ss_ver;
1617 const uint8_t *const b_dim = dav1d_block_dimensions[bs];
1618 const int bw4 = b_dim[0], bh4 = b_dim[1];
1619 const int w4 = imin(bw4, f->bw - t->bx), h4 = imin(bh4, f->bh - t->by);
1620 const int has_chroma = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I400 &&
1621 (bw4 > ss_hor || t->bx & 1) &&
1622 (bh4 > ss_ver || t->by & 1);
1623 const int chr_layout_idx = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I400 ? 0 :
1624 DAV1D_PIXEL_LAYOUT_I444 - f->cur.p.layout;
1625 int res;
1626
1627 // prediction
1628 const int cbh4 = (bh4 + ss_ver) >> ss_ver, cbw4 = (bw4 + ss_hor) >> ss_hor;
1629 pixel *dst = ((pixel *) f->cur.data[0]) +
1630 4 * (t->by * PXSTRIDE(f->cur.stride[0]) + t->bx);
1631 const ptrdiff_t uvdstoff =
1632 4 * ((t->bx >> ss_hor) + (t->by >> ss_ver) * PXSTRIDE(f->cur.stride[1]));
1633 if (IS_KEY_OR_INTRA(f->frame_hdr)) {
1634 // intrabc
1635 assert(!f->frame_hdr->super_res.enabled);
1636 res = mc(t, dst, NULL, f->cur.stride[0], bw4, bh4, t->bx, t->by, 0,
1637 b->mv[0], &f->sr_cur, 0 /* unused */, FILTER_2D_BILINEAR);
1638 if (res) return res;
1639 if (has_chroma) for (int pl = 1; pl < 3; pl++) {
1640 res = mc(t, ((pixel *)f->cur.data[pl]) + uvdstoff, NULL, f->cur.stride[1],
1641 bw4 << (bw4 == ss_hor), bh4 << (bh4 == ss_ver),
1642 t->bx & ~ss_hor, t->by & ~ss_ver, pl, b->mv[0],
1643 &f->sr_cur, 0 /* unused */, FILTER_2D_BILINEAR);
1644 if (res) return res;
1645 }
1646 } else if (b->comp_type == COMP_INTER_NONE) {
1647 const Dav1dThreadPicture *const refp = &f->refp[b->ref[0]];
1648 const enum Filter2d filter_2d = b->filter2d;
1649
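        // Single-reference prediction: use affine warping for global motion
        // (when warping is allowed for this reference) or explicitly signalled
        // warped motion, otherwise regular translational MC plus optional OBMC.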
1650 if (imin(bw4, bh4) > 1 &&
1651 ((b->inter_mode == GLOBALMV && f->gmv_warp_allowed[b->ref[0]]) ||
1652 (b->motion_mode == MM_WARP && t->warpmv.type > DAV1D_WM_TYPE_TRANSLATION)))
1653 {
1654 res = warp_affine(t, dst, NULL, f->cur.stride[0], b_dim, 0, refp,
1655 b->motion_mode == MM_WARP ? &t->warpmv :
1656 &f->frame_hdr->gmv[b->ref[0]]);
1657 if (res) return res;
1658 } else {
1659 res = mc(t, dst, NULL, f->cur.stride[0],
1660 bw4, bh4, t->bx, t->by, 0, b->mv[0], refp, b->ref[0], filter_2d);
1661 if (res) return res;
1662 if (b->motion_mode == MM_OBMC) {
1663 res = obmc(t, dst, f->cur.stride[0], b_dim, 0, bx4, by4, w4, h4);
1664 if (res) return res;
1665 }
1666 }
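        // Inter-intra: build an intra prediction of the same block and blend
        // it into the inter prediction, using either a smooth mask or the
        // wedge mask selected by wedge_idx.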
1667 if (b->interintra_type) {
1668 pixel *const tl_edge = bitfn(t->scratch.edge) + 32;
1669 enum IntraPredMode m = b->interintra_mode == II_SMOOTH_PRED ?
1670 SMOOTH_PRED : b->interintra_mode;
1671 pixel *const tmp = bitfn(t->scratch.interintra);
1672 int angle = 0;
1673 const pixel *top_sb_edge = NULL;
1674 if (!(t->by & (f->sb_step - 1))) {
1675 top_sb_edge = f->ipred_edge[0];
1676 const int sby = t->by >> f->sb_shift;
1677 top_sb_edge += f->sb128w * 128 * (sby - 1);
1678 }
1679 m = bytefn(dav1d_prepare_intra_edges)(t->bx, t->bx > ts->tiling.col_start,
1680 t->by, t->by > ts->tiling.row_start,
1681 ts->tiling.col_end, ts->tiling.row_end,
1682 0, dst, f->cur.stride[0], top_sb_edge,
1683 m, &angle, bw4, bh4, 0, tl_edge
1684 HIGHBD_CALL_SUFFIX);
1685 dsp->ipred.intra_pred[m](tmp, 4 * bw4 * sizeof(pixel),
1686 tl_edge, bw4 * 4, bh4 * 4, 0, 0, 0
1687 HIGHBD_CALL_SUFFIX);
1688 const uint8_t *const ii_mask =
1689 b->interintra_type == INTER_INTRA_BLEND ?
1690 dav1d_ii_masks[bs][0][b->interintra_mode] :
1691 dav1d_wedge_masks[bs][0][0][b->wedge_idx];
1692 dsp->mc.blend(dst, f->cur.stride[0], tmp,
1693 bw4 * 4, bh4 * 4, ii_mask);
1694 }
1695
1696 if (!has_chroma) goto skip_inter_chroma_pred;
1697
1698 // sub8x8 derivation
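        // With subsampled chroma, luma blocks smaller than 8x8 in a subsampled
        // direction share their chroma block with previously decoded
        // neighbours; in that case chroma MC below also has to be run with the
        // neighbours' motion vectors and reference frames.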
1699 int is_sub8x8 = bw4 == ss_hor || bh4 == ss_ver;
1700 refmvs_block *const *r;
1701 if (is_sub8x8) {
1702 assert(ss_hor == 1);
1703 r = &t->rt.r[(t->by & 31) + 5];
1704 if (bw4 == 1) is_sub8x8 &= r[0][t->bx - 1].ref.ref[0] > 0;
1705 if (bh4 == ss_ver) is_sub8x8 &= r[-1][t->bx].ref.ref[0] > 0;
1706 if (bw4 == 1 && bh4 == ss_ver)
1707 is_sub8x8 &= r[-1][t->bx - 1].ref.ref[0] > 0;
1708 }
1709
1710 // chroma prediction
1711 if (is_sub8x8) {
1712 assert(ss_hor == 1);
1713 ptrdiff_t h_off = 0, v_off = 0;
1714 if (bw4 == 1 && bh4 == ss_ver) {
1715 for (int pl = 0; pl < 2; pl++) {
1716 res = mc(t, ((pixel *) f->cur.data[1 + pl]) + uvdstoff,
1717 NULL, f->cur.stride[1],
1718 bw4, bh4, t->bx - 1, t->by - 1, 1 + pl,
1719 r[-1][t->bx - 1].mv.mv[0],
1720 &f->refp[r[-1][t->bx - 1].ref.ref[0] - 1],
1721 r[-1][t->bx - 1].ref.ref[0] - 1,
1722 f->frame_thread.pass != 2 ? t->tl_4x4_filter :
1723 f->frame_thread.b[((t->by - 1) * f->b4_stride) + t->bx - 1].filter2d);
1724 if (res) return res;
1725 }
1726 v_off = 2 * PXSTRIDE(f->cur.stride[1]);
1727 h_off = 2;
1728 }
1729 if (bw4 == 1) {
1730 const enum Filter2d left_filter_2d =
1731 dav1d_filter_2d[t->l.filter[1][by4]][t->l.filter[0][by4]];
1732 for (int pl = 0; pl < 2; pl++) {
1733 res = mc(t, ((pixel *) f->cur.data[1 + pl]) + uvdstoff + v_off, NULL,
1734 f->cur.stride[1], bw4, bh4, t->bx - 1,
1735 t->by, 1 + pl, r[0][t->bx - 1].mv.mv[0],
1736 &f->refp[r[0][t->bx - 1].ref.ref[0] - 1],
1737 r[0][t->bx - 1].ref.ref[0] - 1,
1738 f->frame_thread.pass != 2 ? left_filter_2d :
1739 f->frame_thread.b[(t->by * f->b4_stride) + t->bx - 1].filter2d);
1740 if (res) return res;
1741 }
1742 h_off = 2;
1743 }
1744 if (bh4 == ss_ver) {
1745 const enum Filter2d top_filter_2d =
1746 dav1d_filter_2d[t->a->filter[1][bx4]][t->a->filter[0][bx4]];
1747 for (int pl = 0; pl < 2; pl++) {
1748 res = mc(t, ((pixel *) f->cur.data[1 + pl]) + uvdstoff + h_off, NULL,
1749 f->cur.stride[1], bw4, bh4, t->bx, t->by - 1,
1750 1 + pl, r[-1][t->bx].mv.mv[0],
1751 &f->refp[r[-1][t->bx].ref.ref[0] - 1],
1752 r[-1][t->bx].ref.ref[0] - 1,
1753 f->frame_thread.pass != 2 ? top_filter_2d :
1754 f->frame_thread.b[((t->by - 1) * f->b4_stride) + t->bx].filter2d);
1755 if (res) return res;
1756 }
1757 v_off = 2 * PXSTRIDE(f->cur.stride[1]);
1758 }
1759 for (int pl = 0; pl < 2; pl++) {
1760 res = mc(t, ((pixel *) f->cur.data[1 + pl]) + uvdstoff + h_off + v_off, NULL, f->cur.stride[1],
1761 bw4, bh4, t->bx, t->by, 1 + pl, b->mv[0],
1762 refp, b->ref[0], filter_2d);
1763 if (res) return res;
1764 }
1765 } else {
1766 if (imin(cbw4, cbh4) > 1 &&
1767 ((b->inter_mode == GLOBALMV && f->gmv_warp_allowed[b->ref[0]]) ||
1768 (b->motion_mode == MM_WARP && t->warpmv.type > DAV1D_WM_TYPE_TRANSLATION)))
1769 {
1770 for (int pl = 0; pl < 2; pl++) {
1771 res = warp_affine(t, ((pixel *) f->cur.data[1 + pl]) + uvdstoff, NULL,
1772 f->cur.stride[1], b_dim, 1 + pl, refp,
1773 b->motion_mode == MM_WARP ? &t->warpmv :
1774 &f->frame_hdr->gmv[b->ref[0]]);
1775 if (res) return res;
1776 }
1777 } else {
1778 for (int pl = 0; pl < 2; pl++) {
1779 res = mc(t, ((pixel *) f->cur.data[1 + pl]) + uvdstoff,
1780 NULL, f->cur.stride[1],
1781 bw4 << (bw4 == ss_hor), bh4 << (bh4 == ss_ver),
1782 t->bx & ~ss_hor, t->by & ~ss_ver,
1783 1 + pl, b->mv[0], refp, b->ref[0], filter_2d);
1784 if (res) return res;
1785 if (b->motion_mode == MM_OBMC) {
1786 res = obmc(t, ((pixel *) f->cur.data[1 + pl]) + uvdstoff,
1787 f->cur.stride[1], b_dim, 1 + pl, bx4, by4, w4, h4);
1788 if (res) return res;
1789 }
1790 }
1791 }
1792 if (b->interintra_type) {
1793 // FIXME for 8x32 with 4:2:2 subsampling, this probably does
1794 // the wrong thing since it will select 4x16, not 4x32, as a
1795 // transform size...
1796 const uint8_t *const ii_mask =
1797 b->interintra_type == INTER_INTRA_BLEND ?
1798 dav1d_ii_masks[bs][chr_layout_idx][b->interintra_mode] :
1799 dav1d_wedge_masks[bs][chr_layout_idx][0][b->wedge_idx];
1800
1801 for (int pl = 0; pl < 2; pl++) {
1802 pixel *const tmp = bitfn(t->scratch.interintra);
1803 pixel *const tl_edge = bitfn(t->scratch.edge) + 32;
1804 enum IntraPredMode m =
1805 b->interintra_mode == II_SMOOTH_PRED ?
1806 SMOOTH_PRED : b->interintra_mode;
1807 int angle = 0;
1808 pixel *const uvdst = ((pixel *) f->cur.data[1 + pl]) + uvdstoff;
1809 const pixel *top_sb_edge = NULL;
1810 if (!(t->by & (f->sb_step - 1))) {
1811 top_sb_edge = f->ipred_edge[pl + 1];
1812 const int sby = t->by >> f->sb_shift;
1813 top_sb_edge += f->sb128w * 128 * (sby - 1);
1814 }
1815 m = bytefn(dav1d_prepare_intra_edges)(t->bx >> ss_hor,
1816 (t->bx >> ss_hor) >
1817 (ts->tiling.col_start >> ss_hor),
1818 t->by >> ss_ver,
1819 (t->by >> ss_ver) >
1820 (ts->tiling.row_start >> ss_ver),
1821 ts->tiling.col_end >> ss_hor,
1822 ts->tiling.row_end >> ss_ver,
1823 0, uvdst, f->cur.stride[1],
1824 top_sb_edge, m,
1825 &angle, cbw4, cbh4, 0, tl_edge
1826 HIGHBD_CALL_SUFFIX);
1827 dsp->ipred.intra_pred[m](tmp, cbw4 * 4 * sizeof(pixel),
1828 tl_edge, cbw4 * 4, cbh4 * 4, 0, 0, 0
1829 HIGHBD_CALL_SUFFIX);
1830 dsp->mc.blend(uvdst, f->cur.stride[1], tmp,
1831 cbw4 * 4, cbh4 * 4, ii_mask);
1832 }
1833 }
1834 }
1835
1836 skip_inter_chroma_pred: {}
1837 t->tl_4x4_filter = filter_2d;
1838 } else {
1839 const enum Filter2d filter_2d = b->filter2d;
1840 // Maximum super block size is 128x128
1841 int16_t (*tmp)[128 * 128] = t->scratch.compinter;
1842 int jnt_weight;
1843 uint8_t *const seg_mask = t->scratch.seg_mask;
1844 const uint8_t *mask;
1845
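        // Compound prediction: motion-compensate both references into
        // intermediate higher-precision buffers, then combine them below by
        // plain or weighted averaging, or with a wedge/segmentation mask.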
1846 for (int i = 0; i < 2; i++) {
1847 const Dav1dThreadPicture *const refp = &f->refp[b->ref[i]];
1848
1849 if (b->inter_mode == GLOBALMV_GLOBALMV && f->gmv_warp_allowed[b->ref[i]]) {
1850 res = warp_affine(t, NULL, tmp[i], bw4 * 4, b_dim, 0, refp,
1851 &f->frame_hdr->gmv[b->ref[i]]);
1852 if (res) return res;
1853 } else {
1854 res = mc(t, NULL, tmp[i], 0, bw4, bh4, t->bx, t->by, 0,
1855 b->mv[i], refp, b->ref[i], filter_2d);
1856 if (res) return res;
1857 }
1858 }
1859 switch (b->comp_type) {
1860 case COMP_INTER_AVG:
1861 dsp->mc.avg(dst, f->cur.stride[0], tmp[0], tmp[1],
1862 bw4 * 4, bh4 * 4 HIGHBD_CALL_SUFFIX);
1863 break;
1864 case COMP_INTER_WEIGHTED_AVG:
1865 jnt_weight = f->jnt_weights[b->ref[0]][b->ref[1]];
1866 dsp->mc.w_avg(dst, f->cur.stride[0], tmp[0], tmp[1],
1867 bw4 * 4, bh4 * 4, jnt_weight HIGHBD_CALL_SUFFIX);
1868 break;
1869 case COMP_INTER_SEG:
1870 dsp->mc.w_mask[chr_layout_idx](dst, f->cur.stride[0],
1871 tmp[b->mask_sign], tmp[!b->mask_sign],
1872 bw4 * 4, bh4 * 4, seg_mask,
1873 b->mask_sign HIGHBD_CALL_SUFFIX);
1874 mask = seg_mask;
1875 break;
1876 case COMP_INTER_WEDGE:
1877 mask = dav1d_wedge_masks[bs][0][0][b->wedge_idx];
1878 dsp->mc.mask(dst, f->cur.stride[0],
1879 tmp[b->mask_sign], tmp[!b->mask_sign],
1880 bw4 * 4, bh4 * 4, mask HIGHBD_CALL_SUFFIX);
1881 if (has_chroma)
1882 mask = dav1d_wedge_masks[bs][chr_layout_idx][b->mask_sign][b->wedge_idx];
1883 break;
1884 }
1885
1886 // chroma
1887 if (has_chroma) for (int pl = 0; pl < 2; pl++) {
1888 for (int i = 0; i < 2; i++) {
1889 const Dav1dThreadPicture *const refp = &f->refp[b->ref[i]];
1890 if (b->inter_mode == GLOBALMV_GLOBALMV &&
1891 imin(cbw4, cbh4) > 1 && f->gmv_warp_allowed[b->ref[i]])
1892 {
1893 res = warp_affine(t, NULL, tmp[i], bw4 * 4 >> ss_hor,
1894 b_dim, 1 + pl,
1895 refp, &f->frame_hdr->gmv[b->ref[i]]);
1896 if (res) return res;
1897 } else {
1898 res = mc(t, NULL, tmp[i], 0, bw4, bh4, t->bx, t->by,
1899 1 + pl, b->mv[i], refp, b->ref[i], filter_2d);
1900 if (res) return res;
1901 }
1902 }
1903 pixel *const uvdst = ((pixel *) f->cur.data[1 + pl]) + uvdstoff;
1904 switch (b->comp_type) {
1905 case COMP_INTER_AVG:
1906 dsp->mc.avg(uvdst, f->cur.stride[1], tmp[0], tmp[1],
1907 bw4 * 4 >> ss_hor, bh4 * 4 >> ss_ver
1908 HIGHBD_CALL_SUFFIX);
1909 break;
1910 case COMP_INTER_WEIGHTED_AVG:
1911 dsp->mc.w_avg(uvdst, f->cur.stride[1], tmp[0], tmp[1],
1912 bw4 * 4 >> ss_hor, bh4 * 4 >> ss_ver, jnt_weight
1913 HIGHBD_CALL_SUFFIX);
1914 break;
1915 case COMP_INTER_WEDGE:
1916 case COMP_INTER_SEG:
1917 dsp->mc.mask(uvdst, f->cur.stride[1],
1918 tmp[b->mask_sign], tmp[!b->mask_sign],
1919 bw4 * 4 >> ss_hor, bh4 * 4 >> ss_ver, mask
1920 HIGHBD_CALL_SUFFIX);
1921 break;
1922 }
1923 }
1924 }
1925
1926 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS) {
1927 hex_dump(dst, f->cur.stride[0], b_dim[0] * 4, b_dim[1] * 4, "y-pred");
1928 if (has_chroma) {
1929 hex_dump(&((pixel *) f->cur.data[1])[uvdstoff], f->cur.stride[1],
1930 cbw4 * 4, cbh4 * 4, "u-pred");
1931 hex_dump(&((pixel *) f->cur.data[2])[uvdstoff], f->cur.stride[1],
1932 cbw4 * 4, cbh4 * 4, "v-pred");
1933 }
1934 }
1935
1936 const int cw4 = (w4 + ss_hor) >> ss_hor, ch4 = (h4 + ss_ver) >> ss_ver;
1937
1938 if (b->skip) {
1939 // reset coef contexts
1940 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
1941 rep_macro(type, t->dir lcoef, off, mul * 0x40)
1942 case_set(bh4, l., 1, by4);
1943 case_set(bw4, a->, 0, bx4);
1944 #undef set_ctx
1945 if (has_chroma) {
1946 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
1947 rep_macro(type, t->dir ccoef[0], off, mul * 0x40); \
1948 rep_macro(type, t->dir ccoef[1], off, mul * 0x40)
1949 case_set(cbh4, l., 1, cby4);
1950 case_set(cbw4, a->, 0, cbx4);
1951 #undef set_ctx
1952 }
1953 return 0;
1954 }
1955
1956 const TxfmInfo *const uvtx = &dav1d_txfm_dimensions[b->uvtx];
1957 const TxfmInfo *const ytx = &dav1d_txfm_dimensions[b->max_ytx];
1958 const uint16_t tx_split[2] = { b->tx_split0, b->tx_split1 };
1959
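    // Residual coding: walk the block in units of up to 64x64 luma pixels,
    // decoding the luma transform tree and chroma coefficients and adding the
    // inverse transforms on top of the prediction.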
1960 for (int init_y = 0; init_y < bh4; init_y += 16) {
1961 for (int init_x = 0; init_x < bw4; init_x += 16) {
1962 // coefficient coding & inverse transforms
1963 int y_off = !!init_y, y;
1964 dst += PXSTRIDE(f->cur.stride[0]) * 4 * init_y;
1965 for (y = init_y, t->by += init_y; y < imin(h4, init_y + 16);
1966 y += ytx->h, y_off++)
1967 {
1968 int x, x_off = !!init_x;
1969 for (x = init_x, t->bx += init_x; x < imin(w4, init_x + 16);
1970 x += ytx->w, x_off++)
1971 {
1972 read_coef_tree(t, bs, b, b->max_ytx, 0, tx_split,
1973 x_off, y_off, &dst[x * 4]);
1974 t->bx += ytx->w;
1975 }
1976 dst += PXSTRIDE(f->cur.stride[0]) * 4 * ytx->h;
1977 t->bx -= x;
1978 t->by += ytx->h;
1979 }
1980 dst -= PXSTRIDE(f->cur.stride[0]) * 4 * y;
1981 t->by -= y;
1982
1983 // chroma coefs and inverse transform
1984 if (has_chroma) for (int pl = 0; pl < 2; pl++) {
1985 pixel *uvdst = ((pixel *) f->cur.data[1 + pl]) + uvdstoff +
1986 (PXSTRIDE(f->cur.stride[1]) * init_y * 4 >> ss_ver);
1987 for (y = init_y >> ss_ver, t->by += init_y;
1988 y < imin(ch4, (init_y + 16) >> ss_ver); y += uvtx->h)
1989 {
1990 int x;
1991 for (x = init_x >> ss_hor, t->bx += init_x;
1992 x < imin(cw4, (init_x + 16) >> ss_hor); x += uvtx->w)
1993 {
1994 coef *cf;
1995 int eob;
1996 enum TxfmType txtp;
1997 if (f->frame_thread.pass) {
1998 cf = ts->frame_thread.cf;
1999 ts->frame_thread.cf += uvtx->w * uvtx->h * 16;
2000 const struct CodedBlockInfo *const cbi =
2001 &f->frame_thread.cbi[t->by * f->b4_stride + t->bx];
2002 eob = cbi->eob[1 + pl];
2003 txtp = cbi->txtp[1 + pl];
2004 } else {
2005 uint8_t cf_ctx;
2006 cf = bitfn(t->cf);
2007 txtp = t->txtp_map[(by4 + (y << ss_ver)) * 32 +
2008 bx4 + (x << ss_hor)];
2009 eob = decode_coefs(t, &t->a->ccoef[pl][cbx4 + x],
2010 &t->l.ccoef[pl][cby4 + y],
2011 b->uvtx, bs, b, 0, 1 + pl,
2012 cf, &txtp, &cf_ctx);
2013 if (DEBUG_BLOCK_INFO)
2014 printf("Post-uv-cf-blk[pl=%d,tx=%d,"
2015 "txtp=%d,eob=%d]: r=%d\n",
2016 pl, b->uvtx, txtp, eob, ts->msac.rng);
2017 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
2018 rep_macro(type, t->dir ccoef[pl], off, mul * cf_ctx)
2019 #define default_memset(dir, diridx, off, sz) \
2020 memset(&t->dir ccoef[pl][off], cf_ctx, sz)
2021 case_set_upto16_with_default( \
2022 imin(uvtx->h, (f->bh - t->by + ss_ver) >> ss_ver),
2023 l., 1, cby4 + y);
2024 case_set_upto16_with_default( \
2025 imin(uvtx->w, (f->bw - t->bx + ss_hor) >> ss_hor),
2026 a->, 0, cbx4 + x);
2027 #undef default_memset
2028 #undef set_ctx
2029 }
2030 if (eob >= 0) {
2031 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
2032 coef_dump(cf, uvtx->h * 4, uvtx->w * 4, 3, "dq");
2033 dsp->itx.itxfm_add[b->uvtx]
2034 [txtp](&uvdst[4 * x],
2035 f->cur.stride[1],
2036 cf, eob HIGHBD_CALL_SUFFIX);
2037 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
2038 hex_dump(&uvdst[4 * x], f->cur.stride[1],
2039 uvtx->w * 4, uvtx->h * 4, "recon");
2040 }
2041 t->bx += uvtx->w << ss_hor;
2042 }
2043 uvdst += PXSTRIDE(f->cur.stride[1]) * 4 * uvtx->h;
2044 t->bx -= x << ss_hor;
2045 t->by += uvtx->h << ss_ver;
2046 }
2047 t->by -= y << ss_ver;
2048 }
2049 }
2050 }
2051 return 0;
2052 }
2053
2054 void bytefn(dav1d_filter_sbrow_deblock)(Dav1dFrameContext *const f, const int sby) {
2055 const int y = sby * f->sb_step * 4;
2056 const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
2057 pixel *const p[3] = {
2058 f->lf.p[0] + y * PXSTRIDE(f->cur.stride[0]),
2059 f->lf.p[1] + (y * PXSTRIDE(f->cur.stride[1]) >> ss_ver),
2060 f->lf.p[2] + (y * PXSTRIDE(f->cur.stride[1]) >> ss_ver)
2061 };
2062 Av1Filter *mask = f->lf.mask + (sby >> !f->seq_hdr->sb128) * f->sb128w;
2063 if (f->frame_hdr->loopfilter.level_y[0] || f->frame_hdr->loopfilter.level_y[1]) {
2064 int start_of_tile_row = 0;
2065 if (f->frame_hdr->tiling.row_start_sb[f->lf.tile_row] == sby)
2066 start_of_tile_row = f->lf.tile_row++;
2067 bytefn(dav1d_loopfilter_sbrow)(f, p, mask, sby, start_of_tile_row);
2068 }
2069 if (f->lf.restore_planes) {
2070 // Store loop filtered pixels required by loop restoration
2071 bytefn(dav1d_lr_copy_lpf)(f, p, sby);
2072 }
2073 }
2074
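/* Apply CDEF to one superblock row. The bottom two 4-pixel block rows of each
 * superblock row are deferred until the row below has been deblocked (the
 * boundary deblocking modifies those pixels) and are filtered here as the
 * prev_mask pass when sby > 0. */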
2075 void bytefn(dav1d_filter_sbrow_cdef)(Dav1dFrameContext *const f, const int sby) {
2076 const int sbsz = f->sb_step;
2077 const int y = sby * sbsz * 4;
2078 const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
2079 pixel *const p[3] = {
2080 f->lf.p[0] + y * PXSTRIDE(f->cur.stride[0]),
2081 f->lf.p[1] + (y * PXSTRIDE(f->cur.stride[1]) >> ss_ver),
2082 f->lf.p[2] + (y * PXSTRIDE(f->cur.stride[1]) >> ss_ver)
2083 };
2084 Av1Filter *prev_mask = f->lf.mask + ((sby - 1) >> !f->seq_hdr->sb128) * f->sb128w;
2085 Av1Filter *mask = f->lf.mask + (sby >> !f->seq_hdr->sb128) * f->sb128w;
2086 const int start = sby * sbsz;
2087 if (sby) {
2088 const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
2089 pixel *p_up[3] = {
2090 p[0] - 8 * PXSTRIDE(f->cur.stride[0]),
2091 p[1] - (8 * PXSTRIDE(f->cur.stride[1]) >> ss_ver),
2092 p[2] - (8 * PXSTRIDE(f->cur.stride[1]) >> ss_ver),
2093 };
2094 bytefn(dav1d_cdef_brow)(f, p_up, prev_mask, start - 2, start);
2095 }
2096 const int n_blks = sbsz - 2 * (sby + 1 < f->sbh);
2097 const int end = imin(start + n_blks, f->bh);
2098 bytefn(dav1d_cdef_brow)(f, p, mask, start, end);
2099 }
2100
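/* Horizontal superres upscaling: scale each plane of the filtered superblock
 * row from the coded-width picture (f->cur) into the full-width output
 * picture (f->sr_cur). */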
2101 void bytefn(dav1d_filter_sbrow_resize)(Dav1dFrameContext *const f, const int sby) {
2102 const int sbsz = f->sb_step;
2103 const int y = sby * sbsz * 4;
2104 const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
2105 const pixel *const p[3] = {
2106 f->lf.p[0] + y * PXSTRIDE(f->cur.stride[0]),
2107 f->lf.p[1] + (y * PXSTRIDE(f->cur.stride[1]) >> ss_ver),
2108 f->lf.p[2] + (y * PXSTRIDE(f->cur.stride[1]) >> ss_ver)
2109 };
2110 pixel *const sr_p[3] = {
2111 f->lf.sr_p[0] + y * PXSTRIDE(f->sr_cur.p.stride[0]),
2112 f->lf.sr_p[1] + (y * PXSTRIDE(f->sr_cur.p.stride[1]) >> ss_ver),
2113 f->lf.sr_p[2] + (y * PXSTRIDE(f->sr_cur.p.stride[1]) >> ss_ver)
2114 };
2115 const int has_chroma = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I400;
2116 for (int pl = 0; pl < 1 + 2 * has_chroma; pl++) {
2117 const int ss_ver = pl && f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
2118 const int h_start = 8 * !!sby >> ss_ver;
2119 const ptrdiff_t dst_stride = f->sr_cur.p.stride[!!pl];
2120 pixel *dst = sr_p[pl] - h_start * PXSTRIDE(dst_stride);
2121 const ptrdiff_t src_stride = f->cur.stride[!!pl];
2122 const pixel *src = p[pl] - h_start * PXSTRIDE(src_stride);
2123 const int h_end = 4 * (sbsz - 2 * (sby + 1 < f->sbh)) >> ss_ver;
2124 const int ss_hor = pl && f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
2125 const int dst_w = (f->sr_cur.p.p.w + ss_hor) >> ss_hor;
2126 const int src_w = (4 * f->bw + ss_hor) >> ss_hor;
2127 const int img_h = (f->cur.p.h - sbsz * 4 * sby + ss_ver) >> ss_ver;
2128
2129 f->dsp->mc.resize(dst, dst_stride, src, src_stride, dst_w,
2130 imin(img_h, h_end) + h_start, src_w,
2131 f->resize_step[!!pl], f->resize_start[!!pl]
2132 HIGHBD_CALL_SUFFIX);
2133 }
2134 }
2135
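/* Apply loop restoration (Wiener / self-guided filtering) to one superblock
 * row of the (possibly upscaled) output picture. */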
2136 void bytefn(dav1d_filter_sbrow_lr)(Dav1dFrameContext *const f, const int sby) {
2137 const int y = sby * f->sb_step * 4;
2138 const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
2139 pixel *const sr_p[3] = {
2140 f->lf.sr_p[0] + y * PXSTRIDE(f->sr_cur.p.stride[0]),
2141 f->lf.sr_p[1] + (y * PXSTRIDE(f->sr_cur.p.stride[1]) >> ss_ver),
2142 f->lf.sr_p[2] + (y * PXSTRIDE(f->sr_cur.p.stride[1]) >> ss_ver)
2143 };
2144 bytefn(dav1d_lr_sbrow)(f, sr_p, sby);
2145 }
2146
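/* Run the full post-filter chain for one superblock row: deblocking, CDEF,
 * superres upscaling and loop restoration, in that order. */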
2147 void bytefn(dav1d_filter_sbrow)(Dav1dFrameContext *const f, const int sby) {
2148 bytefn(dav1d_filter_sbrow_deblock)(f, sby);
2149 if (f->seq_hdr->cdef)
2150 bytefn(dav1d_filter_sbrow_cdef)(f, sby);
2151 if (f->frame_hdr->width[0] != f->frame_hdr->width[1])
2152 bytefn(dav1d_filter_sbrow_resize)(f, sby);
2153 if (f->lf.restore_planes)
2154 bytefn(dav1d_filter_sbrow_lr)(f, sby);
2155 }
2156
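/* Save the bottom pixel row of the current superblock row for each plane;
 * the superblock row below reads it back as top_sb_edge when preparing its
 * intra prediction edges. */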
2157 void bytefn(dav1d_backup_ipred_edge)(Dav1dTileContext *const t) {
2158 const Dav1dFrameContext *const f = t->f;
2159 Dav1dTileState *const ts = t->ts;
2160 const int sby = t->by >> f->sb_shift;
2161 const int sby_off = f->sb128w * 128 * sby;
2162 const int x_off = ts->tiling.col_start;
2163
2164 const pixel *const y =
2165 ((const pixel *) f->cur.data[0]) + x_off * 4 +
2166 ((t->by + f->sb_step) * 4 - 1) * PXSTRIDE(f->cur.stride[0]);
2167 pixel_copy(&f->ipred_edge[0][sby_off + x_off * 4], y,
2168 4 * (ts->tiling.col_end - x_off));
2169
2170 if (f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I400) {
2171 const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
2172 const int ss_hor = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
2173
2174 const ptrdiff_t uv_off = (x_off * 4 >> ss_hor) +
2175 (((t->by + f->sb_step) * 4 >> ss_ver) - 1) * PXSTRIDE(f->cur.stride[1]);
2176 for (int pl = 1; pl <= 2; pl++)
2177 pixel_copy(&f->ipred_edge[pl][sby_off + (x_off * 4 >> ss_hor)],
2178 &((const pixel *) f->cur.data[pl])[uv_off],
2179 4 * (ts->tiling.col_end - x_off) >> ss_hor);
2180 }
2181 }
2182