1 /*
2  * Copyright © 2018-2021, VideoLAN and dav1d authors
3  * Copyright © 2018, Two Orioles, LLC
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice, this
10  *    list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  *    this list of conditions and the following disclaimer in the documentation
14  *    and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include "config.h"
29 
30 #include <string.h>
31 #include <stdio.h>
32 
33 #include "common/attributes.h"
34 #include "common/bitdepth.h"
35 #include "common/dump.h"
36 #include "common/frame.h"
37 #include "common/intops.h"
38 
39 #include "src/cdef_apply.h"
40 #include "src/ctx.h"
41 #include "src/ipred_prepare.h"
42 #include "src/lf_apply.h"
43 #include "src/lr_apply.h"
44 #include "src/recon.h"
45 #include "src/scan.h"
46 #include "src/tables.h"
47 #include "src/wedge.h"
48 
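/* Exp-Golomb read: a unary run of (up to 32) "zero" equiprobable bools gives
 * the length, followed by that many suffix bools. For example, the decoded
 * bit sequence 0 0 1 b1 b0 yields ((1 << 2) | (b1 << 1) | b0) - 1, i.e. a
 * value in the range 3..6. */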
static inline unsigned read_golomb(MsacContext *const msac) {
50     int len = 0;
51     unsigned val = 1;
52 
53     while (!dav1d_msac_decode_bool_equi(msac) && len < 32) len++;
54     while (len--) val = (val << 1) + dav1d_msac_decode_bool_equi(msac);
55 
56     return val - 1;
57 }
58 
static inline unsigned get_skip_ctx(const TxfmInfo *const t_dim,
60                                     const enum BlockSize bs,
61                                     const uint8_t *const a,
62                                     const uint8_t *const l,
63                                     const int chroma,
64                                     const enum Dav1dPixelLayout layout)
65 {
66     const uint8_t *const b_dim = dav1d_block_dimensions[bs];
67 
68     if (chroma) {
69         const int ss_ver = layout == DAV1D_PIXEL_LAYOUT_I420;
70         const int ss_hor = layout != DAV1D_PIXEL_LAYOUT_I444;
71         const int not_one_blk = b_dim[2] - (!!b_dim[2] && ss_hor) > t_dim->lw ||
72                                 b_dim[3] - (!!b_dim[3] && ss_ver) > t_dim->lh;
73         unsigned ca, cl;
74 
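        /* The per-4x4 coefficient contexts in a[]/l[] are 0x40 when the
         * corresponding transform block had no coefficients (see the
         * all_skip path in decode_coefs() below), so comparing a packed run
         * of them against 0x40... tells us whether any covered unit
         * above/left was coded with non-zero coefficients. */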
75 #define MERGE_CTX(dir, type, no_val) \
76         c##dir = *(const type *) dir != no_val; \
77         break
78 
79         switch (t_dim->lw) {
80         /* For some reason the MSVC CRT _wassert() function is not flagged as
81          * __declspec(noreturn), so when using those headers the compiler will
82          * expect execution to continue after an assertion has been triggered
83          * and will therefore complain about the use of uninitialized variables
84          * when compiled in debug mode if we put the default case at the end. */
85         default: assert(0); /* fall-through */
86         case TX_4X4:   MERGE_CTX(a, uint8_t,  0x40);
87         case TX_8X8:   MERGE_CTX(a, uint16_t, 0x4040);
88         case TX_16X16: MERGE_CTX(a, uint32_t, 0x40404040U);
89         case TX_32X32: MERGE_CTX(a, uint64_t, 0x4040404040404040ULL);
90         }
91         switch (t_dim->lh) {
92         default: assert(0); /* fall-through */
93         case TX_4X4:   MERGE_CTX(l, uint8_t,  0x40);
94         case TX_8X8:   MERGE_CTX(l, uint16_t, 0x4040);
95         case TX_16X16: MERGE_CTX(l, uint32_t, 0x40404040U);
96         case TX_32X32: MERGE_CTX(l, uint64_t, 0x4040404040404040ULL);
97         }
98 #undef MERGE_CTX
99 
100         return 7 + not_one_blk * 3 + ca + cl;
101     } else if (b_dim[2] == t_dim->lw && b_dim[3] == t_dim->lh) {
102         return 0;
103     } else {
104         unsigned la, ll;
105 
106 #define MERGE_CTX(dir, type, tx) \
107         if (tx == TX_64X64) { \
108             uint64_t tmp = *(const uint64_t *) dir; \
109             tmp |= *(const uint64_t *) &dir[8]; \
110             l##dir = (unsigned) (tmp >> 32) | (unsigned) tmp; \
111         } else \
112             l##dir = *(const type *) dir; \
113         if (tx == TX_32X32) l##dir |= *(const type *) &dir[sizeof(type)]; \
114         if (tx >= TX_16X16) l##dir |= l##dir >> 16; \
115         if (tx >= TX_8X8)   l##dir |= l##dir >> 8; \
116         break
117 
118         switch (t_dim->lw) {
119         default: assert(0); /* fall-through */
120         case TX_4X4:   MERGE_CTX(a, uint8_t,  TX_4X4);
121         case TX_8X8:   MERGE_CTX(a, uint16_t, TX_8X8);
122         case TX_16X16: MERGE_CTX(a, uint32_t, TX_16X16);
123         case TX_32X32: MERGE_CTX(a, uint32_t, TX_32X32);
124         case TX_64X64: MERGE_CTX(a, uint32_t, TX_64X64);
125         }
126         switch (t_dim->lh) {
127         default: assert(0); /* fall-through */
128         case TX_4X4:   MERGE_CTX(l, uint8_t,  TX_4X4);
129         case TX_8X8:   MERGE_CTX(l, uint16_t, TX_8X8);
130         case TX_16X16: MERGE_CTX(l, uint32_t, TX_16X16);
131         case TX_32X32: MERGE_CTX(l, uint32_t, TX_32X32);
132         case TX_64X64: MERGE_CTX(l, uint32_t, TX_64X64);
133         }
134 #undef MERGE_CTX
135 
136         return dav1d_skip_ctx[umin(la & 0x3F, 4)][umin(ll & 0x3F, 4)];
137     }
138 }
139 
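/* The top two bits of each neighbouring context byte encode the DC sign
 * written by decode_coefs() below: 0x00 = negative, 0x40 = zero/absent,
 * 0x80 = positive, i.e. (sign + 1) << 6. The multiplies by the 0x0101../
 * 0x0404.. patterns sum these per-byte values horizontally; subtracting the
 * number of covered 4x4 units leaves s = sum of the signs, and
 * (s != 0) + (s > 0) returns 0/1/2 for balanced, mostly-negative and
 * mostly-positive neighbours. */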
static inline unsigned get_dc_sign_ctx(const int /*enum RectTxfmSize*/ tx,
141                                        const uint8_t *const a,
142                                        const uint8_t *const l)
143 {
144     uint64_t mask = 0xC0C0C0C0C0C0C0C0ULL, mul = 0x0101010101010101ULL;
145     int s;
146 
147 #if ARCH_X86_64 && defined(__GNUC__)
148     /* Coerce compilers into producing better code. For some reason
149      * every x86-64 compiler is awful at handling 64-bit constants. */
150     __asm__("" : "+r"(mask), "+r"(mul));
151 #endif
152 
153     switch(tx) {
154     default: assert(0); /* fall-through */
155     case TX_4X4: {
156         int t = *(const uint8_t *) a >> 6;
157         t    += *(const uint8_t *) l >> 6;
158         s = t - 1 - 1;
159         break;
160     }
161     case TX_8X8: {
162         uint32_t t = *(const uint16_t *) a & (uint32_t) mask;
163         t         += *(const uint16_t *) l & (uint32_t) mask;
164         t *= 0x04040404U;
165         s = (int) (t >> 24) - 2 - 2;
166         break;
167     }
168     case TX_16X16: {
169         uint32_t t = (*(const uint32_t *) a & (uint32_t) mask) >> 6;
170         t         += (*(const uint32_t *) l & (uint32_t) mask) >> 6;
171         t *= (uint32_t) mul;
172         s = (int) (t >> 24) - 4 - 4;
173         break;
174     }
175     case TX_32X32: {
176         uint64_t t = (*(const uint64_t *) a & mask) >> 6;
177         t         += (*(const uint64_t *) l & mask) >> 6;
178         t *= mul;
179         s = (int) (t >> 56) - 8 - 8;
180         break;
181     }
182     case TX_64X64: {
183         uint64_t t = (*(const uint64_t *) &a[0] & mask) >> 6;
184         t         += (*(const uint64_t *) &a[8] & mask) >> 6;
185         t         += (*(const uint64_t *) &l[0] & mask) >> 6;
186         t         += (*(const uint64_t *) &l[8] & mask) >> 6;
187         t *= mul;
188         s = (int) (t >> 56) - 16 - 16;
189         break;
190     }
191     case RTX_4X8: {
192         uint32_t t = *(const uint8_t  *) a & (uint32_t) mask;
193         t         += *(const uint16_t *) l & (uint32_t) mask;
194         t *= 0x04040404U;
195         s = (int) (t >> 24) - 1 - 2;
196         break;
197     }
198     case RTX_8X4: {
199         uint32_t t = *(const uint16_t *) a & (uint32_t) mask;
200         t         += *(const uint8_t  *) l & (uint32_t) mask;
201         t *= 0x04040404U;
202         s = (int) (t >> 24) - 2 - 1;
203         break;
204     }
205     case RTX_8X16: {
206         uint32_t t = *(const uint16_t *) a & (uint32_t) mask;
207         t         += *(const uint32_t *) l & (uint32_t) mask;
208         t = (t >> 6) * (uint32_t) mul;
209         s = (int) (t >> 24) - 2 - 4;
210         break;
211     }
212     case RTX_16X8: {
213         uint32_t t = *(const uint32_t *) a & (uint32_t) mask;
214         t         += *(const uint16_t *) l & (uint32_t) mask;
215         t = (t >> 6) * (uint32_t) mul;
216         s = (int) (t >> 24) - 4 - 2;
217         break;
218     }
219     case RTX_16X32: {
220         uint64_t t = *(const uint32_t *) a & (uint32_t) mask;
221         t         += *(const uint64_t *) l & mask;
222         t = (t >> 6) * mul;
223         s = (int) (t >> 56) - 4 - 8;
224         break;
225     }
226     case RTX_32X16: {
227         uint64_t t = *(const uint64_t *) a & mask;
228         t         += *(const uint32_t *) l & (uint32_t) mask;
229         t = (t >> 6) * mul;
230         s = (int) (t >> 56) - 8 - 4;
231         break;
232     }
233     case RTX_32X64: {
234         uint64_t t = (*(const uint64_t *) &a[0] & mask) >> 6;
235         t         += (*(const uint64_t *) &l[0] & mask) >> 6;
236         t         += (*(const uint64_t *) &l[8] & mask) >> 6;
237         t *= mul;
238         s = (int) (t >> 56) - 8 - 16;
239         break;
240     }
241     case RTX_64X32: {
242         uint64_t t = (*(const uint64_t *) &a[0] & mask) >> 6;
243         t         += (*(const uint64_t *) &a[8] & mask) >> 6;
244         t         += (*(const uint64_t *) &l[0] & mask) >> 6;
245         t *= mul;
246         s = (int) (t >> 56) - 16 - 8;
247         break;
248     }
249     case RTX_4X16: {
250         uint32_t t = *(const uint8_t  *) a & (uint32_t) mask;
251         t         += *(const uint32_t *) l & (uint32_t) mask;
252         t = (t >> 6) * (uint32_t) mul;
253         s = (int) (t >> 24) - 1 - 4;
254         break;
255     }
256     case RTX_16X4: {
257         uint32_t t = *(const uint32_t *) a & (uint32_t) mask;
258         t         += *(const uint8_t  *) l & (uint32_t) mask;
259         t = (t >> 6) * (uint32_t) mul;
260         s = (int) (t >> 24) - 4 - 1;
261         break;
262     }
263     case RTX_8X32: {
264         uint64_t t = *(const uint16_t *) a & (uint32_t) mask;
265         t         += *(const uint64_t *) l & mask;
266         t = (t >> 6) * mul;
267         s = (int) (t >> 56) - 2 - 8;
268         break;
269     }
270     case RTX_32X8: {
271         uint64_t t = *(const uint64_t *) a & mask;
272         t         += *(const uint16_t *) l & (uint32_t) mask;
273         t = (t >> 6) * mul;
274         s = (int) (t >> 56) - 8 - 2;
275         break;
276     }
277     case RTX_16X64: {
278         uint64_t t = *(const uint32_t *) a & (uint32_t) mask;
279         t         += *(const uint64_t *) &l[0] & mask;
280         t = (t >> 6) + ((*(const uint64_t *) &l[8] & mask) >> 6);
281         t *= mul;
282         s = (int) (t >> 56) - 4 - 16;
283         break;
284     }
285     case RTX_64X16: {
286         uint64_t t = *(const uint64_t *) &a[0] & mask;
287         t         += *(const uint32_t *) l & (uint32_t) mask;
288         t = (t >> 6) + ((*(const uint64_t *) &a[8] & mask) >> 6);
289         t *= mul;
290         s = (int) (t >> 56) - 16 - 4;
291         break;
292     }
293     }
294 
295     return (s != 0) + (s > 0);
296 }
297 
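/* Context derivation for the low ("base") tokens. Each levels[] byte packs
 * the token of an already-decoded coefficient in bits 0-5 and min(token, 3)
 * in bits 6-7 (see the level_tok handling in decode_coefs()), so summing
 * whole bytes makes (mag + 64) >> 7 roughly half the sum of the clipped
 * neighbour tokens. *hi_mag returns the partial sum that the caller masks
 * with 63 to recover the plain token sum for the high-range (br) context. */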
static inline unsigned get_lo_ctx(const uint8_t *const levels,
299                                   const enum TxClass tx_class,
300                                   unsigned *const hi_mag,
301                                   const uint8_t (*const ctx_offsets)[5],
302                                   const unsigned x, const unsigned y,
303                                   const ptrdiff_t stride)
304 {
305     unsigned mag = levels[0 * stride + 1] + levels[1 * stride + 0];
306     unsigned offset;
307     if (tx_class == TX_CLASS_2D) {
308         mag += levels[1 * stride + 1];
309         *hi_mag = mag;
310         mag += levels[0 * stride + 2] + levels[2 * stride + 0];
311         offset = ctx_offsets[umin(y, 4)][umin(x, 4)];
312     } else {
313         mag += levels[0 * stride + 2];
314         *hi_mag = mag;
315         mag += levels[0 * stride + 3] + levels[0 * stride + 4];
316         offset = 26 + (y > 1 ? 10 : y * 5);
317     }
318     return offset + (mag > 512 ? 4 : (mag + 64) >> 7);
319 }
320 
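/* Decodes the coefficients of a single transform block into cf[], in scan
 * order. Returns the scan index of the last non-zero coefficient (0 for
 * DC-only) or -1 if the block is coded as all-skip, and stores the packed
 * neighbour context (bits 0-5: cumulative level, bits 6-7: DC sign state)
 * in *res_ctx for use by get_skip_ctx()/get_dc_sign_ctx() above. */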
static int decode_coefs(Dav1dTaskContext *const t,
322                         uint8_t *const a, uint8_t *const l,
323                         const enum RectTxfmSize tx, const enum BlockSize bs,
324                         const Av1Block *const b, const int intra,
325                         const int plane, coef *cf,
326                         enum TxfmType *const txtp, uint8_t *res_ctx)
327 {
328     Dav1dTileState *const ts = t->ts;
329     const int chroma = !!plane;
330     const Dav1dFrameContext *const f = t->f;
331     const int lossless = f->frame_hdr->segmentation.lossless[b->seg_id];
332     const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[tx];
333     const int dbg = DEBUG_BLOCK_INFO && plane && 0;
334 
335     if (dbg)
336         printf("Start: r=%d\n", ts->msac.rng);
337 
338     // does this block have any non-zero coefficients
339     const int sctx = get_skip_ctx(t_dim, bs, a, l, chroma, f->cur.p.layout);
340     const int all_skip = dav1d_msac_decode_bool_adapt(&ts->msac,
341                              ts->cdf.coef.skip[t_dim->ctx][sctx]);
342     if (dbg)
343         printf("Post-non-zero[%d][%d][%d]: r=%d\n",
344                t_dim->ctx, sctx, all_skip, ts->msac.rng);
345     if (all_skip) {
346         *res_ctx = 0x40;
347         *txtp = lossless * WHT_WHT; /* lossless ? WHT_WHT : DCT_DCT */
348         return -1;
349     }
350 
351     // transform type (chroma: derived, luma: explicitly coded)
352     if (lossless) {
353         assert(t_dim->max == TX_4X4);
354         *txtp = WHT_WHT;
355     } else if (t_dim->max + intra >= TX_64X64) {
356         *txtp = DCT_DCT;
357     } else if (chroma) {
358         // inferred from either the luma txtp (inter) or a LUT (intra)
359         *txtp = intra ? dav1d_txtp_from_uvmode[b->uv_mode] :
360                         get_uv_inter_txtp(t_dim, *txtp);
361     } else if (!f->frame_hdr->segmentation.qidx[b->seg_id]) {
362         // In libaom, lossless is checked by a literal qidx == 0, but not all
363         // such blocks are actually lossless. The remainder gets an implicit
364         // transform type (for luma)
365         *txtp = DCT_DCT;
366     } else {
367         unsigned idx;
368         if (intra) {
369             const enum IntraPredMode y_mode_nofilt = b->y_mode == FILTER_PRED ?
370                 dav1d_filter_mode_to_y_mode[b->y_angle] : b->y_mode;
371             if (f->frame_hdr->reduced_txtp_set || t_dim->min == TX_16X16) {
372                 idx = dav1d_msac_decode_symbol_adapt4(&ts->msac,
373                           ts->cdf.m.txtp_intra2[t_dim->min][y_mode_nofilt], 4);
374                 *txtp = dav1d_tx_types_per_set[idx + 0];
375             } else {
376                 idx = dav1d_msac_decode_symbol_adapt8(&ts->msac,
377                           ts->cdf.m.txtp_intra1[t_dim->min][y_mode_nofilt], 6);
378                 *txtp = dav1d_tx_types_per_set[idx + 5];
379             }
380             if (dbg)
381                 printf("Post-txtp-intra[%d->%d][%d][%d->%d]: r=%d\n",
382                        tx, t_dim->min, y_mode_nofilt, idx, *txtp, ts->msac.rng);
383         } else {
384             if (f->frame_hdr->reduced_txtp_set || t_dim->max == TX_32X32) {
385                 idx = dav1d_msac_decode_bool_adapt(&ts->msac,
386                           ts->cdf.m.txtp_inter3[t_dim->min]);
387                 *txtp = (idx - 1) & IDTX; /* idx ? DCT_DCT : IDTX */
388             } else if (t_dim->min == TX_16X16) {
389                 idx = dav1d_msac_decode_symbol_adapt16(&ts->msac,
390                           ts->cdf.m.txtp_inter2, 11);
391                 *txtp = dav1d_tx_types_per_set[idx + 12];
392             } else {
393                 idx = dav1d_msac_decode_symbol_adapt16(&ts->msac,
394                           ts->cdf.m.txtp_inter1[t_dim->min], 15);
395                 *txtp = dav1d_tx_types_per_set[idx + 24];
396             }
397             if (dbg)
398                 printf("Post-txtp-inter[%d->%d][%d->%d]: r=%d\n",
399                        tx, t_dim->min, idx, *txtp, ts->msac.rng);
400         }
401     }
402 
403     // find end-of-block (eob)
404     int eob_bin;
405     const int tx2dszctx = imin(t_dim->lw, TX_32X32) + imin(t_dim->lh, TX_32X32);
406     const enum TxClass tx_class = dav1d_tx_type_class[*txtp];
407     const int is_1d = tx_class != TX_CLASS_2D;
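    /* tx2dszctx = min(log2(w/4), 3) + min(log2(h/4), 3) selects one of seven
     * eob CDF sets covering transforms of up to 16 << tx2dszctx coefficients
     * (16, 32, ..., 1024); 1-D transform classes get separate CDFs up to the
     * 256-coefficient size. */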
408     switch (tx2dszctx) {
409 #define case_sz(sz, bin, ns, is_1d) \
410     case sz: { \
411         uint16_t *const eob_bin_cdf = ts->cdf.coef.eob_bin_##bin[chroma]is_1d; \
412         eob_bin = dav1d_msac_decode_symbol_adapt##ns(&ts->msac, eob_bin_cdf, 4 + sz); \
413         break; \
414     }
415     case_sz(0,   16,  4, [is_1d]);
416     case_sz(1,   32,  8, [is_1d]);
417     case_sz(2,   64,  8, [is_1d]);
418     case_sz(3,  128,  8, [is_1d]);
419     case_sz(4,  256, 16, [is_1d]);
420     case_sz(5,  512, 16,        );
421     case_sz(6, 1024, 16,        );
422 #undef case_sz
423     }
424     if (dbg)
425         printf("Post-eob_bin_%d[%d][%d][%d]: r=%d\n",
426                16 << tx2dszctx, chroma, is_1d, eob_bin, ts->msac.rng);
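    /* eob_bin is an exponential size class: eob_bin <= 1 is the eob itself,
     * otherwise eob lies in [1 << (eob_bin - 1), (1 << eob_bin) - 1] and is
     * refined by one adapted high bit plus eob_bin - 2 raw bits. E.g.
     * eob_bin = 4 with eob_hi_bit = 1 and low bits 0b10 gives
     * eob = (0b11 << 2) | 0b10 = 14. */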
427     int eob;
428     if (eob_bin > 1) {
429         uint16_t *const eob_hi_bit_cdf =
430             ts->cdf.coef.eob_hi_bit[t_dim->ctx][chroma][eob_bin];
431         const int eob_hi_bit = dav1d_msac_decode_bool_adapt(&ts->msac, eob_hi_bit_cdf);
432         if (dbg)
433             printf("Post-eob_hi_bit[%d][%d][%d][%d]: r=%d\n",
434                    t_dim->ctx, chroma, eob_bin, eob_hi_bit, ts->msac.rng);
435         eob = ((eob_hi_bit | 2) << (eob_bin - 2)) |
436               dav1d_msac_decode_bools(&ts->msac, eob_bin - 2);
437         if (dbg)
438             printf("Post-eob[%d]: r=%d\n", eob, ts->msac.rng);
439     } else {
440         eob = eob_bin;
441     }
442     assert(eob >= 0);
443 
444     // base tokens
445     uint16_t (*const eob_cdf)[4] = ts->cdf.coef.eob_base_tok[t_dim->ctx][chroma];
446     uint16_t (*const hi_cdf)[4] = ts->cdf.coef.br_tok[imin(t_dim->ctx, 3)][chroma];
447     unsigned rc, dc_tok;
448 
449     if (eob) {
450         uint16_t (*const lo_cdf)[4] = ts->cdf.coef.base_tok[t_dim->ctx][chroma];
451         uint8_t *const levels = t->scratch.levels; // bits 0-5: tok, 6-7: lo_tok
452         const int sw = imin(t_dim->w, 8), sh = imin(t_dim->h, 8);
453 
454         /* eob */
455         unsigned ctx = 1 + (eob > sw * sh * 2) + (eob > sw * sh * 4);
456         int eob_tok = dav1d_msac_decode_symbol_adapt4(&ts->msac, eob_cdf[ctx], 2);
457         int tok = eob_tok + 1;
458         int level_tok = tok * 0x41;
459         unsigned mag;
460 
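        /* The coefficient loop below is instantiated per transform class.
         * While scanning backwards from the eob, each non-zero coefficient
         * is stored temporarily as (tok << 11) | rc_next, where rc_next
         * (the low bits, masked with 0x7ff/0x3ff) is the scan position of
         * the next non-zero coefficient in forward scan order. The dequant
         * loops further down walk this linked list from the lowest non-zero
         * AC position up to the eob entry, whose link is 0. */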
461 #define DECODE_COEFS_CLASS(tx_class) \
462         unsigned x, y; \
463         if (tx_class == TX_CLASS_2D) \
464             rc = scan[eob], x = rc >> shift, y = rc & mask; \
465         else if (tx_class == TX_CLASS_H) \
466             /* Transposing reduces the stride and padding requirements */ \
467             x = eob & mask, y = eob >> shift, rc = eob; \
468         else /* tx_class == TX_CLASS_V */ \
469             x = eob & mask, y = eob >> shift, rc = (x << shift2) | y; \
470         if (dbg) \
471             printf("Post-lo_tok[%d][%d][%d][%d=%d=%d]: r=%d\n", \
472                    t_dim->ctx, chroma, ctx, eob, rc, tok, ts->msac.rng); \
473         if (eob_tok == 2) { \
474             ctx = (tx_class == TX_CLASS_2D ? (x | y) > 1 : y != 0) ? 14 : 7; \
475             tok = dav1d_msac_decode_hi_tok(&ts->msac, hi_cdf[ctx]); \
476             level_tok = tok + (3 << 6); \
477             if (dbg) \
478                 printf("Post-hi_tok[%d][%d][%d][%d=%d=%d]: r=%d\n", \
479                        imin(t_dim->ctx, 3), chroma, ctx, eob, rc, tok, \
480                        ts->msac.rng); \
481         } \
482         cf[rc] = tok << 11; \
483         levels[x * stride + y] = (uint8_t) level_tok; \
484         for (int i = eob - 1; i > 0; i--) { /* ac */ \
485             unsigned rc_i; \
486             if (tx_class == TX_CLASS_2D) \
487                 rc_i = scan[i], x = rc_i >> shift, y = rc_i & mask; \
488             else if (tx_class == TX_CLASS_H) \
489                 x = i & mask, y = i >> shift, rc_i = i; \
490             else /* tx_class == TX_CLASS_V */ \
491                 x = i & mask, y = i >> shift, rc_i = (x << shift2) | y; \
492             assert(x < 32 && y < 32); \
493             uint8_t *const level = levels + x * stride + y; \
494             ctx = get_lo_ctx(level, tx_class, &mag, lo_ctx_offsets, x, y, stride); \
495             if (tx_class == TX_CLASS_2D) \
496                 y |= x; \
497             tok = dav1d_msac_decode_symbol_adapt4(&ts->msac, lo_cdf[ctx], 3); \
498             if (dbg) \
499                 printf("Post-lo_tok[%d][%d][%d][%d=%d=%d]: r=%d\n", \
500                        t_dim->ctx, chroma, ctx, i, rc_i, tok, ts->msac.rng); \
501             if (tok == 3) { \
502                 mag &= 63; \
503                 ctx = (y > (tx_class == TX_CLASS_2D) ? 14 : 7) + \
504                       (mag > 12 ? 6 : (mag + 1) >> 1); \
505                 tok = dav1d_msac_decode_hi_tok(&ts->msac, hi_cdf[ctx]); \
506                 if (dbg) \
507                     printf("Post-hi_tok[%d][%d][%d][%d=%d=%d]: r=%d\n", \
508                            imin(t_dim->ctx, 3), chroma, ctx, i, rc_i, tok, \
509                            ts->msac.rng); \
510                 *level = (uint8_t) (tok + (3 << 6)); \
511                 cf[rc_i] = (tok << 11) | rc; \
512                 rc = rc_i; \
513             } else { \
514                 /* 0x1 for tok, 0x7ff as bitmask for rc, 0x41 for level_tok */ \
515                 tok *= 0x17ff41; \
516                 *level = (uint8_t) tok; \
517                 /* tok ? (tok << 11) | rc : 0 */ \
518                 tok = (tok >> 9) & (rc + ~0x7ffu); \
519                 if (tok) rc = rc_i; \
520                 cf[rc_i] = tok; \
521             } \
522         } \
523         /* dc */ \
524         ctx = (tx_class == TX_CLASS_2D) ? 0 : \
525             get_lo_ctx(levels, tx_class, &mag, lo_ctx_offsets, 0, 0, stride); \
526         dc_tok = dav1d_msac_decode_symbol_adapt4(&ts->msac, lo_cdf[ctx], 3); \
527         if (dbg) \
528             printf("Post-dc_lo_tok[%d][%d][%d][%d]: r=%d\n", \
529                    t_dim->ctx, chroma, ctx, dc_tok, ts->msac.rng); \
530         if (dc_tok == 3) { \
531             if (tx_class == TX_CLASS_2D) \
532                 mag = levels[0 * stride + 1] + levels[1 * stride + 0] + \
533                       levels[1 * stride + 1]; \
534             mag &= 63; \
535             ctx = mag > 12 ? 6 : (mag + 1) >> 1; \
536             dc_tok = dav1d_msac_decode_hi_tok(&ts->msac, hi_cdf[ctx]); \
537             if (dbg) \
538                 printf("Post-dc_hi_tok[%d][%d][0][%d]: r=%d\n", \
539                        imin(t_dim->ctx, 3), chroma, dc_tok, ts->msac.rng); \
540         } \
541         break
542 
543         const uint16_t *scan;
544         switch (tx_class) {
545         case TX_CLASS_2D: {
546             const unsigned nonsquare_tx = tx >= RTX_4X8;
547             const uint8_t (*const lo_ctx_offsets)[5] =
548                 dav1d_lo_ctx_offsets[nonsquare_tx + (tx & nonsquare_tx)];
549             scan = dav1d_scans[tx];
550             const ptrdiff_t stride = 4 * sh;
551             const unsigned shift = t_dim->lh < 4 ? t_dim->lh + 2 : 5, shift2 = 0;
552             const unsigned mask = 4 * sh - 1;
553             memset(levels, 0, stride * (4 * sw + 2));
554             DECODE_COEFS_CLASS(TX_CLASS_2D);
555         }
556         case TX_CLASS_H: {
557             const uint8_t (*const lo_ctx_offsets)[5] = NULL;
558             const ptrdiff_t stride = 16;
559             const unsigned shift = t_dim->lh + 2, shift2 = 0;
560             const unsigned mask = 4 * sh - 1;
561             memset(levels, 0, stride * (4 * sh + 2));
562             DECODE_COEFS_CLASS(TX_CLASS_H);
563         }
564         case TX_CLASS_V: {
565             const uint8_t (*const lo_ctx_offsets)[5] = NULL;
566             const ptrdiff_t stride = 16;
567             const unsigned shift = t_dim->lw + 2, shift2 = t_dim->lh + 2;
568             const unsigned mask = 4 * sw - 1;
569             memset(levels, 0, stride * (4 * sw + 2));
570             DECODE_COEFS_CLASS(TX_CLASS_V);
571         }
572 #undef DECODE_COEFS_CLASS
573         default: assert(0);
574         }
575     } else { // dc-only
576         int tok_br = dav1d_msac_decode_symbol_adapt4(&ts->msac, eob_cdf[0], 2);
577         dc_tok = 1 + tok_br;
578         if (dbg)
579             printf("Post-dc_lo_tok[%d][%d][%d][%d]: r=%d\n",
580                    t_dim->ctx, chroma, 0, dc_tok, ts->msac.rng);
581         if (tok_br == 2) {
582             dc_tok = dav1d_msac_decode_hi_tok(&ts->msac, hi_cdf[0]);
583             if (dbg)
584                 printf("Post-dc_hi_tok[%d][%d][0][%d]: r=%d\n",
585                        imin(t_dim->ctx, 3), chroma, dc_tok, ts->msac.rng);
586         }
587         rc = 0;
588     }
589 
590     // residual and sign
591     const uint16_t *const dq_tbl = ts->dq[b->seg_id][plane];
592     const uint8_t *const qm_tbl = *txtp < IDTX ? f->qm[tx][plane] : NULL;
593     const int dq_shift = imax(0, t_dim->ctx - 2);
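    /* cf_max below is (1 << (bitdepth + 7)) - 1, the clamp applied to
     * dequantized coefficients. */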
594     const unsigned cf_max = ~(~127U << (BITDEPTH == 8 ? 8 : f->cur.p.bpc));
595     unsigned cul_level, dc_sign_level;
596 
597     if (!dc_tok) {
598         cul_level = 0;
599         dc_sign_level = 1 << 6;
600         if (qm_tbl) goto ac_qm;
601         goto ac_noqm;
602     }
603 
604     const int dc_sign_ctx = get_dc_sign_ctx(tx, a, l);
605     uint16_t *const dc_sign_cdf = ts->cdf.coef.dc_sign[chroma][dc_sign_ctx];
606     const int dc_sign = dav1d_msac_decode_bool_adapt(&ts->msac, dc_sign_cdf);
607     if (dbg)
608         printf("Post-dc_sign[%d][%d][%d]: r=%d\n",
609                chroma, dc_sign_ctx, dc_sign, ts->msac.rng);
610 
611     unsigned dc_dq = dq_tbl[0];
612     dc_sign_level = (dc_sign - 1) & (2 << 6);
613 
614     if (qm_tbl) {
615         dc_dq = (dc_dq * qm_tbl[0] + 16) >> 5;
616 
617         if (dc_tok == 15) {
618             dc_tok = read_golomb(&ts->msac) + 15;
619             if (dbg)
620                 printf("Post-dc_residual[%d->%d]: r=%d\n",
621                        dc_tok - 15, dc_tok, ts->msac.rng);
622 
623             dc_tok &= 0xfffff;
624             dc_dq = (dc_dq * dc_tok) & 0xffffff;
625         } else {
626             dc_dq *= dc_tok;
627             assert(dc_dq <= 0xffffff);
628         }
629         cul_level = dc_tok;
630         dc_dq >>= dq_shift;
631         cf[0] = (coef) (umin(dc_dq - dc_sign, cf_max) ^ -dc_sign);
632 
633         if (rc) ac_qm: {
634             const unsigned ac_dq = dq_tbl[1];
635             do {
636                 const int sign = dav1d_msac_decode_bool_equi(&ts->msac);
637                 if (dbg)
638                     printf("Post-sign[%d=%d]: r=%d\n", rc, sign, ts->msac.rng);
639                 const unsigned rc_tok = cf[rc];
640                 unsigned tok, dq = (ac_dq * qm_tbl[rc] + 16) >> 5;
641 
642                 if (rc_tok >= (15 << 11)) {
643                     tok = read_golomb(&ts->msac) + 15;
644                     if (dbg)
645                         printf("Post-residual[%d=%d->%d]: r=%d\n",
646                                rc, tok - 15, tok, ts->msac.rng);
647 
648                     tok &= 0xfffff;
649                     dq = (dq * tok) & 0xffffff;
650                 } else {
651                     tok = rc_tok >> 11;
652                     dq *= tok;
653                     assert(dq <= 0xffffff);
654                 }
655                 cul_level += tok;
656                 dq >>= dq_shift;
657                 cf[rc] = (coef) (umin(dq - sign, cf_max) ^ -sign);
658 
659                 rc = rc_tok & 0x3ff;
660             } while (rc);
661         }
662     } else {
663         // non-qmatrix is the common case and allows for additional optimizations
664         if (dc_tok == 15) {
665             dc_tok = read_golomb(&ts->msac) + 15;
666             if (dbg)
667                 printf("Post-dc_residual[%d->%d]: r=%d\n",
668                        dc_tok - 15, dc_tok, ts->msac.rng);
669 
670             dc_tok &= 0xfffff;
671             dc_dq = ((dc_dq * dc_tok) & 0xffffff) >> dq_shift;
672             dc_dq = umin(dc_dq - dc_sign, cf_max);
673         } else {
674             dc_dq = ((dc_dq * dc_tok) >> dq_shift) - dc_sign;
675             assert(dc_dq <= cf_max);
676         }
677         cul_level = dc_tok;
678         cf[0] = (coef) (dc_dq ^ -dc_sign);
679 
680         if (rc) ac_noqm: {
681             const unsigned ac_dq = dq_tbl[1];
682             do {
683                 const int sign = dav1d_msac_decode_bool_equi(&ts->msac);
684                 if (dbg)
685                     printf("Post-sign[%d=%d]: r=%d\n", rc, sign, ts->msac.rng);
686                 const unsigned rc_tok = cf[rc];
687                 unsigned tok, dq;
688 
689                 // residual
690                 if (rc_tok >= (15 << 11)) {
691                     tok = read_golomb(&ts->msac) + 15;
692                     if (dbg)
693                         printf("Post-residual[%d=%d->%d]: r=%d\n",
694                                rc, tok - 15, tok, ts->msac.rng);
695 
696                     // coefficient parsing, see 5.11.39
697                     tok &= 0xfffff;
698 
699                     // dequant, see 7.12.3
700                     dq = ((ac_dq * tok) & 0xffffff) >> dq_shift;
701                     dq = umin(dq - sign, cf_max);
702                 } else {
703                     // cannot exceed cf_max, so we can avoid the clipping
704                     tok = rc_tok >> 11;
705                     dq = ((ac_dq * tok) >> dq_shift) - sign;
706                     assert(dq <= cf_max);
707                 }
708                 cul_level += tok;
709                 cf[rc] = (coef) (dq ^ -sign);
710 
711                 rc = rc_tok & 0x3ff; // next non-zero rc, zero if eob
712             } while (rc);
713         }
714     }
715 
716     // context
717     *res_ctx = umin(cul_level, 63) | dc_sign_level;
718 
719     return eob;
720 }
721 
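/* Walks the inter transform split tree: tx_split[0]/[1] hold one bit per
 * potential split position at depths 0 and 1. At the leaves this decodes
 * (or, in the second frame-threading pass, re-loads) the coefficients and,
 * when reconstructing, applies the inverse transform onto dst. */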
static void read_coef_tree(Dav1dTaskContext *const t,
723                            const enum BlockSize bs, const Av1Block *const b,
724                            const enum RectTxfmSize ytx, const int depth,
725                            const uint16_t *const tx_split,
726                            const int x_off, const int y_off, pixel *dst)
727 {
728     const Dav1dFrameContext *const f = t->f;
729     Dav1dTileState *const ts = t->ts;
730     const Dav1dDSPContext *const dsp = f->dsp;
731     const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[ytx];
732     const int txw = t_dim->w, txh = t_dim->h;
733 
    /* y_off can be larger than 3 since lossless blocks use TX_4X4 but can't
     * be split. Avoids an undefined left shift. */
736     if (depth < 2 && tx_split[depth] &&
737         tx_split[depth] & (1 << (y_off * 4 + x_off)))
738     {
739         const enum RectTxfmSize sub = t_dim->sub;
740         const TxfmInfo *const sub_t_dim = &dav1d_txfm_dimensions[sub];
741         const int txsw = sub_t_dim->w, txsh = sub_t_dim->h;
742 
743         read_coef_tree(t, bs, b, sub, depth + 1, tx_split,
744                        x_off * 2 + 0, y_off * 2 + 0, dst);
745         t->bx += txsw;
746         if (txw >= txh && t->bx < f->bw)
747             read_coef_tree(t, bs, b, sub, depth + 1, tx_split, x_off * 2 + 1,
748                            y_off * 2 + 0, dst ? &dst[4 * txsw] : NULL);
749         t->bx -= txsw;
750         t->by += txsh;
751         if (txh >= txw && t->by < f->bh) {
752             if (dst)
753                 dst += 4 * txsh * PXSTRIDE(f->cur.stride[0]);
754             read_coef_tree(t, bs, b, sub, depth + 1, tx_split,
755                            x_off * 2 + 0, y_off * 2 + 1, dst);
756             t->bx += txsw;
757             if (txw >= txh && t->bx < f->bw)
758                 read_coef_tree(t, bs, b, sub, depth + 1, tx_split, x_off * 2 + 1,
759                                y_off * 2 + 1, dst ? &dst[4 * txsw] : NULL);
760             t->bx -= txsw;
761         }
762         t->by -= txsh;
763     } else {
764         const int bx4 = t->bx & 31, by4 = t->by & 31;
765         enum TxfmType txtp;
766         uint8_t cf_ctx;
767         int eob;
768         coef *cf;
769         struct CodedBlockInfo *cbi;
770 
771         if (t->frame_thread.pass) {
772             const int p = t->frame_thread.pass & 1;
773             assert(ts->frame_thread[p].cf);
774             cf = ts->frame_thread[p].cf;
775             ts->frame_thread[p].cf += imin(t_dim->w, 8) * imin(t_dim->h, 8) * 16;
776             cbi = &f->frame_thread.cbi[t->by * f->b4_stride + t->bx];
777         } else {
778             cf = bitfn(t->cf);
779         }
780         if (t->frame_thread.pass != 2) {
781             eob = decode_coefs(t, &t->a->lcoef[bx4], &t->l.lcoef[by4],
782                                ytx, bs, b, 0, 0, cf, &txtp, &cf_ctx);
783             if (DEBUG_BLOCK_INFO)
784                 printf("Post-y-cf-blk[tx=%d,txtp=%d,eob=%d]: r=%d\n",
785                        ytx, txtp, eob, ts->msac.rng);
786 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
787             rep_macro(type, t->dir lcoef, off, mul * cf_ctx)
788 #define default_memset(dir, diridx, off, sz) \
789             memset(&t->dir lcoef[off], cf_ctx, sz)
790             case_set_upto16_with_default(imin(txh, f->bh - t->by), l., 1, by4);
791             case_set_upto16_with_default(imin(txw, f->bw - t->bx), a->, 0, bx4);
792 #undef default_memset
793 #undef set_ctx
794 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
795             for (int y = 0; y < txh; y++) { \
796                 rep_macro(type, txtp_map, 0, mul * txtp); \
797                 txtp_map += 32; \
798             }
799             uint8_t *txtp_map = &t->txtp_map[by4 * 32 + bx4];
800             case_set_upto16(txw,,,);
801 #undef set_ctx
802             if (t->frame_thread.pass == 1) {
803                 cbi->eob[0] = eob;
804                 cbi->txtp[0] = txtp;
805             }
806         } else {
807             eob = cbi->eob[0];
808             txtp = cbi->txtp[0];
809         }
810         if (!(t->frame_thread.pass & 1)) {
811             assert(dst);
812             if (eob >= 0) {
813                 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
814                     coef_dump(cf, imin(t_dim->h, 8) * 4, imin(t_dim->w, 8) * 4, 3, "dq");
815                 dsp->itx.itxfm_add[ytx][txtp](dst, f->cur.stride[0], cf, eob
816                                               HIGHBD_CALL_SUFFIX);
817                 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
818                     hex_dump(dst, f->cur.stride[0], t_dim->w * 4, t_dim->h * 4, "recon");
819             }
820         }
821     }
822 }
823 
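/* Frame-threading pass-1 entry point: parses all luma and chroma
 * coefficients of a block (without reconstructing), storing eob and txtp
 * per transform block in f->frame_thread.cbi[] and the coefficients
 * themselves in ts->frame_thread[1].cf for the later reconstruction pass. */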
void bytefn(dav1d_read_coef_blocks)(Dav1dTaskContext *const t,
825                                     const enum BlockSize bs, const Av1Block *const b)
826 {
827     const Dav1dFrameContext *const f = t->f;
828     const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
829     const int ss_hor = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
830     const int bx4 = t->bx & 31, by4 = t->by & 31;
831     const int cbx4 = bx4 >> ss_hor, cby4 = by4 >> ss_ver;
832     const uint8_t *const b_dim = dav1d_block_dimensions[bs];
833     const int bw4 = b_dim[0], bh4 = b_dim[1];
834     const int cbw4 = (bw4 + ss_hor) >> ss_hor, cbh4 = (bh4 + ss_ver) >> ss_ver;
835     const int has_chroma = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I400 &&
836                            (bw4 > ss_hor || t->bx & 1) &&
837                            (bh4 > ss_ver || t->by & 1);
838 
839     if (b->skip) {
840 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
841         rep_macro(type, t->dir lcoef, off, mul * 0x40)
842         case_set(bh4, l., 1, by4);
843         case_set(bw4, a->, 0, bx4);
844 #undef set_ctx
845         if (has_chroma) {
846 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
847             rep_macro(type, t->dir ccoef[0], off, mul * 0x40); \
848             rep_macro(type, t->dir ccoef[1], off, mul * 0x40)
849             case_set(cbh4, l., 1, cby4);
850             case_set(cbw4, a->, 0, cbx4);
851 #undef set_ctx
852         }
853         return;
854     }
855 
856     Dav1dTileState *const ts = t->ts;
857     const int w4 = imin(bw4, f->bw - t->bx), h4 = imin(bh4, f->bh - t->by);
858     const int cw4 = (w4 + ss_hor) >> ss_hor, ch4 = (h4 + ss_ver) >> ss_ver;
859     assert(t->frame_thread.pass == 1);
860     assert(!b->skip);
861     const TxfmInfo *const uv_t_dim = &dav1d_txfm_dimensions[b->uvtx];
862     const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[b->intra ? b->tx : b->max_ytx];
863     const uint16_t tx_split[2] = { b->tx_split0, b->tx_split1 };
864 
865     for (int init_y = 0; init_y < h4; init_y += 16) {
866         const int sub_h4 = imin(h4, 16 + init_y);
867         for (int init_x = 0; init_x < w4; init_x += 16) {
868             const int sub_w4 = imin(w4, init_x + 16);
869             int y_off = !!init_y, y, x;
870             for (y = init_y, t->by += init_y; y < sub_h4;
871                  y += t_dim->h, t->by += t_dim->h, y_off++)
872             {
873                 struct CodedBlockInfo *const cbi =
874                     &f->frame_thread.cbi[t->by * f->b4_stride];
875                 int x_off = !!init_x;
876                 for (x = init_x, t->bx += init_x; x < sub_w4;
877                      x += t_dim->w, t->bx += t_dim->w, x_off++)
878                 {
879                     if (!b->intra) {
880                         read_coef_tree(t, bs, b, b->max_ytx, 0, tx_split,
881                                        x_off, y_off, NULL);
882                     } else {
883                         uint8_t cf_ctx = 0x40;
884                         enum TxfmType txtp;
885                         const int eob = cbi[t->bx].eob[0] =
886                             decode_coefs(t, &t->a->lcoef[bx4 + x],
887                                          &t->l.lcoef[by4 + y], b->tx, bs, b, 1,
888                                          0, ts->frame_thread[1].cf, &txtp, &cf_ctx);
889                         if (DEBUG_BLOCK_INFO)
890                             printf("Post-y-cf-blk[tx=%d,txtp=%d,eob=%d]: r=%d\n",
891                                    b->tx, txtp, eob, ts->msac.rng);
892                         cbi[t->bx].txtp[0] = txtp;
893                         ts->frame_thread[1].cf += imin(t_dim->w, 8) * imin(t_dim->h, 8) * 16;
894 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
895                         rep_macro(type, t->dir lcoef, off, mul * cf_ctx)
896 #define default_memset(dir, diridx, off, sz) \
897                         memset(&t->dir lcoef[off], cf_ctx, sz)
898                         case_set_upto16_with_default(imin(t_dim->h, f->bh - t->by),
899                                                      l., 1, by4 + y);
900                         case_set_upto16_with_default(imin(t_dim->w, f->bw - t->bx),
901                                                      a->, 0, bx4 + x);
902 #undef default_memset
903 #undef set_ctx
904                     }
905                 }
906                 t->bx -= x;
907             }
908             t->by -= y;
909 
910             if (!has_chroma) continue;
911 
912             const int sub_ch4 = imin(ch4, (init_y + 16) >> ss_ver);
913             const int sub_cw4 = imin(cw4, (init_x + 16) >> ss_hor);
914             for (int pl = 0; pl < 2; pl++) {
915                 for (y = init_y >> ss_ver, t->by += init_y; y < sub_ch4;
916                      y += uv_t_dim->h, t->by += uv_t_dim->h << ss_ver)
917                 {
918                     struct CodedBlockInfo *const cbi =
919                         &f->frame_thread.cbi[t->by * f->b4_stride];
920                     for (x = init_x >> ss_hor, t->bx += init_x; x < sub_cw4;
921                          x += uv_t_dim->w, t->bx += uv_t_dim->w << ss_hor)
922                     {
923                         uint8_t cf_ctx = 0x40;
924                         enum TxfmType txtp;
925                         if (!b->intra)
926                             txtp = t->txtp_map[(by4 + (y << ss_ver)) * 32 +
927                                                 bx4 + (x << ss_hor)];
928                         const int eob = cbi[t->bx].eob[1 + pl] =
929                             decode_coefs(t, &t->a->ccoef[pl][cbx4 + x],
930                                          &t->l.ccoef[pl][cby4 + y], b->uvtx, bs,
931                                          b, b->intra, 1 + pl, ts->frame_thread[1].cf,
932                                          &txtp, &cf_ctx);
933                         if (DEBUG_BLOCK_INFO)
934                             printf("Post-uv-cf-blk[pl=%d,tx=%d,"
935                                    "txtp=%d,eob=%d]: r=%d\n",
936                                    pl, b->uvtx, txtp, eob, ts->msac.rng);
937                         cbi[t->bx].txtp[1 + pl] = txtp;
938                         ts->frame_thread[1].cf += uv_t_dim->w * uv_t_dim->h * 16;
939 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
940                         rep_macro(type, t->dir ccoef[pl], off, mul * cf_ctx)
941 #define default_memset(dir, diridx, off, sz) \
942                         memset(&t->dir ccoef[pl][off], cf_ctx, sz)
943                         case_set_upto16_with_default( \
944                                  imin(uv_t_dim->h, (f->bh - t->by + ss_ver) >> ss_ver),
945                                  l., 1, cby4 + y);
946                         case_set_upto16_with_default( \
947                                  imin(uv_t_dim->w, (f->bw - t->bx + ss_hor) >> ss_hor),
948                                  a->, 0, cbx4 + x);
949 #undef default_memset
950 #undef set_ctx
951                     }
952                     t->bx -= x << ss_hor;
953                 }
954                 t->by -= y << ss_ver;
955             }
956         }
957     }
958 }
959 
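/* Motion compensation for a single plane of one block. Writes either final
 * pixels (dst8) or a 16-bit intermediate for compound prediction (dst16).
 * If the reference window sticks out of the reference frame, the edge is
 * extended into scratch.emu_edge first; if the reference has a different
 * resolution than the current frame, the scaled MC path with per-axis
 * step/scale factors (f->svc) is used instead. */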
static int mc(Dav1dTaskContext *const t,
961               pixel *const dst8, int16_t *const dst16, const ptrdiff_t dst_stride,
962               const int bw4, const int bh4,
963               const int bx, const int by, const int pl,
964               const mv mv, const Dav1dThreadPicture *const refp, const int refidx,
965               const enum Filter2d filter_2d)
966 {
967     assert((dst8 != NULL) ^ (dst16 != NULL));
968     const Dav1dFrameContext *const f = t->f;
969     const int ss_ver = !!pl && f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
970     const int ss_hor = !!pl && f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
971     const int h_mul = 4 >> ss_hor, v_mul = 4 >> ss_ver;
972     const int mvx = mv.x, mvy = mv.y;
973     const int mx = mvx & (15 >> !ss_hor), my = mvy & (15 >> !ss_ver);
974     ptrdiff_t ref_stride = refp->p.stride[!!pl];
975     const pixel *ref;
976 
977     if (refp->p.p.w == f->cur.p.w && refp->p.p.h == f->cur.p.h) {
978         const int dx = bx * h_mul + (mvx >> (3 + ss_hor));
979         const int dy = by * v_mul + (mvy >> (3 + ss_ver));
980         int w, h;
981 
982         if (refp->p.data[0] != f->cur.data[0]) { // i.e. not for intrabc
983             w = (f->cur.p.w + ss_hor) >> ss_hor;
984             h = (f->cur.p.h + ss_ver) >> ss_ver;
985         } else {
986             w = f->bw * 4 >> ss_hor;
987             h = f->bh * 4 >> ss_ver;
988         }
989         if (dx < !!mx * 3 || dy < !!my * 3 ||
990             dx + bw4 * h_mul + !!mx * 4 > w ||
991             dy + bh4 * v_mul + !!my * 4 > h)
992         {
993             pixel *const emu_edge_buf = bitfn(t->scratch.emu_edge);
994             f->dsp->mc.emu_edge(bw4 * h_mul + !!mx * 7, bh4 * v_mul + !!my * 7,
995                                 w, h, dx - !!mx * 3, dy - !!my * 3,
996                                 emu_edge_buf, 192 * sizeof(pixel),
997                                 refp->p.data[pl], ref_stride);
998             ref = &emu_edge_buf[192 * !!my * 3 + !!mx * 3];
999             ref_stride = 192 * sizeof(pixel);
1000         } else {
1001             ref = ((pixel *) refp->p.data[pl]) + PXSTRIDE(ref_stride) * dy + dx;
1002         }
1003 
1004         if (dst8 != NULL) {
1005             f->dsp->mc.mc[filter_2d](dst8, dst_stride, ref, ref_stride, bw4 * h_mul,
1006                                      bh4 * v_mul, mx << !ss_hor, my << !ss_ver
1007                                      HIGHBD_CALL_SUFFIX);
1008         } else {
1009             f->dsp->mc.mct[filter_2d](dst16, ref, ref_stride, bw4 * h_mul,
1010                                       bh4 * v_mul, mx << !ss_hor, my << !ss_ver
1011                                       HIGHBD_CALL_SUFFIX);
1012         }
1013     } else {
1014         assert(refp != &f->sr_cur);
1015 
1016         const int orig_pos_y = (by * v_mul << 4) + mvy * (1 << !ss_ver);
1017         const int orig_pos_x = (bx * h_mul << 4) + mvx * (1 << !ss_hor);
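        /* orig_pos_* is the source position in 1/16-pel units; scale_mv()
         * maps it onto the scaled reference grid in 1/1024-pel units
         * (scale == 0x4000 meaning unscaled), so that pos >> 10 gives the
         * integer top/left sample and pos & 0x3ff the sub-pel phase passed
         * to the scaled MC functions. */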
1018 #define scale_mv(res, val, scale) do { \
1019             const int64_t tmp = (int64_t)(val) * scale + (scale - 0x4000) * 8; \
1020             res = apply_sign64((int) ((llabs(tmp) + 128) >> 8), tmp) + 32;     \
1021         } while (0)
1022         int pos_y, pos_x;
1023         scale_mv(pos_x, orig_pos_x, f->svc[refidx][0].scale);
1024         scale_mv(pos_y, orig_pos_y, f->svc[refidx][1].scale);
1025 #undef scale_mv
1026         const int left = pos_x >> 10;
1027         const int top = pos_y >> 10;
1028         const int right =
1029             ((pos_x + (bw4 * h_mul - 1) * f->svc[refidx][0].step) >> 10) + 1;
1030         const int bottom =
1031             ((pos_y + (bh4 * v_mul - 1) * f->svc[refidx][1].step) >> 10) + 1;
1032 
1033         if (DEBUG_BLOCK_INFO)
1034             printf("Off %dx%d [%d,%d,%d], size %dx%d [%d,%d]\n",
1035                    left, top, orig_pos_x, f->svc[refidx][0].scale, refidx,
1036                    right-left, bottom-top,
1037                    f->svc[refidx][0].step, f->svc[refidx][1].step);
1038 
1039         const int w = (refp->p.p.w + ss_hor) >> ss_hor;
1040         const int h = (refp->p.p.h + ss_ver) >> ss_ver;
1041         if (left < 3 || top < 3 || right + 4 > w || bottom + 4 > h) {
1042             pixel *const emu_edge_buf = bitfn(t->scratch.emu_edge);
1043             f->dsp->mc.emu_edge(right - left + 7, bottom - top + 7,
1044                                 w, h, left - 3, top - 3,
1045                                 emu_edge_buf, 320 * sizeof(pixel),
1046                                 refp->p.data[pl], ref_stride);
1047             ref = &emu_edge_buf[320 * 3 + 3];
1048             ref_stride = 320 * sizeof(pixel);
1049             if (DEBUG_BLOCK_INFO) printf("Emu\n");
1050         } else {
1051             ref = ((pixel *) refp->p.data[pl]) + PXSTRIDE(ref_stride) * top + left;
1052         }
1053 
1054         if (dst8 != NULL) {
1055             f->dsp->mc.mc_scaled[filter_2d](dst8, dst_stride, ref, ref_stride,
1056                                             bw4 * h_mul, bh4 * v_mul,
1057                                             pos_x & 0x3ff, pos_y & 0x3ff,
1058                                             f->svc[refidx][0].step,
1059                                             f->svc[refidx][1].step
1060                                             HIGHBD_CALL_SUFFIX);
1061         } else {
1062             f->dsp->mc.mct_scaled[filter_2d](dst16, ref, ref_stride,
1063                                              bw4 * h_mul, bh4 * v_mul,
1064                                              pos_x & 0x3ff, pos_y & 0x3ff,
1065                                              f->svc[refidx][0].step,
1066                                              f->svc[refidx][1].step
1067                                              HIGHBD_CALL_SUFFIX);
1068         }
1069     }
1070 
1071     return 0;
1072 }
1073 
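/* Overlapped block motion compensation: for the top (and left) edge of the
 * current block, predictions from up to four neighbouring inter blocks are
 * motion-compensated into scratch.lap and blended over the first rows
 * (columns) of dst with blend_h (blend_v). */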
static int obmc(Dav1dTaskContext *const t,
1075                 pixel *const dst, const ptrdiff_t dst_stride,
1076                 const uint8_t *const b_dim, const int pl,
1077                 const int bx4, const int by4, const int w4, const int h4)
1078 {
1079     assert(!(t->bx & 1) && !(t->by & 1));
1080     const Dav1dFrameContext *const f = t->f;
1081     /*const*/ refmvs_block **r = &t->rt.r[(t->by & 31) + 5];
1082     pixel *const lap = bitfn(t->scratch.lap);
1083     const int ss_ver = !!pl && f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
1084     const int ss_hor = !!pl && f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
1085     const int h_mul = 4 >> ss_hor, v_mul = 4 >> ss_ver;
1086     int res;
1087 
1088     if (t->by > t->ts->tiling.row_start &&
1089         (!pl || b_dim[0] * h_mul + b_dim[1] * v_mul >= 16))
1090     {
1091         for (int i = 0, x = 0; x < w4 && i < imin(b_dim[2], 4); ) {
1092             // only odd blocks are considered for overlap handling, hence +1
1093             const refmvs_block *const a_r = &r[-1][t->bx + x + 1];
1094             const uint8_t *const a_b_dim = dav1d_block_dimensions[a_r->bs];
1095 
1096             if (a_r->ref.ref[0] > 0) {
1097                 const int ow4 = iclip(a_b_dim[0], 2, b_dim[0]);
1098                 const int oh4 = imin(b_dim[1], 16) >> 1;
1099                 res = mc(t, lap, NULL, ow4 * h_mul * sizeof(pixel), ow4, (oh4 * 3 + 3) >> 2,
1100                          t->bx + x, t->by, pl, a_r->mv.mv[0],
1101                          &f->refp[a_r->ref.ref[0] - 1], a_r->ref.ref[0] - 1,
1102                          dav1d_filter_2d[t->a->filter[1][bx4 + x + 1]][t->a->filter[0][bx4 + x + 1]]);
1103                 if (res) return res;
1104                 f->dsp->mc.blend_h(&dst[x * h_mul], dst_stride, lap,
1105                                    h_mul * ow4, v_mul * oh4);
1106                 i++;
1107             }
1108             x += imax(a_b_dim[0], 2);
1109         }
1110     }
1111 
1112     if (t->bx > t->ts->tiling.col_start)
1113         for (int i = 0, y = 0; y < h4 && i < imin(b_dim[3], 4); ) {
1114             // only odd blocks are considered for overlap handling, hence +1
1115             const refmvs_block *const l_r = &r[y + 1][t->bx - 1];
1116             const uint8_t *const l_b_dim = dav1d_block_dimensions[l_r->bs];
1117 
1118             if (l_r->ref.ref[0] > 0) {
1119                 const int ow4 = imin(b_dim[0], 16) >> 1;
1120                 const int oh4 = iclip(l_b_dim[1], 2, b_dim[1]);
1121                 res = mc(t, lap, NULL, h_mul * ow4 * sizeof(pixel), ow4, oh4,
1122                          t->bx, t->by + y, pl, l_r->mv.mv[0],
1123                          &f->refp[l_r->ref.ref[0] - 1], l_r->ref.ref[0] - 1,
1124                          dav1d_filter_2d[t->l.filter[1][by4 + y + 1]][t->l.filter[0][by4 + y + 1]]);
1125                 if (res) return res;
1126                 f->dsp->mc.blend_v(&dst[y * v_mul * PXSTRIDE(dst_stride)],
1127                                    dst_stride, lap, h_mul * ow4, v_mul * oh4);
1128                 i++;
1129             }
1130             y += imax(l_b_dim[1], 2);
1131         }
1132     return 0;
1133 }
1134 
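/* Affine warp prediction, processed in 8x8 output tiles. mat[] is the warp
 * model in 1/65536 units; for each tile the warped source position is
 * evaluated at the tile centre, split into an integer top-left (dx, dy) and
 * a fractional part (mx, my) adjusted by alpha/beta (gamma/delta) so that
 * the 8x8 warp kernel can step through the tile using the per-sample deltas
 * in wmp->u.abcd. Out-of-frame references go through emu_edge. */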
static int warp_affine(Dav1dTaskContext *const t,
1136                        pixel *dst8, int16_t *dst16, const ptrdiff_t dstride,
1137                        const uint8_t *const b_dim, const int pl,
1138                        const Dav1dThreadPicture *const refp,
1139                        const Dav1dWarpedMotionParams *const wmp)
1140 {
1141     assert((dst8 != NULL) ^ (dst16 != NULL));
1142     const Dav1dFrameContext *const f = t->f;
1143     const Dav1dDSPContext *const dsp = f->dsp;
1144     const int ss_ver = !!pl && f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
1145     const int ss_hor = !!pl && f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
1146     const int h_mul = 4 >> ss_hor, v_mul = 4 >> ss_ver;
1147     assert(!((b_dim[0] * h_mul) & 7) && !((b_dim[1] * v_mul) & 7));
1148     const int32_t *const mat = wmp->matrix;
1149     const int width = (refp->p.p.w + ss_hor) >> ss_hor;
1150     const int height = (refp->p.p.h + ss_ver) >> ss_ver;
1151 
1152     for (int y = 0; y < b_dim[1] * v_mul; y += 8) {
1153         const int src_y = t->by * 4 + ((y + 4) << ss_ver);
1154         const int64_t mat3_y = (int64_t) mat[3] * src_y + mat[0];
1155         const int64_t mat5_y = (int64_t) mat[5] * src_y + mat[1];
1156         for (int x = 0; x < b_dim[0] * h_mul; x += 8) {
1157             // calculate transformation relative to center of 8x8 block in
1158             // luma pixel units
1159             const int src_x = t->bx * 4 + ((x + 4) << ss_hor);
1160             const int64_t mvx = ((int64_t) mat[2] * src_x + mat3_y) >> ss_hor;
1161             const int64_t mvy = ((int64_t) mat[4] * src_x + mat5_y) >> ss_ver;
1162 
1163             const int dx = (int) (mvx >> 16) - 4;
1164             const int mx = (((int) mvx & 0xffff) - wmp->u.p.alpha * 4 -
1165                                                    wmp->u.p.beta  * 7) & ~0x3f;
1166             const int dy = (int) (mvy >> 16) - 4;
1167             const int my = (((int) mvy & 0xffff) - wmp->u.p.gamma * 4 -
1168                                                    wmp->u.p.delta * 4) & ~0x3f;
1169 
1170             const pixel *ref_ptr;
1171             ptrdiff_t ref_stride = refp->p.stride[!!pl];
1172 
1173             if (dx < 3 || dx + 8 + 4 > width || dy < 3 || dy + 8 + 4 > height) {
1174                 pixel *const emu_edge_buf = bitfn(t->scratch.emu_edge);
1175                 f->dsp->mc.emu_edge(15, 15, width, height, dx - 3, dy - 3,
1176                                     emu_edge_buf, 32 * sizeof(pixel),
1177                                     refp->p.data[pl], ref_stride);
1178                 ref_ptr = &emu_edge_buf[32 * 3 + 3];
1179                 ref_stride = 32 * sizeof(pixel);
1180             } else {
1181                 ref_ptr = ((pixel *) refp->p.data[pl]) + PXSTRIDE(ref_stride) * dy + dx;
1182             }
1183             if (dst16 != NULL)
1184                 dsp->mc.warp8x8t(&dst16[x], dstride, ref_ptr, ref_stride,
1185                                  wmp->u.abcd, mx, my HIGHBD_CALL_SUFFIX);
1186             else
1187                 dsp->mc.warp8x8(&dst8[x], dstride, ref_ptr, ref_stride,
1188                                 wmp->u.abcd, mx, my HIGHBD_CALL_SUFFIX);
1189         }
1190         if (dst8) dst8  += 8 * PXSTRIDE(dstride);
1191         else      dst16 += 8 * dstride;
1192     }
1193     return 0;
1194 }
1195 
1196 void bytefn(dav1d_recon_b_intra)(Dav1dTaskContext *const t, const enum BlockSize bs,
1197                                  const enum EdgeFlags intra_edge_flags,
1198                                  const Av1Block *const b)
1199 {
1200     Dav1dTileState *const ts = t->ts;
1201     const Dav1dFrameContext *const f = t->f;
1202     const Dav1dDSPContext *const dsp = f->dsp;
1203     const int bx4 = t->bx & 31, by4 = t->by & 31;
1204     const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
1205     const int ss_hor = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
1206     const int cbx4 = bx4 >> ss_hor, cby4 = by4 >> ss_ver;
1207     const uint8_t *const b_dim = dav1d_block_dimensions[bs];
1208     const int bw4 = b_dim[0], bh4 = b_dim[1];
1209     const int w4 = imin(bw4, f->bw - t->bx), h4 = imin(bh4, f->bh - t->by);
1210     const int cw4 = (w4 + ss_hor) >> ss_hor, ch4 = (h4 + ss_ver) >> ss_ver;
1211     const int has_chroma = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I400 &&
1212                            (bw4 > ss_hor || t->bx & 1) &&
1213                            (bh4 > ss_ver || t->by & 1);
1214     const TxfmInfo *const t_dim = &dav1d_txfm_dimensions[b->tx];
1215     const TxfmInfo *const uv_t_dim = &dav1d_txfm_dimensions[b->uvtx];
1216 
1217     // coefficient coding
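         // the edge scratch pointer is offset (+ 128) so that left neighbour
         // samples can be stored at negative offsets, the top-left sample at
         // edge[0] and the top row from edge[1] on (cf. the "l"/"tl"/"t"
         // hex_dumps below), which is the layout prepare_intra_edges() fills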
1218     pixel *const edge = bitfn(t->scratch.edge) + 128;
1219     const int cbw4 = (bw4 + ss_hor) >> ss_hor, cbh4 = (bh4 + ss_ver) >> ss_ver;
1220 
1221     const int intra_edge_filter_flag = f->seq_hdr->intra_edge_filter << 10;
1222 
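         // blocks wider/taller than 64 pixels are handled in 64x64 (16 4-px
         // units) chunks; within a chunk, each luma transform block is intra
         // predicted and, unless the block is skipped, its coefficients are
         // decoded and the inverse transform added, with chroma following
         // the same pattern further down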
1223     for (int init_y = 0; init_y < h4; init_y += 16) {
1224         const int sub_h4 = imin(h4, 16 + init_y);
1225         const int sub_ch4 = imin(ch4, (init_y + 16) >> ss_ver);
1226         for (int init_x = 0; init_x < w4; init_x += 16) {
1227             if (b->pal_sz[0]) {
1228                 pixel *dst = ((pixel *) f->cur.data[0]) +
1229                              4 * (t->by * PXSTRIDE(f->cur.stride[0]) + t->bx);
1230                 const uint8_t *pal_idx;
1231                 if (t->frame_thread.pass) {
1232                     const int p = t->frame_thread.pass & 1;
1233                     assert(ts->frame_thread[p].pal_idx);
1234                     pal_idx = ts->frame_thread[p].pal_idx;
1235                     ts->frame_thread[p].pal_idx += bw4 * bh4 * 16;
1236                 } else {
1237                     pal_idx = t->scratch.pal_idx;
1238                 }
1239                 const uint16_t *const pal = t->frame_thread.pass ?
1240                     f->frame_thread.pal[((t->by >> 1) + (t->bx & 1)) * (f->b4_stride >> 1) +
1241                                         ((t->bx >> 1) + (t->by & 1))][0] : t->scratch.pal[0];
1242                 f->dsp->ipred.pal_pred(dst, f->cur.stride[0], pal,
1243                                        pal_idx, bw4 * 4, bh4 * 4);
1244                 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
1245                     hex_dump(dst, PXSTRIDE(f->cur.stride[0]),
1246                              bw4 * 4, bh4 * 4, "y-pal-pred");
1247             }
1248 
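                 // the neighbours' smooth-prediction flags and the
                 // sequence-level intra edge filter bit are folded into the
                 // angle value passed to the intra predictors;
                 // sb_has_tr/sb_has_bl track whether this 64x64 chunk still
                 // has top-right / bottom-left neighbours available for edge
                 // construction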
1249             const int intra_flags = (sm_flag(t->a, bx4) |
1250                                      sm_flag(&t->l, by4) |
1251                                      intra_edge_filter_flag);
1252             const int sb_has_tr = init_x + 16 < w4 ? 1 : init_y ? 0 :
1253                               intra_edge_flags & EDGE_I444_TOP_HAS_RIGHT;
1254             const int sb_has_bl = init_x ? 0 : init_y + 16 < h4 ? 1 :
1255                               intra_edge_flags & EDGE_I444_LEFT_HAS_BOTTOM;
1256             int y, x;
1257             const int sub_w4 = imin(w4, init_x + 16);
1258             for (y = init_y, t->by += init_y; y < sub_h4;
1259                  y += t_dim->h, t->by += t_dim->h)
1260             {
1261                 pixel *dst = ((pixel *) f->cur.data[0]) +
1262                                4 * (t->by * PXSTRIDE(f->cur.stride[0]) +
1263                                     t->bx + init_x);
1264                 for (x = init_x, t->bx += init_x; x < sub_w4;
1265                      x += t_dim->w, t->bx += t_dim->w)
1266                 {
1267                     if (b->pal_sz[0]) goto skip_y_pred;
1268 
1269                     int angle = b->y_angle;
1270                     const enum EdgeFlags edge_flags =
1271                         (((y > init_y || !sb_has_tr) && (x + t_dim->w >= sub_w4)) ?
1272                              0 : EDGE_I444_TOP_HAS_RIGHT) |
1273                         ((x > init_x || (!sb_has_bl && y + t_dim->h >= sub_h4)) ?
1274                              0 : EDGE_I444_LEFT_HAS_BOTTOM);
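                         // for the first transform row of a superblock row,
                         // the top neighbours come from f->ipred_edge (saved
                         // by dav1d_backup_ipred_edge() below) rather than
                         // from the frame buffer, so intra prediction is not
                         // affected by in-loop filtering of the rows above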
1275                     const pixel *top_sb_edge = NULL;
1276                     if (!(t->by & (f->sb_step - 1))) {
1277                         top_sb_edge = f->ipred_edge[0];
1278                         const int sby = t->by >> f->sb_shift;
1279                         top_sb_edge += f->sb128w * 128 * (sby - 1);
1280                     }
1281                     const enum IntraPredMode m =
1282                         bytefn(dav1d_prepare_intra_edges)(t->bx,
1283                                                           t->bx > ts->tiling.col_start,
1284                                                           t->by,
1285                                                           t->by > ts->tiling.row_start,
1286                                                           ts->tiling.col_end,
1287                                                           ts->tiling.row_end,
1288                                                           edge_flags, dst,
1289                                                           f->cur.stride[0], top_sb_edge,
1290                                                           b->y_mode, &angle,
1291                                                           t_dim->w, t_dim->h,
1292                                                           f->seq_hdr->intra_edge_filter,
1293                                                           edge HIGHBD_CALL_SUFFIX);
1294                     dsp->ipred.intra_pred[m](dst, f->cur.stride[0], edge,
1295                                              t_dim->w * 4, t_dim->h * 4,
1296                                              angle | intra_flags,
1297                                              4 * f->bw - 4 * t->bx,
1298                                              4 * f->bh - 4 * t->by
1299                                              HIGHBD_CALL_SUFFIX);
1300 
1301                     if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS) {
1302                         hex_dump(edge - t_dim->h * 4, t_dim->h * 4,
1303                                  t_dim->h * 4, 2, "l");
1304                         hex_dump(edge, 0, 1, 1, "tl");
1305                         hex_dump(edge + 1, t_dim->w * 4,
1306                                  t_dim->w * 4, 2, "t");
1307                         hex_dump(dst, f->cur.stride[0],
1308                                  t_dim->w * 4, t_dim->h * 4, "y-intra-pred");
1309                     }
1310 
1311                 skip_y_pred: {}
1312                     if (!b->skip) {
1313                         coef *cf;
1314                         int eob;
1315                         enum TxfmType txtp;
1316                         if (t->frame_thread.pass) {
1317                             const int p = t->frame_thread.pass & 1;
1318                             cf = ts->frame_thread[p].cf;
1319                             ts->frame_thread[p].cf += imin(t_dim->w, 8) * imin(t_dim->h, 8) * 16;
1320                             const struct CodedBlockInfo *const cbi =
1321                                 &f->frame_thread.cbi[t->by * f->b4_stride + t->bx];
1322                             eob = cbi->eob[0];
1323                             txtp = cbi->txtp[0];
1324                         } else {
1325                             uint8_t cf_ctx;
1326                             cf = bitfn(t->cf);
1327                             eob = decode_coefs(t, &t->a->lcoef[bx4 + x],
1328                                                &t->l.lcoef[by4 + y], b->tx, bs,
1329                                                b, 1, 0, cf, &txtp, &cf_ctx);
1330                             if (DEBUG_BLOCK_INFO)
1331                                 printf("Post-y-cf-blk[tx=%d,txtp=%d,eob=%d]: r=%d\n",
1332                                        b->tx, txtp, eob, ts->msac.rng);
1333 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
1334                             rep_macro(type, t->dir lcoef, off, mul * cf_ctx)
1335 #define default_memset(dir, diridx, off, sz) \
1336                             memset(&t->dir lcoef[off], cf_ctx, sz)
1337                             case_set_upto16_with_default(imin(t_dim->h, f->bh - t->by), \
1338                                                          l., 1, by4 + y);
1339                             case_set_upto16_with_default(imin(t_dim->w, f->bw - t->bx), \
1340                                                          a->, 0, bx4 + x);
1341 #undef default_memset
1342 #undef set_ctx
1343                         }
1344                         if (eob >= 0) {
1345                             if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
1346                                 coef_dump(cf, imin(t_dim->h, 8) * 4,
1347                                           imin(t_dim->w, 8) * 4, 3, "dq");
1348                             dsp->itx.itxfm_add[b->tx]
1349                                               [txtp](dst,
1350                                                      f->cur.stride[0],
1351                                                      cf, eob HIGHBD_CALL_SUFFIX);
1352                             if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
1353                                 hex_dump(dst, f->cur.stride[0],
1354                                          t_dim->w * 4, t_dim->h * 4, "recon");
1355                         }
1356                     } else if (!t->frame_thread.pass) {
1357 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
1358                         rep_macro(type, t->dir lcoef, off, mul * 0x40)
1359                         case_set_upto16(t_dim->h, l., 1, by4 + y);
1360                         case_set_upto16(t_dim->w, a->, 0, bx4 + x);
1361 #undef set_ctx
1362                     }
1363                     dst += 4 * t_dim->w;
1364                 }
1365                 t->bx -= x;
1366             }
1367             t->by -= y;
1368 
1369             if (!has_chroma) continue;
1370 
1371             const ptrdiff_t stride = f->cur.stride[1];
1372 
1373             if (b->uv_mode == CFL_PRED) {
1374                 assert(!init_x && !init_y);
1375 
1376                 int16_t *const ac = t->scratch.ac;
1377                 pixel *y_src = ((pixel *) f->cur.data[0]) + 4 * (t->bx & ~ss_hor) +
1378                                  4 * (t->by & ~ss_ver) * PXSTRIDE(f->cur.stride[0]);
1379                 const ptrdiff_t uv_off = 4 * ((t->bx >> ss_hor) +
1380                                               (t->by >> ss_ver) * PXSTRIDE(stride));
1381                 pixel *const uv_dst[2] = { ((pixel *) f->cur.data[1]) + uv_off,
1382                                            ((pixel *) f->cur.data[2]) + uv_off };
1383 
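                     // chroma-from-luma: the reconstructed luma block is
                     // downsampled to chroma resolution and mean-subtracted
                     // into the ac buffer, a DC prediction provides the
                     // chroma base, and cfl_pred() adds the ac values scaled
                     // by the signalled per-plane alpha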
1384                 const int furthest_r =
1385                     ((cw4 << ss_hor) + t_dim->w - 1) & ~(t_dim->w - 1);
1386                 const int furthest_b =
1387                     ((ch4 << ss_ver) + t_dim->h - 1) & ~(t_dim->h - 1);
1388                 dsp->ipred.cfl_ac[f->cur.p.layout - 1](ac, y_src, f->cur.stride[0],
1389                                                          cbw4 - (furthest_r >> ss_hor),
1390                                                          cbh4 - (furthest_b >> ss_ver),
1391                                                          cbw4 * 4, cbh4 * 4);
1392                 for (int pl = 0; pl < 2; pl++) {
1393                     if (!b->cfl_alpha[pl]) continue;
1394                     int angle = 0;
1395                     const pixel *top_sb_edge = NULL;
1396                     if (!((t->by & ~ss_ver) & (f->sb_step - 1))) {
1397                         top_sb_edge = f->ipred_edge[pl + 1];
1398                         const int sby = t->by >> f->sb_shift;
1399                         top_sb_edge += f->sb128w * 128 * (sby - 1);
1400                     }
1401                     const int xpos = t->bx >> ss_hor, ypos = t->by >> ss_ver;
1402                     const int xstart = ts->tiling.col_start >> ss_hor;
1403                     const int ystart = ts->tiling.row_start >> ss_ver;
1404                     const enum IntraPredMode m =
1405                         bytefn(dav1d_prepare_intra_edges)(xpos, xpos > xstart,
1406                                                           ypos, ypos > ystart,
1407                                                           ts->tiling.col_end >> ss_hor,
1408                                                           ts->tiling.row_end >> ss_ver,
1409                                                           0, uv_dst[pl], stride,
1410                                                           top_sb_edge, DC_PRED, &angle,
1411                                                           uv_t_dim->w, uv_t_dim->h, 0,
1412                                                           edge HIGHBD_CALL_SUFFIX);
1413                     dsp->ipred.cfl_pred[m](uv_dst[pl], stride, edge,
1414                                            uv_t_dim->w * 4,
1415                                            uv_t_dim->h * 4,
1416                                            ac, b->cfl_alpha[pl]
1417                                            HIGHBD_CALL_SUFFIX);
1418                 }
1419                 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS) {
1420                     ac_dump(ac, 4*cbw4, 4*cbh4, "ac");
1421                     hex_dump(uv_dst[0], stride, cbw4 * 4, cbh4 * 4, "u-cfl-pred");
1422                     hex_dump(uv_dst[1], stride, cbw4 * 4, cbh4 * 4, "v-cfl-pred");
1423                 }
1424             } else if (b->pal_sz[1]) {
1425                 const ptrdiff_t uv_dstoff = 4 * ((t->bx >> ss_hor) +
1426                                               (t->by >> ss_ver) * PXSTRIDE(f->cur.stride[1]));
1427                 const uint16_t (*pal)[8];
1428                 const uint8_t *pal_idx;
1429                 if (t->frame_thread.pass) {
1430                     const int p = t->frame_thread.pass & 1;
1431                     assert(ts->frame_thread[p].pal_idx);
1432                     pal = f->frame_thread.pal[((t->by >> 1) + (t->bx & 1)) * (f->b4_stride >> 1) +
1433                                               ((t->bx >> 1) + (t->by & 1))];
1434                     pal_idx = ts->frame_thread[p].pal_idx;
1435                     ts->frame_thread[p].pal_idx += cbw4 * cbh4 * 16;
1436                 } else {
1437                     pal = t->scratch.pal;
1438                     pal_idx = &t->scratch.pal_idx[bw4 * bh4 * 16];
1439                 }
1440 
1441                 f->dsp->ipred.pal_pred(((pixel *) f->cur.data[1]) + uv_dstoff,
1442                                        f->cur.stride[1], pal[1],
1443                                        pal_idx, cbw4 * 4, cbh4 * 4);
1444                 f->dsp->ipred.pal_pred(((pixel *) f->cur.data[2]) + uv_dstoff,
1445                                        f->cur.stride[1], pal[2],
1446                                        pal_idx, cbw4 * 4, cbh4 * 4);
1447                 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS) {
1448                     hex_dump(((pixel *) f->cur.data[1]) + uv_dstoff,
1449                              PXSTRIDE(f->cur.stride[1]),
1450                              cbw4 * 4, cbh4 * 4, "u-pal-pred");
1451                     hex_dump(((pixel *) f->cur.data[2]) + uv_dstoff,
1452                              PXSTRIDE(f->cur.stride[1]),
1453                              cbw4 * 4, cbh4 * 4, "v-pal-pred");
1454                 }
1455             }
1456 
1457             const int sm_uv_fl = sm_uv_flag(t->a, cbx4) |
1458                                  sm_uv_flag(&t->l, cby4);
1459             const int uv_sb_has_tr =
1460                 ((init_x + 16) >> ss_hor) < cw4 ? 1 : init_y ? 0 :
1461                 intra_edge_flags & (EDGE_I420_TOP_HAS_RIGHT >> (f->cur.p.layout - 1));
1462             const int uv_sb_has_bl =
1463                 init_x ? 0 : ((init_y + 16) >> ss_ver) < ch4 ? 1 :
1464                 intra_edge_flags & (EDGE_I420_LEFT_HAS_BOTTOM >> (f->cur.p.layout - 1));
1465             const int sub_cw4 = imin(cw4, (init_x + 16) >> ss_hor);
1466             for (int pl = 0; pl < 2; pl++) {
1467                 for (y = init_y >> ss_ver, t->by += init_y; y < sub_ch4;
1468                      y += uv_t_dim->h, t->by += uv_t_dim->h << ss_ver)
1469                 {
1470                     pixel *dst = ((pixel *) f->cur.data[1 + pl]) +
1471                                    4 * ((t->by >> ss_ver) * PXSTRIDE(stride) +
1472                                         ((t->bx + init_x) >> ss_hor));
1473                     for (x = init_x >> ss_hor, t->bx += init_x; x < sub_cw4;
1474                          x += uv_t_dim->w, t->bx += uv_t_dim->w << ss_hor)
1475                     {
1476                         if ((b->uv_mode == CFL_PRED && b->cfl_alpha[pl]) ||
1477                             b->pal_sz[1])
1478                         {
1479                             goto skip_uv_pred;
1480                         }
1481 
1482                         int angle = b->uv_angle;
1483                         // this probably looks weird because we're using
1484                         // luma flags in a chroma loop, but that's because
1485                         // prepare_intra_edges() expects luma flags as input
1486                         const enum EdgeFlags edge_flags =
1487                             (((y > (init_y >> ss_ver) || !uv_sb_has_tr) &&
1488                               (x + uv_t_dim->w >= sub_cw4)) ?
1489                                  0 : EDGE_I444_TOP_HAS_RIGHT) |
1490                             ((x > (init_x >> ss_hor) ||
1491                               (!uv_sb_has_bl && y + uv_t_dim->h >= sub_ch4)) ?
1492                                  0 : EDGE_I444_LEFT_HAS_BOTTOM);
1493                         const pixel *top_sb_edge = NULL;
1494                         if (!((t->by & ~ss_ver) & (f->sb_step - 1))) {
1495                             top_sb_edge = f->ipred_edge[1 + pl];
1496                             const int sby = t->by >> f->sb_shift;
1497                             top_sb_edge += f->sb128w * 128 * (sby - 1);
1498                         }
1499                         const enum IntraPredMode uv_mode =
1500                              b->uv_mode == CFL_PRED ? DC_PRED : b->uv_mode;
1501                         const int xpos = t->bx >> ss_hor, ypos = t->by >> ss_ver;
1502                         const int xstart = ts->tiling.col_start >> ss_hor;
1503                         const int ystart = ts->tiling.row_start >> ss_ver;
1504                         const enum IntraPredMode m =
1505                             bytefn(dav1d_prepare_intra_edges)(xpos, xpos > xstart,
1506                                                               ypos, ypos > ystart,
1507                                                               ts->tiling.col_end >> ss_hor,
1508                                                               ts->tiling.row_end >> ss_ver,
1509                                                               edge_flags, dst, stride,
1510                                                               top_sb_edge, uv_mode,
1511                                                               &angle, uv_t_dim->w,
1512                                                               uv_t_dim->h,
1513                                                               f->seq_hdr->intra_edge_filter,
1514                                                               edge HIGHBD_CALL_SUFFIX);
1515                         angle |= intra_edge_filter_flag;
1516                         dsp->ipred.intra_pred[m](dst, stride, edge,
1517                                                  uv_t_dim->w * 4,
1518                                                  uv_t_dim->h * 4,
1519                                                  angle | sm_uv_fl,
1520                                                  (4 * f->bw + ss_hor -
1521                                                   4 * (t->bx & ~ss_hor)) >> ss_hor,
1522                                                  (4 * f->bh + ss_ver -
1523                                                   4 * (t->by & ~ss_ver)) >> ss_ver
1524                                                  HIGHBD_CALL_SUFFIX);
1525                         if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS) {
1526                             hex_dump(edge - uv_t_dim->h * 4, uv_t_dim->h * 4,
1527                                      uv_t_dim->h * 4, 2, "l");
1528                             hex_dump(edge, 0, 1, 1, "tl");
1529                             hex_dump(edge + 1, uv_t_dim->w * 4,
1530                                      uv_t_dim->w * 4, 2, "t");
1531                             hex_dump(dst, stride, uv_t_dim->w * 4,
1532                                      uv_t_dim->h * 4, pl ? "v-intra-pred" : "u-intra-pred");
1533                         }
1534 
1535                     skip_uv_pred: {}
1536                         if (!b->skip) {
1537                             enum TxfmType txtp;
1538                             int eob;
1539                             coef *cf;
1540                             if (t->frame_thread.pass) {
1541                                 const int p = t->frame_thread.pass & 1;
1542                                 cf = ts->frame_thread[p].cf;
1543                                 ts->frame_thread[p].cf += uv_t_dim->w * uv_t_dim->h * 16;
1544                                 const struct CodedBlockInfo *const cbi =
1545                                     &f->frame_thread.cbi[t->by * f->b4_stride + t->bx];
1546                                 eob = cbi->eob[pl + 1];
1547                                 txtp = cbi->txtp[pl + 1];
1548                             } else {
1549                                 uint8_t cf_ctx;
1550                                 cf = bitfn(t->cf);
1551                                 eob = decode_coefs(t, &t->a->ccoef[pl][cbx4 + x],
1552                                                    &t->l.ccoef[pl][cby4 + y],
1553                                                    b->uvtx, bs, b, 1, 1 + pl, cf,
1554                                                    &txtp, &cf_ctx);
1555                                 if (DEBUG_BLOCK_INFO)
1556                                     printf("Post-uv-cf-blk[pl=%d,tx=%d,"
1557                                            "txtp=%d,eob=%d]: r=%d [x=%d,cbx4=%d]\n",
1558                                            pl, b->uvtx, txtp, eob, ts->msac.rng, x, cbx4);
1559 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
1560                                 rep_macro(type, t->dir ccoef[pl], off, mul * cf_ctx)
1561 #define default_memset(dir, diridx, off, sz) \
1562                                 memset(&t->dir ccoef[pl][off], cf_ctx, sz)
1563                                 case_set_upto16_with_default( \
1564                                          imin(uv_t_dim->h, (f->bh - t->by + ss_ver) >> ss_ver),
1565                                          l., 1, cby4 + y);
1566                                 case_set_upto16_with_default( \
1567                                          imin(uv_t_dim->w, (f->bw - t->bx + ss_hor) >> ss_hor),
1568                                          a->, 0, cbx4 + x);
1569 #undef default_memset
1570 #undef set_ctx
1571                             }
1572                             if (eob >= 0) {
1573                                 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
1574                                     coef_dump(cf, uv_t_dim->h * 4,
1575                                               uv_t_dim->w * 4, 3, "dq");
1576                                 dsp->itx.itxfm_add[b->uvtx]
1577                                                   [txtp](dst, stride,
1578                                                          cf, eob HIGHBD_CALL_SUFFIX);
1579                                 if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
1580                                     hex_dump(dst, stride, uv_t_dim->w * 4,
1581                                              uv_t_dim->h * 4, "recon");
1582                             }
1583                         } else if (!t->frame_thread.pass) {
1584 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
1585                             rep_macro(type, t->dir ccoef[pl], off, mul * 0x40)
1586                             case_set_upto16(uv_t_dim->h, l., 1, cby4 + y);
1587                             case_set_upto16(uv_t_dim->w, a->, 0, cbx4 + x);
1588 #undef set_ctx
1589                         }
1590                         dst += uv_t_dim->w * 4;
1591                     }
1592                     t->bx -= x << ss_hor;
1593                 }
1594                 t->by -= y << ss_ver;
1595             }
1596         }
1597     }
1598 }
1599 
1600 int bytefn(dav1d_recon_b_inter)(Dav1dTaskContext *const t, const enum BlockSize bs,
1601                                 const Av1Block *const b)
1602 {
1603     Dav1dTileState *const ts = t->ts;
1604     const Dav1dFrameContext *const f = t->f;
1605     const Dav1dDSPContext *const dsp = f->dsp;
1606     const int bx4 = t->bx & 31, by4 = t->by & 31;
1607     const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
1608     const int ss_hor = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
1609     const int cbx4 = bx4 >> ss_hor, cby4 = by4 >> ss_ver;
1610     const uint8_t *const b_dim = dav1d_block_dimensions[bs];
1611     const int bw4 = b_dim[0], bh4 = b_dim[1];
1612     const int w4 = imin(bw4, f->bw - t->bx), h4 = imin(bh4, f->bh - t->by);
1613     const int has_chroma = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I400 &&
1614                            (bw4 > ss_hor || t->bx & 1) &&
1615                            (bh4 > ss_ver || t->by & 1);
1616     const int chr_layout_idx = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I400 ? 0 :
1617                                DAV1D_PIXEL_LAYOUT_I444 - f->cur.p.layout;
1618     int res;
1619 
1620     // prediction
1621     const int cbh4 = (bh4 + ss_ver) >> ss_ver, cbw4 = (bw4 + ss_hor) >> ss_hor;
1622     pixel *dst = ((pixel *) f->cur.data[0]) +
1623         4 * (t->by * PXSTRIDE(f->cur.stride[0]) + t->bx);
1624     const ptrdiff_t uvdstoff =
1625         4 * ((t->bx >> ss_hor) + (t->by >> ss_ver) * PXSTRIDE(f->cur.stride[1]));
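         // intra frames reach this point through intra block copy: the block
         // is "motion compensated" from the current frame itself (f->sr_cur),
         // using b->mv[0] as the copy vector and the bilinear filter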
1626     if (IS_KEY_OR_INTRA(f->frame_hdr)) {
1627         // intrabc
1628         assert(!f->frame_hdr->super_res.enabled);
1629         res = mc(t, dst, NULL, f->cur.stride[0], bw4, bh4, t->bx, t->by, 0,
1630                  b->mv[0], &f->sr_cur, 0 /* unused */, FILTER_2D_BILINEAR);
1631         if (res) return res;
1632         if (has_chroma) for (int pl = 1; pl < 3; pl++) {
1633             res = mc(t, ((pixel *)f->cur.data[pl]) + uvdstoff, NULL, f->cur.stride[1],
1634                      bw4 << (bw4 == ss_hor), bh4 << (bh4 == ss_ver),
1635                      t->bx & ~ss_hor, t->by & ~ss_ver, pl, b->mv[0],
1636                      &f->sr_cur, 0 /* unused */, FILTER_2D_BILINEAR);
1637             if (res) return res;
1638         }
1639     } else if (b->comp_type == COMP_INTER_NONE) {
1640         const Dav1dThreadPicture *const refp = &f->refp[b->ref[0]];
1641         const enum Filter2d filter_2d = b->filter2d;
1642 
1643         if (imin(bw4, bh4) > 1 &&
1644             ((b->inter_mode == GLOBALMV && f->gmv_warp_allowed[b->ref[0]]) ||
1645              (b->motion_mode == MM_WARP && t->warpmv.type > DAV1D_WM_TYPE_TRANSLATION)))
1646         {
1647             res = warp_affine(t, dst, NULL, f->cur.stride[0], b_dim, 0, refp,
1648                               b->motion_mode == MM_WARP ? &t->warpmv :
1649                                   &f->frame_hdr->gmv[b->ref[0]]);
1650             if (res) return res;
1651         } else {
1652             res = mc(t, dst, NULL, f->cur.stride[0],
1653                      bw4, bh4, t->bx, t->by, 0, b->mv[0], refp, b->ref[0], filter_2d);
1654             if (res) return res;
1655             if (b->motion_mode == MM_OBMC) {
1656                 res = obmc(t, dst, f->cur.stride[0], b_dim, 0, bx4, by4, w4, h4);
1657                 if (res) return res;
1658             }
1659         }
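             // inter-intra: an intra prediction with the signalled
             // inter-intra mode (II_SMOOTH_PRED maps to SMOOTH_PRED) is
             // generated into a scratch buffer from the usual neighbours,
             // then blended into the inter prediction with either a
             // mode-dependent mask or the signalled wedge mask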
1660         if (b->interintra_type) {
1661             pixel *const tl_edge = bitfn(t->scratch.edge) + 32;
1662             enum IntraPredMode m = b->interintra_mode == II_SMOOTH_PRED ?
1663                                    SMOOTH_PRED : b->interintra_mode;
1664             pixel *const tmp = bitfn(t->scratch.interintra);
1665             int angle = 0;
1666             const pixel *top_sb_edge = NULL;
1667             if (!(t->by & (f->sb_step - 1))) {
1668                 top_sb_edge = f->ipred_edge[0];
1669                 const int sby = t->by >> f->sb_shift;
1670                 top_sb_edge += f->sb128w * 128 * (sby - 1);
1671             }
1672             m = bytefn(dav1d_prepare_intra_edges)(t->bx, t->bx > ts->tiling.col_start,
1673                                                   t->by, t->by > ts->tiling.row_start,
1674                                                   ts->tiling.col_end, ts->tiling.row_end,
1675                                                   0, dst, f->cur.stride[0], top_sb_edge,
1676                                                   m, &angle, bw4, bh4, 0, tl_edge
1677                                                   HIGHBD_CALL_SUFFIX);
1678             dsp->ipred.intra_pred[m](tmp, 4 * bw4 * sizeof(pixel),
1679                                      tl_edge, bw4 * 4, bh4 * 4, 0, 0, 0
1680                                      HIGHBD_CALL_SUFFIX);
1681             const uint8_t *const ii_mask =
1682                 b->interintra_type == INTER_INTRA_BLEND ?
1683                      dav1d_ii_masks[bs][0][b->interintra_mode] :
1684                      dav1d_wedge_masks[bs][0][0][b->wedge_idx];
1685             dsp->mc.blend(dst, f->cur.stride[0], tmp,
1686                           bw4 * 4, bh4 * 4, ii_mask);
1687         }
1688 
1689         if (!has_chroma) goto skip_inter_chroma_pred;
1690 
1691         // sub8x8 derivation
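             // with chroma subsampling, a 4-px-wide/tall luma block shares
             // its chroma 4x4 with up to three neighbouring luma blocks; if
             // those neighbours are themselves inter blocks (ref > 0), the
             // shared chroma block also has to be predicted with each
             // neighbour's own motion vector and reference, which is what
             // the is_sub8x8 path below does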
1692         int is_sub8x8 = bw4 == ss_hor || bh4 == ss_ver;
1693         refmvs_block *const *r;
1694         if (is_sub8x8) {
1695             assert(ss_hor == 1);
1696             r = &t->rt.r[(t->by & 31) + 5];
1697             if (bw4 == 1) is_sub8x8 &= r[0][t->bx - 1].ref.ref[0] > 0;
1698             if (bh4 == ss_ver) is_sub8x8 &= r[-1][t->bx].ref.ref[0] > 0;
1699             if (bw4 == 1 && bh4 == ss_ver)
1700                 is_sub8x8 &= r[-1][t->bx - 1].ref.ref[0] > 0;
1701         }
1702 
1703         // chroma prediction
1704         if (is_sub8x8) {
1705             assert(ss_hor == 1);
1706             ptrdiff_t h_off = 0, v_off = 0;
1707             if (bw4 == 1 && bh4 == ss_ver) {
1708                 for (int pl = 0; pl < 2; pl++) {
1709                     res = mc(t, ((pixel *) f->cur.data[1 + pl]) + uvdstoff,
1710                              NULL, f->cur.stride[1],
1711                              bw4, bh4, t->bx - 1, t->by - 1, 1 + pl,
1712                              r[-1][t->bx - 1].mv.mv[0],
1713                              &f->refp[r[-1][t->bx - 1].ref.ref[0] - 1],
1714                              r[-1][t->bx - 1].ref.ref[0] - 1,
1715                              t->frame_thread.pass != 2 ? t->tl_4x4_filter :
1716                                  f->frame_thread.b[((t->by - 1) * f->b4_stride) + t->bx - 1].filter2d);
1717                     if (res) return res;
1718                 }
1719                 v_off = 2 * PXSTRIDE(f->cur.stride[1]);
1720                 h_off = 2;
1721             }
1722             if (bw4 == 1) {
1723                 const enum Filter2d left_filter_2d =
1724                     dav1d_filter_2d[t->l.filter[1][by4]][t->l.filter[0][by4]];
1725                 for (int pl = 0; pl < 2; pl++) {
1726                     res = mc(t, ((pixel *) f->cur.data[1 + pl]) + uvdstoff + v_off, NULL,
1727                              f->cur.stride[1], bw4, bh4, t->bx - 1,
1728                              t->by, 1 + pl, r[0][t->bx - 1].mv.mv[0],
1729                              &f->refp[r[0][t->bx - 1].ref.ref[0] - 1],
1730                              r[0][t->bx - 1].ref.ref[0] - 1,
1731                              t->frame_thread.pass != 2 ? left_filter_2d :
1732                                  f->frame_thread.b[(t->by * f->b4_stride) + t->bx - 1].filter2d);
1733                     if (res) return res;
1734                 }
1735                 h_off = 2;
1736             }
1737             if (bh4 == ss_ver) {
1738                 const enum Filter2d top_filter_2d =
1739                     dav1d_filter_2d[t->a->filter[1][bx4]][t->a->filter[0][bx4]];
1740                 for (int pl = 0; pl < 2; pl++) {
1741                     res = mc(t, ((pixel *) f->cur.data[1 + pl]) + uvdstoff + h_off, NULL,
1742                              f->cur.stride[1], bw4, bh4, t->bx, t->by - 1,
1743                              1 + pl, r[-1][t->bx].mv.mv[0],
1744                              &f->refp[r[-1][t->bx].ref.ref[0] - 1],
1745                              r[-1][t->bx].ref.ref[0] - 1,
1746                              t->frame_thread.pass != 2 ? top_filter_2d :
1747                                  f->frame_thread.b[((t->by - 1) * f->b4_stride) + t->bx].filter2d);
1748                     if (res) return res;
1749                 }
1750                 v_off = 2 * PXSTRIDE(f->cur.stride[1]);
1751             }
1752             for (int pl = 0; pl < 2; pl++) {
1753                 res = mc(t, ((pixel *) f->cur.data[1 + pl]) + uvdstoff + h_off + v_off, NULL, f->cur.stride[1],
1754                          bw4, bh4, t->bx, t->by, 1 + pl, b->mv[0],
1755                          refp, b->ref[0], filter_2d);
1756                 if (res) return res;
1757             }
1758         } else {
1759             if (imin(cbw4, cbh4) > 1 &&
1760                 ((b->inter_mode == GLOBALMV && f->gmv_warp_allowed[b->ref[0]]) ||
1761                  (b->motion_mode == MM_WARP && t->warpmv.type > DAV1D_WM_TYPE_TRANSLATION)))
1762             {
1763                 for (int pl = 0; pl < 2; pl++) {
1764                     res = warp_affine(t, ((pixel *) f->cur.data[1 + pl]) + uvdstoff, NULL,
1765                                       f->cur.stride[1], b_dim, 1 + pl, refp,
1766                                       b->motion_mode == MM_WARP ? &t->warpmv :
1767                                           &f->frame_hdr->gmv[b->ref[0]]);
1768                     if (res) return res;
1769                 }
1770             } else {
1771                 for (int pl = 0; pl < 2; pl++) {
1772                     res = mc(t, ((pixel *) f->cur.data[1 + pl]) + uvdstoff,
1773                              NULL, f->cur.stride[1],
1774                              bw4 << (bw4 == ss_hor), bh4 << (bh4 == ss_ver),
1775                              t->bx & ~ss_hor, t->by & ~ss_ver,
1776                              1 + pl, b->mv[0], refp, b->ref[0], filter_2d);
1777                     if (res) return res;
1778                     if (b->motion_mode == MM_OBMC) {
1779                         res = obmc(t, ((pixel *) f->cur.data[1 + pl]) + uvdstoff,
1780                                    f->cur.stride[1], b_dim, 1 + pl, bx4, by4, w4, h4);
1781                         if (res) return res;
1782                     }
1783                 }
1784             }
1785             if (b->interintra_type) {
1786                 // FIXME for 8x32 with 4:2:2 subsampling, this probably does
1787                 // the wrong thing since it will select 4x16, not 4x32, as a
1788                 // transform size...
1789                 const uint8_t *const ii_mask =
1790                     b->interintra_type == INTER_INTRA_BLEND ?
1791                          dav1d_ii_masks[bs][chr_layout_idx][b->interintra_mode] :
1792                          dav1d_wedge_masks[bs][chr_layout_idx][0][b->wedge_idx];
1793 
1794                 for (int pl = 0; pl < 2; pl++) {
1795                     pixel *const tmp = bitfn(t->scratch.interintra);
1796                     pixel *const tl_edge = bitfn(t->scratch.edge) + 32;
1797                     enum IntraPredMode m =
1798                         b->interintra_mode == II_SMOOTH_PRED ?
1799                         SMOOTH_PRED : b->interintra_mode;
1800                     int angle = 0;
1801                     pixel *const uvdst = ((pixel *) f->cur.data[1 + pl]) + uvdstoff;
1802                     const pixel *top_sb_edge = NULL;
1803                     if (!(t->by & (f->sb_step - 1))) {
1804                         top_sb_edge = f->ipred_edge[pl + 1];
1805                         const int sby = t->by >> f->sb_shift;
1806                         top_sb_edge += f->sb128w * 128 * (sby - 1);
1807                     }
1808                     m = bytefn(dav1d_prepare_intra_edges)(t->bx >> ss_hor,
1809                                                           (t->bx >> ss_hor) >
1810                                                               (ts->tiling.col_start >> ss_hor),
1811                                                           t->by >> ss_ver,
1812                                                           (t->by >> ss_ver) >
1813                                                               (ts->tiling.row_start >> ss_ver),
1814                                                           ts->tiling.col_end >> ss_hor,
1815                                                           ts->tiling.row_end >> ss_ver,
1816                                                           0, uvdst, f->cur.stride[1],
1817                                                           top_sb_edge, m,
1818                                                           &angle, cbw4, cbh4, 0, tl_edge
1819                                                           HIGHBD_CALL_SUFFIX);
1820                     dsp->ipred.intra_pred[m](tmp, cbw4 * 4 * sizeof(pixel),
1821                                              tl_edge, cbw4 * 4, cbh4 * 4, 0, 0, 0
1822                                              HIGHBD_CALL_SUFFIX);
1823                     dsp->mc.blend(uvdst, f->cur.stride[1], tmp,
1824                                   cbw4 * 4, cbh4 * 4, ii_mask);
1825                 }
1826             }
1827         }
1828 
1829     skip_inter_chroma_pred: {}
1830         t->tl_4x4_filter = filter_2d;
1831     } else {
1832         const enum Filter2d filter_2d = b->filter2d;
1833         // Maximum super block size is 128x128
1834         int16_t (*tmp)[128 * 128] = t->scratch.compinter;
1835         int jnt_weight;
1836         uint8_t *const seg_mask = t->scratch.seg_mask;
1837         const uint8_t *mask;
1838 
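             // compound prediction: both references are predicted into
             // intermediate 16-bit buffers, then combined into the final
             // pixels by plain averaging, distance-weighted (jnt) averaging,
             // a derived segmentation mask (w_mask also writes the mask
             // reused for chroma) or a wedge mask, depending on comp_type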
1839         for (int i = 0; i < 2; i++) {
1840             const Dav1dThreadPicture *const refp = &f->refp[b->ref[i]];
1841 
1842             if (b->inter_mode == GLOBALMV_GLOBALMV && f->gmv_warp_allowed[b->ref[i]]) {
1843                 res = warp_affine(t, NULL, tmp[i], bw4 * 4, b_dim, 0, refp,
1844                                   &f->frame_hdr->gmv[b->ref[i]]);
1845                 if (res) return res;
1846             } else {
1847                 res = mc(t, NULL, tmp[i], 0, bw4, bh4, t->bx, t->by, 0,
1848                          b->mv[i], refp, b->ref[i], filter_2d);
1849                 if (res) return res;
1850             }
1851         }
1852         switch (b->comp_type) {
1853         case COMP_INTER_AVG:
1854             dsp->mc.avg(dst, f->cur.stride[0], tmp[0], tmp[1],
1855                         bw4 * 4, bh4 * 4 HIGHBD_CALL_SUFFIX);
1856             break;
1857         case COMP_INTER_WEIGHTED_AVG:
1858             jnt_weight = f->jnt_weights[b->ref[0]][b->ref[1]];
1859             dsp->mc.w_avg(dst, f->cur.stride[0], tmp[0], tmp[1],
1860                           bw4 * 4, bh4 * 4, jnt_weight HIGHBD_CALL_SUFFIX);
1861             break;
1862         case COMP_INTER_SEG:
1863             dsp->mc.w_mask[chr_layout_idx](dst, f->cur.stride[0],
1864                                            tmp[b->mask_sign], tmp[!b->mask_sign],
1865                                            bw4 * 4, bh4 * 4, seg_mask,
1866                                            b->mask_sign HIGHBD_CALL_SUFFIX);
1867             mask = seg_mask;
1868             break;
1869         case COMP_INTER_WEDGE:
1870             mask = dav1d_wedge_masks[bs][0][0][b->wedge_idx];
1871             dsp->mc.mask(dst, f->cur.stride[0],
1872                          tmp[b->mask_sign], tmp[!b->mask_sign],
1873                          bw4 * 4, bh4 * 4, mask HIGHBD_CALL_SUFFIX);
1874             if (has_chroma)
1875                 mask = dav1d_wedge_masks[bs][chr_layout_idx][b->mask_sign][b->wedge_idx];
1876             break;
1877         }
1878 
1879         // chroma
1880         if (has_chroma) for (int pl = 0; pl < 2; pl++) {
1881             for (int i = 0; i < 2; i++) {
1882                 const Dav1dThreadPicture *const refp = &f->refp[b->ref[i]];
1883                 if (b->inter_mode == GLOBALMV_GLOBALMV &&
1884                     imin(cbw4, cbh4) > 1 && f->gmv_warp_allowed[b->ref[i]])
1885                 {
1886                     res = warp_affine(t, NULL, tmp[i], bw4 * 4 >> ss_hor,
1887                                       b_dim, 1 + pl,
1888                                       refp, &f->frame_hdr->gmv[b->ref[i]]);
1889                     if (res) return res;
1890                 } else {
1891                     res = mc(t, NULL, tmp[i], 0, bw4, bh4, t->bx, t->by,
1892                              1 + pl, b->mv[i], refp, b->ref[i], filter_2d);
1893                     if (res) return res;
1894                 }
1895             }
1896             pixel *const uvdst = ((pixel *) f->cur.data[1 + pl]) + uvdstoff;
1897             switch (b->comp_type) {
1898             case COMP_INTER_AVG:
1899                 dsp->mc.avg(uvdst, f->cur.stride[1], tmp[0], tmp[1],
1900                             bw4 * 4 >> ss_hor, bh4 * 4 >> ss_ver
1901                             HIGHBD_CALL_SUFFIX);
1902                 break;
1903             case COMP_INTER_WEIGHTED_AVG:
1904                 dsp->mc.w_avg(uvdst, f->cur.stride[1], tmp[0], tmp[1],
1905                               bw4 * 4 >> ss_hor, bh4 * 4 >> ss_ver, jnt_weight
1906                               HIGHBD_CALL_SUFFIX);
1907                 break;
1908             case COMP_INTER_WEDGE:
1909             case COMP_INTER_SEG:
1910                 dsp->mc.mask(uvdst, f->cur.stride[1],
1911                              tmp[b->mask_sign], tmp[!b->mask_sign],
1912                              bw4 * 4 >> ss_hor, bh4 * 4 >> ss_ver, mask
1913                              HIGHBD_CALL_SUFFIX);
1914                 break;
1915             }
1916         }
1917     }
1918 
1919     if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS) {
1920         hex_dump(dst, f->cur.stride[0], b_dim[0] * 4, b_dim[1] * 4, "y-pred");
1921         if (has_chroma) {
1922             hex_dump(&((pixel *) f->cur.data[1])[uvdstoff], f->cur.stride[1],
1923                      cbw4 * 4, cbh4 * 4, "u-pred");
1924             hex_dump(&((pixel *) f->cur.data[2])[uvdstoff], f->cur.stride[1],
1925                      cbw4 * 4, cbh4 * 4, "v-pred");
1926         }
1927     }
1928 
1929     const int cw4 = (w4 + ss_hor) >> ss_hor, ch4 = (h4 + ss_ver) >> ss_ver;
1930 
1931     if (b->skip) {
1932         // reset coef contexts
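             // (skip implies the whole block has no coded coefficients, so
             // the above/left coefficient contexts are set to the same 0x40
             // value used for uncoded transform blocks elsewhere in this file)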
1933 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
1934         rep_macro(type, t->dir lcoef, off, mul * 0x40)
1935         case_set(bh4, l., 1, by4);
1936         case_set(bw4, a->, 0, bx4);
1937 #undef set_ctx
1938         if (has_chroma) {
1939 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
1940             rep_macro(type, t->dir ccoef[0], off, mul * 0x40); \
1941             rep_macro(type, t->dir ccoef[1], off, mul * 0x40)
1942             case_set(cbh4, l., 1, cby4);
1943             case_set(cbw4, a->, 0, cbx4);
1944 #undef set_ctx
1945         }
1946         return 0;
1947     }
1948 
1949     const TxfmInfo *const uvtx = &dav1d_txfm_dimensions[b->uvtx];
1950     const TxfmInfo *const ytx = &dav1d_txfm_dimensions[b->max_ytx];
1951     const uint16_t tx_split[2] = { b->tx_split0, b->tx_split1 };
1952 
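         // luma: the block is covered by its maximum transform size and the
         // signalled tx_split bits describe how each such unit is recursively
         // split; read_coef_tree() walks that tree, decoding coefficients and
         // applying the inverse transform for every leaf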
1953     for (int init_y = 0; init_y < bh4; init_y += 16) {
1954         for (int init_x = 0; init_x < bw4; init_x += 16) {
1955             // coefficient coding & inverse transforms
1956             int y_off = !!init_y, y;
1957             dst += PXSTRIDE(f->cur.stride[0]) * 4 * init_y;
1958             for (y = init_y, t->by += init_y; y < imin(h4, init_y + 16);
1959                  y += ytx->h, y_off++)
1960             {
1961                 int x, x_off = !!init_x;
1962                 for (x = init_x, t->bx += init_x; x < imin(w4, init_x + 16);
1963                      x += ytx->w, x_off++)
1964                 {
1965                     read_coef_tree(t, bs, b, b->max_ytx, 0, tx_split,
1966                                    x_off, y_off, &dst[x * 4]);
1967                     t->bx += ytx->w;
1968                 }
1969                 dst += PXSTRIDE(f->cur.stride[0]) * 4 * ytx->h;
1970                 t->bx -= x;
1971                 t->by += ytx->h;
1972             }
1973             dst -= PXSTRIDE(f->cur.stride[0]) * 4 * y;
1974             t->by -= y;
1975 
1976             // chroma coefs and inverse transform
1977             if (has_chroma) for (int pl = 0; pl < 2; pl++) {
1978                 pixel *uvdst = ((pixel *) f->cur.data[1 + pl]) + uvdstoff +
1979                     (PXSTRIDE(f->cur.stride[1]) * init_y * 4 >> ss_ver);
1980                 for (y = init_y >> ss_ver, t->by += init_y;
1981                      y < imin(ch4, (init_y + 16) >> ss_ver); y += uvtx->h)
1982                 {
1983                     int x;
1984                     for (x = init_x >> ss_hor, t->bx += init_x;
1985                          x < imin(cw4, (init_x + 16) >> ss_hor); x += uvtx->w)
1986                     {
1987                         coef *cf;
1988                         int eob;
1989                         enum TxfmType txtp;
1990                         if (t->frame_thread.pass) {
1991                             const int p = t->frame_thread.pass & 1;
1992                             cf = ts->frame_thread[p].cf;
1993                             ts->frame_thread[p].cf += uvtx->w * uvtx->h * 16;
1994                             const struct CodedBlockInfo *const cbi =
1995                                 &f->frame_thread.cbi[t->by * f->b4_stride + t->bx];
1996                             eob = cbi->eob[1 + pl];
1997                             txtp = cbi->txtp[1 + pl];
1998                         } else {
1999                             uint8_t cf_ctx;
2000                             cf = bitfn(t->cf);
2001                             txtp = t->txtp_map[(by4 + (y << ss_ver)) * 32 +
2002                                                 bx4 + (x << ss_hor)];
2003                             eob = decode_coefs(t, &t->a->ccoef[pl][cbx4 + x],
2004                                                &t->l.ccoef[pl][cby4 + y],
2005                                                b->uvtx, bs, b, 0, 1 + pl,
2006                                                cf, &txtp, &cf_ctx);
2007                             if (DEBUG_BLOCK_INFO)
2008                                 printf("Post-uv-cf-blk[pl=%d,tx=%d,"
2009                                        "txtp=%d,eob=%d]: r=%d\n",
2010                                        pl, b->uvtx, txtp, eob, ts->msac.rng);
2011 #define set_ctx(type, dir, diridx, off, mul, rep_macro) \
2012                             rep_macro(type, t->dir ccoef[pl], off, mul * cf_ctx)
2013 #define default_memset(dir, diridx, off, sz) \
2014                             memset(&t->dir ccoef[pl][off], cf_ctx, sz)
2015                             case_set_upto16_with_default( \
2016                                      imin(uvtx->h, (f->bh - t->by + ss_ver) >> ss_ver),
2017                                      l., 1, cby4 + y);
2018                             case_set_upto16_with_default( \
2019                                      imin(uvtx->w, (f->bw - t->bx + ss_hor) >> ss_hor),
2020                                      a->, 0, cbx4 + x);
2021 #undef default_memset
2022 #undef set_ctx
2023                         }
2024                         if (eob >= 0) {
2025                             if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
2026                                 coef_dump(cf, uvtx->h * 4, uvtx->w * 4, 3, "dq");
2027                             dsp->itx.itxfm_add[b->uvtx]
2028                                               [txtp](&uvdst[4 * x],
2029                                                      f->cur.stride[1],
2030                                                      cf, eob HIGHBD_CALL_SUFFIX);
2031                             if (DEBUG_BLOCK_INFO && DEBUG_B_PIXELS)
2032                                 hex_dump(&uvdst[4 * x], f->cur.stride[1],
2033                                          uvtx->w * 4, uvtx->h * 4, "recon");
2034                         }
2035                         t->bx += uvtx->w << ss_hor;
2036                     }
2037                     uvdst += PXSTRIDE(f->cur.stride[1]) * 4 * uvtx->h;
2038                     t->bx -= x << ss_hor;
2039                     t->by += uvtx->h << ss_ver;
2040                 }
2041                 t->by -= y << ss_ver;
2042             }
2043         }
2044     }
2045     return 0;
2046 }
2047 
2048 void bytefn(dav1d_filter_sbrow_deblock_cols)(Dav1dFrameContext *const f, const int sby) {
2049     if (!(f->c->inloop_filters & DAV1D_INLOOPFILTER_DEBLOCK) ||
2050         (!f->frame_hdr->loopfilter.level_y[0] && !f->frame_hdr->loopfilter.level_y[1]))
2051     {
2052         return;
2053     }
2054     const int y = sby * f->sb_step * 4;
2055     const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
2056     pixel *const p[3] = {
2057         f->lf.p[0] + y * PXSTRIDE(f->cur.stride[0]),
2058         f->lf.p[1] + (y * PXSTRIDE(f->cur.stride[1]) >> ss_ver),
2059         f->lf.p[2] + (y * PXSTRIDE(f->cur.stride[1]) >> ss_ver)
2060     };
2061     Av1Filter *mask = f->lf.mask + (sby >> !f->seq_hdr->sb128) * f->sb128w;
2062     bytefn(dav1d_loopfilter_sbrow_cols)(f, p, mask, sby,
2063                                         f->lf.start_of_tile_row[sby]);
2064 }
2065 
2066 void bytefn(dav1d_filter_sbrow_deblock_rows)(Dav1dFrameContext *const f, const int sby) {
2067     const int y = sby * f->sb_step * 4;
2068     const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
2069     pixel *const p[3] = {
2070         f->lf.p[0] + y * PXSTRIDE(f->cur.stride[0]),
2071         f->lf.p[1] + (y * PXSTRIDE(f->cur.stride[1]) >> ss_ver),
2072         f->lf.p[2] + (y * PXSTRIDE(f->cur.stride[1]) >> ss_ver)
2073     };
2074     Av1Filter *mask = f->lf.mask + (sby >> !f->seq_hdr->sb128) * f->sb128w;
2075     if (f->c->inloop_filters & DAV1D_INLOOPFILTER_DEBLOCK &&
2076         (f->frame_hdr->loopfilter.level_y[0] || f->frame_hdr->loopfilter.level_y[1]))
2077     {
2078         bytefn(dav1d_loopfilter_sbrow_rows)(f, p, mask, sby);
2079     }
2080     if (f->seq_hdr->cdef || f->lf.restore_planes) {
2081         // Store loop filtered pixels required by CDEF / LR
2082         bytefn(dav1d_copy_lpf)(f, p, sby);
2083     }
2084 }
2085 
2086 void bytefn(dav1d_filter_sbrow_cdef)(Dav1dTaskContext *const tc, const int sby) {
2087     const Dav1dFrameContext *const f = tc->f;
2088     if (!(f->c->inloop_filters & DAV1D_INLOOPFILTER_CDEF)) return;
2089     const int sbsz = f->sb_step;
2090     const int y = sby * sbsz * 4;
2091     const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
2092     pixel *const p[3] = {
2093         f->lf.p[0] + y * PXSTRIDE(f->cur.stride[0]),
2094         f->lf.p[1] + (y * PXSTRIDE(f->cur.stride[1]) >> ss_ver),
2095         f->lf.p[2] + (y * PXSTRIDE(f->cur.stride[1]) >> ss_ver)
2096     };
2097     Av1Filter *prev_mask = f->lf.mask + ((sby - 1) >> !f->seq_hdr->sb128) * f->sb128w;
2098     Av1Filter *mask = f->lf.mask + (sby >> !f->seq_hdr->sb128) * f->sb128w;
2099     const int start = sby * sbsz;
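         // CDEF needs fully deblocked pixels below the rows being filtered,
         // so the bottom two 4-px rows of each superblock row are deferred
         // and filtered here, one superblock row later, with the previous
         // row's masks (the prev_mask / start - 2 call below)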
2100     if (sby) {
2101         const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
2102         pixel *p_up[3] = {
2103             p[0] - 8 * PXSTRIDE(f->cur.stride[0]),
2104             p[1] - (8 * PXSTRIDE(f->cur.stride[1]) >> ss_ver),
2105             p[2] - (8 * PXSTRIDE(f->cur.stride[1]) >> ss_ver),
2106         };
2107         bytefn(dav1d_cdef_brow)(tc, p_up, prev_mask, start - 2, start, 1, sby);
2108     }
2109     const int n_blks = sbsz - 2 * (sby + 1 < f->sbh);
2110     const int end = imin(start + n_blks, f->bh);
2111     bytefn(dav1d_cdef_brow)(tc, p, mask, start, end, 0, sby);
2112 }
2113 
2114 void bytefn(dav1d_filter_sbrow_resize)(Dav1dFrameContext *const f, const int sby) {
2115     const int sbsz = f->sb_step;
2116     const int y = sby * sbsz * 4;
2117     const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
2118     const pixel *const p[3] = {
2119         f->lf.p[0] + y * PXSTRIDE(f->cur.stride[0]),
2120         f->lf.p[1] + (y * PXSTRIDE(f->cur.stride[1]) >> ss_ver),
2121         f->lf.p[2] + (y * PXSTRIDE(f->cur.stride[1]) >> ss_ver)
2122     };
2123     pixel *const sr_p[3] = {
2124         f->lf.sr_p[0] + y * PXSTRIDE(f->sr_cur.p.stride[0]),
2125         f->lf.sr_p[1] + (y * PXSTRIDE(f->sr_cur.p.stride[1]) >> ss_ver),
2126         f->lf.sr_p[2] + (y * PXSTRIDE(f->sr_cur.p.stride[1]) >> ss_ver)
2127     };
2128     const int has_chroma = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I400;
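         // horizontal-only super-resolution upscaling from the decoded
         // (narrower) frame in f->cur into f->sr_cur, using the per-plane
         // step/start values precomputed in f->resize_step / f->resize_start;
         // the h_start/h_end window is shifted up by 8 luma rows, presumably
         // to match the rows whose CDEF was deferred to this superblock row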
2129     for (int pl = 0; pl < 1 + 2 * has_chroma; pl++) {
2130         const int ss_ver = pl && f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
2131         const int h_start = 8 * !!sby >> ss_ver;
2132         const ptrdiff_t dst_stride = f->sr_cur.p.stride[!!pl];
2133         pixel *dst = sr_p[pl] - h_start * PXSTRIDE(dst_stride);
2134         const ptrdiff_t src_stride = f->cur.stride[!!pl];
2135         const pixel *src = p[pl] - h_start * PXSTRIDE(src_stride);
2136         const int h_end = 4 * (sbsz - 2 * (sby + 1 < f->sbh)) >> ss_ver;
2137         const int ss_hor = pl && f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
2138         const int dst_w = (f->sr_cur.p.p.w + ss_hor) >> ss_hor;
2139         const int src_w = (4 * f->bw + ss_hor) >> ss_hor;
2140         const int img_h = (f->cur.p.h - sbsz * 4 * sby + ss_ver) >> ss_ver;
2141 
2142         f->dsp->mc.resize(dst, dst_stride, src, src_stride, dst_w,
2143                           imin(img_h, h_end) + h_start, src_w,
2144                           f->resize_step[!!pl], f->resize_start[!!pl]
2145                           HIGHBD_CALL_SUFFIX);
2146     }
2147 }
2148 
2149 void bytefn(dav1d_filter_sbrow_lr)(Dav1dFrameContext *const f, const int sby) {
2150     if (!(f->c->inloop_filters & DAV1D_INLOOPFILTER_RESTORATION)) return;
2151     const int y = sby * f->sb_step * 4;
2152     const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
2153     pixel *const sr_p[3] = {
2154         f->lf.sr_p[0] + y * PXSTRIDE(f->sr_cur.p.stride[0]),
2155         f->lf.sr_p[1] + (y * PXSTRIDE(f->sr_cur.p.stride[1]) >> ss_ver),
2156         f->lf.sr_p[2] + (y * PXSTRIDE(f->sr_cur.p.stride[1]) >> ss_ver)
2157     };
2158     bytefn(dav1d_lr_sbrow)(f, sr_p, sby);
2159 }
2160 
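     // convenience wrapper applying the whole per-superblock-row filter
     // pipeline in order: deblocking (columns, then rows plus the copy_lpf
     // backup), CDEF, super-resolution (only when the render width differs
     // from the coded width) and loop restoration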
2161 void bytefn(dav1d_filter_sbrow)(Dav1dFrameContext *const f, const int sby) {
2162     bytefn(dav1d_filter_sbrow_deblock_cols)(f, sby);
2163     bytefn(dav1d_filter_sbrow_deblock_rows)(f, sby);
2164     if (f->seq_hdr->cdef)
2165         bytefn(dav1d_filter_sbrow_cdef)(f->c->tc, sby);
2166     if (f->frame_hdr->width[0] != f->frame_hdr->width[1])
2167         bytefn(dav1d_filter_sbrow_resize)(f, sby);
2168     if (f->lf.restore_planes)
2169         bytefn(dav1d_filter_sbrow_lr)(f, sby);
2170 }
2171 
2172 void bytefn(dav1d_backup_ipred_edge)(Dav1dTaskContext *const t) {
2173     const Dav1dFrameContext *const f = t->f;
2174     Dav1dTileState *const ts = t->ts;
2175     const int sby = t->by >> f->sb_shift;
2176     const int sby_off = f->sb128w * 128 * sby;
2177     const int x_off = ts->tiling.col_start;
2178 
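         // save the last pixel row of each plane of this superblock row
         // (within the tile's column range) into f->ipred_edge; intra
         // prediction in the next superblock row uses these samples as its
         // top neighbours (see top_sb_edge above), so it does not depend on
         // pixels the in-loop filters may modify afterwards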
2179     const pixel *const y =
2180         ((const pixel *) f->cur.data[0]) + x_off * 4 +
2181                     ((t->by + f->sb_step) * 4 - 1) * PXSTRIDE(f->cur.stride[0]);
2182     pixel_copy(&f->ipred_edge[0][sby_off + x_off * 4], y,
2183                4 * (ts->tiling.col_end - x_off));
2184 
2185     if (f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I400) {
2186         const int ss_ver = f->cur.p.layout == DAV1D_PIXEL_LAYOUT_I420;
2187         const int ss_hor = f->cur.p.layout != DAV1D_PIXEL_LAYOUT_I444;
2188 
2189         const ptrdiff_t uv_off = (x_off * 4 >> ss_hor) +
2190             (((t->by + f->sb_step) * 4 >> ss_ver) - 1) * PXSTRIDE(f->cur.stride[1]);
2191         for (int pl = 1; pl <= 2; pl++)
2192             pixel_copy(&f->ipred_edge[pl][sby_off + (x_off * 4 >> ss_hor)],
2193                        &((const pixel *) f->cur.data[pl])[uv_off],
2194                        4 * (ts->tiling.col_end - x_off) >> ss_hor);
2195     }
2196 }
2197