1 /*
2 * Copyright © 2018, VideoLAN and dav1d authors
3 * Copyright © 2018, Two Orioles, LLC
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright notice, this
10 * list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #ifndef DAV1D_SRC_ENV_H
29 #define DAV1D_SRC_ENV_H
30
31 #include <stddef.h>
32 #include <stdint.h>
33 #include <stdlib.h>
34
35 #include "src/levels.h"
36 #include "src/refmvs.h"
37 #include "src/tables.h"
38
// Entropy-coding context gathered from already-decoded neighbouring blocks.
// One BlockContext describes either the row above the current block (indexed
// by x coordinate, see the *_ctx helpers' use of xb4) or the column to its
// left (indexed by y coordinate, yb4). Arrays hold one entry per 4px unit,
// except partition[] which has half as many entries — NOTE(review):
// presumably one per 8px unit (see yb8/xb8 in get_partition_ctx); confirm
// against the callers that fill these arrays.
typedef struct BlockContext {
    uint8_t ALIGN(mode[32], 8);
    uint8_t ALIGN(lcoef[32], 8);
    uint8_t ALIGN(ccoef[2][32], 8);  // [chroma plane][unit]
    uint8_t ALIGN(seg_pred[32], 8);
    uint8_t ALIGN(skip[32], 8);
    uint8_t ALIGN(skip_mode[32], 8);
    uint8_t ALIGN(intra[32], 8);     // nonzero for intra blocks
    uint8_t ALIGN(comp_type[32], 8); // COMP_INTER_* value; 0 = not compound
    int8_t ALIGN(ref[2][32], 8); // -1 means intra
    uint8_t ALIGN(filter[2][32], 8); // 3 means unset
    int8_t ALIGN(tx_intra[32], 8);
    int8_t ALIGN(tx[32], 8);
    uint8_t ALIGN(tx_lpf_y[32], 8);
    uint8_t ALIGN(tx_lpf_uv[32], 8);
    uint8_t ALIGN(partition[16], 8);
    uint8_t ALIGN(uvmode[32], 8);
    uint8_t ALIGN(pal_sz[32], 8);
} BlockContext;
58
get_intra_ctx(const BlockContext * const a,const BlockContext * const l,const int yb4,const int xb4,const int have_top,const int have_left)59 static inline int get_intra_ctx(const BlockContext *const a,
60 const BlockContext *const l,
61 const int yb4, const int xb4,
62 const int have_top, const int have_left)
63 {
64 if (have_left) {
65 if (have_top) {
66 const int ctx = l->intra[yb4] + a->intra[xb4];
67 return ctx + (ctx == 2);
68 } else
69 return l->intra[yb4] * 2;
70 } else {
71 return have_top ? a->intra[xb4] * 2 : 0;
72 }
73 }
74
get_tx_ctx(const BlockContext * const a,const BlockContext * const l,const TxfmInfo * const max_tx,const int yb4,const int xb4)75 static inline int get_tx_ctx(const BlockContext *const a,
76 const BlockContext *const l,
77 const TxfmInfo *const max_tx,
78 const int yb4, const int xb4)
79 {
80 return (l->tx_intra[yb4] >= max_tx->lh) + (a->tx_intra[xb4] >= max_tx->lw);
81 }
82
get_partition_ctx(const BlockContext * const a,const BlockContext * const l,const enum BlockLevel bl,const int yb8,const int xb8)83 static inline int get_partition_ctx(const BlockContext *const a,
84 const BlockContext *const l,
85 const enum BlockLevel bl,
86 const int yb8, const int xb8)
87 {
88 return ((a->partition[xb8] >> (4 - bl)) & 1) +
89 (((l->partition[yb8] >> (4 - bl)) & 1) << 1);
90 }
91
gather_left_partition_prob(const uint16_t * const in,const enum BlockLevel bl)92 static inline unsigned gather_left_partition_prob(const uint16_t *const in,
93 const enum BlockLevel bl)
94 {
95 unsigned out = in[PARTITION_H - 1] - in[PARTITION_H];
96 // Exploit the fact that cdfs for PARTITION_SPLIT, PARTITION_T_TOP_SPLIT,
97 // PARTITION_T_BOTTOM_SPLIT and PARTITION_T_LEFT_SPLIT are neighbors.
98 out += in[PARTITION_SPLIT - 1] - in[PARTITION_T_LEFT_SPLIT];
99 if (bl != BL_128X128)
100 out += in[PARTITION_H4 - 1] - in[PARTITION_H4];
101 return out;
102 }
103
gather_top_partition_prob(const uint16_t * const in,const enum BlockLevel bl)104 static inline unsigned gather_top_partition_prob(const uint16_t *const in,
105 const enum BlockLevel bl)
106 {
107 // Exploit the fact that cdfs for PARTITION_V, PARTITION_SPLIT and
108 // PARTITION_T_TOP_SPLIT are neighbors.
109 unsigned out = in[PARTITION_V - 1] - in[PARTITION_T_TOP_SPLIT];
110 // Exploit the facts that cdfs for PARTITION_T_LEFT_SPLIT and
111 // PARTITION_T_RIGHT_SPLIT are neighbors, the probability for
112 // PARTITION_V4 is always zero, and the probability for
113 // PARTITION_T_RIGHT_SPLIT is zero in 128x128 blocks.
114 out += in[PARTITION_T_LEFT_SPLIT - 1];
115 if (bl != BL_128X128)
116 out += in[PARTITION_V4 - 1] - in[PARTITION_T_RIGHT_SPLIT];
117 return out;
118 }
119
get_uv_inter_txtp(const TxfmInfo * const uvt_dim,const enum TxfmType ytxtp)120 static inline enum TxfmType get_uv_inter_txtp(const TxfmInfo *const uvt_dim,
121 const enum TxfmType ytxtp)
122 {
123 if (uvt_dim->max == TX_32X32)
124 return ytxtp == IDTX ? IDTX : DCT_DCT;
125 if (uvt_dim->min == TX_16X16 &&
126 ((1 << ytxtp) & ((1 << H_FLIPADST) | (1 << V_FLIPADST) |
127 (1 << H_ADST) | (1 << V_ADST))))
128 {
129 return DCT_DCT;
130 }
131
132 return ytxtp;
133 }
134
get_filter_ctx(const BlockContext * const a,const BlockContext * const l,const int comp,const int dir,const int ref,const int yb4,const int xb4)135 static inline int get_filter_ctx(const BlockContext *const a,
136 const BlockContext *const l,
137 const int comp, const int dir, const int ref,
138 const int yb4, const int xb4)
139 {
140 const int a_filter = (a->ref[0][xb4] == ref || a->ref[1][xb4] == ref) ?
141 a->filter[dir][xb4] : DAV1D_N_SWITCHABLE_FILTERS;
142 const int l_filter = (l->ref[0][yb4] == ref || l->ref[1][yb4] == ref) ?
143 l->filter[dir][yb4] : DAV1D_N_SWITCHABLE_FILTERS;
144
145 if (a_filter == l_filter) {
146 return comp * 4 + a_filter;
147 } else if (a_filter == DAV1D_N_SWITCHABLE_FILTERS) {
148 return comp * 4 + l_filter;
149 } else if (l_filter == DAV1D_N_SWITCHABLE_FILTERS) {
150 return comp * 4 + a_filter;
151 } else {
152 return comp * 4 + DAV1D_N_SWITCHABLE_FILTERS;
153 }
154 }
155
get_comp_ctx(const BlockContext * const a,const BlockContext * const l,const int yb4,const int xb4,const int have_top,const int have_left)156 static inline int get_comp_ctx(const BlockContext *const a,
157 const BlockContext *const l,
158 const int yb4, const int xb4,
159 const int have_top, const int have_left)
160 {
161 if (have_top) {
162 if (have_left) {
163 if (a->comp_type[xb4]) {
164 if (l->comp_type[yb4]) {
165 return 4;
166 } else {
167 // 4U means intra (-1) or bwd (>= 4)
168 return 2 + ((unsigned)l->ref[0][yb4] >= 4U);
169 }
170 } else if (l->comp_type[yb4]) {
171 // 4U means intra (-1) or bwd (>= 4)
172 return 2 + ((unsigned)a->ref[0][xb4] >= 4U);
173 } else {
174 return (l->ref[0][yb4] >= 4) ^ (a->ref[0][xb4] >= 4);
175 }
176 } else {
177 return a->comp_type[xb4] ? 3 : a->ref[0][xb4] >= 4;
178 }
179 } else if (have_left) {
180 return l->comp_type[yb4] ? 3 : l->ref[0][yb4] >= 4;
181 } else {
182 return 1;
183 }
184 }
185
// Context for the unidirectional/bidirectional compound-reference flag,
// derived from how the above/left neighbours combine their references.
static inline int get_comp_dir_ctx(const BlockContext *const a,
                                   const BlockContext *const l,
                                   const int yb4, const int xb4,
                                   const int have_top, const int have_left)
{
// True if both of the neighbour's references point the same direction
// (both forward, ref < 4, or both backward/intra).
#define has_uni_comp(edge, off) \
    ((edge->ref[0][off] < 4) == (edge->ref[1][off] < 4))

    if (have_top && have_left) {
        const int a_intra = a->intra[xb4], l_intra = l->intra[yb4];

        // Intra neighbours carry no direction information.
        if (a_intra && l_intra) return 2;
        if (a_intra || l_intra) {
            // Exactly one neighbour is inter; use it alone.
            const BlockContext *const edge = a_intra ? l : a;
            const int off = a_intra ? yb4 : xb4;

            if (edge->comp_type[off] == COMP_INTER_NONE) return 2;
            return 1 + 2 * has_uni_comp(edge, off);
        }

        const int a_comp = a->comp_type[xb4] != COMP_INTER_NONE;
        const int l_comp = l->comp_type[yb4] != COMP_INTER_NONE;
        const int a_ref0 = a->ref[0][xb4], l_ref0 = l->ref[0][yb4];

        if (!a_comp && !l_comp) {
            // Both single-ref: 3 when they agree on direction, else 1.
            return 1 + 2 * ((a_ref0 >= 4) == (l_ref0 >= 4));
        } else if (!a_comp || !l_comp) {
            // One compound, one single-ref neighbour.
            const BlockContext *const edge = a_comp ? a : l;
            const int off = a_comp ? xb4 : yb4;

            if (!has_uni_comp(edge, off)) return 1;
            return 3 + ((a_ref0 >= 4) == (l_ref0 >= 4));
        } else {
            // Both compound.
            const int a_uni = has_uni_comp(a, xb4), l_uni = has_uni_comp(l, yb4);

            if (!a_uni && !l_uni) return 0;
            if (!a_uni || !l_uni) return 2;
            // Both unidirectional; compare whether each starts at ref 4.
            return 3 + ((a_ref0 == 4) == (l_ref0 == 4));
        }
    } else if (have_top || have_left) {
        // Only one neighbour available.
        const BlockContext *const edge = have_left ? l : a;
        const int off = have_left ? yb4 : xb4;

        if (edge->intra[off]) return 2;
        if (edge->comp_type[off] == COMP_INTER_NONE) return 2;
        return 4 * has_uni_comp(edge, off);
    } else {
        // No neighbours.
        return 2;
    }
}
236
// Signed difference between two picture order counts, interpreted modulo
// 2^order_hint_n_bits (sign-extended from that width). Returns 0 when order
// hints are disabled.
static inline int get_poc_diff(const int order_hint_n_bits,
                               const int poc0, const int poc1)
{
    if (!order_hint_n_bits) return 0;

    const int sign_bit = 1 << (order_hint_n_bits - 1);
    const int delta = poc0 - poc1;
    // Split into magnitude bits and the (scaled) sign bit, then recombine.
    const int low = delta & (sign_bit - 1);
    const int neg = delta & sign_bit;
    return low - neg;
}
245
get_jnt_comp_ctx(const int order_hint_n_bits,const unsigned poc,const unsigned ref0poc,const unsigned ref1poc,const BlockContext * const a,const BlockContext * const l,const int yb4,const int xb4)246 static inline int get_jnt_comp_ctx(const int order_hint_n_bits,
247 const unsigned poc, const unsigned ref0poc,
248 const unsigned ref1poc,
249 const BlockContext *const a,
250 const BlockContext *const l,
251 const int yb4, const int xb4)
252 {
253 const unsigned d0 = abs(get_poc_diff(order_hint_n_bits, ref0poc, poc));
254 const unsigned d1 = abs(get_poc_diff(order_hint_n_bits, poc, ref1poc));
255 const int offset = d0 == d1;
256 const int a_ctx = a->comp_type[xb4] >= COMP_INTER_AVG ||
257 a->ref[0][xb4] == 6;
258 const int l_ctx = l->comp_type[yb4] >= COMP_INTER_AVG ||
259 l->ref[0][yb4] == 6;
260
261 return 3 * offset + a_ctx + l_ctx;
262 }
263
get_mask_comp_ctx(const BlockContext * const a,const BlockContext * const l,const int yb4,const int xb4)264 static inline int get_mask_comp_ctx(const BlockContext *const a,
265 const BlockContext *const l,
266 const int yb4, const int xb4)
267 {
268 const int a_ctx = a->comp_type[xb4] >= COMP_INTER_SEG ? 1 :
269 a->ref[0][xb4] == 6 ? 3 : 0;
270 const int l_ctx = l->comp_type[yb4] >= COMP_INTER_SEG ? 1 :
271 l->ref[0][yb4] == 6 ? 3 : 0;
272
273 return imin(a_ctx + l_ctx, 5);
274 }
275
// The contexts for the later reference-selection bits are computed with the
// same neighbour-counting logic as the functions below, so they are plain
// aliases rather than separate implementations.
#define av1_get_ref_2_ctx av1_get_bwd_ref_ctx
#define av1_get_ref_3_ctx av1_get_fwd_ref_ctx
#define av1_get_ref_4_ctx av1_get_fwd_ref_1_ctx
#define av1_get_ref_5_ctx av1_get_fwd_ref_2_ctx
#define av1_get_ref_6_ctx av1_get_bwd_ref_1_ctx
#define av1_get_uni_p_ctx av1_get_ref_ctx
#define av1_get_uni_p2_ctx av1_get_fwd_ref_2_ctx
283
av1_get_ref_ctx(const BlockContext * const a,const BlockContext * const l,const int yb4,const int xb4,int have_top,int have_left)284 static inline int av1_get_ref_ctx(const BlockContext *const a,
285 const BlockContext *const l,
286 const int yb4, const int xb4,
287 int have_top, int have_left)
288 {
289 int cnt[2] = { 0 };
290
291 if (have_top && !a->intra[xb4]) {
292 cnt[a->ref[0][xb4] >= 4]++;
293 if (a->comp_type[xb4]) cnt[a->ref[1][xb4] >= 4]++;
294 }
295
296 if (have_left && !l->intra[yb4]) {
297 cnt[l->ref[0][yb4] >= 4]++;
298 if (l->comp_type[yb4]) cnt[l->ref[1][yb4] >= 4]++;
299 }
300
301 return cnt[0] == cnt[1] ? 1 : cnt[0] < cnt[1] ? 0 : 2;
302 }
303
av1_get_fwd_ref_ctx(const BlockContext * const a,const BlockContext * const l,const int yb4,const int xb4,const int have_top,const int have_left)304 static inline int av1_get_fwd_ref_ctx(const BlockContext *const a,
305 const BlockContext *const l,
306 const int yb4, const int xb4,
307 const int have_top, const int have_left)
308 {
309 int cnt[4] = { 0 };
310
311 if (have_top && !a->intra[xb4]) {
312 if (a->ref[0][xb4] < 4) cnt[a->ref[0][xb4]]++;
313 if (a->comp_type[xb4] && a->ref[1][xb4] < 4) cnt[a->ref[1][xb4]]++;
314 }
315
316 if (have_left && !l->intra[yb4]) {
317 if (l->ref[0][yb4] < 4) cnt[l->ref[0][yb4]]++;
318 if (l->comp_type[yb4] && l->ref[1][yb4] < 4) cnt[l->ref[1][yb4]]++;
319 }
320
321 cnt[0] += cnt[1];
322 cnt[2] += cnt[3];
323
324 return cnt[0] == cnt[2] ? 1 : cnt[0] < cnt[2] ? 0 : 2;
325 }
326
av1_get_fwd_ref_1_ctx(const BlockContext * const a,const BlockContext * const l,const int yb4,const int xb4,const int have_top,const int have_left)327 static inline int av1_get_fwd_ref_1_ctx(const BlockContext *const a,
328 const BlockContext *const l,
329 const int yb4, const int xb4,
330 const int have_top, const int have_left)
331 {
332 int cnt[2] = { 0 };
333
334 if (have_top && !a->intra[xb4]) {
335 if (a->ref[0][xb4] < 2) cnt[a->ref[0][xb4]]++;
336 if (a->comp_type[xb4] && a->ref[1][xb4] < 2) cnt[a->ref[1][xb4]]++;
337 }
338
339 if (have_left && !l->intra[yb4]) {
340 if (l->ref[0][yb4] < 2) cnt[l->ref[0][yb4]]++;
341 if (l->comp_type[yb4] && l->ref[1][yb4] < 2) cnt[l->ref[1][yb4]]++;
342 }
343
344 return cnt[0] == cnt[1] ? 1 : cnt[0] < cnt[1] ? 0 : 2;
345 }
346
av1_get_fwd_ref_2_ctx(const BlockContext * const a,const BlockContext * const l,const int yb4,const int xb4,const int have_top,const int have_left)347 static inline int av1_get_fwd_ref_2_ctx(const BlockContext *const a,
348 const BlockContext *const l,
349 const int yb4, const int xb4,
350 const int have_top, const int have_left)
351 {
352 int cnt[2] = { 0 };
353
354 if (have_top && !a->intra[xb4]) {
355 if ((a->ref[0][xb4] ^ 2U) < 2) cnt[a->ref[0][xb4] - 2]++;
356 if (a->comp_type[xb4] && (a->ref[1][xb4] ^ 2U) < 2) cnt[a->ref[1][xb4] - 2]++;
357 }
358
359 if (have_left && !l->intra[yb4]) {
360 if ((l->ref[0][yb4] ^ 2U) < 2) cnt[l->ref[0][yb4] - 2]++;
361 if (l->comp_type[yb4] && (l->ref[1][yb4] ^ 2U) < 2) cnt[l->ref[1][yb4] - 2]++;
362 }
363
364 return cnt[0] == cnt[1] ? 1 : cnt[0] < cnt[1] ? 0 : 2;
365 }
366
av1_get_bwd_ref_ctx(const BlockContext * const a,const BlockContext * const l,const int yb4,const int xb4,const int have_top,const int have_left)367 static inline int av1_get_bwd_ref_ctx(const BlockContext *const a,
368 const BlockContext *const l,
369 const int yb4, const int xb4,
370 const int have_top, const int have_left)
371 {
372 int cnt[3] = { 0 };
373
374 if (have_top && !a->intra[xb4]) {
375 if (a->ref[0][xb4] >= 4) cnt[a->ref[0][xb4] - 4]++;
376 if (a->comp_type[xb4] && a->ref[1][xb4] >= 4) cnt[a->ref[1][xb4] - 4]++;
377 }
378
379 if (have_left && !l->intra[yb4]) {
380 if (l->ref[0][yb4] >= 4) cnt[l->ref[0][yb4] - 4]++;
381 if (l->comp_type[yb4] && l->ref[1][yb4] >= 4) cnt[l->ref[1][yb4] - 4]++;
382 }
383
384 cnt[1] += cnt[0];
385
386 return cnt[2] == cnt[1] ? 1 : cnt[1] < cnt[2] ? 0 : 2;
387 }
388
av1_get_bwd_ref_1_ctx(const BlockContext * const a,const BlockContext * const l,const int yb4,const int xb4,const int have_top,const int have_left)389 static inline int av1_get_bwd_ref_1_ctx(const BlockContext *const a,
390 const BlockContext *const l,
391 const int yb4, const int xb4,
392 const int have_top, const int have_left)
393 {
394 int cnt[3] = { 0 };
395
396 if (have_top && !a->intra[xb4]) {
397 if (a->ref[0][xb4] >= 4) cnt[a->ref[0][xb4] - 4]++;
398 if (a->comp_type[xb4] && a->ref[1][xb4] >= 4) cnt[a->ref[1][xb4] - 4]++;
399 }
400
401 if (have_left && !l->intra[yb4]) {
402 if (l->ref[0][yb4] >= 4) cnt[l->ref[0][yb4] - 4]++;
403 if (l->comp_type[yb4] && l->ref[1][yb4] >= 4) cnt[l->ref[1][yb4] - 4]++;
404 }
405
406 return cnt[0] == cnt[1] ? 1 : cnt[0] < cnt[1] ? 0 : 2;
407 }
408
av1_get_uni_p1_ctx(const BlockContext * const a,const BlockContext * const l,const int yb4,const int xb4,const int have_top,const int have_left)409 static inline int av1_get_uni_p1_ctx(const BlockContext *const a,
410 const BlockContext *const l,
411 const int yb4, const int xb4,
412 const int have_top, const int have_left)
413 {
414 int cnt[3] = { 0 };
415
416 if (have_top && !a->intra[xb4]) {
417 if (a->ref[0][xb4] - 1U < 3) cnt[a->ref[0][xb4] - 1]++;
418 if (a->comp_type[xb4] && a->ref[1][xb4] - 1U < 3) cnt[a->ref[1][xb4] - 1]++;
419 }
420
421 if (have_left && !l->intra[yb4]) {
422 if (l->ref[0][yb4] - 1U < 3) cnt[l->ref[0][yb4] - 1]++;
423 if (l->comp_type[yb4] && l->ref[1][yb4] - 1U < 3) cnt[l->ref[1][yb4] - 1]++;
424 }
425
426 cnt[1] += cnt[2];
427
428 return cnt[0] == cnt[1] ? 1 : cnt[0] < cnt[1] ? 0 : 2;
429 }
430
get_drl_context(const refmvs_candidate * const ref_mv_stack,const int ref_idx)431 static inline int get_drl_context(const refmvs_candidate *const ref_mv_stack,
432 const int ref_idx)
433 {
434 if (ref_mv_stack[ref_idx].weight >= 640)
435 return ref_mv_stack[ref_idx + 1].weight < 640;
436
437 return ref_mv_stack[ref_idx + 1].weight < 640 ? 2 : 0;
438 }
439
// Predict the segment id for the block at (bx, by) from the left, top and
// top-left entries of the current frame's segmentation map, and write the
// prediction context (0..2: how many neighbours agree) to *seg_ctx.
static inline unsigned get_cur_frame_segid(const int by, const int bx,
                                           const int have_top,
                                           const int have_left,
                                           int *const seg_ctx,
                                           const uint8_t *cur_seg_map,
                                           const ptrdiff_t stride)
{
    const uint8_t *const pos = &cur_seg_map[bx + by * stride];

    if (!have_left || !have_top) {
        // At most one neighbour: no agreement possible.
        *seg_ctx = 0;
        if (have_left) return pos[-1];
        return have_top ? pos[-stride] : 0;
    }

    const int left = pos[-1];
    const int top = pos[-stride];
    const int topleft = pos[-(stride + 1)];

    // Pairwise agreement count is 0, 1 or 3 (two agreements imply the third).
    const int matches = (left == top) + (left == topleft) + (top == topleft);
    *seg_ctx = matches >= 3 ? 2 : matches >= 1 ? 1 : 0;

    return top == topleft ? top : left;
}
462
// Round a 1/8-pel motion vector to integer-pel precision: add 3 (reduced by
// 1 for negative components via the sign extracted with >> 15) and clear the
// three fractional bits. NOTE(review): the >> 15 sign trick assumes the mv
// components are 16-bit signed — confirm against the mv type definition.
static inline void fix_int_mv_precision(mv *const mv) {
    mv->x = (mv->x - (mv->x >> 15) + 3) & ~7U;
    mv->y = (mv->y - (mv->y >> 15) + 3) & ~7U;
}
467
// Clamp a motion vector to the precision allowed by the frame header:
// integer-pel when force_integer_mv is set, otherwise 1/4-pel (even 1/8-pel
// units) when high-precision MVs are disabled.
static inline void fix_mv_precision(const Dav1dFrameHeader *const hdr,
                                    mv *const mv)
{
    if (hdr->force_integer_mv) {
        fix_int_mv_precision(mv);
        return;
    }
    if (hdr->hp)
        return;

    // Drop the lowest fractional bit, compensating the sign via >> 15.
    mv->x = (mv->x - (mv->x >> 15)) & ~1U;
    mv->y = (mv->y - (mv->y >> 15)) & ~1U;
}
478
// Evaluate the global (warped) motion model for a block and return the
// resulting motion vector, clamped to the precision the frame header allows.
// Block position/size are given in 4px units.
static inline mv get_gmv_2d(const Dav1dWarpedMotionParams *const gmv,
                            const int bx4, const int by4,
                            const int bw4, const int bh4,
                            const Dav1dFrameHeader *const hdr)
{
    switch (gmv->type) {
    case DAV1D_WM_TYPE_ROT_ZOOM:
        // Rotzoom is a constrained affine model; assert the constraints and
        // reuse the affine evaluation below.
        assert(gmv->matrix[5] == gmv->matrix[2]);
        assert(gmv->matrix[4] == -gmv->matrix[3]);
        // fall-through
    default:
    case DAV1D_WM_TYPE_AFFINE: {
        // Sample the model at the block's center pixel.
        const int x = bx4 * 4 + bw4 * 2 - 1;
        const int y = by4 * 4 + bh4 * 2 - 1;
        // matrix[2]/matrix[5] carry a 1 << 16 identity bias (subtracted here
        // so xc/yc are displacements); matrix[0]/matrix[1] are translations.
        const int xc = (gmv->matrix[2] - (1 << 16)) * x +
                       gmv->matrix[3] * y + gmv->matrix[0];
        const int yc = (gmv->matrix[5] - (1 << 16)) * y +
                       gmv->matrix[4] * x + gmv->matrix[1];
        // Round to 1/8-pel (hp) or 1/4-pel, rounding half away from zero via
        // abs()+round, then rescale 1/4-pel results to 1/8-pel units.
        const int shift = 16 - (3 - !hdr->hp);
        const int round = (1 << shift) >> 1;
        mv res = (mv) {
            .y = apply_sign(((abs(yc) + round) >> shift) << !hdr->hp, yc),
            .x = apply_sign(((abs(xc) + round) >> shift) << !hdr->hp, xc),
        };
        if (hdr->force_integer_mv)
            fix_int_mv_precision(&res);
        return res;
    }
    case DAV1D_WM_TYPE_TRANSLATION: {
        // Pure translation: matrix[0]/matrix[1] hold the y/x offsets;
        // >> 13 converts from the model's precision to 1/8-pel units.
        mv res = (mv) {
            .y = gmv->matrix[0] >> 13,
            .x = gmv->matrix[1] >> 13,
        };
        if (hdr->force_integer_mv)
            fix_int_mv_precision(&res);
        return res;
    }
    case DAV1D_WM_TYPE_IDENTITY:
        // No global motion.
        return (mv) { .x = 0, .y = 0 };
    }
}
520
521 #endif /* DAV1D_SRC_ENV_H */
522