/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <math.h>

#include "av1/common/common.h"
#include "av1/common/entropymode.h"

#include "av1/encoder/cost.h"
#include "av1/encoder/encodemv.h"
#include "av1/encoder/subexp.h"

#include "aom_dsp/aom_dsp_common.h"

static struct av1_token mv_joint_encodings[MV_JOINTS];
static struct av1_token mv_class_encodings[MV_CLASSES];
static struct av1_token mv_fp_encodings[MV_FP_SIZE];

void av1_entropy_mv_init(void) {
  av1_tokens_from_tree(mv_joint_encodings, av1_mv_joint_tree);
  av1_tokens_from_tree(mv_class_encodings, av1_mv_class_tree);
  av1_tokens_from_tree(mv_fp_encodings, av1_mv_fp_tree);
}

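// Writes one motion-vector component (row or col) of a nonzero MV difference.
// The magnitude minus one is split by av1_get_mv_class() into a class and an
// in-class offset; the offset is then coded as an integer part (offset >> 3),
// a 1/4-pel fraction ((offset >> 1) & 3) and a 1/8-pel bit (offset & 1), each
// with its own model in *mvcomp. The precision argument controls whether the
// fractional and high-precision parts are written at all.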
static void encode_mv_component(aom_writer *w, int comp, nmv_component *mvcomp,
                                MvSubpelPrecision precision) {
  int offset;
  const int sign = comp < 0;
  const int mag = sign ? -comp : comp;
  const int mv_class = av1_get_mv_class(mag - 1, &offset);
  const int d = offset >> 3;         // int mv data
  const int fr = (offset >> 1) & 3;  // fractional mv data
  const int hp = offset & 1;         // high precision mv data

  assert(comp != 0);

// Sign
#if CONFIG_NEW_MULTISYMBOL
  aom_write_bit(w, sign);
#else
  aom_write(w, sign, mvcomp->sign);
#endif

  // Class
  aom_write_symbol(w, mv_class, mvcomp->class_cdf, MV_CLASSES);

  // Integer bits
  if (mv_class == MV_CLASS_0) {
#if CONFIG_NEW_MULTISYMBOL
    aom_write_symbol(w, d, mvcomp->class0_cdf, CLASS0_SIZE);
#else
    aom_write(w, d, mvcomp->class0[0]);
#endif
  } else {
    int i;
    const int n = mv_class + CLASS0_BITS - 1;  // number of bits
#if CONFIG_NEW_MULTISYMBOL
    for (i = 0; i < n; ++i)
      aom_write_symbol(w, (d >> i) & 1, mvcomp->bits_cdf[(i + 1) / 2], 2);
#else
    for (i = 0; i < n; ++i) aom_write(w, (d >> i) & 1, mvcomp->bits[i]);
#endif
  }
// Fractional bits
#if CONFIG_INTRABC || CONFIG_AMVR
  if (precision > MV_SUBPEL_NONE)
#endif  // CONFIG_INTRABC || CONFIG_AMVR
  {
    aom_write_symbol(
        w, fr,
        mv_class == MV_CLASS_0 ? mvcomp->class0_fp_cdf[d] : mvcomp->fp_cdf,
        MV_FP_SIZE);
  }

  // High precision bit
  if (precision > MV_SUBPEL_LOW_PRECISION)
#if CONFIG_NEW_MULTISYMBOL
    aom_write_symbol(
        w, hp, mv_class == MV_CLASS_0 ? mvcomp->class0_hp_cdf : mvcomp->hp_cdf,
        2);
#else
    aom_write(w, hp, mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
#endif
}

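// Precomputes the bit cost of every signed component magnitude in
// [-MV_MAX, MV_MAX], mirroring the symbol layout written by
// encode_mv_component(): class, integer offset bits, fractional bits and,
// when the precision allows it, the high-precision bit, plus the sign.
// The table is indexed with both +v and -v, so the caller passes a pointer
// into the middle of its buffer.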
static void build_nmv_component_cost_table(int *mvcost,
                                           const nmv_component *const mvcomp,
                                           MvSubpelPrecision precision) {
  int i, v;
  int sign_cost[2], class_cost[MV_CLASSES], class0_cost[CLASS0_SIZE];
  int bits_cost[MV_OFFSET_BITS][2];
  int class0_fp_cost[CLASS0_SIZE][MV_FP_SIZE], fp_cost[MV_FP_SIZE];
  int class0_hp_cost[2], hp_cost[2];

  sign_cost[0] = av1_cost_zero(mvcomp->sign);
  sign_cost[1] = av1_cost_one(mvcomp->sign);
  av1_cost_tokens(class_cost, mvcomp->classes, av1_mv_class_tree);
  av1_cost_tokens(class0_cost, mvcomp->class0, av1_mv_class0_tree);
  for (i = 0; i < MV_OFFSET_BITS; ++i) {
    bits_cost[i][0] = av1_cost_zero(mvcomp->bits[i]);
    bits_cost[i][1] = av1_cost_one(mvcomp->bits[i]);
  }

  for (i = 0; i < CLASS0_SIZE; ++i)
    av1_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], av1_mv_fp_tree);
  av1_cost_tokens(fp_cost, mvcomp->fp, av1_mv_fp_tree);

  if (precision > MV_SUBPEL_LOW_PRECISION) {
    class0_hp_cost[0] = av1_cost_zero(mvcomp->class0_hp);
    class0_hp_cost[1] = av1_cost_one(mvcomp->class0_hp);
    hp_cost[0] = av1_cost_zero(mvcomp->hp);
    hp_cost[1] = av1_cost_one(mvcomp->hp);
  }
  mvcost[0] = 0;
  for (v = 1; v <= MV_MAX; ++v) {
    int z, c, o, d, e, f, cost = 0;
    z = v - 1;
    c = av1_get_mv_class(z, &o);
    cost += class_cost[c];
    d = (o >> 3);     /* int mv data */
    f = (o >> 1) & 3; /* fractional pel mv data */
    e = (o & 1);      /* high precision mv data */
    if (c == MV_CLASS_0) {
      cost += class0_cost[d];
    } else {
      const int b = c + CLASS0_BITS - 1; /* number of bits */
      for (i = 0; i < b; ++i) cost += bits_cost[i][((d >> i) & 1)];
    }
#if CONFIG_INTRABC || CONFIG_AMVR
    if (precision > MV_SUBPEL_NONE)
#endif  // CONFIG_INTRABC || CONFIG_AMVR
    {
      if (c == MV_CLASS_0) {
        cost += class0_fp_cost[d][f];
      } else {
        cost += fp_cost[f];
      }
      if (precision > MV_SUBPEL_LOW_PRECISION) {
        if (c == MV_CLASS_0) {
          cost += class0_hp_cost[e];
        } else {
          cost += hp_cost[e];
        }
      }
    }
    mvcost[v] = cost + sign_cost[0];
    mvcost[-v] = cost + sign_cost[1];
  }
}

#if !CONFIG_NEW_MULTISYMBOL
static void update_mv(aom_writer *w, const unsigned int ct[2], aom_prob *cur_p,
                      aom_prob upd_p) {
  (void)upd_p;
  // Just use the default maximum number of tile groups to avoid passing in
  // the actual number.
  av1_cond_prob_diff_update(w, cur_p, ct, DEFAULT_MAX_NUM_TG);
}

void av1_write_nmv_probs(AV1_COMMON *cm, int usehp, aom_writer *w,
                         nmv_context_counts *const nmv_counts) {
  int i;
  int nmv_ctx = 0;
#if CONFIG_AMVR
  if (cm->cur_frame_mv_precision_level) {
    return;
  }
#endif
  for (nmv_ctx = 0; nmv_ctx < NMV_CONTEXTS; ++nmv_ctx) {
    nmv_context *const mvc = &cm->fc->nmvc[nmv_ctx];
    nmv_context_counts *const counts = &nmv_counts[nmv_ctx];

    if (usehp) {
      for (i = 0; i < 2; ++i) {
        update_mv(w, counts->comps[i].class0_hp, &mvc->comps[i].class0_hp,
                  MV_UPDATE_PROB);
        update_mv(w, counts->comps[i].hp, &mvc->comps[i].hp, MV_UPDATE_PROB);
      }
    }
  }
}
#endif

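// Writes a motion vector as the difference from its reference (predicted) MV:
// first the joint type, then the row and/or column component for whichever
// parts of the difference are nonzero.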
void av1_encode_mv(AV1_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
                   nmv_context *mvctx, int usehp) {
  const MV diff = { mv->row - ref->row, mv->col - ref->col };
  const MV_JOINT_TYPE j = av1_get_mv_joint(&diff);
#if CONFIG_AMVR
  if (cpi->common.cur_frame_mv_precision_level) {
    usehp = MV_SUBPEL_NONE;
  }
#endif
  aom_write_symbol(w, j, mvctx->joint_cdf, MV_JOINTS);
  if (mv_joint_vertical(j))
    encode_mv_component(w, diff.row, &mvctx->comps[0], usehp);

  if (mv_joint_horizontal(j))
    encode_mv_component(w, diff.col, &mvctx->comps[1], usehp);

  // If auto_mv_step_size is enabled then keep track of the largest
  // motion vector component used.
  if (cpi->sf.mv.auto_mv_step_size) {
    unsigned int maxv = AOMMAX(abs(mv->row), abs(mv->col)) >> 3;
    cpi->max_mv_magnitude = AOMMAX(maxv, cpi->max_mv_magnitude);
  }
}

#if CONFIG_INTRABC
void av1_encode_dv(aom_writer *w, const MV *mv, const MV *ref,
                   nmv_context *mvctx) {
  const MV diff = { mv->row - ref->row, mv->col - ref->col };
  const MV_JOINT_TYPE j = av1_get_mv_joint(&diff);

  aom_write_symbol(w, j, mvctx->joint_cdf, MV_JOINTS);
  if (mv_joint_vertical(j))
    encode_mv_component(w, diff.row, &mvctx->comps[0], MV_SUBPEL_NONE);

  if (mv_joint_horizontal(j))
    encode_mv_component(w, diff.col, &mvctx->comps[1], MV_SUBPEL_NONE);
}
#endif  // CONFIG_INTRABC

void av1_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
                              const nmv_context *ctx,
                              MvSubpelPrecision precision) {
  av1_cost_tokens(mvjoint, ctx->joints, av1_mv_joint_tree);
  build_nmv_component_cost_table(mvcost[0], &ctx->comps[0], precision);
  build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], precision);
}

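// Accumulates MV-difference statistics for a block whose mode signals at
// least one new motion vector. For each newly coded MV, the difference from
// its reference MV is added via av1_inc_mv() to the counts of the nmv
// context selected by av1_nmv_ctx().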
static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
                    const int_mv mvs[2], const int_mv pred_mvs[2],
                    nmv_context_counts *nmv_counts
#if CONFIG_AMVR
                    ,
                    MvSubpelPrecision precision
#endif
                    ) {
  int i;
  PREDICTION_MODE mode = mbmi->mode;

  if (mode == NEWMV || mode == NEW_NEWMV) {
    for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
      const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0].as_mv;
      const MV diff = { mvs[i].as_mv.row - ref->row,
                        mvs[i].as_mv.col - ref->col };
      int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
      int nmv_ctx =
          av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
                      mbmi_ext->ref_mv_stack[rf_type], i, mbmi->ref_mv_idx);
      nmv_context_counts *counts = &nmv_counts[nmv_ctx];
      (void)pred_mvs;
#if CONFIG_AMVR
      av1_inc_mv(&diff, counts, precision);
#else
      av1_inc_mv(&diff, counts, 1);
#endif
    }
  } else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
    const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv;
    const MV diff = { mvs[1].as_mv.row - ref->row,
                      mvs[1].as_mv.col - ref->col };
    int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
    int nmv_ctx =
        av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
                    mbmi_ext->ref_mv_stack[rf_type], 1, mbmi->ref_mv_idx);
    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#if CONFIG_AMVR
    av1_inc_mv(&diff, counts, precision);
#else
    av1_inc_mv(&diff, counts, 1);
#endif
  } else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
    const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv;
    const MV diff = { mvs[0].as_mv.row - ref->row,
                      mvs[0].as_mv.col - ref->col };
    int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
    int nmv_ctx =
        av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
                    mbmi_ext->ref_mv_stack[rf_type], 0, mbmi->ref_mv_idx);
    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#if CONFIG_AMVR
    av1_inc_mv(&diff, counts, precision);
#else
    av1_inc_mv(&diff, counts, 1);
#endif
#if CONFIG_COMPOUND_SINGLEREF
  } else {
    assert(  // mode == SR_NEAREST_NEWMV ||
        mode == SR_NEAR_NEWMV || mode == SR_ZERO_NEWMV || mode == SR_NEW_NEWMV);
    const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv;
    int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
    int nmv_ctx =
        av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
                    mbmi_ext->ref_mv_stack[rf_type], 0, mbmi->ref_mv_idx);
    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
    (void)pred_mvs;
    MV diff;
    if (mode == SR_NEW_NEWMV) {
      diff.row = mvs[0].as_mv.row - ref->row;
      diff.col = mvs[0].as_mv.col - ref->col;
      av1_inc_mv(&diff, counts, 1);
    }
    diff.row = mvs[1].as_mv.row - ref->row;
    diff.col = mvs[1].as_mv.col - ref->col;
    av1_inc_mv(&diff, counts, 1);
#endif  // CONFIG_COMPOUND_SINGLEREF
  }
}

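// Same statistics gathering as inc_mvs(), but for sub-8x8 partitions, where
// each sub-block carries its own mode and reference MVs in mi->bmi[block].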
static void inc_mvs_sub8x8(const MODE_INFO *mi, int block, const int_mv mvs[2],
                           const MB_MODE_INFO_EXT *mbmi_ext,
                           nmv_context_counts *nmv_counts
#if CONFIG_AMVR
                           ,
                           MvSubpelPrecision precision
#endif
                           ) {
  int i;
  PREDICTION_MODE mode = mi->bmi[block].as_mode;
  const MB_MODE_INFO *mbmi = &mi->mbmi;

  if (mode == NEWMV || mode == NEW_NEWMV) {
    for (i = 0; i < 1 + has_second_ref(&mi->mbmi); ++i) {
      const MV *ref = &mi->bmi[block].ref_mv[i].as_mv;
      const MV diff = { mvs[i].as_mv.row - ref->row,
                        mvs[i].as_mv.col - ref->col };
      int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
      int nmv_ctx =
          av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
                      mbmi_ext->ref_mv_stack[rf_type], i, mbmi->ref_mv_idx);
      nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#if CONFIG_AMVR
      av1_inc_mv(&diff, counts, precision);
#else
      av1_inc_mv(&diff, counts, 1);
#endif
    }
  } else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
    const MV *ref = &mi->bmi[block].ref_mv[1].as_mv;
    const MV diff = { mvs[1].as_mv.row - ref->row,
                      mvs[1].as_mv.col - ref->col };
    int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
    int nmv_ctx =
        av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
                    mbmi_ext->ref_mv_stack[rf_type], 1, mbmi->ref_mv_idx);
    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#if CONFIG_AMVR
    av1_inc_mv(&diff, counts, precision);
#else
    av1_inc_mv(&diff, counts, 1);
#endif
  } else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
    const MV *ref = &mi->bmi[block].ref_mv[0].as_mv;
    const MV diff = { mvs[0].as_mv.row - ref->row,
                      mvs[0].as_mv.col - ref->col };
    int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
    int nmv_ctx =
        av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
                    mbmi_ext->ref_mv_stack[rf_type], 0, mbmi->ref_mv_idx);
    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#if CONFIG_AMVR
    av1_inc_mv(&diff, counts, precision);
#else
    av1_inc_mv(&diff, counts, 1);
#endif
  }
}

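// Called after a block is encoded: dispatches to inc_mvs() or, for sub-8x8
// partitions when CB4X4 is disabled, to inc_mvs_sub8x8(), so that the
// per-context MV counts in td->counts->mv are updated.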
void av1_update_mv_count(ThreadData *td) {
  const MACROBLOCKD *xd = &td->mb.e_mbd;
  const MODE_INFO *mi = xd->mi[0];
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const MB_MODE_INFO_EXT *mbmi_ext = td->mb.mbmi_ext;
#if CONFIG_CB4X4
  const int unify_bsize = 1;
#else
  const int unify_bsize = 0;
#endif
#if CONFIG_AMVR
  MvSubpelPrecision precision = 1;
  if (xd->cur_frame_mv_precision_level) {
    precision = MV_SUBPEL_NONE;
  }
#endif

  if (mbmi->sb_type < BLOCK_8X8 && !unify_bsize) {
    const int num_4x4_w = num_4x4_blocks_wide_lookup[mbmi->sb_type];
    const int num_4x4_h = num_4x4_blocks_high_lookup[mbmi->sb_type];
    int idx, idy;

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int i = idy * 2 + idx;

        if (have_newmv_in_inter_mode(mi->bmi[i].as_mode))
#if CONFIG_AMVR
          inc_mvs_sub8x8(mi, i, mi->bmi[i].as_mv, mbmi_ext, td->counts->mv,
                         precision);
#else
          inc_mvs_sub8x8(mi, i, mi->bmi[i].as_mv, mbmi_ext, td->counts->mv);
#endif
      }
    }
  } else {
    if (have_newmv_in_inter_mode(mbmi->mode))
#if CONFIG_AMVR
      inc_mvs(mbmi, mbmi_ext, mbmi->mv, mbmi->pred_mv, td->counts->mv,
              precision);
#else
      inc_mvs(mbmi, mbmi_ext, mbmi->mv, mbmi->pred_mv, td->counts->mv);
#endif
  }
}