/*****************************************************************************
 * rdo.c: rate-distortion optimization
 *****************************************************************************
 * Copyright (C) 2005-2014 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

/* duplicate all the writer functions, just calculating bit cost
 * instead of writing the bitstream.
 * TODO: use these for fast 1st pass too. */

#define RDO_SKIP_BS 1

/* Transition and size tables for abs<9 MVD and residual coding */
/* Consist of i_prefix-2 1s, one zero, and a bypass sign bit */
uint8_t x264_cabac_transition_unary[15][128];
uint16_t x264_cabac_size_unary[15][128];
/* Transition and size tables for abs>9 MVD */
/* Consist of 5 1s and a bypass sign bit */
static uint8_t cabac_transition_5ones[128];
static uint16_t cabac_size_5ones[128];

/* CAVLC: produces exactly the same bit count as a normal encode */
/* this probably still leaves some unnecessary computations */
#define bs_write1(s,v) ((s)->i_bits_encoded += 1)
#define bs_write(s,n,v) ((s)->i_bits_encoded += (n))
#define bs_write_ue(s,v) ((s)->i_bits_encoded += bs_size_ue(v))
#define bs_write_se(s,v) ((s)->i_bits_encoded += bs_size_se(v))
#define bs_write_te(s,v,l) ((s)->i_bits_encoded += bs_size_te(v,l))
#define x264_macroblock_write_cavlc static x264_macroblock_size_cavlc
#include "cavlc.c"

/* CABAC: not exactly the same. x264_cabac_size_decision() keeps track of
 * fractional bits, but only finite precision. */
#undef x264_cabac_encode_decision
#undef x264_cabac_encode_decision_noup
#undef x264_cabac_encode_bypass
#undef x264_cabac_encode_terminal
#define x264_cabac_encode_decision(c,x,v) x264_cabac_size_decision(c,x,v)
#define x264_cabac_encode_decision_noup(c,x,v) x264_cabac_size_decision_noup(c,x,v)
#define x264_cabac_encode_terminal(c) ((c)->f8_bits_encoded += 7)
#define x264_cabac_encode_bypass(c,v) ((c)->f8_bits_encoded += 256)
#define x264_cabac_encode_ue_bypass(c,e,v) ((c)->f8_bits_encoded += (bs_size_ue_big(v+(1<<e)-1)-e)<<8)
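/* Note: f8_bits_encoded accumulates cost in 1/256-bit units (CABAC_SIZE_BITS == 8 fractional
 * bits), so a bypass bin is charged exactly 256 and the terminal (end-of-slice) bin is
 * approximated as 7/256 of a bit. */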
#define x264_macroblock_write_cabac static x264_macroblock_size_cabac
#include "cabac.c"

#define COPY_CABAC h->mc.memcpy_aligned( &cabac_tmp.f8_bits_encoded, &h->cabac.f8_bits_encoded, \
        sizeof(x264_cabac_t) - offsetof(x264_cabac_t,f8_bits_encoded) - (CHROMA444 ? 0 : (1024+12)-460) )
#define COPY_CABAC_PART( pos, size )\
        memcpy( &cb->state[pos], &h->cabac.state[pos], size )

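/* Caches of the source (fenc) block complexity used by the psy-RD metric below.
 * Cached values are stored +1 so that a value of 0 can mean "not yet computed". */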
static ALWAYS_INLINE uint64_t cached_hadamard( x264_t *h, int size, int x, int y )
{
    static const uint8_t hadamard_shift_x[4] = {4, 4, 3, 3};
    static const uint8_t hadamard_shift_y[4] = {4-0, 3-0, 4-1, 3-1};
    static const uint8_t hadamard_offset[4] = {0, 1, 3, 5};
    int cache_index = (x >> hadamard_shift_x[size]) + (y >> hadamard_shift_y[size])
                    + hadamard_offset[size];
    uint64_t res = h->mb.pic.fenc_hadamard_cache[cache_index];
    if( res )
        return res - 1;
    else
    {
        pixel *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
        res = h->pixf.hadamard_ac[size]( fenc, FENC_STRIDE );
        h->mb.pic.fenc_hadamard_cache[cache_index] = res + 1;
        return res;
    }
}

static ALWAYS_INLINE int cached_satd( x264_t *h, int size, int x, int y )
{
    static const uint8_t satd_shift_x[3] = {3, 2, 2};
    static const uint8_t satd_shift_y[3] = {2-1, 3-2, 2-2};
    static const uint8_t satd_offset[3] = {0, 8, 16};
    ALIGNED_16( static pixel zero[16] ) = {0};
    int cache_index = (x >> satd_shift_x[size - PIXEL_8x4]) + (y >> satd_shift_y[size - PIXEL_8x4])
                    + satd_offset[size - PIXEL_8x4];
    int res = h->mb.pic.fenc_satd_cache[cache_index];
    if( res )
        return res - 1;
    else
    {
        pixel *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
        int dc = h->pixf.sad[size]( fenc, FENC_STRIDE, zero, 0 ) >> 1;
        res = h->pixf.satd[size]( fenc, FENC_STRIDE, zero, 0 ) - dc;
        h->mb.pic.fenc_satd_cache[cache_index] = res + 1;
        return res;
    }
}

/* Psy RD distortion metric: SSD plus "Absolute Difference of Complexities". */
/* SATD and SA8D are used to measure block complexity. */
/* The differences in both SATD and SA8D scores are used, to avoid bias from the DCT size: using SATD */
/* alone, for example, results in overuse of the 8x8 DCT, while the opposite occurs when using only SA8D. */

/* FIXME: Is there a better metric than averaged SATD/SA8D difference for complexity difference? */
/* Hadamard transform is recursive, so a SATD+SA8D can be done faster by taking advantage of this fact. */
/* This optimization can also be used in non-RD transform decision. */
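/* As implemented in ssd_plane() below, the metric works out to roughly:
 *   distortion = SSD + ((|AC(fenc) - AC(fdec)| * i_psy_rd * i_psy_rd_lambda + 128) >> 8)
 * where the AC complexity comes from hadamard_ac() (SATD+SA8D, halved) for 8x8 and larger
 * partitions, and from SATD minus the DC term for smaller ones. */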

static inline int ssd_plane( x264_t *h, int size, int p, int x, int y )
{
    ALIGNED_16( static pixel zero[16] ) = {0};
    int satd = 0;
    pixel *fdec = h->mb.pic.p_fdec[p] + x + y*FDEC_STRIDE;
    pixel *fenc = h->mb.pic.p_fenc[p] + x + y*FENC_STRIDE;
    if( p == 0 && h->mb.i_psy_rd )
    {
        /* If the plane is smaller than 8x8, we can't do an SA8D; this probably isn't a big problem. */
        if( size <= PIXEL_8x8 )
        {
            uint64_t fdec_acs = h->pixf.hadamard_ac[size]( fdec, FDEC_STRIDE );
            uint64_t fenc_acs = cached_hadamard( h, size, x, y );
            satd = abs((int32_t)fdec_acs - (int32_t)fenc_acs)
                 + abs((int32_t)(fdec_acs>>32) - (int32_t)(fenc_acs>>32));
            satd >>= 1;
        }
        else
        {
            int dc = h->pixf.sad[size]( fdec, FDEC_STRIDE, zero, 0 ) >> 1;
            satd = abs(h->pixf.satd[size]( fdec, FDEC_STRIDE, zero, 0 ) - dc - cached_satd( h, size, x, y ));
        }
        satd = (satd * h->mb.i_psy_rd * h->mb.i_psy_rd_lambda + 128) >> 8;
    }
    return h->pixf.ssd[size](fenc, FENC_STRIDE, fdec, FDEC_STRIDE) + satd;
}

static inline int ssd_mb( x264_t *h )
{
    int chroma_size = h->luma2chroma_pixel[PIXEL_16x16];
    int chroma_ssd = ssd_plane(h, chroma_size, 1, 0, 0) + ssd_plane(h, chroma_size, 2, 0, 0);
    chroma_ssd = ((uint64_t)chroma_ssd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
    return ssd_plane(h, PIXEL_16x16, 0, 0, 0) + chroma_ssd;
}

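/* Full-macroblock RD cost: SSD (luma plus lambda-weighted chroma) plus the lambda2-scaled
 * bit cost. CABAC counts fractional bits (1/256 units, hence the >>16); CAVLC counts whole
 * bits (hence the >>8); a skipped macroblock is charged one bit. */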
static int x264_rd_cost_mb( x264_t *h, int i_lambda2 )
{
    int b_transform_bak = h->mb.b_transform_8x8;
    int i_ssd;
    int i_bits;
    int type_bak = h->mb.i_type;

    x264_macroblock_encode( h );

    if( h->mb.b_deblock_rdo )
        x264_macroblock_deblock( h );

    i_ssd = ssd_mb( h );

    if( IS_SKIP( h->mb.i_type ) )
    {
        i_bits = (1 * i_lambda2 + 128) >> 8;
    }
    else if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        x264_macroblock_size_cabac( h, &cabac_tmp );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 32768 ) >> 16;
    }
    else
    {
        x264_macroblock_size_cavlc( h );
        i_bits = ( h->out.bs.i_bits_encoded * i_lambda2 + 128 ) >> 8;
    }

    h->mb.b_transform_8x8 = b_transform_bak;
    h->mb.i_type = type_bak;

    return i_ssd + i_bits;
}

/* partition RD functions use 8 bits more precision to avoid large rounding errors at low QPs */
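/* i.e. they return (i_ssd << 8) + i_bits, with i_bits already scaled by i_lambda2. */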

static uint64_t x264_rd_cost_subpart( x264_t *h, int i_lambda2, int i4, int i_pixel )
{
    uint64_t i_ssd, i_bits;

    x264_macroblock_encode_p4x4( h, i4 );
    if( i_pixel == PIXEL_8x4 )
        x264_macroblock_encode_p4x4( h, i4+1 );
    if( i_pixel == PIXEL_4x8 )
        x264_macroblock_encode_p4x4( h, i4+2 );

    i_ssd = ssd_plane( h, i_pixel, 0, block_idx_x[i4]*4, block_idx_y[i4]*4 );
    if( CHROMA444 )
    {
        int chromassd = ssd_plane( h, i_pixel, 1, block_idx_x[i4]*4, block_idx_y[i4]*4 )
                      + ssd_plane( h, i_pixel, 2, block_idx_x[i4]*4, block_idx_y[i4]*4 );
        chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
        i_ssd += chromassd;
    }

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        x264_subpartition_size_cabac( h, &cabac_tmp, i4, i_pixel );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = x264_subpartition_size_cavlc( h, i4, i_pixel ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

uint64_t x264_rd_cost_part( x264_t *h, int i_lambda2, int i4, int i_pixel )
{
    uint64_t i_ssd, i_bits;
    int i8 = i4 >> 2;
    int ssd_x;
    int ssd_y;
    int chromapix;
    int chromassd;

    if( i_pixel == PIXEL_16x16 )
    {
        int i_cost = x264_rd_cost_mb( h, i_lambda2 );
        return i_cost;
    }

    if( i_pixel > PIXEL_8x8 )
        return x264_rd_cost_subpart( h, i_lambda2, i4, i_pixel );

    h->mb.i_cbp_luma = 0;

    x264_macroblock_encode_p8x8( h, i8 );
    if( i_pixel == PIXEL_16x8 )
        x264_macroblock_encode_p8x8( h, i8+1 );
    if( i_pixel == PIXEL_8x16 )
        x264_macroblock_encode_p8x8( h, i8+2 );

    ssd_x = 8*(i8&1);
    ssd_y = 8*(i8>>1);
    i_ssd = ssd_plane( h, i_pixel, 0, ssd_x, ssd_y );
    chromapix = h->luma2chroma_pixel[i_pixel];
    chromassd = ssd_plane( h, chromapix, 1, ssd_x>>CHROMA_H_SHIFT, ssd_y>>CHROMA_V_SHIFT )
              + ssd_plane( h, chromapix, 2, ssd_x>>CHROMA_H_SHIFT, ssd_y>>CHROMA_V_SHIFT );
    i_ssd += ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        x264_partition_size_cabac( h, &cabac_tmp, i8, i_pixel );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = x264_partition_size_cavlc( h, i8, i_pixel ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

static uint64_t x264_rd_cost_i8x8( x264_t *h, int i_lambda2, int i8, int i_mode, pixel edge[4][32] )
{
    uint64_t i_ssd, i_bits;
    int plane_count = CHROMA444 ? 3 : 1;
    int i_qp = h->mb.i_qp;
    int p;

    h->mb.i_cbp_luma &= ~(1<<i8);
    h->mb.b_transform_8x8 = 1;

    for( p = 0; p < plane_count; p++ )
    {
        x264_mb_encode_i8x8( h, p, i8, i_qp, i_mode, edge[p], 1 );
        i_qp = h->mb.i_chroma_qp;
    }

    i_ssd = ssd_plane( h, PIXEL_8x8, 0, (i8&1)*8, (i8>>1)*8 );
    if( CHROMA444 )
    {
        int chromassd = ssd_plane( h, PIXEL_8x8, 1, (i8&1)*8, (i8>>1)*8 )
                      + ssd_plane( h, PIXEL_8x8, 2, (i8&1)*8, (i8>>1)*8 );
        chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
        i_ssd += chromassd;
    }

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        x264_partition_i8x8_size_cabac( h, &cabac_tmp, i8, i_mode );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = x264_partition_i8x8_size_cavlc( h, i8, i_mode ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

static uint64_t x264_rd_cost_i4x4( x264_t *h, int i_lambda2, int i4, int i_mode )
{
    uint64_t i_ssd, i_bits;
    int plane_count = CHROMA444 ? 3 : 1;
    int i_qp = h->mb.i_qp;
    int p;

    for( p = 0; p < plane_count; p++ )
    {
        x264_mb_encode_i4x4( h, p, i4, i_qp, i_mode, 1 );
        i_qp = h->mb.i_chroma_qp;
    }

    i_ssd = ssd_plane( h, PIXEL_4x4, 0, block_idx_x[i4]*4, block_idx_y[i4]*4 );
    if( CHROMA444 )
    {
        int chromassd = ssd_plane( h, PIXEL_4x4, 1, block_idx_x[i4]*4, block_idx_y[i4]*4 )
                      + ssd_plane( h, PIXEL_4x4, 2, block_idx_x[i4]*4, block_idx_y[i4]*4 );
        chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
        i_ssd += chromassd;
    }

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        x264_partition_i4x4_size_cabac( h, &cabac_tmp, i4, i_mode );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = x264_partition_i4x4_size_cavlc( h, i4, i_mode ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

static uint64_t x264_rd_cost_chroma( x264_t *h, int i_lambda2, int i_mode, int b_dct )
{
    uint64_t i_ssd, i_bits;
    int chromapix;

    if( b_dct )
        x264_mb_encode_chroma( h, 0, h->mb.i_chroma_qp );

    chromapix = h->luma2chroma_pixel[PIXEL_16x16];
    i_ssd = ssd_plane( h, chromapix, 1, 0, 0 )
          + ssd_plane( h, chromapix, 2, 0, 0 );

    h->mb.i_chroma_pred_mode = i_mode;

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        x264_chroma_size_cabac( h, &cabac_tmp );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = x264_chroma_size_cavlc( h ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

/****************************************************************************
 * Trellis RD quantization
 ****************************************************************************/

#define TRELLIS_SCORE_MAX  LLN(-1)     // negative marks the node as invalid
#define TRELLIS_SCORE_BIAS LLN(1)<<60; // bias so that all valid scores are positive, even after negative contributions from psy
#define CABAC_SIZE_BITS 8
#define LAMBDA_BITS 4

/* precalculate the cost of coding various combinations of bits in a single context */
void x264_rdo_init( void )
{
    int i_prefix;
    int i_ctx;

    for( i_prefix = 0; i_prefix < 15; i_prefix++ )
    {
        for( i_ctx = 0; i_ctx < 128; i_ctx++ )
        {
            int f8_bits = 0;
            uint8_t ctx = i_ctx;
            int i;

            for( i = 1; i < i_prefix; i++ )
                f8_bits += x264_cabac_size_decision2( &ctx, 1 );
            if( i_prefix > 0 && i_prefix < 14 )
                f8_bits += x264_cabac_size_decision2( &ctx, 0 );
            f8_bits += 1 << CABAC_SIZE_BITS; //sign

            x264_cabac_size_unary[i_prefix][i_ctx] = f8_bits;
            x264_cabac_transition_unary[i_prefix][i_ctx] = ctx;
        }
    }
    for( i_ctx = 0; i_ctx < 128; i_ctx++ )
    {
        int f8_bits = 0;
        uint8_t ctx = i_ctx;
        int i;

        for( i = 0; i < 5; i++ )
            f8_bits += x264_cabac_size_decision2( &ctx, 1 );
        f8_bits += 1 << CABAC_SIZE_BITS; //sign

        cabac_size_5ones[i_ctx] = f8_bits;
        cabac_transition_5ones[i_ctx] = ctx;
    }
}

typedef struct
{
    uint64_t score;
    int level_idx; // index into level_tree[]
    uint8_t cabac_state[4]; // just contexts 0,4,8,9 of the 10 relevant to coding abs_level_m1
} trellis_node_t;
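/* The 8 node_ctx values mirror the CABAC level-coding context state: roughly, nodes 0-3
 * correspond to having coded 0, 1, 2 or >=3 coefficients equal to 1 (and none larger),
 * and nodes 4-7 to the context progression once a coefficient greater than 1 has been coded. */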

typedef struct
{
    uint16_t next;
    uint16_t abs_level;
} trellis_level_t;

// TODO:
// save cabac state between blocks?
// use trellis' RD score instead of x264_mb_decimate_score?
// code 8x8 sig/last flags forwards with deadzone and save the contexts at
//   each position?
// change weights when using CQMs?

// possible optimizations:
// make scores fit in 32bit
// save quantized coefs during rd, to avoid a duplicate trellis in the final encode
// if trellissing all MBRD modes, finish SSD calculation so we can skip all of
//   the normal dequant/idct/ssd/cabac

// the unquant_mf here is not the same as dequant_mf:
// in normal operation (dct->quant->dequant->idct) the dct and idct are not
// normalized. quant/dequant absorb those scaling factors.
// in this function, we just do (quant->unquant) and want the output to be
// comparable to the input. so unquant is the direct inverse of quant,
// and uses the dct scaling factors, not the idct ones.
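// concretely, as used below: reconstructed = (unquant_mf[zigzag[i]] * abs_level + 128) >> 8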

#define SIGN(x,y) ((x^(y >> 31))-(y >> 31))

#define SET_LEVEL(ndst, nsrc, l) {\
    if( sizeof(trellis_level_t) == sizeof(uint32_t) )\
        M32( &level_tree[levels_used] ) = pack16to32( nsrc.level_idx, l );\
    else\
        level_tree[levels_used] = (trellis_level_t){ nsrc.level_idx, l };\
    ndst.level_idx = levels_used;\
    levels_used++;\
}
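/* level_tree[] is a bump-allocated, backward-linked list of coded levels: each entry holds
 * an abs_level and the index ("next") of the previous entry along the same path. SET_LEVEL
 * appends one entry and points the destination node's level_idx at it; the winning path is
 * unwound after the trellis finishes. */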

// encode all values of the dc coef in a block which is known to have no ac
static NOINLINE
int trellis_dc_shortcut( int sign_coef, int quant_coef, int unquant_mf, int coef_weight, int lambda2, uint8_t *cabac_state, int cost_sig )
{
    uint64_t bscore = TRELLIS_SCORE_MAX;
    int ret = 0;
    int q = abs( quant_coef );
    int abs_level;

    for( abs_level = q-1; abs_level <= q; abs_level++ )
    {
        int unquant_abs_level = (unquant_mf * abs_level + 128) >> 8;

        /* Optimize rounding for DC coefficients in DC-only luma 4x4/8x8 blocks. */
        int d = sign_coef - ((SIGN(unquant_abs_level, sign_coef) + 8)&~15);
        uint64_t score = (uint64_t)d*d * coef_weight;

        /* code the proposed level, and count how much entropy it would take */
        if( abs_level )
        {
            unsigned f8_bits = cost_sig;
            int prefix = X264_MIN( abs_level - 1, 14 );
            f8_bits += x264_cabac_size_decision_noup2( cabac_state+1, prefix > 0 );
            f8_bits += x264_cabac_size_unary[prefix][cabac_state[5]];
            if( abs_level >= 15 )
                f8_bits += bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS;
            score += (uint64_t)f8_bits * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );
        }

        COPY2_IF_LT( bscore, score, ret, abs_level );
    }
    return SIGN(ret, sign_coef);
}

// encode one value of one coef in one context
static ALWAYS_INLINE
int trellis_coef( int j, int const_level, int abs_level, int prefix, int suffix_cost,
                  int node_ctx, int level1_ctx, int levelgt1_ctx, uint64_t ssd, int cost_siglast[3],
                  trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                  trellis_level_t *level_tree, int levels_used, int lambda2, uint8_t *level_state )
{
    uint64_t score = nodes_prev[j].score + ssd;
    /* code the proposed level, and count how much entropy it would take */
    unsigned f8_bits = cost_siglast[ j ? 1 : 2 ];
    uint8_t level1_state = (j >= 3) ? nodes_prev[j].cabac_state[level1_ctx>>2] : level_state[level1_ctx];
    uint8_t levelgt1_state;
    f8_bits += x264_cabac_entropy[level1_state ^ (const_level > 1)];

    if( const_level > 1 )
    {
        levelgt1_state = j >= 6 ? nodes_prev[j].cabac_state[levelgt1_ctx-6] : level_state[levelgt1_ctx];
        f8_bits += x264_cabac_size_unary[prefix][levelgt1_state] + suffix_cost;
    }
    else
        f8_bits += 1 << CABAC_SIZE_BITS;
    score += (uint64_t)f8_bits * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );

    /* save the node if it's better than any existing node with the same cabac ctx */
    if( score < nodes_cur[node_ctx].score )
    {
        nodes_cur[node_ctx].score = score;
        if( j == 2 || (j <= 3 && node_ctx == 4) ) // init from input state
            M32(nodes_cur[node_ctx].cabac_state) = M32(level_state+12);
        else if( j >= 3 )
            M32(nodes_cur[node_ctx].cabac_state) = M32(nodes_prev[j].cabac_state);
        if( j >= 3 ) // skip the transition if we're not going to reuse the context
            nodes_cur[node_ctx].cabac_state[level1_ctx>>2] = x264_cabac_transition[level1_state][const_level > 1];
        if( const_level > 1 && node_ctx == 7 )
            nodes_cur[node_ctx].cabac_state[levelgt1_ctx-6] = x264_cabac_transition_unary[prefix][levelgt1_state];
        nodes_cur[node_ctx].level_idx = nodes_prev[j].level_idx;
        SET_LEVEL( nodes_cur[node_ctx], nodes_prev[j], abs_level );
    }
    return levels_used;
}

// encode one value of one coef in all contexts, templated by which value that is.
// in ctx_lo, the set of live nodes is contiguous and starts at ctx0, so return as soon as we've seen one failure.
// in ctx_hi, they're contiguous within each block of 4 ctxs, but not necessarily starting at the beginning,
// so exploiting that would be more complicated.
static NOINLINE
int trellis_coef0_0( uint64_t ssd0, trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used )
{
    int j;

    nodes_cur[0].score = nodes_prev[0].score + ssd0;
    nodes_cur[0].level_idx = nodes_prev[0].level_idx;
    for( j = 1; j < 4 && (int64_t)nodes_prev[j].score >= 0; j++ )
    {
        nodes_cur[j].score = nodes_prev[j].score;
        if( j >= 3 )
            M32(nodes_cur[j].cabac_state) = M32(nodes_prev[j].cabac_state);
        SET_LEVEL( nodes_cur[j], nodes_prev[j], 0 );
    }
    return levels_used;
}

static NOINLINE
int trellis_coef0_1( uint64_t ssd0, trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used )
{
    int j;
    for( j = 1; j < 8; j++ )
        // this branch only affects speed, not function; there's nothing wrong with updating invalid nodes in coef0.
        if( (int64_t)nodes_prev[j].score >= 0 )
        {
            nodes_cur[j].score = nodes_prev[j].score;
            if( j >= 3 )
                M32(nodes_cur[j].cabac_state) = M32(nodes_prev[j].cabac_state);
            SET_LEVEL( nodes_cur[j], nodes_prev[j], 0 );
        }
    return levels_used;
}

#define COEF(const_level, ctx_hi, j, ...)\
    if( !j || (int64_t)nodes_prev[j].score >= 0 )\
        levels_used = trellis_coef( j, const_level, abs_level, prefix, suffix_cost, __VA_ARGS__,\
                                    j?ssd1:ssd0, cost_siglast, nodes_cur, nodes_prev,\
                                    level_tree, levels_used, lambda2, level_state );\
    else if( !ctx_hi )\
        return levels_used;

static NOINLINE
int trellis_coef1_0( uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state )
{
    int abs_level = 1, prefix = 1, suffix_cost = 0;
    COEF( 1, 0, 0, 1, 1, 0 );
    COEF( 1, 0, 1, 2, 2, 0 );
    COEF( 1, 0, 2, 3, 3, 0 );
    COEF( 1, 0, 3, 3, 4, 0 );
    return levels_used;
}

static NOINLINE
int trellis_coef1_1( uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state )
{
    int abs_level = 1, prefix = 1, suffix_cost = 0;
    COEF( 1, 1, 1, 2, 2, 0 );
    COEF( 1, 1, 2, 3, 3, 0 );
    COEF( 1, 1, 3, 3, 4, 0 );
    COEF( 1, 1, 4, 4, 0, 0 );
    COEF( 1, 1, 5, 5, 0, 0 );
    COEF( 1, 1, 6, 6, 0, 0 );
    COEF( 1, 1, 7, 7, 0, 0 );
    return levels_used;
}

static NOINLINE
int trellis_coefn_0( int abs_level, uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state, int levelgt1_ctx )
{
    int prefix = X264_MIN( abs_level-1, 14 );
    int suffix_cost = abs_level >= 15 ? bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS : 0;
    COEF( 2, 0, 0, 4, 1, 5 );
    COEF( 2, 0, 1, 4, 2, 5 );
    COEF( 2, 0, 2, 4, 3, 5 );
    COEF( 2, 0, 3, 4, 4, 5 );
    return levels_used;
}

static NOINLINE
int trellis_coefn_1( int abs_level, uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state, int levelgt1_ctx )
{
    int prefix = X264_MIN( abs_level-1, 14 );
    int suffix_cost = abs_level >= 15 ? bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS : 0;
    COEF( 2, 1, 1, 4, 2, 5 );
    COEF( 2, 1, 2, 4, 3, 5 );
    COEF( 2, 1, 3, 4, 4, 5 );
    COEF( 2, 1, 4, 5, 0, 6 );
    COEF( 2, 1, 5, 6, 0, 7 );
    COEF( 2, 1, 6, 7, 0, 8 );
    COEF( 2, 1, 7, 7, 0, levelgt1_ctx );
    return levels_used;
}

static ALWAYS_INLINE
int quant_trellis_cabac( x264_t *h, dctcoef *dct,
                         udctcoef *quant_mf, udctcoef *quant_bias, const int *unquant_mf,
                         const uint8_t *zigzag, int ctx_block_cat, int lambda2, int b_ac,
                         int b_chroma, int dc, int num_coefs, int idx )
{
    ALIGNED_ARRAY_N( dctcoef, orig_coefs, [64] );
    ALIGNED_ARRAY_N( dctcoef, quant_coefs, [64] );

    const uint32_t *coef_weight1 = num_coefs == 64 ? x264_dct8_weight_tab : x264_dct4_weight_tab;
    const uint32_t *coef_weight2 = num_coefs == 64 ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;
    const int b_interlaced = MB_INTERLACED;
    uint8_t *cabac_state_sig = &h->cabac.state[ x264_significant_coeff_flag_offset[b_interlaced][ctx_block_cat] ];
    uint8_t *cabac_state_last = &h->cabac.state[ x264_last_coeff_flag_offset[b_interlaced][ctx_block_cat] ];
    int levelgt1_ctx = b_chroma && dc ? 8 : 9;
    int last_nnz;
    uint8_t *cabac_state;
    trellis_level_t level_tree[64*8*2];
    int levels_used;
    trellis_node_t nodes[2][8];
    trellis_node_t *nodes_cur;
    trellis_node_t *nodes_prev;
    trellis_node_t *bnode;
    int i;
    int j2;
    ALIGNED_4( uint8_t level_state[16] );
    int level;

    if( dc )
    {
        if( num_coefs == 16 )
        {
            memcpy( orig_coefs, dct, sizeof(dctcoef)*16 );
            if( !h->quantf.quant_4x4_dc( dct, quant_mf[0] >> 1, quant_bias[0] << 1 ) )
                return 0;
            h->zigzagf.scan_4x4( quant_coefs, dct );
        }
        else
        {
            int nz;
            int i2;

            memcpy( orig_coefs, dct, sizeof(dctcoef)*num_coefs );
            nz = h->quantf.quant_2x2_dc( &dct[0], quant_mf[0] >> 1, quant_bias[0] << 1 );
            if( num_coefs == 8 )
                nz |= h->quantf.quant_2x2_dc( &dct[4], quant_mf[0] >> 1, quant_bias[0] << 1 );
            if( !nz )
                return 0;
            for( i2 = 0; i2 < num_coefs; i2++ )
                quant_coefs[i2] = dct[zigzag[i2]];
        }
    }
    else
    {
        if( num_coefs == 64 )
        {
            h->mc.memcpy_aligned( orig_coefs, dct, sizeof(dctcoef)*64 );
            if( !h->quantf.quant_8x8( dct, quant_mf, quant_bias ) )
                return 0;
            h->zigzagf.scan_8x8( quant_coefs, dct );
        }
        else //if( num_coefs == 16 )
        {
            memcpy( orig_coefs, dct, sizeof(dctcoef)*16 );
            if( !h->quantf.quant_4x4( dct, quant_mf, quant_bias ) )
                return 0;
            h->zigzagf.scan_4x4( quant_coefs, dct );
        }
    }

    last_nnz = h->quantf.coeff_last[ctx_block_cat]( quant_coefs+b_ac )+b_ac;
    cabac_state = &h->cabac.state[ x264_coeff_abs_level_m1_offset[ctx_block_cat] ];

    /* shortcut for dc-only blocks.
     * this doesn't affect the output, but saves some unnecessary computation. */
    if( last_nnz == 0 && !dc )
    {
        int cost_sig = x264_cabac_size_decision_noup2( &cabac_state_sig[0], 1 )
                     + x264_cabac_size_decision_noup2( &cabac_state_last[0], 1 );
        dct[0] = trellis_dc_shortcut( orig_coefs[0], quant_coefs[0], unquant_mf[0], coef_weight2[0], lambda2, cabac_state, cost_sig );
        return !!dct[0];
    }

#if HAVE_MMX && ARCH_X86_64
#define TRELLIS_ARGS unquant_mf, zigzag, lambda2, last_nnz, orig_coefs, quant_coefs, dct,\
                     cabac_state_sig, cabac_state_last, M64(cabac_state), M16(cabac_state+8)
    if( num_coefs == 16 && !dc )
        if( b_chroma || !h->mb.i_psy_trellis )
            return h->quantf.trellis_cabac_4x4( TRELLIS_ARGS, b_ac );
        else
            return h->quantf.trellis_cabac_4x4_psy( TRELLIS_ARGS, b_ac, h->mb.pic.fenc_dct4[idx&15], h->mb.i_psy_trellis );
    else if( num_coefs == 64 && !dc )
        if( b_chroma || !h->mb.i_psy_trellis )
            return h->quantf.trellis_cabac_8x8( TRELLIS_ARGS, b_interlaced );
        else
            return h->quantf.trellis_cabac_8x8_psy( TRELLIS_ARGS, b_interlaced, h->mb.pic.fenc_dct8[idx&3], h->mb.i_psy_trellis);
    else if( num_coefs == 8 && dc )
        return h->quantf.trellis_cabac_chroma_422_dc( TRELLIS_ARGS );
    else if( dc )
        return h->quantf.trellis_cabac_dc( TRELLIS_ARGS, num_coefs-1 );
#endif

    // (# of coefs) * (# of ctx) * (# of levels tried) = 1024
    // we don't need to keep all of those: (# of coefs) * (# of ctx) would be enough,
    // but it takes more time to remove dead states than you gain in reduced memory.
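    /* Trellis structure: one column per coefficient, processed from the last nonzero
     * coefficient down to the first. nodes_prev/nodes_cur hold, for each of the 8 node
     * contexts, the best score (distortion plus lambda-scaled bit cost) reached so far,
     * together with its packed CABAC level-context states and its path in level_tree[]. */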
    levels_used = 1;
    /* init trellis */
    nodes_cur = nodes[0];
    nodes_prev = nodes[1];
    for( j2 = 1; j2 < 4; j2++ )
        nodes_cur[j2].score = TRELLIS_SCORE_MAX;
    nodes_cur[0].score = TRELLIS_SCORE_BIAS;
    nodes_cur[0].level_idx = 0;
    level_tree[0].abs_level = 0;
    level_tree[0].next = 0;
    memcpy( level_state, cabac_state, 10 );
    level_state[12] = cabac_state[0]; // packed subset for copying into trellis_node_t
    level_state[13] = cabac_state[4];
    level_state[14] = cabac_state[8];
    level_state[15] = cabac_state[9];

    idx &= num_coefs == 64 ? 3 : 15;

    // coefs are processed in reverse order, because that's how the abs value is coded.
    // last_coef and significant_coef flags are normally coded in forward order, but
    // we have to reverse them to match the levels.
    // in 4x4 blocks, last_coef and significant_coef use a separate context for each
    // position, so the order doesn't matter, and we don't even have to update their contexts.
    // in 8x8 blocks, some positions share contexts, so we'll just have to hope that
    // cabac isn't too sensitive.
    i = last_nnz;
#define TRELLIS_LOOP(ctx_hi)\
    for( ; i >= b_ac; i-- )\
    {\
        /* skip 0s: this doesn't affect the output, but saves some unnecessary computation. */\
        if( !quant_coefs[i] )\
        {\
            /* no need to calculate ssd of 0s: it's the same in all nodes.\
             * no need to modify level_tree for ctx=0: it starts with an infinite loop of 0s.\
             * subtracting from one score is equivalent to adding to the rest. */\
            if( !ctx_hi )\
            {\
                int sigindex = !dc && num_coefs == 64 ? x264_significant_coeff_flag_offset_8x8[b_interlaced][i] :\
                               b_chroma && dc && num_coefs == 8 ? x264_coeff_flag_offset_chroma_422_dc[i] : i;\
                uint64_t cost_sig0 = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 0 )\
                                   * (uint64_t)lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );\
                nodes_cur[0].score -= cost_sig0;\
            }\
            for( int j = 1; j < (ctx_hi?8:4); j++ )\
                SET_LEVEL( nodes_cur[j], nodes_cur[j], 0 );\
            continue;\
        }\
\
        int sign_coef = orig_coefs[zigzag[i]];\
        int abs_coef = abs( sign_coef );\
        int q = abs( quant_coefs[i] );\
        int cost_siglast[3]; /* { zero, nonzero, nonzero-and-last } */\
        XCHG( trellis_node_t*, nodes_cur, nodes_prev );\
        for( int j = ctx_hi; j < 8; j++ )\
            nodes_cur[j].score = TRELLIS_SCORE_MAX;\
\
        if( i < num_coefs-1 || ctx_hi )\
        {\
            int sigindex = !dc && num_coefs == 64 ? x264_significant_coeff_flag_offset_8x8[b_interlaced][i] :\
                           b_chroma && dc && num_coefs == 8 ? x264_coeff_flag_offset_chroma_422_dc[i] : i;\
            int lastindex = !dc && num_coefs == 64 ? x264_last_coeff_flag_offset_8x8[i] :\
                            b_chroma && dc && num_coefs == 8 ? x264_coeff_flag_offset_chroma_422_dc[i] : i;\
            cost_siglast[0] = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 0 );\
            int cost_sig1 = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 1 );\
            cost_siglast[1] = x264_cabac_size_decision_noup2( &cabac_state_last[lastindex], 0 ) + cost_sig1;\
            if( !ctx_hi )\
                cost_siglast[2] = x264_cabac_size_decision_noup2( &cabac_state_last[lastindex], 1 ) + cost_sig1;\
        }\
        else\
        {\
            cost_siglast[0] = cost_siglast[1] = cost_siglast[2] = 0;\
        }\
\
        /* there are a few cases where increasing the coeff magnitude helps,\
         * but it's only around .003 dB, and skipping them ~doubles the speed of trellis.\
         * could also try q-2: that sometimes helps, but also sometimes decimates blocks\
         * that are better left coded, especially at QP > 40. */\
        uint64_t ssd0[2], ssd1[2];\
        for( int k = 0; k < 2; k++ )\
        {\
            int abs_level = q-1+k;\
            int unquant_abs_level = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[i]]) * abs_level + 128) >> 8);\
            int d = abs_coef - unquant_abs_level;\
            /* Psy trellis: bias in favor of higher AC coefficients in the reconstructed frame. */\
            if( h->mb.i_psy_trellis && i && !dc && !b_chroma )\
            {\
                int orig_coef = (num_coefs == 64) ? h->mb.pic.fenc_dct8[idx][zigzag[i]] : h->mb.pic.fenc_dct4[idx][zigzag[i]];\
                int predicted_coef = orig_coef - sign_coef;\
                int psy_value = abs(unquant_abs_level + SIGN(predicted_coef, sign_coef));\
                int psy_weight = coef_weight1[zigzag[i]] * h->mb.i_psy_trellis;\
                ssd1[k] = (uint64_t)d*d * coef_weight2[zigzag[i]] - psy_weight * psy_value;\
            }\
            else\
                /* FIXME: for i16x16 dc is this weight optimal? */\
                ssd1[k] = (uint64_t)d*d * (dc?256:coef_weight2[zigzag[i]]);\
            ssd0[k] = ssd1[k];\
            if( !i && !dc && !ctx_hi )\
            {\
                /* Optimize rounding for DC coefficients in DC-only luma 4x4/8x8 blocks. */\
                d = sign_coef - ((SIGN(unquant_abs_level, sign_coef) + 8)&~15);\
                ssd0[k] = (uint64_t)d*d * coef_weight2[zigzag[i]];\
            }\
        }\
\
        /* argument passing imposes some significant overhead here. gcc's interprocedural register allocation isn't up to it. */\
        switch( q )\
        {\
            case 1:\
                ssd1[0] += (uint64_t)cost_siglast[0] * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );\
                levels_used = trellis_coef0_##ctx_hi( ssd0[0]-ssd1[0], nodes_cur, nodes_prev, level_tree, levels_used );\
                levels_used = trellis_coef1_##ctx_hi( ssd0[1]-ssd1[0], ssd1[1]-ssd1[0], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state );\
                goto next##ctx_hi;\
            case 2:\
                levels_used = trellis_coef1_##ctx_hi( ssd0[0], ssd1[0], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state );\
                levels_used = trellis_coefn_##ctx_hi( q, ssd0[1], ssd1[1], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state, levelgt1_ctx );\
                goto next1;\
            default:\
                levels_used = trellis_coefn_##ctx_hi( q-1, ssd0[0], ssd1[0], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state, levelgt1_ctx );\
                levels_used = trellis_coefn_##ctx_hi( q, ssd0[1], ssd1[1], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state, levelgt1_ctx );\
                goto next1;\
        }\
        next##ctx_hi:;\
    }\
    /* output levels from the best path through the trellis */\
    bnode = &nodes_cur[ctx_hi];\
    for( int j = ctx_hi+1; j < (ctx_hi?8:4); j++ )\
        if( nodes_cur[j].score < bnode->score )\
            bnode = &nodes_cur[j];

    // keep 2 versions of the main quantization loop, depending on which subsets of the node_ctxs are live
    // node_ctx 0..3, i.e. having not yet encountered any coefs that might be quantized to >1
    TRELLIS_LOOP(0);

    if( bnode == &nodes_cur[0] )
    {
        /* We only need to zero an empty 4x4 block. 8x8 can be
           implicitly emptied via zero nnz, as can dc. */
        if( num_coefs == 16 && !dc )
            memset( dct, 0, 16 * sizeof(dctcoef) );
        return 0;
    }

    if(0) // accessible only by goto, not fallthrough
    {
        // node_ctx 1..7 (ctx0 ruled out because we never try both level0 and level2+ on the same coef)
        TRELLIS_LOOP(1);
    }

    level = bnode->level_idx;
    for( i = b_ac; i <= last_nnz; i++ )
    {
        dct[zigzag[i]] = SIGN(level_tree[level].abs_level, dct[zigzag[i]]);
        level = level_tree[level].next;
    }

    return 1;
}

/* FIXME: This is a gigantic hack. See below.
 *
 * CAVLC is much more difficult to trellis than CABAC.
 *
 * CABAC has only three states to track: significance map, last, and the
 * level state machine.
 * CAVLC, by comparison, has five: coeff_token (trailing + total),
 * total_zeroes, zero_run, and the level state machine.
 *
 * I know of no paper that has managed to design a close-to-optimal trellis
 * that covers all five of these and isn't exponential-time. As a result, this
 * "trellis" isn't: it's just a QNS search. Patches welcome for something better.
 * It's actually surprisingly fast, albeit not quite optimal. It's pretty close
 * though; since CAVLC only has 2^16 possible rounding modes (assuming only two
 * roundings as options), a bruteforce search is feasible. Testing shows
 * that this QNS is reasonably close to optimal in terms of compression.
 *
 * TODO:
 *  Don't bother changing large coefficients when it wouldn't affect bit cost
 *  (e.g. only affecting bypassed suffix bits).
 *  Don't re-run all parts of CAVLC bit cost calculation when not necessary.
 *  e.g. when changing a coefficient from one non-zero value to another in
 *  such a way that trailing ones and suffix length isn't affected. */
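/* Sketch of the search below: start from a quantization whose deadzone lies halfway between
 * round-to-nearest and round-down, then repeatedly flip the rounding of whichever single
 * coefficient gives the largest decrease in SSD + lambda2*bits, until no single flip helps. */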
static ALWAYS_INLINE
int quant_trellis_cavlc( x264_t *h, dctcoef *dct,
                         const udctcoef *quant_mf, const int *unquant_mf,
                         const uint8_t *zigzag, int ctx_block_cat, int lambda2, int b_ac,
                         int b_chroma, int dc, int num_coefs, int idx, int b_8x8 )
{
    ALIGNED_16( dctcoef quant_coefs[2][16] );
    ALIGNED_16( dctcoef coefs[16] ) = {0};
    const uint32_t *coef_weight1 = b_8x8 ? x264_dct8_weight_tab : x264_dct4_weight_tab;
    const uint32_t *coef_weight2 = b_8x8 ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;
    int delta_distortion[16];
    int64_t score = ULLN(1)<<62;
    int i, j;
    const int f = 1<<15;
    int nC = b_chroma && dc ? 3 + (num_coefs>>2)
           : ct_index[x264_mb_predict_non_zero_code( h, !b_chroma && dc ? (idx - LUMA_DC)*16 : idx )];

    /* Code for handling 8x8dct -> 4x4dct CAVLC munging. Input/output use a different
     * step/start/end than internal processing. */
    int step = 1;
    int start = b_ac;
    int end = num_coefs - 1;
    int last_nnz;
    int coef_mask;
    int round_mask;

    if( b_8x8 )
    {
        start = idx&3;
        end = 60 + start;
        step = 4;
    }
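    /* For CAVLC, an 8x8 DCT block is coded as four interleaved 4x4 blocks: sub-block (idx&3)
     * takes every 4th coefficient of the 8x8 zigzag, starting at offset idx&3. */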
    idx &= 15;

    lambda2 <<= LAMBDA_BITS;

    /* Find last non-zero coefficient. */
    for( i = end; i >= start; i -= step )
        if( (unsigned)(dct[zigzag[i]] * (dc?quant_mf[0]>>1:quant_mf[zigzag[i]]) + f-1) >= 2*f )
            break;

    if( i < start )
        goto zeroblock;

    /* Prepare for QNS search: calculate distortion caused by each DCT coefficient
     * rounding to be searched.
     *
     * We only search two roundings (nearest and nearest-1) like in CABAC trellis,
     * so we just store the difference in distortion between them. */
    last_nnz = b_8x8 ? i >> 2 : i;
    coef_mask = 0;
    round_mask = 0;
    for( i = b_ac, j = start; i <= last_nnz; i++, j += step )
    {
        int coef = dct[zigzag[j]];
        int abs_coef = abs(coef);
        int sign = coef < 0 ? -1 : 1;
        int nearest_quant = ( f + abs_coef * (dc?quant_mf[0]>>1:quant_mf[zigzag[j]]) ) >> 16;
        quant_coefs[1][i] = quant_coefs[0][i] = sign * nearest_quant;
        coefs[i] = quant_coefs[1][i];
        if( nearest_quant )
        {
            /* We initialize the trellis with a deadzone halfway between nearest rounding
             * and always-round-down. This gives much better results than initializing to either
             * extreme.
             * FIXME: should we initialize to the deadzones used by deadzone quant? */
            int deadzone_quant = ( f/2 + abs_coef * (dc?quant_mf[0]>>1:quant_mf[zigzag[j]]) ) >> 16;
            int unquant1 = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[j]]) * (nearest_quant-0) + 128) >> 8);
            int unquant0 = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[j]]) * (nearest_quant-1) + 128) >> 8);
            int d1 = abs_coef - unquant1;
            int d0 = abs_coef - unquant0;
            delta_distortion[i] = (d0*d0 - d1*d1) * (dc?256:coef_weight2[zigzag[j]]);

            /* Psy trellis: bias in favor of higher AC coefficients in the reconstructed frame. */
            if( h->mb.i_psy_trellis && j && !dc && !b_chroma )
            {
                int orig_coef = b_8x8 ? h->mb.pic.fenc_dct8[idx>>2][zigzag[j]] : h->mb.pic.fenc_dct4[idx][zigzag[j]];
                int predicted_coef = orig_coef - coef;
                int psy_weight = coef_weight1[zigzag[j]];
                int psy_value0 = h->mb.i_psy_trellis * abs(predicted_coef + unquant0 * sign);
                int psy_value1 = h->mb.i_psy_trellis * abs(predicted_coef + unquant1 * sign);
                delta_distortion[i] += (psy_value0 - psy_value1) * psy_weight;
            }

            quant_coefs[0][i] = sign * (nearest_quant-1);
            if( deadzone_quant != nearest_quant )
                coefs[i] = quant_coefs[0][i];
            else
                round_mask |= 1 << i;
        }
        else
            delta_distortion[i] = 0;
        coef_mask |= (!!coefs[i]) << i;
    }

    /* Calculate the cost of the starting state. */
    h->out.bs.i_bits_encoded = 0;
    if( !coef_mask )
        bs_write_vlc( &h->out.bs, x264_coeff0_token[nC] );
    else
        x264_cavlc_block_residual_internal( h, ctx_block_cat, coefs + b_ac, nC );
    score = (int64_t)h->out.bs.i_bits_encoded * lambda2;

    /* QNS loop: pick the change that improves RD the most, apply it, repeat.
     * coef_mask and round_mask are used to simplify tracking of nonzeroness
     * and rounding modes chosen. */
    while( 1 )
    {
        int64_t iter_score = score;
        int iter_distortion_delta = 0;
        int iter_coef = -1;
        int iter_mask = coef_mask;
        int iter_round = round_mask;
        for( i = b_ac; i <= last_nnz; i++ )
        {
            int cur_round;
            int round_change;
            int old_coef;
            int new_coef;
            int cur_mask;
            int cur_distortion_delta;
            int64_t cur_score;

            if( !delta_distortion[i] )
                continue;

            /* Set up all the variables for this iteration. */
            cur_round = round_mask ^ (1 << i);
            round_change = (cur_round >> i)&1;
            old_coef = coefs[i];
            new_coef = quant_coefs[round_change][i];
            cur_mask = (coef_mask&~(1 << i))|(!!new_coef << i);
            cur_distortion_delta = delta_distortion[i] * (round_change ? -1 : 1);
            cur_score = cur_distortion_delta;
            coefs[i] = new_coef;

            /* Count up bits. */
            h->out.bs.i_bits_encoded = 0;
            if( !cur_mask )
                bs_write_vlc( &h->out.bs, x264_coeff0_token[nC] );
            else
                x264_cavlc_block_residual_internal( h, ctx_block_cat, coefs + b_ac, nC );
            cur_score += (int64_t)h->out.bs.i_bits_encoded * lambda2;

            coefs[i] = old_coef;
            if( cur_score < iter_score )
            {
                iter_score = cur_score;
                iter_coef = i;
                iter_mask = cur_mask;
                iter_round = cur_round;
                iter_distortion_delta = cur_distortion_delta;
            }
        }
        if( iter_coef >= 0 )
        {
            score = iter_score - iter_distortion_delta;
            coef_mask = iter_mask;
            round_mask = iter_round;
            coefs[iter_coef] = quant_coefs[((round_mask >> iter_coef)&1)][iter_coef];
            /* Don't try adjusting coefficients we've already adjusted.
             * Testing suggests this doesn't hurt results -- and sometimes actually helps. */
            delta_distortion[iter_coef] = 0;
        }
        else
            break;
    }

    if( coef_mask )
    {
        for( i = b_ac, j = start; i < num_coefs; i++, j += step )
            dct[zigzag[j]] = coefs[i];
        return 1;
    }

zeroblock:
    if( !dc )
    {
        if( b_8x8 )
            for( i = start; i <= end; i+=step )
                dct[zigzag[i]] = 0;
        else
            memset( dct, 0, 16*sizeof(dctcoef) );
    }
    return 0;
}

int x264_quant_luma_dc_trellis( x264_t *h, dctcoef *dct, int i_quant_cat, int i_qp, int ctx_block_cat, int b_intra, int idx )
{
    if( h->param.b_cabac )
        return quant_trellis_cabac( h, dct,
            h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias0[i_quant_cat][i_qp],
            h->unquant4_mf[i_quant_cat][i_qp], x264_zigzag_scan4[MB_INTERLACED],
            ctx_block_cat, h->mb.i_trellis_lambda2[0][b_intra], 0, 0, 1, 16, idx );

    return quant_trellis_cavlc( h, dct,
        h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp], x264_zigzag_scan4[MB_INTERLACED],
        DCT_LUMA_DC, h->mb.i_trellis_lambda2[0][b_intra], 0, 0, 1, 16, idx, 0 );
}

static const uint8_t x264_zigzag_scan2x2[4] = { 0, 1, 2, 3 };
static const uint8_t x264_zigzag_scan2x4[8] = { 0, 2, 1, 4, 6, 3, 5, 7 };
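/* 2x2 chroma DC (4:2:0) needs no reordering; the 2x4 table is the 4:2:2 chroma DC scan. */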

int x264_quant_chroma_dc_trellis( x264_t *h, dctcoef *dct, int i_qp, int b_intra, int idx )
{
    const uint8_t *zigzag;
    int num_coefs;
    int quant_cat = CQM_4IC+1 - b_intra;

    if( CHROMA_FORMAT == CHROMA_422 )
    {
        zigzag = x264_zigzag_scan2x4;
        num_coefs = 8;
    }
    else
    {
        zigzag = x264_zigzag_scan2x2;
        num_coefs = 4;
    }

    if( h->param.b_cabac )
        return quant_trellis_cabac( h, dct,
            h->quant4_mf[quant_cat][i_qp], h->quant4_bias0[quant_cat][i_qp],
            h->unquant4_mf[quant_cat][i_qp], zigzag,
            DCT_CHROMA_DC, h->mb.i_trellis_lambda2[1][b_intra], 0, 1, 1, num_coefs, idx );

    return quant_trellis_cavlc( h, dct,
        h->quant4_mf[quant_cat][i_qp], h->unquant4_mf[quant_cat][i_qp], zigzag,
        DCT_CHROMA_DC, h->mb.i_trellis_lambda2[1][b_intra], 0, 1, 1, num_coefs, idx, 0 );
}

int x264_quant_4x4_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
                            int i_qp, int ctx_block_cat, int b_intra, int b_chroma, int idx )
{
    static const uint8_t ctx_ac[14] = {0,1,0,0,1,0,0,1,0,0,0,1,0,0};
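    /* b_ac is 1 for the AC-only block categories: their DC coefficient is coded in a separate
     * block, so the scan starts at coefficient 1. */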
    int b_ac = ctx_ac[ctx_block_cat];
    if( h->param.b_cabac )
        return quant_trellis_cabac( h, dct,
            h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias0[i_quant_cat][i_qp],
            h->unquant4_mf[i_quant_cat][i_qp], x264_zigzag_scan4[MB_INTERLACED],
            ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, b_chroma, 0, 16, idx );

    return quant_trellis_cavlc( h, dct,
        h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
        x264_zigzag_scan4[MB_INTERLACED],
        ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, b_chroma, 0, 16, idx, 0 );
}

int x264_quant_8x8_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
                            int i_qp, int ctx_block_cat, int b_intra, int b_chroma, int idx )
{
    int nzaccum;
    int i;

    if( h->param.b_cabac )
    {
        return quant_trellis_cabac( h, dct,
            h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias0[i_quant_cat][i_qp],
            h->unquant8_mf[i_quant_cat][i_qp], x264_zigzag_scan8[MB_INTERLACED],
            ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, b_chroma, 0, 64, idx );
    }

    /* 8x8 CAVLC is split into 4 4x4 blocks */
    nzaccum = 0;
    for( i = 0; i < 4; i++ )
    {
        int nz = quant_trellis_cavlc( h, dct,
            h->quant8_mf[i_quant_cat][i_qp], h->unquant8_mf[i_quant_cat][i_qp],
            x264_zigzag_scan8[MB_INTERLACED],
            DCT_LUMA_4x4, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, b_chroma, 0, 16, idx*4+i, 1 );
        /* Set up nonzero count for future calls */
        h->mb.cache.non_zero_count[x264_scan8[idx*4+i]] = nz;
        nzaccum |= nz;
    }
    STORE_8x8_NNZ( 0, idx, 0 );
    return nzaccum;
}