/*****************************************************************************
 * This file is part of Kvazaar HEVC encoder.
 *
 * Copyright (c) 2021, Tampere University, ITU/ISO/IEC, project contributors
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * * Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright notice, this
 *   list of conditions and the following disclaimer in the documentation and/or
 *   other materials provided with the distribution.
 *
 * * Neither the name of the Tampere University or ITU/ISO/IEC nor the names of its
 *   contributors may be used to endorse or promote products derived from
 *   this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 ****************************************************************************/

/*
 * \file
 * \brief AVX2 implementations of quantization functions.
 */

#include "strategies/avx2/quant-avx2.h"

#if COMPILE_INTEL_AVX2 && defined X86_64
#include <immintrin.h>
#include <stdlib.h>

#include "avx2_common_functions.h"
#include "cu.h"
#include "encoder.h"
#include "encoderstate.h"
#include "kvazaar.h"
#include "rdo.h"
#include "scalinglist.h"
#include "strategies/generic/quant-generic.h"
#include "strategies/strategies-quant.h"
#include "strategyselector.h"
#include "tables.h"
#include "transform.h"
#include "fast_coeff_cost.h"

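// Horizontal sum of eight 32-bit lanes: add the two 128-bit halves together,
// then fold the result twice with 32-bit shuffles until the total ends up in
// the lowest lane.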
static INLINE int32_t hsum32_8x32i(__m256i src)
{
  __m128i a = _mm256_extracti128_si256(src, 0);
  __m128i b = _mm256_extracti128_si256(src, 1);

  a = _mm_add_epi32(a, b);
  b = _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 1, 2, 3));

  a = _mm_add_epi32(a, b);
  b = _mm_shuffle_epi32(a, _MM_SHUFFLE(2, 3, 0, 1));

  a = _mm_add_epi32(a, b);
  return _mm_cvtsi128_si32(a);
}

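// Horizontal sum of sixteen 16-bit lanes: sign extend both 128-bit halves to
// 32 bits, add them and reduce with hsum32_8x32i.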
static INLINE int32_t hsum32_16x16i(__m256i src)
{
  __m128i a = _mm256_extracti128_si256(src, 0);
  __m128i b = _mm256_extracti128_si256(src, 1);
  __m256i c = _mm256_cvtepi16_epi32(a);
  __m256i d = _mm256_cvtepi16_epi32(b);

  c = _mm256_add_epi32(c, d);
  return hsum32_8x32i(c);
}

// Rearranges a 16x32b double vector into a format suitable for a stable SIMD
// max algorithm:
// (abcd|efgh) (ijkl|mnop) => (aceg|ikmo) (bdfh|jlnp)
static INLINE void rearrange_512(__m256i *hi, __m256i *lo)
{
  const __m256i perm8x32mask = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);

  __m256i tmphi = _mm256_permutevar8x32_epi32(*hi, perm8x32mask);
  __m256i tmplo = _mm256_permutevar8x32_epi32(*lo, perm8x32mask);

  *hi = _mm256_permute2x128_si256(tmplo, tmphi, 0x31);
  *lo = _mm256_permute2x128_si256(tmplo, tmphi, 0x20);
}

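// Pick the cheapest of the 16 alternatives. costs_hi and costs_lo hold the
// 32-bit costs, ns the coefficient indices within the group and changes the
// corresponding adjustments; the index and change of the cheapest alternative
// are written to min_pos and final_change. Ties go to the highest index (see
// the rearrange step below).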
static INLINE void get_cheapest_alternative(__m256i costs_hi, __m256i costs_lo,
    __m256i ns, __m256i changes,
    int16_t *final_change, int32_t *min_pos)
{
  // Interleave ns and changes into 32-bit variables and into two 256-bit wide
  // vecs, to have the same data layout as in costs. Zero extend to 32b width,
  // shift changes 16 bits to the left, and store them into the same vectors.
  __m256i tmp1hi = _mm256_unpackhi_epi16(ns, changes);
  __m256i tmp1lo = _mm256_unpacklo_epi16(ns, changes);

  __m256i pl1hi = _mm256_permute2x128_si256(tmp1lo, tmp1hi, 0x31);
  __m256i pl1lo = _mm256_permute2x128_si256(tmp1lo, tmp1hi, 0x20);

  // Reorder to ensure a stable result (if multiple alternatives tie for
  // cheapest, the rightmost one, i.e. the one with the highest index, is wanted)
  rearrange_512(&costs_hi, &costs_lo);
  rearrange_512(&pl1hi, &pl1lo);

  // 0: pick hi, 1: pick lo (equality evaluates as 0)
  __m256i cmpmask1 = _mm256_cmpgt_epi32(costs_hi, costs_lo);
  __m256i cost1    = _mm256_blendv_epi8(costs_hi, costs_lo, cmpmask1);
  __m256i pl1_1    = _mm256_blendv_epi8(pl1hi,    pl1lo,    cmpmask1);

  __m256i cost2    = _mm256_shuffle_epi32(cost1, _MM_SHUFFLE(2, 3, 0, 1));
  __m256i pl1_2    = _mm256_shuffle_epi32(pl1_1, _MM_SHUFFLE(2, 3, 0, 1));

  __m256i cmpmask2 = _mm256_cmpgt_epi32(cost2, cost1);
  __m256i cost3    = _mm256_blendv_epi8(cost2, cost1, cmpmask2);
  __m256i pl1_3    = _mm256_blendv_epi8(pl1_2, pl1_1, cmpmask2);

  __m256i cost4    = _mm256_shuffle_epi32(cost3, _MM_SHUFFLE(1, 0, 3, 2));
  __m256i pl1_4    = _mm256_shuffle_epi32(pl1_3, _MM_SHUFFLE(1, 0, 3, 2));

  __m256i cmpmask3 = _mm256_cmpgt_epi32(cost4, cost3);
  __m256i cost5    = _mm256_blendv_epi8(cost4, cost3, cmpmask3);
  __m256i pl1_5    = _mm256_blendv_epi8(pl1_4, pl1_3, cmpmask3);

  __m256i cost6    = _mm256_permute4x64_epi64(cost5, _MM_SHUFFLE(1, 0, 3, 2));
  __m256i pl1_6    = _mm256_permute4x64_epi64(pl1_5, _MM_SHUFFLE(1, 0, 3, 2));

  __m256i cmpmask4 = _mm256_cmpgt_epi32(cost6, cost5);
  __m256i pl1_7    = _mm256_blendv_epi8(pl1_6, pl1_5, cmpmask4);

  __m128i res1_128 = _mm256_castsi256_si128(pl1_7);
  uint32_t tmp1 = (uint32_t)_mm_extract_epi32(res1_128, 0);
  uint16_t n = (uint16_t)(tmp1 & 0xffff);
  uint16_t chng = (uint16_t)(tmp1 >> 16);

  *final_change = (int16_t)chng;
  *min_pos = (int32_t)n;
}

static INLINE __m256i concatenate_2x128i(__m128i lo, __m128i hi)
{
  __m256i v = _mm256_castsi128_si256(lo);
  return _mm256_inserti128_si256(v, hi, 1);
}

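// Read 16 32-bit scaling-list entries in scan order, starting at scan[subpos].
// Four rows of four values are loaded and then permuted into scan order with
// masks selected by scan_mode; the two result vectors are stored to
// v_quant_coeffs[0] and v_quant_coeffs[1].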
static INLINE void scanord_read_vector_32(const int32_t  *__restrict quant_coeff,
                                          const uint32_t *__restrict scan,
                                          int8_t scan_mode,
                                          int32_t subpos,
                                          int32_t width,
                                          __m256i *__restrict v_quant_coeffs)
{
  const size_t row_offsets[4] = {
    scan[subpos] + width * 0,
    scan[subpos] + width * 1,
    scan[subpos] + width * 2,
    scan[subpos] + width * 3,
  };

  const __m256i shufmasks[3] = {
    _mm256_setr_epi32(5, 2, 6, 0, 3, 7, 4, 1),
    _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7),
    _mm256_setr_epi32(2, 3, 0, 1, 6, 7, 4, 5),
  };

  const __m256i blend_masks[3] = {
    _mm256_setr_epi32( 0,  0,  0, -1,  0,  0, -1, -1),
    _mm256_setr_epi32( 0,  0,  0,  0,  0,  0,  0,  0),
    _mm256_setr_epi32( 0,  0, -1, -1,  0,  0, -1, -1),
  };

  const __m256i rearr_masks_lo[3] = {
    _mm256_setr_epi32(0, 4, 1, 3, 5, 2, 6, 7),
    _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7),
    _mm256_setr_epi32(0, 4, 2, 6, 1, 5, 3, 7),
  };

  const __m256i rearr_masks_hi[3] = {
    _mm256_setr_epi32(6, 3, 0, 1, 7, 2, 4, 5),
    _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7),
    _mm256_setr_epi32(2, 6, 0, 4, 3, 7, 1, 5),
  };

  __m128i coeffs[4] = {
    _mm_loadu_si128((__m128i *)(quant_coeff + row_offsets[0])),
    _mm_loadu_si128((__m128i *)(quant_coeff + row_offsets[1])),
    _mm_loadu_si128((__m128i *)(quant_coeff + row_offsets[2])),
    _mm_loadu_si128((__m128i *)(quant_coeff + row_offsets[3])),
  };

  __m256i coeffs_upper = concatenate_2x128i(coeffs[0], coeffs[1]);
  __m256i coeffs_lower = concatenate_2x128i(coeffs[2], coeffs[3]);

  __m256i lower_shuffled = _mm256_permutevar8x32_epi32(coeffs_lower, shufmasks[scan_mode]);

  __m256i upper_blended  = _mm256_blendv_epi8(coeffs_upper,   lower_shuffled, blend_masks[scan_mode]);
  __m256i lower_blended  = _mm256_blendv_epi8(lower_shuffled, coeffs_upper,   blend_masks[scan_mode]);

  __m256i result_lo      = _mm256_permutevar8x32_epi32(upper_blended, rearr_masks_lo[scan_mode]);
  __m256i result_hi      = _mm256_permutevar8x32_epi32(lower_blended, rearr_masks_hi[scan_mode]);

  v_quant_coeffs[0] = result_lo;
  v_quant_coeffs[1] = result_hi;
}

#define VEC_WIDTH 16
#define SCAN_SET_SIZE 16
#define LOG2_SCAN_SET_SIZE 4

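// Sign bit hiding for one 4x4 coefficient group. If the distance between the
// first and last nonzero coefficient is at least 4 and the parity of the sum
// of quantized levels does not match the sign of the first nonzero
// coefficient, the cheapest coefficient in the group is nudged by +/-1 so that
// the decoder can infer the hidden sign from the parity. Returns the updated
// last_cg state.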
static INLINE int32_t hide_block_sign(__m256i coefs, __m256i q_coefs, __m256i deltas_h, __m256i deltas_l, coeff_t * __restrict q_coef, const uint32_t * __restrict scan, int32_t subpos, int32_t last_cg)
{
  assert(SCAN_SET_SIZE == 16);

  int32_t first_nz_pos_in_cg, last_nz_pos_in_cg;
  int32_t abssum = 0;

  // Find first and last nonzero coeffs
  get_first_last_nz_int16(q_coefs, &first_nz_pos_in_cg, &last_nz_pos_in_cg);

  // Sum all kvz_quant coeffs between first and last
  abssum = hsum32_16x16i(q_coefs);

  if (last_nz_pos_in_cg >= 0 && last_cg == -1) {
    last_cg = 1;
  }

  if (last_nz_pos_in_cg - first_nz_pos_in_cg >= 4) {

    uint32_t q_coef_signbits = _mm256_movemask_epi8(q_coefs);
    int32_t signbit = (q_coef_signbits >> (2 * first_nz_pos_in_cg + 1)) & 0x1;

    if (signbit != (abssum & 0x1)) { // compare signbit with sum_parity
      int32_t min_pos;
      int16_t final_change;
      int16_t cheapest_q;

      const int32_t mask_max = (last_cg == 1) ? last_nz_pos_in_cg : SCAN_SET_SIZE - 1;

      const __m256i zero = _mm256_setzero_si256();
      const __m256i ones = _mm256_set1_epi16(1);
      const __m256i maxiters = _mm256_set1_epi16(mask_max);
      const __m256i ff = _mm256_set1_epi8(0xff);

      const __m256i fnpics = _mm256_set1_epi16((int16_t)first_nz_pos_in_cg);
      const __m256i ns = _mm256_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);

      __m256i block_signbit = _mm256_set1_epi16(((int16_t)signbit) * -1);
      __m256i coef_signbits = _mm256_cmpgt_epi16(zero, coefs);
      __m256i signbits_equal_block = _mm256_cmpeq_epi16(coef_signbits, block_signbit);

      __m256i q_coefs_zero = _mm256_cmpeq_epi16(q_coefs, zero);

      __m256i dus_packed = _mm256_packs_epi32(deltas_l, deltas_h);
      __m256i dus_ordered = _mm256_permute4x64_epi64(dus_packed, _MM_SHUFFLE(3, 1, 2, 0));
      __m256i dus_positive = _mm256_cmpgt_epi16(dus_ordered, zero);

      __m256i q_coef_abss = _mm256_abs_epi16(q_coefs);
      __m256i q_coefs_plusminus_one = _mm256_cmpeq_epi16(q_coef_abss, ones);

      __m256i eq_fnpics = _mm256_cmpeq_epi16(fnpics, ns);
      __m256i lt_fnpics = _mm256_cmpgt_epi16(fnpics, ns);

      __m256i maxcost_subcond1s = _mm256_and_si256(eq_fnpics, q_coefs_plusminus_one);
      __m256i maxcost_subcond2s = _mm256_andnot_si256(signbits_equal_block, lt_fnpics);
      __m256i elsecond1s_inv = _mm256_or_si256(dus_positive, maxcost_subcond1s);
      __m256i elsecond1s = _mm256_andnot_si256(elsecond1s_inv, ff);

      __m256i outside_maxiters = _mm256_cmpgt_epi16(ns, maxiters);

      __m256i negdelta_cond1s = _mm256_andnot_si256(q_coefs_zero, dus_positive);
      __m256i negdelta_cond2s = _mm256_andnot_si256(maxcost_subcond2s, q_coefs_zero);
      __m256i negdelta_mask16s_part1 = _mm256_or_si256(negdelta_cond1s, negdelta_cond2s);
      __m256i negdelta_mask16s = _mm256_andnot_si256(outside_maxiters, negdelta_mask16s_part1);

      __m256i posdelta_mask16s_part1 = _mm256_andnot_si256(q_coefs_zero, elsecond1s);
      __m256i posdelta_mask16s = _mm256_andnot_si256(outside_maxiters, posdelta_mask16s_part1);

      __m256i maxcost_cond1_parts = _mm256_andnot_si256(dus_positive, maxcost_subcond1s);
      __m256i maxcost_cond1s = _mm256_andnot_si256(q_coefs_zero, maxcost_cond1_parts);
      __m256i maxcost_cond2s = _mm256_and_si256(q_coefs_zero, maxcost_subcond2s);
      __m256i maxcost_mask16s_parts = _mm256_or_si256(maxcost_cond1s, maxcost_cond2s);
      __m256i maxcost_mask16s = _mm256_or_si256(maxcost_mask16s_parts, outside_maxiters);

      __m128i tmp_l, tmp_h;
      tmp_l = _mm256_extracti128_si256(negdelta_mask16s, 0);
      tmp_h = _mm256_extracti128_si256(negdelta_mask16s, 1);
      __m256i negdelta_mask32s_l = _mm256_cvtepi16_epi32(tmp_l);
      __m256i negdelta_mask32s_h = _mm256_cvtepi16_epi32(tmp_h);

      tmp_l = _mm256_extracti128_si256(posdelta_mask16s, 0);
      tmp_h = _mm256_extracti128_si256(posdelta_mask16s, 1);
      __m256i posdelta_mask32s_l = _mm256_cvtepi16_epi32(tmp_l);
      __m256i posdelta_mask32s_h = _mm256_cvtepi16_epi32(tmp_h);

      tmp_l = _mm256_extracti128_si256(maxcost_mask16s, 0);
      tmp_h = _mm256_extracti128_si256(maxcost_mask16s, 1);
      __m256i maxcost_mask32s_l = _mm256_cvtepi16_epi32(tmp_l);
      __m256i maxcost_mask32s_h = _mm256_cvtepi16_epi32(tmp_h);

      // Output value generation
      // cur_change_max: zero
      // cur_change_negdelta: ff
      // cur_change_posdelta: ones
      __m256i costs_negdelta_h = _mm256_sub_epi32(zero, deltas_h);
      __m256i costs_negdelta_l = _mm256_sub_epi32(zero, deltas_l);
      // costs_posdelta_l and _h: deltas_l and _h
      __m256i costs_max_lh = _mm256_set1_epi32(0x7fffffff);

      __m256i change_neg = _mm256_and_si256(negdelta_mask16s, ones);
      __m256i change_pos = _mm256_and_si256(posdelta_mask16s, ff);
      __m256i change_max = _mm256_and_si256(maxcost_mask16s, zero);

      __m256i cost_neg_l = _mm256_and_si256(negdelta_mask32s_l, costs_negdelta_l);
      __m256i cost_neg_h = _mm256_and_si256(negdelta_mask32s_h, costs_negdelta_h);
      __m256i cost_pos_l = _mm256_and_si256(posdelta_mask32s_l, deltas_l);
      __m256i cost_pos_h = _mm256_and_si256(posdelta_mask32s_h, deltas_h);
      __m256i cost_max_l = _mm256_and_si256(maxcost_mask32s_l, costs_max_lh);
      __m256i cost_max_h = _mm256_and_si256(maxcost_mask32s_h, costs_max_lh);

      __m256i changes = _mm256_or_si256(change_neg, _mm256_or_si256(change_pos, change_max));
      __m256i costs_l = _mm256_or_si256(cost_neg_l, _mm256_or_si256(cost_pos_l, cost_max_l));
      __m256i costs_h = _mm256_or_si256(cost_neg_h, _mm256_or_si256(cost_pos_h, cost_max_h));

      get_cheapest_alternative(costs_h, costs_l, ns, changes, &final_change, &min_pos);
      const int32_t best_id = scan[min_pos + subpos];

      cheapest_q = q_coef[best_id];
      if (cheapest_q == 32767 || cheapest_q == -32768)
        final_change = -1;

      uint32_t coef_signs = _mm256_movemask_epi8(coef_signbits);
      uint32_t cheapest_coef_sign_mask = (uint32_t)(1 << (2 * min_pos));

      if (!(coef_signs & cheapest_coef_sign_mask))
        cheapest_q += final_change;
      else
        cheapest_q -= final_change;

      q_coef[best_id] = cheapest_q;
    } // Hide
  }
  if (last_cg == 1)
    last_cg = 0;

  return last_cg;
}


/**
 * \brief Quantize transformed coefficients.
 *
 */
void kvz_quant_avx2(const encoder_state_t * const state, const coeff_t * __restrict coef, coeff_t * __restrict q_coef, int32_t width,
  int32_t height, int8_t type, int8_t scan_idx, int8_t block_type)
{
  const encoder_control_t * const encoder = state->encoder_control;
  const uint32_t log2_block_size = kvz_g_convert_to_bit[width] + 2;
  const uint32_t * const  __restrict scan = kvz_g_sig_last_scan[scan_idx][log2_block_size - 1];

  int32_t qp_scaled = kvz_get_scaled_qp(type, state->qp, (encoder->bitdepth - 8) * 6);
  const uint32_t log2_tr_size = kvz_g_convert_to_bit[width] + 2;
  const int32_t scalinglist_type = (block_type == CU_INTRA ? 0 : 3) + (int8_t)("\0\3\1\2"[type]);
  const int32_t *quant_coeff = encoder->scaling_list.quant_coeff[log2_tr_size - 2][scalinglist_type][qp_scaled % 6];
  const int32_t transform_shift = MAX_TR_DYNAMIC_RANGE - encoder->bitdepth - log2_tr_size; //!< Represents scaling through forward transform
  const int32_t q_bits = QUANT_SHIFT + qp_scaled / 6 + transform_shift;
  const int32_t add = ((state->frame->slicetype == KVZ_SLICE_I) ? 171 : 85) << (q_bits - 9);
  const int32_t q_bits8 = q_bits - 8;

  uint32_t ac_sum = 0;
  int32_t last_cg = -1;

  __m256i v_ac_sum = _mm256_setzero_si256();

  // Loading once is enough if scaling lists are not used
  __m256i low_b = _mm256_setzero_si256(), high_b = _mm256_setzero_si256();
  if (!(state->encoder_control->scaling_list.enable)) {
    low_b  = _mm256_set1_epi32(quant_coeff[0]);
    high_b = low_b;
  }

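  // Each iteration quantizes 16 coefficients:
  //   level = sign(coef) * ((abs(coef) * quant_scale + add) >> q_bits)
  // Absolute values are widened to 32 bits for the multiply and the results
  // are packed back to 16 bits with saturation. The quantized levels are also
  // accumulated into v_ac_sum for the sign-hiding decision below.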
  for (int32_t n = 0; n < width * height; n += VEC_WIDTH) {

    __m256i v_level = _mm256_loadu_si256((__m256i *)(coef + n));
    __m256i v_sign = _mm256_cmpgt_epi16(_mm256_setzero_si256(), v_level);
    v_sign = _mm256_or_si256(v_sign, _mm256_set1_epi16(1));

    if (state->encoder_control->scaling_list.enable) {
      __m256i v_quant_coeff_lo = _mm256_loadu_si256(((__m256i *)(quant_coeff + n)) + 0);
      __m256i v_quant_coeff_hi = _mm256_loadu_si256(((__m256i *)(quant_coeff + n)) + 1);

      low_b  = _mm256_permute2x128_si256(v_quant_coeff_lo,
                                         v_quant_coeff_hi,
                                         0x20);

      high_b = _mm256_permute2x128_si256(v_quant_coeff_lo,
                                         v_quant_coeff_hi,
                                         0x31);
    }

// TODO: do we need to have this?
// #define CHECK_QUANT_COEFFS
#ifdef CHECK_QUANT_COEFFS
      __m256i abs_vq_lo = _mm256_abs_epi32(v_quant_coeff_lo);
      __m256i abs_vq_hi = _mm256_abs_epi32(v_quant_coeff_hi);

      __m256i vq_over_16b_lo = _mm256_cmpgt_epi32(abs_vq_lo, _mm256_set1_epi32(0x7fff));
      __m256i vq_over_16b_hi = _mm256_cmpgt_epi32(abs_vq_hi, _mm256_set1_epi32(0x7fff));

      uint32_t over_16b_mask_lo = _mm256_movemask_epi8(vq_over_16b_lo);
      uint32_t over_16b_mask_hi = _mm256_movemask_epi8(vq_over_16b_hi);

      assert(!(over_16b_mask_lo || over_16b_mask_hi));
#endif

    v_level = _mm256_abs_epi16(v_level);
    __m256i low_a  = _mm256_unpacklo_epi16(v_level, _mm256_setzero_si256());
    __m256i high_a = _mm256_unpackhi_epi16(v_level, _mm256_setzero_si256());

    __m256i v_level32_a = _mm256_mullo_epi32(low_a,  low_b);
    __m256i v_level32_b = _mm256_mullo_epi32(high_a, high_b);

    v_level32_a = _mm256_add_epi32(v_level32_a, _mm256_set1_epi32(add));
    v_level32_b = _mm256_add_epi32(v_level32_b, _mm256_set1_epi32(add));

    v_level32_a = _mm256_srai_epi32(v_level32_a, q_bits);
    v_level32_b = _mm256_srai_epi32(v_level32_b, q_bits);

    v_level = _mm256_packs_epi32(v_level32_a, v_level32_b);
    v_level = _mm256_sign_epi16(v_level, v_sign);

    _mm256_storeu_si256((__m256i *)(q_coef + n), v_level);

    v_ac_sum = _mm256_add_epi32(v_ac_sum, v_level32_a);
    v_ac_sum = _mm256_add_epi32(v_ac_sum, v_level32_b);
  }

  __m128i temp = _mm_add_epi32(_mm256_castsi256_si128(v_ac_sum), _mm256_extracti128_si256(v_ac_sum, 1));
  temp = _mm_add_epi32(temp, _mm_shuffle_epi32(temp, _MM_SHUFFLE(1, 0, 3, 2)));
  temp = _mm_add_epi32(temp, _mm_shuffle_epi32(temp, _MM_SHUFFLE(0, 1, 0, 1)));
  ac_sum += _mm_cvtsi128_si32(temp);

  if (!encoder->cfg.signhide_enable || ac_sum < 2)
    return;

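  // Sign hiding: walk the coefficients in reverse scan order, one 4x4
  // coefficient group (16 values) at a time, and let hide_block_sign decide
  // whether one coefficient in the group needs a +/-1 adjustment.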
  assert(VEC_WIDTH == SCAN_SET_SIZE);
  for (int32_t subpos = (width * height - 1) & (~(VEC_WIDTH - 1)); subpos >= 0; subpos -= VEC_WIDTH) {
    const int16_t *coeffs[2] = {coef, q_coef};
    __m256i result_coeffs[2];
    __m256i v_quant_coeffs[2];

    __m256i v_coef, q_coefs;
    __m256i v_quant_coeff_lo, v_quant_coeff_hi;

    scanord_read_vector(coeffs, scan, scan_idx, subpos, width, result_coeffs, 2);

    v_coef  = result_coeffs[0];
    q_coefs = result_coeffs[1];

    if (state->encoder_control->scaling_list.enable) {
      scanord_read_vector_32(quant_coeff, scan, scan_idx, subpos, width, v_quant_coeffs);

      v_quant_coeff_lo = v_quant_coeffs[0];
      v_quant_coeff_hi = v_quant_coeffs[1];

      low_b  = _mm256_permute2x128_si256(v_quant_coeff_lo,
                                         v_quant_coeff_hi,
                                         0x20);

      high_b = _mm256_permute2x128_si256(v_quant_coeff_lo,
                                         v_quant_coeff_hi,
                                         0x31);
    }

    __m256i v_level = _mm256_abs_epi16(v_coef);
    __m256i low_a  = _mm256_unpacklo_epi16(v_level, _mm256_setzero_si256());
    __m256i high_a = _mm256_unpackhi_epi16(v_level, _mm256_setzero_si256());

    __m256i v_quant_coeff_a = _mm256_or_si256(low_b,  _mm256_setzero_si256());
    __m256i v_quant_coeff_b = _mm256_or_si256(high_b, _mm256_setzero_si256());

    __m256i v_level32_a = _mm256_mullo_epi32(low_a,  low_b);
    __m256i v_level32_b = _mm256_mullo_epi32(high_a, high_b);

    v_level32_a = _mm256_add_epi32(v_level32_a, _mm256_set1_epi32(add));
    v_level32_b = _mm256_add_epi32(v_level32_b, _mm256_set1_epi32(add));

    v_level32_a = _mm256_srai_epi32(v_level32_a, q_bits);
    v_level32_b = _mm256_srai_epi32(v_level32_b, q_bits);

    v_level = _mm256_packs_epi32(v_level32_a, v_level32_b);

    __m256i v_coef_a = _mm256_unpacklo_epi16(_mm256_abs_epi16(v_coef), _mm256_set1_epi16(0));
    __m256i v_coef_b = _mm256_unpackhi_epi16(_mm256_abs_epi16(v_coef), _mm256_set1_epi16(0));

    v_coef_a = _mm256_mullo_epi32(v_coef_a, v_quant_coeff_a);
    v_coef_b = _mm256_mullo_epi32(v_coef_b, v_quant_coeff_b);

    v_coef_a = _mm256_sub_epi32(v_coef_a, _mm256_slli_epi32(_mm256_unpacklo_epi16(v_level, _mm256_set1_epi16(0)), q_bits) );
    v_coef_b = _mm256_sub_epi32(v_coef_b, _mm256_slli_epi32(_mm256_unpackhi_epi16(v_level, _mm256_set1_epi16(0)), q_bits) );
    v_coef_a = _mm256_srai_epi32(v_coef_a, q_bits8);
    v_coef_b = _mm256_srai_epi32(v_coef_b, q_bits8);

    __m256i deltas_h = _mm256_permute2x128_si256(v_coef_a, v_coef_b, 0x31);
    __m256i deltas_l = _mm256_permute2x128_si256(v_coef_a, v_coef_b, 0x20);

    last_cg = hide_block_sign(v_coef, q_coefs, deltas_h, deltas_l, q_coef, scan, subpos, last_cg);
  }

#undef VEC_WIDTH
#undef SCAN_SET_SIZE
#undef LOG2_SCAN_SET_SIZE
}

#if KVZ_BIT_DEPTH == 8

static INLINE __m128i get_residual_4x1_avx2(const uint8_t *a_in, const uint8_t *b_in){
  __m128i a = _mm_cvtsi32_si128(*(int32_t*)a_in);
  __m128i b = _mm_cvtsi32_si128(*(int32_t*)b_in);
  __m128i diff = _mm_sub_epi16(_mm_cvtepu8_epi16(a), _mm_cvtepu8_epi16(b) );
  return diff;
}

static INLINE __m128i get_residual_8x1_avx2(const uint8_t *a_in, const uint8_t *b_in){
  __m128i a = _mm_cvtsi64_si128(*(int64_t*)a_in);
  __m128i b = _mm_cvtsi64_si128(*(int64_t*)b_in);
  __m128i diff = _mm_sub_epi16(_mm_cvtepu8_epi16(a), _mm_cvtepu8_epi16(b) );
  return diff;
}

static INLINE int32_t get_quantized_recon_4x1_avx2(int16_t *residual, const uint8_t *pred_in){
  __m128i res = _mm_loadl_epi64((__m128i*)residual);
  __m128i pred = _mm_cvtsi32_si128(*(int32_t*)pred_in);
  __m128i rec = _mm_add_epi16(res, _mm_cvtepu8_epi16(pred));
  return _mm_cvtsi128_si32(_mm_packus_epi16(rec, rec));
}

static INLINE int64_t get_quantized_recon_8x1_avx2(int16_t *residual, const uint8_t *pred_in){
  __m128i res = _mm_loadu_si128((__m128i*)residual);
  __m128i pred = _mm_cvtsi64_si128(*(int64_t*)pred_in);
  __m128i rec = _mm_add_epi16(res, _mm_cvtepu8_epi16(pred));
  return _mm_cvtsi128_si64(_mm_packus_epi16(rec, rec));
}

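// Compute ref_in - pred_in into the residual array. The 4x4 and 8x8 cases are
// fully unrolled; larger widths are processed 16 pixels per inner iteration.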
static void get_residual_avx2(const uint8_t *ref_in, const uint8_t *pred_in, int16_t *residual, int width, int in_stride){

  __m128i diff = _mm_setzero_si128();
  switch (width) {
    case 4:
      diff = get_residual_4x1_avx2(ref_in + 0 * in_stride, pred_in + 0 * in_stride);
      _mm_storel_epi64((__m128i*)&(residual[0]), diff);
      diff = get_residual_4x1_avx2(ref_in + 1 * in_stride, pred_in + 1 * in_stride);
      _mm_storel_epi64((__m128i*)&(residual[4]), diff);
      diff = get_residual_4x1_avx2(ref_in + 2 * in_stride, pred_in + 2 * in_stride);
      _mm_storel_epi64((__m128i*)&(residual[8]), diff);
      diff = get_residual_4x1_avx2(ref_in + 3 * in_stride, pred_in + 3 * in_stride);
      _mm_storel_epi64((__m128i*)&(residual[12]), diff);
    break;
    case 8:
      diff = get_residual_8x1_avx2(&ref_in[0 * in_stride], &pred_in[0 * in_stride]);
      _mm_storeu_si128((__m128i*)&(residual[0]), diff);
      diff = get_residual_8x1_avx2(&ref_in[1 * in_stride], &pred_in[1 * in_stride]);
      _mm_storeu_si128((__m128i*)&(residual[8]), diff);
      diff = get_residual_8x1_avx2(&ref_in[2 * in_stride], &pred_in[2 * in_stride]);
      _mm_storeu_si128((__m128i*)&(residual[16]), diff);
      diff = get_residual_8x1_avx2(&ref_in[3 * in_stride], &pred_in[3 * in_stride]);
      _mm_storeu_si128((__m128i*)&(residual[24]), diff);
      diff = get_residual_8x1_avx2(&ref_in[4 * in_stride], &pred_in[4 * in_stride]);
      _mm_storeu_si128((__m128i*)&(residual[32]), diff);
      diff = get_residual_8x1_avx2(&ref_in[5 * in_stride], &pred_in[5 * in_stride]);
      _mm_storeu_si128((__m128i*)&(residual[40]), diff);
      diff = get_residual_8x1_avx2(&ref_in[6 * in_stride], &pred_in[6 * in_stride]);
      _mm_storeu_si128((__m128i*)&(residual[48]), diff);
      diff = get_residual_8x1_avx2(&ref_in[7 * in_stride], &pred_in[7 * in_stride]);
      _mm_storeu_si128((__m128i*)&(residual[56]), diff);
    break;
    default:
      for (int y = 0; y < width; ++y) {
        for (int x = 0; x < width; x+=16) {
          diff = get_residual_8x1_avx2(&ref_in[x + y * in_stride], &pred_in[x + y * in_stride]);
          _mm_storeu_si128((__m128i*)&residual[x + y * width], diff);
          diff = get_residual_8x1_avx2(&ref_in[(x+8) + y * in_stride], &pred_in[(x+8) + y * in_stride]);
          _mm_storeu_si128((__m128i*)&residual[(x+8) + y * width], diff);
        }
      }
    break;
  }
}

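// Add the residual back to the prediction and clamp to 8-bit pixels (the
// packus in the helpers does the clamping). Same unrolling structure as
// get_residual_avx2.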
static void get_quantized_recon_avx2(int16_t *residual, const uint8_t *pred_in, int in_stride, uint8_t *rec_out, int out_stride, int width){

  switch (width) {
    case 4:
      *(int32_t*)&(rec_out[0 * out_stride]) = get_quantized_recon_4x1_avx2(residual + 0 * width, pred_in + 0 * in_stride);
      *(int32_t*)&(rec_out[1 * out_stride]) = get_quantized_recon_4x1_avx2(residual + 1 * width, pred_in + 1 * in_stride);
      *(int32_t*)&(rec_out[2 * out_stride]) = get_quantized_recon_4x1_avx2(residual + 2 * width, pred_in + 2 * in_stride);
      *(int32_t*)&(rec_out[3 * out_stride]) = get_quantized_recon_4x1_avx2(residual + 3 * width, pred_in + 3 * in_stride);
      break;
    case 8:
      *(int64_t*)&(rec_out[0 * out_stride]) = get_quantized_recon_8x1_avx2(residual + 0 * width, pred_in + 0 * in_stride);
      *(int64_t*)&(rec_out[1 * out_stride]) = get_quantized_recon_8x1_avx2(residual + 1 * width, pred_in + 1 * in_stride);
      *(int64_t*)&(rec_out[2 * out_stride]) = get_quantized_recon_8x1_avx2(residual + 2 * width, pred_in + 2 * in_stride);
      *(int64_t*)&(rec_out[3 * out_stride]) = get_quantized_recon_8x1_avx2(residual + 3 * width, pred_in + 3 * in_stride);
      *(int64_t*)&(rec_out[4 * out_stride]) = get_quantized_recon_8x1_avx2(residual + 4 * width, pred_in + 4 * in_stride);
      *(int64_t*)&(rec_out[5 * out_stride]) = get_quantized_recon_8x1_avx2(residual + 5 * width, pred_in + 5 * in_stride);
      *(int64_t*)&(rec_out[6 * out_stride]) = get_quantized_recon_8x1_avx2(residual + 6 * width, pred_in + 6 * in_stride);
      *(int64_t*)&(rec_out[7 * out_stride]) = get_quantized_recon_8x1_avx2(residual + 7 * width, pred_in + 7 * in_stride);
      break;
    default:
      for (int y = 0; y < width; ++y) {
        for (int x = 0; x < width; x += 16) {
          *(int64_t*)&(rec_out[x + y * out_stride]) = get_quantized_recon_8x1_avx2(residual + x + y * width, pred_in + x + y  * in_stride);
          *(int64_t*)&(rec_out[(x + 8) + y * out_stride]) = get_quantized_recon_8x1_avx2(residual + (x + 8) + y * width, pred_in + (x + 8) + y  * in_stride);
        }
      }
      break;
  }
}

/**
* \brief Quantize residual and get both the reconstruction and coeffs.
*
* \param width  Transform width.
* \param color  Color.
* \param scan_order  Coefficient scan order.
* \param use_trskip  Whether transform skip is used.
* \param stride  Stride for ref_in, pred_in and rec_out.
* \param ref_in  Reference pixels.
* \param pred_in  Predicted pixels.
* \param rec_out  Reconstructed pixels.
* \param coeff_out  Coefficients used for reconstruction of rec_out.
* \param early_skip  If this is used for early skip, bypass IT and IQ.
*
* \returns  Whether coeff_out contains any non-zero coefficients.
*/
int kvz_quantize_residual_avx2(encoder_state_t *const state,
  const cu_info_t *const cur_cu, const int width, const color_t color,
  const coeff_scan_order_t scan_order, const int use_trskip,
  const int in_stride, const int out_stride,
  const uint8_t *const ref_in, const uint8_t *const pred_in,
  uint8_t *rec_out, coeff_t *coeff_out,
  bool early_skip)
{
  // Temporary arrays to pass data to and from kvz_quant and transform functions.
  ALIGNED(64) int16_t residual[TR_MAX_WIDTH * TR_MAX_WIDTH];
  ALIGNED(64) coeff_t coeff[TR_MAX_WIDTH * TR_MAX_WIDTH];

  int has_coeffs = 0;

  assert(width <= TR_MAX_WIDTH);
  assert(width >= TR_MIN_WIDTH);

  // Get residual. (ref_in - pred_in -> residual)
  get_residual_avx2(ref_in, pred_in, residual, width, in_stride);

  // Transform residual. (residual -> coeff)
  if (use_trskip) {
    kvz_transformskip(state->encoder_control, residual, coeff, width);
  }
  else {
    kvz_transform2d(state->encoder_control, residual, coeff, width, color, cur_cu->type);
  }

  // Quantize coeffs. (coeff -> coeff_out)
  if (state->encoder_control->cfg.rdoq_enable &&
      (width > 4 || !state->encoder_control->cfg.rdoq_skip))
  {
    int8_t tr_depth = cur_cu->tr_depth - cur_cu->depth;
    tr_depth += (cur_cu->part_size == SIZE_NxN ? 1 : 0);
    kvz_rdoq(state, coeff, coeff_out, width, width, (color == COLOR_Y ? 0 : 2),
      scan_order, cur_cu->type, tr_depth);
  } else {
    kvz_quant(state, coeff, coeff_out, width, width, (color == COLOR_Y ? 0 : 2),
      scan_order, cur_cu->type);
  }

  // Check if there are any non-zero coefficients.
  for (int i = 0; i < width * width; i += 8) {
    __m128i v_quant_coeff = _mm_loadu_si128((__m128i*)&(coeff_out[i]));
    has_coeffs = !_mm_testz_si128(_mm_set1_epi8(0xFF), v_quant_coeff);
    if(has_coeffs) break;
  }

  // Do the inverse quantization and transformation and the reconstruction to
  // rec_out.
  if (has_coeffs && !early_skip) {

    // Get quantized residual. (coeff_out -> coeff -> residual)
    kvz_dequant(state, coeff_out, coeff, width, width, (color == COLOR_Y ? 0 : (color == COLOR_U ? 2 : 3)), cur_cu->type);
    if (use_trskip) {
      kvz_itransformskip(state->encoder_control, residual, coeff, width);
    }
    else {
      kvz_itransform2d(state->encoder_control, residual, coeff, width, color, cur_cu->type);
    }

    // Get quantized reconstruction. (residual + pred_in -> rec_out)
    get_quantized_recon_avx2(residual, pred_in, in_stride, rec_out, out_stride, width);
  }
  else if (rec_out != pred_in) {
    // With no coeffs and rec_out == pred_in, the copy is skipped because the
    // reconstruction is just the prediction.
    int y, x;

    for (y = 0; y < width; ++y) {
      for (x = 0; x < width; ++x) {
        rec_out[x + y * out_stride] = pred_in[x + y * in_stride];
      }
    }
  }

  return has_coeffs;
}

/**
 * \brief Inverse quantize transformed and quantized coefficients.
 *
 */
void kvz_dequant_avx2(const encoder_state_t * const state, coeff_t *q_coef, coeff_t *coef, int32_t width, int32_t height,int8_t type, int8_t block_type)
{
  const encoder_control_t * const encoder = state->encoder_control;
  int32_t shift,add,coeff_q;
  int32_t n;
  int32_t transform_shift = 15 - encoder->bitdepth - (kvz_g_convert_to_bit[ width ] + 2);

  int32_t qp_scaled = kvz_get_scaled_qp(type, state->qp, (encoder->bitdepth-8)*6);

  shift = 20 - QUANT_SHIFT - transform_shift;

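  // Inverse quantization computes coef[n] = clip((q_coef[n] * scale + add) >> shift).
  // With scaling lists a per-coefficient dequant coefficient and an adjusted
  // shift are used; otherwise a single scale of
  // kvz_g_inv_quant_scales[qp%6] << (qp/6) applies to the whole block.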
  if (encoder->scaling_list.enable)
  {
    uint32_t log2_tr_size = kvz_g_convert_to_bit[ width ] + 2;
    int32_t scalinglist_type = (block_type == CU_INTRA ? 0 : 3) + (int8_t)("\0\3\1\2"[type]);

    const int32_t *dequant_coef = encoder->scaling_list.de_quant_coeff[log2_tr_size-2][scalinglist_type][qp_scaled%6];
    shift += 4;

    if (shift > qp_scaled / 6) {
      add = 1 << (shift - qp_scaled/6 - 1);

      for (n = 0; n < width * height; n++) {
        coeff_q = ((q_coef[n] * dequant_coef[n]) + add ) >> (shift -  qp_scaled/6);
        coef[n] = (coeff_t)CLIP(-32768,32767,coeff_q);
      }
    } else {
      for (n = 0; n < width * height; n++) {
        // Clip to avoid possible overflow in following shift left operation
        coeff_q   = CLIP(-32768, 32767, q_coef[n] * dequant_coef[n]);
        coef[n] = (coeff_t)CLIP(-32768, 32767, coeff_q << (qp_scaled/6 - shift));
      }
    }
  } else {
    int32_t scale = kvz_g_inv_quant_scales[qp_scaled%6] << (qp_scaled/6);
    add = 1 << (shift-1);

    __m256i v_scale = _mm256_set1_epi32(scale);
    __m256i v_add = _mm256_set1_epi32(add);

    for (n = 0; n < width*height; n+=16) {
      __m128i temp0 = _mm_loadu_si128((__m128i*)&(q_coef[n]));
      __m128i temp1 = _mm_loadu_si128((__m128i*)&(q_coef[n + 8]));
      __m256i v_coeff_q_lo = _mm256_cvtepi16_epi32(_mm_unpacklo_epi64(temp0, temp1));
      __m256i v_coeff_q_hi = _mm256_cvtepi16_epi32(_mm_unpackhi_epi64(temp0, temp1));
      v_coeff_q_lo = _mm256_mullo_epi32(v_coeff_q_lo, v_scale);
      v_coeff_q_hi = _mm256_mullo_epi32(v_coeff_q_hi, v_scale);
      v_coeff_q_lo = _mm256_add_epi32(v_coeff_q_lo, v_add);
      v_coeff_q_hi = _mm256_add_epi32(v_coeff_q_hi, v_add);
      v_coeff_q_lo = _mm256_srai_epi32(v_coeff_q_lo, shift);
      v_coeff_q_hi = _mm256_srai_epi32(v_coeff_q_hi, shift);
      v_coeff_q_lo = _mm256_packs_epi32(v_coeff_q_lo, v_coeff_q_hi);
      _mm_storeu_si128((__m128i*)&(coef[n]), _mm256_castsi256_si128(v_coeff_q_lo) );
      _mm_storeu_si128((__m128i*)&(coef[n + 8]), _mm256_extracti128_si256(v_coeff_q_lo, 1) );
    }
  }
}

#endif // KVZ_BIT_DEPTH == 8

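// Sum of absolute values of a coefficient array; length must be a multiple of
// eight. Coefficients are sign extended to 32 bits before taking the absolute
// value so the per-lane accumulators do not overflow.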
static uint32_t coeff_abs_sum_avx2(const coeff_t *coeffs, const size_t length)
{
  assert(length % 8 == 0);

  __m256i total = _mm256_abs_epi32(_mm256_cvtepi16_epi32(_mm_loadu_si128((__m128i*) coeffs)));

  for (int i = 8; i < length; i += 8) {
    __m256i temp = _mm256_abs_epi32(_mm256_cvtepi16_epi32(_mm_loadu_si128((__m128i*) &coeffs[i])));
    total = _mm256_add_epi32(total, temp);
  }

  __m128i result128 = _mm_add_epi32(
    _mm256_castsi256_si128(total),
    _mm256_extractf128_si256(total, 1)
  );

  uint32_t parts[4];
  _mm_storeu_si128((__m128i*) parts, result128);

  return parts[0] + parts[1] + parts[2] + parts[3];
}

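// Approximate coefficient coding cost. Each coefficient's absolute value is
// clamped to 0..3 and used to pick one of four 16-bit weights packed into the
// 64-bit 'weights' argument; the low and high bytes of the weights are looked
// up separately with _mm256_shuffle_epi8 and accumulated with _mm256_sad_epu8.
// The total is rounded and divided by 256.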
static uint32_t fast_coeff_cost_avx2(const coeff_t *coeff, int32_t width, uint64_t weights)
{
  const __m256i zero           = _mm256_setzero_si256();
  const __m256i threes         = _mm256_set1_epi16(3);
  const __m256i negate_hibytes = _mm256_set1_epi16(0xff00);
  const __m128i wt_extract_los = _mm_cvtsi32_si128(0x06040200);
  const __m128i wt_extract_his = _mm_cvtsi32_si128(0x07050301);

  __m256i lo_sum     = _mm256_setzero_si256();
  __m256i hi_sum     = _mm256_setzero_si256();

  __m128i wts_128    = _mm_loadl_epi64 ((const __m128i *)&weights);
  __m128i wts_lo_128 = _mm_shuffle_epi8(wts_128, wt_extract_los);
  __m128i wts_hi_128 = _mm_shuffle_epi8(wts_128, wt_extract_his);

  __m256i wts_lo     = _mm256_broadcastsi128_si256(wts_lo_128);
  __m256i wts_hi     = _mm256_broadcastsi128_si256(wts_hi_128);

  for (int i = 0; i < width * width; i += 32) {
    __m256i curr_lo      = _mm256_loadu_si256 ((const __m256i *)(coeff + i));
    __m256i curr_abs_lo  = _mm256_abs_epi16   (curr_lo);
    __m256i curr_max3_lo = _mm256_min_epu16   (curr_abs_lo, threes);

    // 4x4 blocks only have 16 coeffs, so handle them separately
    __m256i curr_max3_hi;
    if (width >= 8) {
      __m256i curr_hi      = _mm256_loadu_si256 ((const __m256i *)(coeff + i + 16));
      __m256i curr_abs_hi  = _mm256_abs_epi16   (curr_hi);
              curr_max3_hi = _mm256_min_epu16   (curr_abs_hi, threes);
              curr_max3_hi = _mm256_slli_epi16  (curr_max3_hi, 8);
    } else {
      // Set MSBs for high bytes if they're meaningless, so shuffles will
      // return zeros for them
      curr_max3_hi = negate_hibytes;
    }
    __m256i curr_max3    = _mm256_or_si256    (curr_max3_lo, curr_max3_hi);
    __m256i curr_wts_lo  = _mm256_shuffle_epi8(wts_lo, curr_max3);
    __m256i curr_wts_hi  = _mm256_shuffle_epi8(wts_hi, curr_max3);

    __m256i curr_sum_lo  = _mm256_sad_epu8    (curr_wts_lo, zero);
    __m256i curr_sum_hi  = _mm256_sad_epu8    (curr_wts_hi, zero);

            lo_sum       = _mm256_add_epi64   (lo_sum, curr_sum_lo);
            hi_sum       = _mm256_add_epi64   (hi_sum, curr_sum_hi);
  }
          hi_sum = _mm256_slli_epi64(hi_sum, 8);
  __m256i sum0   = _mm256_add_epi64(lo_sum, hi_sum);

  __m256i sum1   = _mm256_permute4x64_epi64(sum0, _MM_SHUFFLE(1, 0, 3, 2));
  __m256i sum2   = _mm256_add_epi64        (sum0, sum1);
  __m256i sum3   = _mm256_shuffle_epi32    (sum2, _MM_SHUFFLE(1, 0, 3, 2));
  __m256i sum4   = _mm256_add_epi64        (sum2, sum3);

  __m128i sum128 = _mm256_castsi256_si128  (sum4);
  return (_mm_cvtsi128_si32(sum128) + (1 << 7)) >> 8;
}

#endif //COMPILE_INTEL_AVX2 && defined X86_64

int kvz_strategy_register_quant_avx2(void* opaque, uint8_t bitdepth)
{
  bool success = true;

#if COMPILE_INTEL_AVX2 && defined X86_64
#if KVZ_BIT_DEPTH == 8
  if (bitdepth == 8) {
    success &= kvz_strategyselector_register(opaque, "quantize_residual", "avx2", 40, &kvz_quantize_residual_avx2);
    success &= kvz_strategyselector_register(opaque, "dequant", "avx2", 40, &kvz_dequant_avx2);
  }
#endif // KVZ_BIT_DEPTH == 8
  success &= kvz_strategyselector_register(opaque, "quant", "avx2", 40, &kvz_quant_avx2);
  success &= kvz_strategyselector_register(opaque, "coeff_abs_sum", "avx2", 0, &coeff_abs_sum_avx2);
  success &= kvz_strategyselector_register(opaque, "fast_coeff_cost", "avx2", 40, &fast_coeff_cost_avx2);
#endif //COMPILE_INTEL_AVX2 && defined X86_64

  return success;
}