/*****************************************************************************
 * This file is part of Kvazaar HEVC encoder.
 *
 * Copyright (c) 2021, Tampere University, ITU/ISO/IEC, project contributors
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * * Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright notice, this
 *   list of conditions and the following disclaimer in the documentation and/or
 *   other materials provided with the distribution.
 *
 * * Neither the name of the Tampere University or ITU/ISO/IEC nor the names of its
 *   contributors may be used to endorse or promote products derived from
 *   this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 ****************************************************************************/

#include "global.h"

#if COMPILE_INTEL_SSE41
#include "kvazaar.h"
#if KVZ_BIT_DEPTH == 8
#include "strategies/sse41/picture-sse41.h"
#include "strategies/sse41/reg_sad_pow2_widths-sse41.h"

#include <immintrin.h>
#include <stdlib.h>

#include "strategyselector.h"

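// Width-dispatching SAD: pick the kernel specialized for the given block
// width, or fall back to the arbitrary-width version for other widths.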
uint32_t kvz_reg_sad_sse41(const uint8_t * const data1, const uint8_t * const data2,
                           const int32_t width, const int32_t height, const uint32_t stride1,
                           const uint32_t stride2)
{
  if (width == 0)
    return 0;
  if (width == 4)
    return reg_sad_w4(data1, data2, height, stride1, stride2);
  if (width == 8)
    return reg_sad_w8(data1, data2, height, stride1, stride2);
  if (width == 12)
    return reg_sad_w12(data1, data2, height, stride1, stride2);
  if (width == 16)
    return reg_sad_w16(data1, data2, height, stride1, stride2);
  if (width == 24)
    return reg_sad_w24(data1, data2, height, stride1, stride2);
  else
    return reg_sad_arbitrary(data1, data2, width, height, stride1, stride2);
}

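// Return a pointer to the width-specialized SAD kernel for this block width,
// or NULL if no specialization exists so the caller uses the generic path.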
static optimized_sad_func_ptr_t get_optimized_sad_sse41(int32_t width)
{
  if (width == 0)
    return reg_sad_w0;
  if (width == 4)
    return reg_sad_w4;
  if (width == 8)
    return reg_sad_w8;
  if (width == 12)
    return reg_sad_w12;
  if (width == 16)
    return reg_sad_w16;
  if (width == 24)
    return reg_sad_w24;
  else
    return NULL;
}

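// SAD against a vertically extrapolated reference: every picture row is
// compared against the same single reference row (note the single stride).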
static uint32_t ver_sad_sse41(const uint8_t *pic_data, const uint8_t *ref_data,
                              int32_t width, int32_t height, uint32_t stride)
{
  if (width == 0)
    return 0;
  if (width == 4)
    return ver_sad_w4(pic_data, ref_data, height, stride);
  if (width == 8)
    return ver_sad_w8(pic_data, ref_data, height, stride);
  if (width == 12)
    return ver_sad_w12(pic_data, ref_data, height, stride);
  if (width == 16)
    return ver_sad_w16(pic_data, ref_data, height, stride);
  else
    return ver_sad_arbitrary(pic_data, ref_data, width, height, stride);
}

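// SAD of a 32-pixel-wide block whose reference straddles the left or right
// picture border by `left`/`right` columns. Instead of reading outside the
// frame, the in-bounds reference bytes are loaded and shuffled so that the
// border pixel is replicated into the out-of-frame columns.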
static uint32_t hor_sad_sse41_w32(const uint8_t *pic_data, const uint8_t *ref_data,
                                  int32_t height, uint32_t pic_stride, uint32_t ref_stride,
                                  uint32_t left, uint32_t right)
{
  const size_t vec_width       = 16;
  const uint32_t blkwidth_log2 = 5;
  const uint32_t left_eq_wid   = left  >> blkwidth_log2;
  const uint32_t right_eq_wid  = right >> blkwidth_log2;
  const int32_t  left_clamped  = left  - left_eq_wid;
  const int32_t  right_clamped = right - right_eq_wid;

  const int32_t height_twoline_groups = height & ~1;
  const int32_t height_residual_lines = height &  1;

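  // The constant vectors below drive the border emulation: shufmask1 shifts
  // the loaded reference bytes by the border offset, the move_*_mask pair
  // routes bytes between the low and high 16-byte halves (pshufb cannot move
  // data across registers), and epol_mask_lo/hi replicate the edge pixel into
  // the columns that fall outside the frame.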
  const __m128i zero       = _mm_setzero_si128();
  const __m128i vec_widths = _mm_set1_epi8((uint8_t)vec_width);
  const __m128i lefts      = _mm_set1_epi8((uint8_t)left_clamped);
  const __m128i rights     = _mm_set1_epi8((uint8_t)right_clamped);
  const __m128i nslo       = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
  const __m128i nshi       = _mm_add_epi8 (nslo, vec_widths);

  const __m128i rightmost_good_idx = _mm_set1_epi8((uint8_t)((vec_width << 1) - right - 1));

  const __m128i epol_mask_right_lo = _mm_min_epi8  (nslo,            rightmost_good_idx);
  const __m128i epol_mask_right_hi = _mm_min_epi8  (nshi,            rightmost_good_idx);
  const __m128i epol_mask_lo       = _mm_max_epi8  (lefts,           epol_mask_right_lo);
  const __m128i epol_mask_hi       = _mm_max_epi8  (lefts,           epol_mask_right_hi);

  const __m128i is_left            = _mm_cmpeq_epi8(rights,          zero);
  const __m128i vecwid_for_left    = _mm_and_si128 (is_left,         vec_widths);
  const __m128i ns_for_shufmask    = _mm_or_si128  (nslo,            vecwid_for_left);

  const __m128i shufmask1_right    = _mm_add_epi8  (ns_for_shufmask, rights);
  const __m128i shufmask1          = _mm_sub_epi8  (shufmask1_right, lefts);

  const __m128i md2bimask          = _mm_cmpgt_epi8(vec_widths,      shufmask1);
  const __m128i move_d_to_b_imask  = _mm_or_si128  (is_left,         md2bimask);
  const __m128i move_b_to_d_mask   = _mm_cmpgt_epi8(lefts,           nslo);

  // If we're straddling the left border, start reading from the border
  // instead; if we're straddling the right border, end at the border.
  const int32_t ld_offset = left - right;

  int32_t y;
  __m128i sse_inc = _mm_setzero_si128();
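  // Two rows per iteration; each 32-byte row is handled as two 16-byte loads.
  // The running 64-bit SAD partial sums live in the two halves of sse_inc.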
  for (y = 0; y < height_twoline_groups; y += 2) {
    __m128i a = _mm_loadu_si128((__m128i *)(pic_data + (y + 0) * pic_stride + 0));
    __m128i b = _mm_loadu_si128((__m128i *)(ref_data + (y + 0) * ref_stride + 0  + ld_offset));
    __m128i c = _mm_loadu_si128((__m128i *)(pic_data + (y + 0) * pic_stride + 16));
    __m128i d = _mm_loadu_si128((__m128i *)(ref_data + (y + 0) * ref_stride + 16 + ld_offset));
    __m128i e = _mm_loadu_si128((__m128i *)(pic_data + (y + 1) * pic_stride + 0));
    __m128i f = _mm_loadu_si128((__m128i *)(ref_data + (y + 1) * ref_stride + 0  + ld_offset));
    __m128i g = _mm_loadu_si128((__m128i *)(pic_data + (y + 1) * pic_stride + 16));
    __m128i h = _mm_loadu_si128((__m128i *)(ref_data + (y + 1) * ref_stride + 16 + ld_offset));

    __m128i b_shifted         = _mm_shuffle_epi8(b, shufmask1);
    __m128i d_shifted         = _mm_shuffle_epi8(d, shufmask1);
    __m128i f_shifted         = _mm_shuffle_epi8(f, shufmask1);
    __m128i h_shifted         = _mm_shuffle_epi8(h, shufmask1);

    // TODO: could these be optimized for two-operand efficiency? Only one of
    // these blends ever does useful work; the other should leave the vector
    // untouched, so could the first result be reused in the second calculation?
    __m128i b_with_d_data     = _mm_blendv_epi8(d_shifted, b_shifted, move_d_to_b_imask);
    __m128i d_with_b_data     = _mm_blendv_epi8(d_shifted, b_shifted, move_b_to_d_mask);
    __m128i f_with_h_data     = _mm_blendv_epi8(h_shifted, f_shifted, move_d_to_b_imask);
    __m128i h_with_f_data     = _mm_blendv_epi8(h_shifted, f_shifted, move_b_to_d_mask);

    __m128i b_final           = _mm_shuffle_epi8(b_with_d_data, epol_mask_lo);
    __m128i d_final           = _mm_shuffle_epi8(d_with_b_data, epol_mask_hi);
    __m128i f_final           = _mm_shuffle_epi8(f_with_h_data, epol_mask_lo);
    __m128i h_final           = _mm_shuffle_epi8(h_with_f_data, epol_mask_hi);

    __m128i curr_sads_ab      = _mm_sad_epu8    (a, b_final);
    __m128i curr_sads_cd      = _mm_sad_epu8    (c, d_final);
    __m128i curr_sads_ef      = _mm_sad_epu8    (e, f_final);
    __m128i curr_sads_gh      = _mm_sad_epu8    (g, h_final);

    sse_inc = _mm_add_epi64(sse_inc, curr_sads_ab);
    sse_inc = _mm_add_epi64(sse_inc, curr_sads_cd);
    sse_inc = _mm_add_epi64(sse_inc, curr_sads_ef);
    sse_inc = _mm_add_epi64(sse_inc, curr_sads_gh);
  }
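  // Odd block height: process the one remaining row the same way.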
  if (height_residual_lines) {
    __m128i a = _mm_loadu_si128((__m128i *)(pic_data + (y + 0) * pic_stride + 0));
    __m128i b = _mm_loadu_si128((__m128i *)(ref_data + (y + 0) * ref_stride + 0  + ld_offset));
    __m128i c = _mm_loadu_si128((__m128i *)(pic_data + (y + 0) * pic_stride + 16));
    __m128i d = _mm_loadu_si128((__m128i *)(ref_data + (y + 0) * ref_stride + 16 + ld_offset));

    __m128i b_shifted         = _mm_shuffle_epi8(b, shufmask1);
    __m128i d_shifted         = _mm_shuffle_epi8(d, shufmask1);

    __m128i b_with_d_data     = _mm_blendv_epi8(d_shifted, b_shifted, move_d_to_b_imask);
    __m128i d_with_b_data     = _mm_blendv_epi8(d_shifted, b_shifted, move_b_to_d_mask);

    __m128i b_final           = _mm_shuffle_epi8(b_with_d_data, epol_mask_lo);
    __m128i d_final           = _mm_shuffle_epi8(d_with_b_data, epol_mask_hi);

    __m128i curr_sads_ab      = _mm_sad_epu8    (a, b_final);
    __m128i curr_sads_cd      = _mm_sad_epu8    (c, d_final);

    sse_inc = _mm_add_epi64(sse_inc, curr_sads_ab);
    sse_inc = _mm_add_epi64(sse_inc, curr_sads_cd);
  }
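  // Fold the two 64-bit partial sums into one and return the low 32 bits.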
  __m128i sse_inc_2 = _mm_shuffle_epi32(sse_inc, _MM_SHUFFLE(1, 0, 3, 2));
  __m128i sad       = _mm_add_epi64    (sse_inc, sse_inc_2);
  return _mm_cvtsi128_si32(sad);
}

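// Width-dispatching horizontal-border SAD: widths with a specialized kernel
// use it, anything else goes through the arbitrary-width implementation.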
static uint32_t hor_sad_sse41(const uint8_t *pic_data, const uint8_t *ref_data,
                              int32_t width, int32_t height, uint32_t pic_stride,
                              uint32_t ref_stride, uint32_t left, uint32_t right)
{
  if (width == 4)
    return hor_sad_sse41_w4(pic_data, ref_data, height,
                            pic_stride, ref_stride, left, right);
  if (width == 8)
    return hor_sad_sse41_w8(pic_data, ref_data, height,
                            pic_stride, ref_stride, left, right);
  if (width == 16)
    return hor_sad_sse41_w16(pic_data, ref_data, height,
                             pic_stride, ref_stride, left, right);
  if (width == 32)
    return hor_sad_sse41_w32(pic_data, ref_data, height,
                             pic_stride, ref_stride, left, right);
  else
    return hor_sad_sse41_arbitrary(pic_data, ref_data, width, height,
                                   pic_stride, ref_stride, left, right);
}

#endif // KVZ_BIT_DEPTH == 8
#endif // COMPILE_INTEL_SSE41


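// Register the SSE4.1 SAD kernels with the strategy selector (priority 20)
// when built with SSE4.1 support and an 8-bit encoder bit depth.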
int kvz_strategy_register_picture_sse41(void* opaque, uint8_t bitdepth) {
  bool success = true;
#if COMPILE_INTEL_SSE41
#if KVZ_BIT_DEPTH == 8
  if (bitdepth == 8) {
    success &= kvz_strategyselector_register(opaque, "reg_sad", "sse41", 20, &kvz_reg_sad_sse41);
    success &= kvz_strategyselector_register(opaque, "get_optimized_sad", "sse41", 20, &get_optimized_sad_sse41);
    success &= kvz_strategyselector_register(opaque, "ver_sad", "sse41", 20, &ver_sad_sse41);
    success &= kvz_strategyselector_register(opaque, "hor_sad", "sse41", 20, &hor_sad_sse41);
  }
#endif // KVZ_BIT_DEPTH == 8
#endif // COMPILE_INTEL_SSE41
  return success;
}