// Copyright 2019 The libgav1 Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "src/dsp/intra_edge.h"
#include "src/utils/cpu.h"

#if LIBGAV1_ENABLE_SSE4_1

#include <xmmintrin.h>

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>  // memcpy

#include "src/dsp/constants.h"
#include "src/dsp/dsp.h"
#include "src/dsp/x86/common_sse4.h"
#include "src/utils/common.h"

namespace libgav1 {
namespace dsp {
namespace {

constexpr int kKernelTaps = 5;
constexpr int kKernels[3][kKernelTaps] = {
    {0, 4, 8, 4, 0}, {0, 5, 6, 5, 0}, {2, 4, 4, 4, 2}};
constexpr int kMaxEdgeBufferSize = 129;
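// Scalar form of the three filters, matching the tail loop in
// IntraEdgeFilter_SSE4_1 below: for filter strength |strength|,
//   out[i] = RightShiftWithRounding(
//       sum_j(kKernels[strength - 1][j] * edge[Clip3(i + j - 2, 0, size - 1)]),
//       4)
// Each kernel's taps sum to 16, so the rounding shift by 4 keeps outputs in
// the 8-bit pixel range.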

// This function applies the kernel [0, 4, 8, 4, 0] to 12 values.
// Assumes |source| has 16 packed byte values. Produces 12 filter outputs,
// written as one 16-byte block whose final 4 bytes are invalid and will be
// overwritten or safely discarded.
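// Dropping the zero taps, each output reduces to
//   dest[i] = (4 * source[i] + 8 * source[i + 1] + 4 * source[i + 2] + 8) >> 4
// which is what the shifts and adds below compute.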
inline void ComputeKernel1Store12(uint8_t* dest, const uint8_t* source) {
  const __m128i edge_lo = LoadUnaligned16(source);
  const __m128i edge_hi = _mm_srli_si128(edge_lo, 6);
  // Samples matched with the '4' tap, expanded to 16-bit.
  const __m128i outers_lo = _mm_cvtepu8_epi16(edge_lo);
  const __m128i outers_hi = _mm_cvtepu8_epi16(edge_hi);
  // Samples matched with the '8' tap, expanded to 16-bit.
  const __m128i centers_lo = _mm_srli_si128(outers_lo, 2);
  const __m128i centers_hi = _mm_srli_si128(outers_hi, 2);

  // Apply the taps by shifting.
  const __m128i outers4_lo = _mm_slli_epi16(outers_lo, 2);
  const __m128i outers4_hi = _mm_slli_epi16(outers_hi, 2);
  const __m128i centers8_lo = _mm_slli_epi16(centers_lo, 3);
  const __m128i centers8_hi = _mm_slli_epi16(centers_hi, 3);
  // Move latter 4x values down to add with first 4x values for each output.
  const __m128i partial_sums_lo =
      _mm_add_epi16(outers4_lo, _mm_srli_si128(outers4_lo, 4));
  const __m128i partial_sums_hi =
      _mm_add_epi16(outers4_hi, _mm_srli_si128(outers4_hi, 4));
  // Add the 8x center values and round for the final kernel sum for each
  // output.
  const __m128i sums_lo = RightShiftWithRounding_U16(
      _mm_add_epi16(partial_sums_lo, centers8_lo), 4);
  const __m128i sums_hi = RightShiftWithRounding_U16(
      _mm_add_epi16(partial_sums_hi, centers8_hi), 4);

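  // Each sums vector holds 6 valid 16-bit outputs in its low lanes. The packus
  // below narrows them to bytes, and the slli/alignr pair stitches the two
  // 6-byte runs together into the low 12 bytes of |result|.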
  const __m128i result_lo = _mm_packus_epi16(sums_lo, sums_lo);
  const __m128i result_hi = _mm_packus_epi16(sums_hi, sums_hi);
  const __m128i result =
      _mm_alignr_epi8(result_hi, _mm_slli_si128(result_lo, 10), 10);
  StoreUnaligned16(dest, result);
}

// This function applies the kernel [0, 5, 6, 5, 0] to 12 values.
// Assumes |source| has 16 packed byte values. Produces 12 filter outputs,
// written as one 16-byte block whose final 4 bytes are invalid and will be
// overwritten or safely discarded.
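// Dropping the zero taps, each output reduces to
//   dest[i] = (5 * source[i] + 6 * source[i + 1] + 5 * source[i + 2] + 8) >> 4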
inline void ComputeKernel2Store12(uint8_t* dest, const uint8_t* source) {
  const __m128i edge_lo = LoadUnaligned16(source);
  const __m128i edge_hi = _mm_srli_si128(edge_lo, 6);
  const __m128i outers_lo = _mm_cvtepu8_epi16(edge_lo);
  const __m128i centers_lo = _mm_srli_si128(outers_lo, 2);
  const __m128i outers_hi = _mm_cvtepu8_epi16(edge_hi);
  const __m128i centers_hi = _mm_srli_si128(outers_hi, 2);
  // Samples matched with the '5' tap, expanded to 16-bit. Add x + 4x.
  const __m128i outers5_lo =
      _mm_add_epi16(outers_lo, _mm_slli_epi16(outers_lo, 2));
  const __m128i outers5_hi =
      _mm_add_epi16(outers_hi, _mm_slli_epi16(outers_hi, 2));
  // Samples matched with the '6' tap, expanded to 16-bit. Add 2x + 4x.
  const __m128i centers6_lo = _mm_add_epi16(_mm_slli_epi16(centers_lo, 1),
                                            _mm_slli_epi16(centers_lo, 2));
  const __m128i centers6_hi = _mm_add_epi16(_mm_slli_epi16(centers_hi, 1),
                                            _mm_slli_epi16(centers_hi, 2));
  // Move latter 5x values down to add with first 5x values for each output.
  const __m128i partial_sums_lo =
      _mm_add_epi16(outers5_lo, _mm_srli_si128(outers5_lo, 4));
  // Add the 6x center values and round for the final kernel sum for each
  // output.
  const __m128i sums_lo = RightShiftWithRounding_U16(
      _mm_add_epi16(centers6_lo, partial_sums_lo), 4);
  // Move latter 5x values down to add with first 5x values for each output.
  const __m128i partial_sums_hi =
      _mm_add_epi16(outers5_hi, _mm_srli_si128(outers5_hi, 4));
  // Add the 6x center values and round for the final kernel sum for each
  // output.
  const __m128i sums_hi = RightShiftWithRounding_U16(
      _mm_add_epi16(centers6_hi, partial_sums_hi), 4);
  // First 6 values are valid outputs.
  const __m128i result_lo = _mm_packus_epi16(sums_lo, sums_lo);
  const __m128i result_hi = _mm_packus_epi16(sums_hi, sums_hi);
  const __m128i result =
      _mm_alignr_epi8(result_hi, _mm_slli_si128(result_lo, 10), 10);
  StoreUnaligned16(dest, result);
}

// This function applies the kernel [2, 4, 4, 4, 2] to 8 values.
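// Each output computes
//   dest[i] = (2 * source[i] + 4 * source[i + 1] + 4 * source[i + 2] +
//              4 * source[i + 3] + 2 * source[i + 4] + 8) >> 4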
inline void ComputeKernel3Store8(uint8_t* dest, const uint8_t* source) {
  const __m128i edge_lo = LoadUnaligned16(source);
  const __m128i edge_hi = _mm_srli_si128(edge_lo, 4);
  // Finish |edge_lo| life cycle quickly.
  // Multiply for 2x.
  const __m128i source2_lo = _mm_slli_epi16(_mm_cvtepu8_epi16(edge_lo), 1);
  // Multiply 2x by 2 and align.
  const __m128i source4_lo = _mm_srli_si128(_mm_slli_epi16(source2_lo, 1), 2);
  // Finish |source2_lo| life cycle quickly.
  // Move latter 2x values down to add with first 2x values for each output.
  __m128i sum = _mm_add_epi16(source2_lo, _mm_srli_si128(source2_lo, 8));
  // First 4x values already aligned to add with running total.
  sum = _mm_add_epi16(sum, source4_lo);
  // Move second 4x values down to add with running total.
  sum = _mm_add_epi16(sum, _mm_srli_si128(source4_lo, 2));
  // Move third 4x values down to add with running total.
  sum = _mm_add_epi16(sum, _mm_srli_si128(source4_lo, 4));
  // Multiply for 2x.
  const __m128i source2_hi = _mm_slli_epi16(_mm_cvtepu8_epi16(edge_hi), 1);
  // Multiply 2x by 2 and align.
  const __m128i source4_hi = _mm_srli_si128(_mm_slli_epi16(source2_hi, 1), 2);
  // Move latter 2x values down to add with first 2x values for each output.
  __m128i sum_hi = _mm_add_epi16(source2_hi, _mm_srli_si128(source2_hi, 8));
  // First 4x values already aligned to add with running total.
  sum_hi = _mm_add_epi16(sum_hi, source4_hi);
  // Move second 4x values down to add with running total.
  sum_hi = _mm_add_epi16(sum_hi, _mm_srli_si128(source4_hi, 2));
  // Move third 4x values down to add with running total.
  sum_hi = _mm_add_epi16(sum_hi, _mm_srli_si128(source4_hi, 4));

  // Because we have only 8 values here, it is safe to align before packing
  // down to 8-bit without losing data.
  sum = _mm_alignr_epi8(sum_hi, _mm_slli_si128(sum, 8), 8);
  sum = RightShiftWithRounding_U16(sum, 4);
  StoreLo8(dest, _mm_packus_epi16(sum, sum));
}

void IntraEdgeFilter_SSE4_1(void* buffer, int size, int strength) {
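  // The filter reads unfiltered values while writing filtered ones in place,
  // so keep a copy of the input in |edge|. The 4 bytes of slack let the
  // 16-byte vector loads read slightly past the last valid input.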
  uint8_t edge[kMaxEdgeBufferSize + 4];
  memcpy(edge, buffer, size);
  auto* dst_buffer = static_cast<uint8_t*>(buffer);

  // Only process |size| - 1 elements; when |size| is 1 there is nothing to do.
  if (size == 1) return;

  int i = 0;
  switch (strength) {
    case 1:
      // To avoid writing past the end of |dst_buffer|, stop short of |size| by
      // the total write size (16 bytes) plus the initial offset of 1. Each
      // iteration writes 16 bytes, of which the first 12 are valid outputs.
      for (; i < size - 17; i += 12) {
        ComputeKernel1Store12(dst_buffer + i + 1, edge + i);
      }
      break;
    case 2:
      // See the comment for case 1.
      for (; i < size - 17; i += 12) {
        ComputeKernel2Store12(dst_buffer + i + 1, edge + i);
      }
      break;
    default:
      assert(strength == 3);
      // The first filter input is repeated for taps of value 2 and 4.
      dst_buffer[1] = RightShiftWithRounding(
          (6 * edge[0] + 4 * edge[1] + 4 * edge[2] + 2 * edge[3]), 4);
      // In this case, one block of 8 bytes is written in each iteration, with
      // an offset of 2.
      for (; i < size - 10; i += 8) {
        ComputeKernel3Store8(dst_buffer + i + 2, edge + i);
      }
  }
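  // Scalar tail: compute any positions the vector loops did not reach (always
  // at least the last two), clamping reads to [0, size - 1] so that border
  // taps replicate the edge values.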
  const int kernel_index = strength - 1;
  for (int final_index = Clip3(i, 1, size - 2); final_index < size;
       ++final_index) {
    int sum = 0;
    for (int j = 0; j < kKernelTaps; ++j) {
      const int k = Clip3(final_index + j - 2, 0, size - 1);
      sum += kKernels[kernel_index][j] * edge[k];
    }
    dst_buffer[final_index] = RightShiftWithRounding(sum, 4);
  }
}

constexpr int kMaxUpsampleSize = 16;

// Applies the upsampling kernel [-1, 9, 9, -1] to alternating pixels, and
// interleaves the results with the original values. This implementation
// assumes that it is safe to write the maximum number of upsampled pixels (32)
// to the edge buffer, even when |size| is small.
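// In scalar terms: original pixels land at even output positions, and each new
// sample inserted between p[j - 1] and p[j] is
//   Clip3((-p[j - 2] + 9 * p[j - 1] + 9 * p[j] - p[j + 1] + 8) >> 4, 0, 255)
// with out-of-range taps replicated from the edge pixels via |temp| below.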
void IntraEdgeUpsampler_SSE4_1(void* buffer, int size) {
  assert(size % 4 == 0 && size <= kMaxUpsampleSize);
  auto* const pixel_buffer = static_cast<uint8_t*>(buffer);
  uint8_t temp[kMaxUpsampleSize + 8];
  temp[0] = temp[1] = pixel_buffer[-1];
  memcpy(temp + 2, pixel_buffer, sizeof(temp[0]) * size);
  temp[size + 2] = pixel_buffer[size - 1];
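  // |temp| now holds [p[-1], p[-1], p[0], ..., p[size - 1], p[size - 1]], so
  // the outer kernel taps read replicated edge pixels rather than memory
  // outside the buffer.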

  pixel_buffer[-2] = temp[0];
  const __m128i data = LoadUnaligned16(temp);
  const __m128i src_lo = _mm_cvtepu8_epi16(data);
  const __m128i src_hi = _mm_unpackhi_epi8(data, _mm_setzero_si128());
  const __m128i src9_hi = _mm_add_epi16(src_hi, _mm_slli_epi16(src_hi, 3));
  const __m128i src9_lo = _mm_add_epi16(src_lo, _mm_slli_epi16(src_lo, 3));
  __m128i sum_lo = _mm_sub_epi16(_mm_alignr_epi8(src9_hi, src9_lo, 2), src_lo);
  sum_lo = _mm_add_epi16(sum_lo, _mm_alignr_epi8(src9_hi, src9_lo, 4));
  sum_lo = _mm_sub_epi16(sum_lo, _mm_alignr_epi8(src_hi, src_lo, 6));
  sum_lo = RightShiftWithRounding_S16(sum_lo, 4);
  const __m128i result_lo = _mm_unpacklo_epi8(_mm_packus_epi16(sum_lo, sum_lo),
                                              _mm_srli_si128(data, 2));
  StoreUnaligned16(pixel_buffer - 1, result_lo);
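  // The 16 bytes just stored cover the upsampled positions for the first 8
  // input pixels. For |size| of 12 or 16, a second pass below continues the
  // interleave for pixels 8 and up, starting at pixel_buffer + 15.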
  if (size > 8) {
    const __m128i src_hi_extra = _mm_cvtepu8_epi16(LoadLo8(temp + 16));
    const __m128i src9_hi_extra =
        _mm_add_epi16(src_hi_extra, _mm_slli_epi16(src_hi_extra, 3));
    __m128i sum_hi =
        _mm_sub_epi16(_mm_alignr_epi8(src9_hi_extra, src9_hi, 2), src_hi);
    sum_hi = _mm_add_epi16(sum_hi, _mm_alignr_epi8(src9_hi_extra, src9_hi, 4));
    sum_hi = _mm_sub_epi16(sum_hi, _mm_alignr_epi8(src_hi_extra, src_hi, 6));
    sum_hi = RightShiftWithRounding_S16(sum_hi, 4);
    const __m128i result_hi =
        _mm_unpacklo_epi8(_mm_packus_epi16(sum_hi, sum_hi), LoadLo8(temp + 10));
    StoreUnaligned16(pixel_buffer + 15, result_hi);
  }
}

void Init8bpp() {
  Dsp* const dsp = dsp_internal::GetWritableDspTable(kBitdepth8);
  assert(dsp != nullptr);
#if DSP_ENABLED_8BPP_SSE4_1(IntraEdgeFilter)
  dsp->intra_edge_filter = IntraEdgeFilter_SSE4_1;
#endif
#if DSP_ENABLED_8BPP_SSE4_1(IntraEdgeUpsampler)
  dsp->intra_edge_upsampler = IntraEdgeUpsampler_SSE4_1;
#endif
}

}  // namespace

void IntraEdgeInit_SSE4_1() { Init8bpp(); }

}  // namespace dsp
}  // namespace libgav1

#else   // !LIBGAV1_ENABLE_SSE4_1
namespace libgav1 {
namespace dsp {

void IntraEdgeInit_SSE4_1() {}

}  // namespace dsp
}  // namespace libgav1
#endif  // LIBGAV1_ENABLE_SSE4_1