/*
 * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <smmintrin.h>

#include "./vp9_rtcd.h"
#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_temporal_filter.h"
#include "vp9/encoder/x86/temporal_filter_constants.h"

// Read in 8 pixels from a and b as 8-bit unsigned integers, compute the
// difference squared, and store as unsigned 16-bit integers to dst.
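//
// A scalar sketch of what this computes (illustrative only):
//   for (i = 0; i < 8; ++i) {
//     const int diff = a[i] - b[i];
//     dst[i] = (uint16_t)(diff * diff);  // at most 255 * 255, fits in 16 bits
//   }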
static INLINE void store_dist_8(const uint8_t *a, const uint8_t *b,
                                uint16_t *dst) {
  const __m128i a_reg = _mm_loadl_epi64((const __m128i *)a);
  const __m128i b_reg = _mm_loadl_epi64((const __m128i *)b);

  const __m128i a_first = _mm_cvtepu8_epi16(a_reg);
  const __m128i b_first = _mm_cvtepu8_epi16(b_reg);

  __m128i dist_first;

  dist_first = _mm_sub_epi16(a_first, b_first);
  dist_first = _mm_mullo_epi16(dist_first, dist_first);

  _mm_storeu_si128((__m128i *)dst, dist_first);
}

static INLINE void store_dist_16(const uint8_t *a, const uint8_t *b,
                                 uint16_t *dst) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i a_reg = _mm_loadu_si128((const __m128i *)a);
  const __m128i b_reg = _mm_loadu_si128((const __m128i *)b);

  const __m128i a_first = _mm_cvtepu8_epi16(a_reg);
  const __m128i a_second = _mm_unpackhi_epi8(a_reg, zero);
  const __m128i b_first = _mm_cvtepu8_epi16(b_reg);
  const __m128i b_second = _mm_unpackhi_epi8(b_reg, zero);

  __m128i dist_first, dist_second;

  dist_first = _mm_sub_epi16(a_first, b_first);
  dist_second = _mm_sub_epi16(a_second, b_second);
  dist_first = _mm_mullo_epi16(dist_first, dist_first);
  dist_second = _mm_mullo_epi16(dist_second, dist_second);

  _mm_storeu_si128((__m128i *)dst, dist_first);
  _mm_storeu_si128((__m128i *)(dst + 8), dist_second);
}

static INLINE void read_dist_8(const uint16_t *dist, __m128i *dist_reg) {
  *dist_reg = _mm_loadu_si128((const __m128i *)dist);
}

static INLINE void read_dist_16(const uint16_t *dist, __m128i *reg_first,
                                __m128i *reg_second) {
  read_dist_8(dist, reg_first);
  read_dist_8(dist + 8, reg_second);
}

// Average the value based on the number of values summed (9 for pixels away
// from the border, 4 for pixels in corners, and 6 for other edge values).
//
// Add in the rounding factor and shift, clamp to 16, subtract from 16, and
// multiply by weight.
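//
// Scalar sketch of the per-element computation below (illustrative only):
//   modifier = (sum * mul_constant) >> 16;         // ~= sum * 3 / num_values
//   modifier = (modifier + rounding) >> strength;
//   modifier = min(modifier, 16);
//   result   = (16 - modifier) * weight;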
static INLINE __m128i average_8(__m128i sum, const __m128i *mul_constants,
                                const int strength, const int rounding,
                                const __m128i *weight) {
  // _mm_srl_epi16 uses the lower 64 bit value for the shift.
  const __m128i strength_u128 = _mm_set_epi32(0, 0, 0, strength);
  const __m128i rounding_u16 = _mm_set1_epi16(rounding);
  const __m128i weight_u16 = *weight;
  const __m128i sixteen = _mm_set1_epi16(16);

  // modifier * 3 / index;
  sum = _mm_mulhi_epu16(sum, *mul_constants);

  sum = _mm_adds_epu16(sum, rounding_u16);
  sum = _mm_srl_epi16(sum, strength_u128);

  // The maximum input to this comparison is UINT16_MAX * NEIGHBOR_CONSTANT_4
  // >> 16 (also NEIGHBOR_CONSTANT_4 - 1), which is 49151 / 0xbfff / -16385 when
  // interpreted as signed. So this needs the unsigned epu16 version, which was
  // not available until SSE4.1.
  sum = _mm_min_epu16(sum, sixteen);

  sum = _mm_sub_epi16(sixteen, sum);

  return _mm_mullo_epi16(sum, weight_u16);
}

// Add 'sum_u16' to 'count'. Multiply by 'pred' and add to 'accumulator.'
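//
// Scalar sketch (illustrative only):
//   for (i = 0; i < 8; ++i) {
//     count[i] += sum_u16[i];                  // saturating 16-bit add
//     accumulator[i] += sum_u16[i] * pred[i];  // 32-bit accumulation
//   }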
static void accumulate_and_store_8(const __m128i sum_u16, const uint8_t *pred,
                                   uint16_t *count, uint32_t *accumulator) {
  const __m128i pred_u8 = _mm_loadl_epi64((const __m128i *)pred);
  const __m128i zero = _mm_setzero_si128();
  __m128i count_u16 = _mm_loadu_si128((const __m128i *)count);
  __m128i pred_u16 = _mm_cvtepu8_epi16(pred_u8);
  __m128i pred_0_u32, pred_1_u32;
  __m128i accum_0_u32, accum_1_u32;

  count_u16 = _mm_adds_epu16(count_u16, sum_u16);
  _mm_storeu_si128((__m128i *)count, count_u16);

  pred_u16 = _mm_mullo_epi16(sum_u16, pred_u16);

  pred_0_u32 = _mm_cvtepu16_epi32(pred_u16);
  pred_1_u32 = _mm_unpackhi_epi16(pred_u16, zero);

  accum_0_u32 = _mm_loadu_si128((const __m128i *)accumulator);
  accum_1_u32 = _mm_loadu_si128((const __m128i *)(accumulator + 4));

  accum_0_u32 = _mm_add_epi32(pred_0_u32, accum_0_u32);
  accum_1_u32 = _mm_add_epi32(pred_1_u32, accum_1_u32);

  _mm_storeu_si128((__m128i *)accumulator, accum_0_u32);
  _mm_storeu_si128((__m128i *)(accumulator + 4), accum_1_u32);
}

static INLINE void accumulate_and_store_16(const __m128i sum_0_u16,
                                           const __m128i sum_1_u16,
                                           const uint8_t *pred, uint16_t *count,
                                           uint32_t *accumulator) {
  const __m128i pred_u8 = _mm_loadu_si128((const __m128i *)pred);
  const __m128i zero = _mm_setzero_si128();
  __m128i count_0_u16 = _mm_loadu_si128((const __m128i *)count),
          count_1_u16 = _mm_loadu_si128((const __m128i *)(count + 8));
  __m128i pred_0_u16 = _mm_cvtepu8_epi16(pred_u8),
          pred_1_u16 = _mm_unpackhi_epi8(pred_u8, zero);
  __m128i pred_0_u32, pred_1_u32, pred_2_u32, pred_3_u32;
  __m128i accum_0_u32, accum_1_u32, accum_2_u32, accum_3_u32;

  count_0_u16 = _mm_adds_epu16(count_0_u16, sum_0_u16);
  _mm_storeu_si128((__m128i *)count, count_0_u16);

  count_1_u16 = _mm_adds_epu16(count_1_u16, sum_1_u16);
  _mm_storeu_si128((__m128i *)(count + 8), count_1_u16);

  pred_0_u16 = _mm_mullo_epi16(sum_0_u16, pred_0_u16);
  pred_1_u16 = _mm_mullo_epi16(sum_1_u16, pred_1_u16);

  pred_0_u32 = _mm_cvtepu16_epi32(pred_0_u16);
  pred_1_u32 = _mm_unpackhi_epi16(pred_0_u16, zero);
  pred_2_u32 = _mm_cvtepu16_epi32(pred_1_u16);
  pred_3_u32 = _mm_unpackhi_epi16(pred_1_u16, zero);

  accum_0_u32 = _mm_loadu_si128((const __m128i *)accumulator);
  accum_1_u32 = _mm_loadu_si128((const __m128i *)(accumulator + 4));
  accum_2_u32 = _mm_loadu_si128((const __m128i *)(accumulator + 8));
  accum_3_u32 = _mm_loadu_si128((const __m128i *)(accumulator + 12));

  accum_0_u32 = _mm_add_epi32(pred_0_u32, accum_0_u32);
  accum_1_u32 = _mm_add_epi32(pred_1_u32, accum_1_u32);
  accum_2_u32 = _mm_add_epi32(pred_2_u32, accum_2_u32);
  accum_3_u32 = _mm_add_epi32(pred_3_u32, accum_3_u32);

  _mm_storeu_si128((__m128i *)accumulator, accum_0_u32);
  _mm_storeu_si128((__m128i *)(accumulator + 4), accum_1_u32);
  _mm_storeu_si128((__m128i *)(accumulator + 8), accum_2_u32);
  _mm_storeu_si128((__m128i *)(accumulator + 12), accum_3_u32);
}

// Read in 8 pixels from y_dist. For each index i, compute y_dist[i-1] +
// y_dist[i] + y_dist[i+1] and store in sum as 16-bit unsigned int.
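//
// Scalar sketch (illustrative only, with saturating 16-bit adds):
//   for (i = 0; i < 8; ++i)
//     sum[i] = y_dist[i - 1] + y_dist[i] + y_dist[i + 1];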
static INLINE void get_sum_8(const uint16_t *y_dist, __m128i *sum) {
  __m128i dist_reg, dist_left, dist_right;

  dist_reg = _mm_loadu_si128((const __m128i *)y_dist);
  dist_left = _mm_loadu_si128((const __m128i *)(y_dist - 1));
  dist_right = _mm_loadu_si128((const __m128i *)(y_dist + 1));

  *sum = _mm_adds_epu16(dist_reg, dist_left);
  *sum = _mm_adds_epu16(*sum, dist_right);
}

// Read in 16 pixels from y_dist. For each index i, compute y_dist[i-1] +
// y_dist[i] + y_dist[i+1]. Store the result for the first 8 pixels in
// sum_first and the rest in sum_second.
static INLINE void get_sum_16(const uint16_t *y_dist, __m128i *sum_first,
                              __m128i *sum_second) {
  get_sum_8(y_dist, sum_first);
  get_sum_8(y_dist + 8, sum_second);
}

// Read in a row of chroma values that corresponds to a row of 16 luma values.
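//
// With ss_x == 1 each chroma value covers two luma columns, so the 8 loaded
// values are duplicated across two registers (illustrative only):
//   u_first  = { u[0], u[0], u[1], u[1], u[2], u[2], u[3], u[3] }
//   u_second = { u[4], u[4], u[5], u[5], u[6], u[6], u[7], u[7] }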
static INLINE void read_chroma_dist_row_16(int ss_x, const uint16_t *u_dist,
                                           const uint16_t *v_dist,
                                           __m128i *u_first, __m128i *u_second,
                                           __m128i *v_first,
                                           __m128i *v_second) {
  if (!ss_x) {
    // If there is no chroma subsampling in the horizontal direction, then we
    // need to load 16 entries from chroma.
    read_dist_16(u_dist, u_first, u_second);
    read_dist_16(v_dist, v_first, v_second);
  } else {  // ss_x == 1
    // Otherwise, we only need to load 8 entries.
    __m128i u_reg, v_reg;

    read_dist_8(u_dist, &u_reg);

    *u_first = _mm_unpacklo_epi16(u_reg, u_reg);
    *u_second = _mm_unpackhi_epi16(u_reg, u_reg);

    read_dist_8(v_dist, &v_reg);

    *v_first = _mm_unpacklo_epi16(v_reg, v_reg);
    *v_second = _mm_unpackhi_epi16(v_reg, v_reg);
  }
}

// Horizontally add the unsigned 16-bit ints in src and store them as signed
// 32-bit ints in dst.
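//
// Scalar sketch (illustrative only):
//   for (i = 0; i < 4; ++i) dst[i] = (int32_t)src[2 * i] + src[2 * i + 1];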
static INLINE void hadd_epu16(__m128i *src, __m128i *dst) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i shift_right = _mm_srli_si128(*src, 2);

  const __m128i odd = _mm_blend_epi16(shift_right, zero, 170);
  const __m128i even = _mm_blend_epi16(*src, zero, 170);

  *dst = _mm_add_epi32(even, odd);
}

// Add a row of luma distortion to 8 corresponding chroma mods.
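//
// For each of the 8 chroma columns, the luma distortions covered by that
// chroma sample are summed and added to both u_mod and v_mod. Which luma
// values contribute depends on the subsampling (illustrative only):
//   ss_x == 0, ss_y == 0: y_dist[i]
//   ss_x == 0, ss_y == 1: y_dist[i] + y_dist[i + DIST_STRIDE]
//   ss_x == 1, ss_y == 0: y_dist[2 * i] + y_dist[2 * i + 1]
//   ss_x == 1, ss_y == 1: the 2x2 sum of the values above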
static INLINE void add_luma_dist_to_8_chroma_mod(const uint16_t *y_dist,
                                                 int ss_x, int ss_y,
                                                 __m128i *u_mod,
                                                 __m128i *v_mod) {
  __m128i y_reg;
  if (!ss_x) {
    read_dist_8(y_dist, &y_reg);
    if (ss_y == 1) {
      __m128i y_tmp;
      read_dist_8(y_dist + DIST_STRIDE, &y_tmp);

      y_reg = _mm_adds_epu16(y_reg, y_tmp);
    }
  } else {
    __m128i y_first, y_second;
    read_dist_16(y_dist, &y_first, &y_second);
    if (ss_y == 1) {
      __m128i y_tmp_0, y_tmp_1;
      read_dist_16(y_dist + DIST_STRIDE, &y_tmp_0, &y_tmp_1);

      y_first = _mm_adds_epu16(y_first, y_tmp_0);
      y_second = _mm_adds_epu16(y_second, y_tmp_1);
    }

    hadd_epu16(&y_first, &y_first);
    hadd_epu16(&y_second, &y_second);

    y_reg = _mm_packus_epi32(y_first, y_second);
  }

  *u_mod = _mm_adds_epu16(*u_mod, y_reg);
  *v_mod = _mm_adds_epu16(*v_mod, y_reg);
}

// Apply temporal filter to the luma components. This performs temporal
// filtering on a luma block of 16 x block_height. If blk_fw is not NULL, use
// blk_fw as an array of size 4 holding the weights for each of the 4
// subblocks; otherwise use top_weight for the top half and bottom_weight for
// the bottom half.
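//
// A scalar sketch of the per-pixel filter that the SIMD code below vectorizes
// (names such as diff_sum are illustrative, not from this file):
//   diff_sum = 3x3 neighborhood sum of y_dist plus the co-located u/v dist;
//   modifier = (diff_sum * neighbor_constant) >> 16;  // ~ diff_sum * 3 / count
//   modifier = (modifier + rounding) >> strength;
//   modifier = (16 - min(modifier, 16)) * filter_weight;
//   y_count[pixel] += modifier;
//   y_accum[pixel] += modifier * y_pre[pixel];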
static void vp9_apply_temporal_filter_luma_16(
    const uint8_t *y_src, int y_src_stride, const uint8_t *y_pre,
    int y_pre_stride, const uint8_t *u_src, const uint8_t *v_src,
    int uv_src_stride, const uint8_t *u_pre, const uint8_t *v_pre,
    int uv_pre_stride, unsigned int block_width, unsigned int block_height,
    int ss_x, int ss_y, int strength, int use_whole_blk, uint32_t *y_accum,
    uint16_t *y_count, const uint16_t *y_dist, const uint16_t *u_dist,
    const uint16_t *v_dist, const int16_t *const *neighbors_first,
    const int16_t *const *neighbors_second, int top_weight, int bottom_weight,
    const int *blk_fw) {
  const int rounding = (1 << strength) >> 1;
  __m128i weight_first, weight_second;

  __m128i mul_first, mul_second;

  __m128i sum_row_1_first, sum_row_1_second;
  __m128i sum_row_2_first, sum_row_2_second;
  __m128i sum_row_3_first, sum_row_3_second;

  __m128i u_first, u_second;
  __m128i v_first, v_second;

  __m128i sum_row_first;
  __m128i sum_row_second;

  // Loop variables
  unsigned int h;

  assert(strength >= 0);
  assert(strength <= 6);

  assert(block_width == 16);

  (void)block_width;

  // Initialize the weights
  if (blk_fw) {
    weight_first = _mm_set1_epi16(blk_fw[0]);
    weight_second = _mm_set1_epi16(blk_fw[1]);
  } else {
    weight_first = _mm_set1_epi16(top_weight);
    weight_second = weight_first;
  }

  // First row
  mul_first = _mm_load_si128((const __m128i *)neighbors_first[0]);
  mul_second = _mm_load_si128((const __m128i *)neighbors_second[0]);

  // Add luma values
  get_sum_16(y_dist, &sum_row_2_first, &sum_row_2_second);
  get_sum_16(y_dist + DIST_STRIDE, &sum_row_3_first, &sum_row_3_second);

  sum_row_first = _mm_adds_epu16(sum_row_2_first, sum_row_3_first);
  sum_row_second = _mm_adds_epu16(sum_row_2_second, sum_row_3_second);

  // Add chroma values
  read_chroma_dist_row_16(ss_x, u_dist, v_dist, &u_first, &u_second, &v_first,
                          &v_second);

  sum_row_first = _mm_adds_epu16(sum_row_first, u_first);
  sum_row_second = _mm_adds_epu16(sum_row_second, u_second);

  sum_row_first = _mm_adds_epu16(sum_row_first, v_first);
  sum_row_second = _mm_adds_epu16(sum_row_second, v_second);

  // Get modifier and store result
  sum_row_first =
      average_8(sum_row_first, &mul_first, strength, rounding, &weight_first);
  sum_row_second = average_8(sum_row_second, &mul_second, strength, rounding,
                             &weight_second);
  accumulate_and_store_16(sum_row_first, sum_row_second, y_pre, y_count,
                          y_accum);

  y_src += y_src_stride;
  y_pre += y_pre_stride;
  y_count += y_pre_stride;
  y_accum += y_pre_stride;
  y_dist += DIST_STRIDE;

  u_src += uv_src_stride;
  u_pre += uv_pre_stride;
  u_dist += DIST_STRIDE;
  v_src += uv_src_stride;
  v_pre += uv_pre_stride;
  v_dist += DIST_STRIDE;

  // Then all the rows except the last one
  mul_first = _mm_load_si128((const __m128i *)neighbors_first[1]);
  mul_second = _mm_load_si128((const __m128i *)neighbors_second[1]);

  for (h = 1; h < block_height - 1; ++h) {
    // Move the weight to the bottom half
    if (!use_whole_blk && h == block_height / 2) {
      if (blk_fw) {
        weight_first = _mm_set1_epi16(blk_fw[2]);
        weight_second = _mm_set1_epi16(blk_fw[3]);
      } else {
        weight_first = _mm_set1_epi16(bottom_weight);
        weight_second = weight_first;
      }
    }
    // Shift the rows up
    sum_row_1_first = sum_row_2_first;
    sum_row_1_second = sum_row_2_second;
    sum_row_2_first = sum_row_3_first;
    sum_row_2_second = sum_row_3_second;

    // Add luma values to the modifier
    sum_row_first = _mm_adds_epu16(sum_row_1_first, sum_row_2_first);
    sum_row_second = _mm_adds_epu16(sum_row_1_second, sum_row_2_second);

    get_sum_16(y_dist + DIST_STRIDE, &sum_row_3_first, &sum_row_3_second);

    sum_row_first = _mm_adds_epu16(sum_row_first, sum_row_3_first);
    sum_row_second = _mm_adds_epu16(sum_row_second, sum_row_3_second);

    // Add chroma values to the modifier
    if (ss_y == 0 || h % 2 == 0) {
      // Only calculate the new chroma distortion if we are at a pixel that
      // corresponds to a new chroma row
      read_chroma_dist_row_16(ss_x, u_dist, v_dist, &u_first, &u_second,
                              &v_first, &v_second);

      u_src += uv_src_stride;
      u_pre += uv_pre_stride;
      u_dist += DIST_STRIDE;
      v_src += uv_src_stride;
      v_pre += uv_pre_stride;
      v_dist += DIST_STRIDE;
    }

    sum_row_first = _mm_adds_epu16(sum_row_first, u_first);
    sum_row_second = _mm_adds_epu16(sum_row_second, u_second);
    sum_row_first = _mm_adds_epu16(sum_row_first, v_first);
    sum_row_second = _mm_adds_epu16(sum_row_second, v_second);

    // Get modifier and store result
    sum_row_first =
        average_8(sum_row_first, &mul_first, strength, rounding, &weight_first);
    sum_row_second = average_8(sum_row_second, &mul_second, strength, rounding,
                               &weight_second);
    accumulate_and_store_16(sum_row_first, sum_row_second, y_pre, y_count,
                            y_accum);

    y_src += y_src_stride;
    y_pre += y_pre_stride;
    y_count += y_pre_stride;
    y_accum += y_pre_stride;
    y_dist += DIST_STRIDE;
  }

  // The last row
  mul_first = _mm_load_si128((const __m128i *)neighbors_first[0]);
  mul_second = _mm_load_si128((const __m128i *)neighbors_second[0]);

  // Shift the rows up
  sum_row_1_first = sum_row_2_first;
  sum_row_1_second = sum_row_2_second;
  sum_row_2_first = sum_row_3_first;
  sum_row_2_second = sum_row_3_second;

  // Add luma values to the modifier
  sum_row_first = _mm_adds_epu16(sum_row_1_first, sum_row_2_first);
  sum_row_second = _mm_adds_epu16(sum_row_1_second, sum_row_2_second);

  // Add chroma values to the modifier
  if (ss_y == 0) {
    // Only calculate the new chroma distortion if we are at a pixel that
    // corresponds to a new chroma row
    read_chroma_dist_row_16(ss_x, u_dist, v_dist, &u_first, &u_second,
                            &v_first, &v_second);
  }

  sum_row_first = _mm_adds_epu16(sum_row_first, u_first);
  sum_row_second = _mm_adds_epu16(sum_row_second, u_second);
  sum_row_first = _mm_adds_epu16(sum_row_first, v_first);
  sum_row_second = _mm_adds_epu16(sum_row_second, v_second);

  // Get modifier and store result
  sum_row_first =
      average_8(sum_row_first, &mul_first, strength, rounding, &weight_first);
  sum_row_second = average_8(sum_row_second, &mul_second, strength, rounding,
                             &weight_second);
  accumulate_and_store_16(sum_row_first, sum_row_second, y_pre, y_count,
                          y_accum);
}

// Perform temporal filter for the luma component.
static void vp9_apply_temporal_filter_luma(
    const uint8_t *y_src, int y_src_stride, const uint8_t *y_pre,
    int y_pre_stride, const uint8_t *u_src, const uint8_t *v_src,
    int uv_src_stride, const uint8_t *u_pre, const uint8_t *v_pre,
    int uv_pre_stride, unsigned int block_width, unsigned int block_height,
    int ss_x, int ss_y, int strength, const int *blk_fw, int use_whole_blk,
    uint32_t *y_accum, uint16_t *y_count, const uint16_t *y_dist,
    const uint16_t *u_dist, const uint16_t *v_dist) {
  unsigned int blk_col = 0, uv_blk_col = 0;
  const unsigned int blk_col_step = 16, uv_blk_col_step = 16 >> ss_x;
  const unsigned int mid_width = block_width >> 1,
                     last_width = block_width - blk_col_step;
  int top_weight = blk_fw[0],
      bottom_weight = use_whole_blk ? blk_fw[0] : blk_fw[2];
  const int16_t *const *neighbors_first;
  const int16_t *const *neighbors_second;

  if (block_width == 16) {
    // Special Case: The block width is 16 and we are operating on a row of 16
    // chroma pixels. In this case, we can't use the usual left-middle-right
    // pattern. We also don't support splitting now.
    neighbors_first = LUMA_LEFT_COLUMN_NEIGHBORS;
    neighbors_second = LUMA_RIGHT_COLUMN_NEIGHBORS;
    if (use_whole_blk) {
      vp9_apply_temporal_filter_luma_16(
          y_src + blk_col, y_src_stride, y_pre + blk_col, y_pre_stride,
          u_src + uv_blk_col, v_src + uv_blk_col, uv_src_stride,
          u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, 16,
          block_height, ss_x, ss_y, strength, use_whole_blk, y_accum + blk_col,
          y_count + blk_col, y_dist + blk_col, u_dist + uv_blk_col,
          v_dist + uv_blk_col, neighbors_first, neighbors_second, top_weight,
          bottom_weight, NULL);
    } else {
      vp9_apply_temporal_filter_luma_16(
          y_src + blk_col, y_src_stride, y_pre + blk_col, y_pre_stride,
          u_src + uv_blk_col, v_src + uv_blk_col, uv_src_stride,
          u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, 16,
          block_height, ss_x, ss_y, strength, use_whole_blk, y_accum + blk_col,
          y_count + blk_col, y_dist + blk_col, u_dist + uv_blk_col,
          v_dist + uv_blk_col, neighbors_first, neighbors_second, 0, 0, blk_fw);
    }

    return;
  }

  // Left
  neighbors_first = LUMA_LEFT_COLUMN_NEIGHBORS;
  neighbors_second = LUMA_MIDDLE_COLUMN_NEIGHBORS;
  vp9_apply_temporal_filter_luma_16(
      y_src + blk_col, y_src_stride, y_pre + blk_col, y_pre_stride,
      u_src + uv_blk_col, v_src + uv_blk_col, uv_src_stride, u_pre + uv_blk_col,
      v_pre + uv_blk_col, uv_pre_stride, 16, block_height, ss_x, ss_y, strength,
      use_whole_blk, y_accum + blk_col, y_count + blk_col, y_dist + blk_col,
      u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors_first,
      neighbors_second, top_weight, bottom_weight, NULL);

  blk_col += blk_col_step;
  uv_blk_col += uv_blk_col_step;

  // Middle First
  neighbors_first = LUMA_MIDDLE_COLUMN_NEIGHBORS;
  for (; blk_col < mid_width;
       blk_col += blk_col_step, uv_blk_col += uv_blk_col_step) {
    vp9_apply_temporal_filter_luma_16(
        y_src + blk_col, y_src_stride, y_pre + blk_col, y_pre_stride,
        u_src + uv_blk_col, v_src + uv_blk_col, uv_src_stride,
        u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, 16, block_height,
        ss_x, ss_y, strength, use_whole_blk, y_accum + blk_col,
        y_count + blk_col, y_dist + blk_col, u_dist + uv_blk_col,
        v_dist + uv_blk_col, neighbors_first, neighbors_second, top_weight,
        bottom_weight, NULL);
  }

  if (!use_whole_blk) {
    top_weight = blk_fw[1];
    bottom_weight = blk_fw[3];
  }

  // Middle Second
  for (; blk_col < last_width;
       blk_col += blk_col_step, uv_blk_col += uv_blk_col_step) {
    vp9_apply_temporal_filter_luma_16(
        y_src + blk_col, y_src_stride, y_pre + blk_col, y_pre_stride,
        u_src + uv_blk_col, v_src + uv_blk_col, uv_src_stride,
        u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, 16, block_height,
        ss_x, ss_y, strength, use_whole_blk, y_accum + blk_col,
        y_count + blk_col, y_dist + blk_col, u_dist + uv_blk_col,
        v_dist + uv_blk_col, neighbors_first, neighbors_second, top_weight,
        bottom_weight, NULL);
  }

  // Right
  neighbors_second = LUMA_RIGHT_COLUMN_NEIGHBORS;
  vp9_apply_temporal_filter_luma_16(
      y_src + blk_col, y_src_stride, y_pre + blk_col, y_pre_stride,
      u_src + uv_blk_col, v_src + uv_blk_col, uv_src_stride, u_pre + uv_blk_col,
      v_pre + uv_blk_col, uv_pre_stride, 16, block_height, ss_x, ss_y, strength,
      use_whole_blk, y_accum + blk_col, y_count + blk_col, y_dist + blk_col,
      u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors_first,
      neighbors_second, top_weight, bottom_weight, NULL);
}

// Apply temporal filter to the chroma components. This performs temporal
// filtering on a chroma block of 8 x uv_height. If blk_fw is not NULL, use
// blk_fw as an array of size 4 holding the weights for each of the 4
// subblocks; otherwise use top_weight for the top half and bottom_weight for
// the bottom half.
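//
// With horizontal subsampling the 8 chroma columns straddle the left and
// right subblocks, so the per-lane weights are laid out as (illustrative):
//   top half:    { blk_fw[0] x4, blk_fw[1] x4 }
//   bottom half: { blk_fw[2] x4, blk_fw[3] x4 }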
static void vp9_apply_temporal_filter_chroma_8(
    const uint8_t *y_src, int y_src_stride, const uint8_t *y_pre,
    int y_pre_stride, const uint8_t *u_src, const uint8_t *v_src,
    int uv_src_stride, const uint8_t *u_pre, const uint8_t *v_pre,
    int uv_pre_stride, unsigned int uv_block_width,
    unsigned int uv_block_height, int ss_x, int ss_y, int strength,
    uint32_t *u_accum, uint16_t *u_count, uint32_t *v_accum, uint16_t *v_count,
    const uint16_t *y_dist, const uint16_t *u_dist, const uint16_t *v_dist,
    const int16_t *const *neighbors, int top_weight, int bottom_weight,
    const int *blk_fw) {
  const int rounding = (1 << strength) >> 1;

  __m128i weight;

  __m128i mul;

  __m128i u_sum_row_1, u_sum_row_2, u_sum_row_3;
  __m128i v_sum_row_1, v_sum_row_2, v_sum_row_3;

  __m128i u_sum_row, v_sum_row;

  // Loop variable
  unsigned int h;

  (void)uv_block_width;

  // Initialize the weight
  if (blk_fw) {
    weight = _mm_setr_epi16(blk_fw[0], blk_fw[0], blk_fw[0], blk_fw[0],
                            blk_fw[1], blk_fw[1], blk_fw[1], blk_fw[1]);
  } else {
    weight = _mm_set1_epi16(top_weight);
  }

  // First row
  mul = _mm_load_si128((const __m128i *)neighbors[0]);

  // Add chroma values
  get_sum_8(u_dist, &u_sum_row_2);
  get_sum_8(u_dist + DIST_STRIDE, &u_sum_row_3);

  u_sum_row = _mm_adds_epu16(u_sum_row_2, u_sum_row_3);

  get_sum_8(v_dist, &v_sum_row_2);
  get_sum_8(v_dist + DIST_STRIDE, &v_sum_row_3);

  v_sum_row = _mm_adds_epu16(v_sum_row_2, v_sum_row_3);

  // Add luma values
  add_luma_dist_to_8_chroma_mod(y_dist, ss_x, ss_y, &u_sum_row, &v_sum_row);

  // Get modifier and store result
  u_sum_row = average_8(u_sum_row, &mul, strength, rounding, &weight);
  v_sum_row = average_8(v_sum_row, &mul, strength, rounding, &weight);

  accumulate_and_store_8(u_sum_row, u_pre, u_count, u_accum);
  accumulate_and_store_8(v_sum_row, v_pre, v_count, v_accum);

  u_src += uv_src_stride;
  u_pre += uv_pre_stride;
  u_dist += DIST_STRIDE;
  v_src += uv_src_stride;
  v_pre += uv_pre_stride;
  v_dist += DIST_STRIDE;
  u_count += uv_pre_stride;
  u_accum += uv_pre_stride;
  v_count += uv_pre_stride;
  v_accum += uv_pre_stride;

  y_src += y_src_stride * (1 + ss_y);
  y_pre += y_pre_stride * (1 + ss_y);
  y_dist += DIST_STRIDE * (1 + ss_y);

  // Then all the rows except the last one
  mul = _mm_load_si128((const __m128i *)neighbors[1]);

  for (h = 1; h < uv_block_height - 1; ++h) {
    // Move the weight to the bottom half of the block
    if (h == uv_block_height / 2) {
      if (blk_fw) {
        weight = _mm_setr_epi16(blk_fw[2], blk_fw[2], blk_fw[2], blk_fw[2],
                                blk_fw[3], blk_fw[3], blk_fw[3], blk_fw[3]);
      } else {
        weight = _mm_set1_epi16(bottom_weight);
      }
    }

    // Shift the rows up
    u_sum_row_1 = u_sum_row_2;
    u_sum_row_2 = u_sum_row_3;

    v_sum_row_1 = v_sum_row_2;
    v_sum_row_2 = v_sum_row_3;

    // Add chroma values
    u_sum_row = _mm_adds_epu16(u_sum_row_1, u_sum_row_2);
    get_sum_8(u_dist + DIST_STRIDE, &u_sum_row_3);
    u_sum_row = _mm_adds_epu16(u_sum_row, u_sum_row_3);

    v_sum_row = _mm_adds_epu16(v_sum_row_1, v_sum_row_2);
    get_sum_8(v_dist + DIST_STRIDE, &v_sum_row_3);
    v_sum_row = _mm_adds_epu16(v_sum_row, v_sum_row_3);

    // Add luma values
    add_luma_dist_to_8_chroma_mod(y_dist, ss_x, ss_y, &u_sum_row, &v_sum_row);

    // Get modifier and store result
    u_sum_row = average_8(u_sum_row, &mul, strength, rounding, &weight);
    v_sum_row = average_8(v_sum_row, &mul, strength, rounding, &weight);

    accumulate_and_store_8(u_sum_row, u_pre, u_count, u_accum);
    accumulate_and_store_8(v_sum_row, v_pre, v_count, v_accum);

    u_src += uv_src_stride;
    u_pre += uv_pre_stride;
    u_dist += DIST_STRIDE;
    v_src += uv_src_stride;
    v_pre += uv_pre_stride;
    v_dist += DIST_STRIDE;
    u_count += uv_pre_stride;
    u_accum += uv_pre_stride;
    v_count += uv_pre_stride;
    v_accum += uv_pre_stride;

    y_src += y_src_stride * (1 + ss_y);
    y_pre += y_pre_stride * (1 + ss_y);
    y_dist += DIST_STRIDE * (1 + ss_y);
  }

  // The last row
  mul = _mm_load_si128((const __m128i *)neighbors[0]);

  // Shift the rows up
  u_sum_row_1 = u_sum_row_2;
  u_sum_row_2 = u_sum_row_3;

  v_sum_row_1 = v_sum_row_2;
  v_sum_row_2 = v_sum_row_3;

  // Add chroma values
  u_sum_row = _mm_adds_epu16(u_sum_row_1, u_sum_row_2);
  v_sum_row = _mm_adds_epu16(v_sum_row_1, v_sum_row_2);

  // Add luma values
  add_luma_dist_to_8_chroma_mod(y_dist, ss_x, ss_y, &u_sum_row, &v_sum_row);

  // Get modifier and store result
  u_sum_row = average_8(u_sum_row, &mul, strength, rounding, &weight);
  v_sum_row = average_8(v_sum_row, &mul, strength, rounding, &weight);

  accumulate_and_store_8(u_sum_row, u_pre, u_count, u_accum);
  accumulate_and_store_8(v_sum_row, v_pre, v_count, v_accum);
}

// Perform temporal filter for the chroma components.
static void vp9_apply_temporal_filter_chroma(
    const uint8_t *y_src, int y_src_stride, const uint8_t *y_pre,
    int y_pre_stride, const uint8_t *u_src, const uint8_t *v_src,
    int uv_src_stride, const uint8_t *u_pre, const uint8_t *v_pre,
    int uv_pre_stride, unsigned int block_width, unsigned int block_height,
    int ss_x, int ss_y, int strength, const int *blk_fw, int use_whole_blk,
    uint32_t *u_accum, uint16_t *u_count, uint32_t *v_accum, uint16_t *v_count,
    const uint16_t *y_dist, const uint16_t *u_dist, const uint16_t *v_dist) {
  const unsigned int uv_width = block_width >> ss_x,
                     uv_height = block_height >> ss_y;

  unsigned int blk_col = 0, uv_blk_col = 0;
  const unsigned int uv_blk_col_step = 8, blk_col_step = 8 << ss_x;
  const unsigned int uv_mid_width = uv_width >> 1,
                     uv_last_width = uv_width - uv_blk_col_step;
  int top_weight = blk_fw[0],
      bottom_weight = use_whole_blk ? blk_fw[0] : blk_fw[2];
  const int16_t *const *neighbors;

  if (uv_width == 8) {
    // Special Case: We are subsampling in x direction on a 16x16 block. Since
    // we are operating on a row of 8 chroma pixels, we can't use the usual
    // left-middle-right pattern.
    assert(ss_x);

    if (ss_y) {
      neighbors = CHROMA_DOUBLE_SS_SINGLE_COLUMN_NEIGHBORS;
    } else {
      neighbors = CHROMA_SINGLE_SS_SINGLE_COLUMN_NEIGHBORS;
    }

    if (use_whole_blk) {
      vp9_apply_temporal_filter_chroma_8(
          y_src + blk_col, y_src_stride, y_pre + blk_col, y_pre_stride,
          u_src + uv_blk_col, v_src + uv_blk_col, uv_src_stride,
          u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_width,
          uv_height, ss_x, ss_y, strength, u_accum + uv_blk_col,
          u_count + uv_blk_col, v_accum + uv_blk_col, v_count + uv_blk_col,
          y_dist + blk_col, u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors,
          top_weight, bottom_weight, NULL);
    } else {
      vp9_apply_temporal_filter_chroma_8(
          y_src + blk_col, y_src_stride, y_pre + blk_col, y_pre_stride,
          u_src + uv_blk_col, v_src + uv_blk_col, uv_src_stride,
          u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_width,
          uv_height, ss_x, ss_y, strength, u_accum + uv_blk_col,
          u_count + uv_blk_col, v_accum + uv_blk_col, v_count + uv_blk_col,
          y_dist + blk_col, u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors,
          0, 0, blk_fw);
    }

    return;
  }

  // Left
  if (ss_x && ss_y) {
    neighbors = CHROMA_DOUBLE_SS_LEFT_COLUMN_NEIGHBORS;
  } else if (ss_x || ss_y) {
    neighbors = CHROMA_SINGLE_SS_LEFT_COLUMN_NEIGHBORS;
  } else {
    neighbors = CHROMA_NO_SS_LEFT_COLUMN_NEIGHBORS;
  }

  vp9_apply_temporal_filter_chroma_8(
      y_src + blk_col, y_src_stride, y_pre + blk_col, y_pre_stride,
      u_src + uv_blk_col, v_src + uv_blk_col, uv_src_stride, u_pre + uv_blk_col,
      v_pre + uv_blk_col, uv_pre_stride, uv_width, uv_height, ss_x, ss_y,
      strength, u_accum + uv_blk_col, u_count + uv_blk_col,
      v_accum + uv_blk_col, v_count + uv_blk_col, y_dist + blk_col,
      u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors, top_weight,
      bottom_weight, NULL);

  blk_col += blk_col_step;
  uv_blk_col += uv_blk_col_step;

  // Middle First
  if (ss_x && ss_y) {
    neighbors = CHROMA_DOUBLE_SS_MIDDLE_COLUMN_NEIGHBORS;
  } else if (ss_x || ss_y) {
    neighbors = CHROMA_SINGLE_SS_MIDDLE_COLUMN_NEIGHBORS;
  } else {
    neighbors = CHROMA_NO_SS_MIDDLE_COLUMN_NEIGHBORS;
  }

  for (; uv_blk_col < uv_mid_width;
       blk_col += blk_col_step, uv_blk_col += uv_blk_col_step) {
    vp9_apply_temporal_filter_chroma_8(
        y_src + blk_col, y_src_stride, y_pre + blk_col, y_pre_stride,
        u_src + uv_blk_col, v_src + uv_blk_col, uv_src_stride,
        u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_width,
        uv_height, ss_x, ss_y, strength, u_accum + uv_blk_col,
        u_count + uv_blk_col, v_accum + uv_blk_col, v_count + uv_blk_col,
        y_dist + blk_col, u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors,
        top_weight, bottom_weight, NULL);
  }

  if (!use_whole_blk) {
    top_weight = blk_fw[1];
    bottom_weight = blk_fw[3];
  }

  // Middle Second
  for (; uv_blk_col < uv_last_width;
       blk_col += blk_col_step, uv_blk_col += uv_blk_col_step) {
    vp9_apply_temporal_filter_chroma_8(
        y_src + blk_col, y_src_stride, y_pre + blk_col, y_pre_stride,
        u_src + uv_blk_col, v_src + uv_blk_col, uv_src_stride,
        u_pre + uv_blk_col, v_pre + uv_blk_col, uv_pre_stride, uv_width,
        uv_height, ss_x, ss_y, strength, u_accum + uv_blk_col,
        u_count + uv_blk_col, v_accum + uv_blk_col, v_count + uv_blk_col,
        y_dist + blk_col, u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors,
        top_weight, bottom_weight, NULL);
  }

  // Right
  if (ss_x && ss_y) {
    neighbors = CHROMA_DOUBLE_SS_RIGHT_COLUMN_NEIGHBORS;
  } else if (ss_x || ss_y) {
    neighbors = CHROMA_SINGLE_SS_RIGHT_COLUMN_NEIGHBORS;
  } else {
    neighbors = CHROMA_NO_SS_RIGHT_COLUMN_NEIGHBORS;
  }

  vp9_apply_temporal_filter_chroma_8(
      y_src + blk_col, y_src_stride, y_pre + blk_col, y_pre_stride,
      u_src + uv_blk_col, v_src + uv_blk_col, uv_src_stride, u_pre + uv_blk_col,
      v_pre + uv_blk_col, uv_pre_stride, uv_width, uv_height, ss_x, ss_y,
      strength, u_accum + uv_blk_col, u_count + uv_blk_col,
      v_accum + uv_blk_col, v_count + uv_blk_col, y_dist + blk_col,
      u_dist + uv_blk_col, v_dist + uv_blk_col, neighbors, top_weight,
      bottom_weight, NULL);
}

void vp9_apply_temporal_filter_sse4_1(
    const uint8_t *y_src, int y_src_stride, const uint8_t *y_pre,
    int y_pre_stride, const uint8_t *u_src, const uint8_t *v_src,
    int uv_src_stride, const uint8_t *u_pre, const uint8_t *v_pre,
    int uv_pre_stride, unsigned int block_width, unsigned int block_height,
    int ss_x, int ss_y, int strength, const int *const blk_fw,
    int use_whole_blk, uint32_t *y_accum, uint16_t *y_count, uint32_t *u_accum,
    uint16_t *u_count, uint32_t *v_accum, uint16_t *v_count) {
  const unsigned int chroma_height = block_height >> ss_y,
                     chroma_width = block_width >> ss_x;

  DECLARE_ALIGNED(16, uint16_t, y_dist[BH * DIST_STRIDE]) = { 0 };
  DECLARE_ALIGNED(16, uint16_t, u_dist[BH * DIST_STRIDE]) = { 0 };
  DECLARE_ALIGNED(16, uint16_t, v_dist[BH * DIST_STRIDE]) = { 0 };
  const int *blk_fw_ptr = blk_fw;

  uint16_t *y_dist_ptr = y_dist + 1, *u_dist_ptr = u_dist + 1,
           *v_dist_ptr = v_dist + 1;
  const uint8_t *y_src_ptr = y_src, *u_src_ptr = u_src, *v_src_ptr = v_src;
  const uint8_t *y_pre_ptr = y_pre, *u_pre_ptr = u_pre, *v_pre_ptr = v_pre;

  // Loop variables
  unsigned int row, blk_col;

  assert(block_width <= BW && "block width too large");
  assert(block_height <= BH && "block height too large");
  assert(block_width % 16 == 0 && "block width must be multiple of 16");
  assert(block_height % 2 == 0 && "block height must be even");
  assert((ss_x == 0 || ss_x == 1) && (ss_y == 0 || ss_y == 1) &&
         "invalid chroma subsampling");
  assert(strength >= 0 && strength <= 6 && "invalid temporal filter strength");
  assert(blk_fw[0] >= 0 && "filter weight must be non-negative");
  assert(
      (use_whole_blk || (blk_fw[1] >= 0 && blk_fw[2] >= 0 && blk_fw[3] >= 0)) &&
      "subblock filter weight must be non-negative");
  assert(blk_fw[0] <= 2 && "subblock filter weight must be at most 2");
  assert(
      (use_whole_blk || (blk_fw[1] <= 2 && blk_fw[2] <= 2 && blk_fw[3] <= 2)) &&
      "subblock filter weight must be at most 2");

  // Precompute the squared differences
  for (row = 0; row < block_height; row++) {
    for (blk_col = 0; blk_col < block_width; blk_col += 16) {
      store_dist_16(y_src_ptr + blk_col, y_pre_ptr + blk_col,
                    y_dist_ptr + blk_col);
    }
    y_src_ptr += y_src_stride;
    y_pre_ptr += y_pre_stride;
    y_dist_ptr += DIST_STRIDE;
  }

  for (row = 0; row < chroma_height; row++) {
    for (blk_col = 0; blk_col < chroma_width; blk_col += 8) {
      store_dist_8(u_src_ptr + blk_col, u_pre_ptr + blk_col,
                   u_dist_ptr + blk_col);
      store_dist_8(v_src_ptr + blk_col, v_pre_ptr + blk_col,
                   v_dist_ptr + blk_col);
    }

    u_src_ptr += uv_src_stride;
    u_pre_ptr += uv_pre_stride;
    u_dist_ptr += DIST_STRIDE;
    v_src_ptr += uv_src_stride;
    v_pre_ptr += uv_pre_stride;
    v_dist_ptr += DIST_STRIDE;
  }

  y_dist_ptr = y_dist + 1;
  u_dist_ptr = u_dist + 1;
  v_dist_ptr = v_dist + 1;

  vp9_apply_temporal_filter_luma(
      y_src, y_src_stride, y_pre, y_pre_stride, u_src, v_src, uv_src_stride,
      u_pre, v_pre, uv_pre_stride, block_width, block_height, ss_x, ss_y,
      strength, blk_fw_ptr, use_whole_blk, y_accum, y_count, y_dist_ptr,
      u_dist_ptr, v_dist_ptr);

  vp9_apply_temporal_filter_chroma(
      y_src, y_src_stride, y_pre, y_pre_stride, u_src, v_src, uv_src_stride,
      u_pre, v_pre, uv_pre_stride, block_width, block_height, ss_x, ss_y,
      strength, blk_fw_ptr, use_whole_blk, u_accum, u_count, v_accum, v_count,
      y_dist_ptr, u_dist_ptr, v_dist_ptr);
}