/*****************************************************************************
 * deblock.c: ppc deblocking
 *****************************************************************************
 * Copyright (C) 2007-2021 x264 project
 *
 * Authors: Guillaume Poirier <gpoirier@mplayerhq.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common/common.h"
#include "ppccommon.h"
#include "deblock.h"

#if !HIGH_BIT_DEPTH
/* Transpose four 16-byte rows (r0..r3) in place, producing the sixteen
 * 4-byte groups needed to write filtered pixels back across a vertical edge. */
#define transpose4x16(r0, r1, r2, r3)       \
{                                           \
    register vec_u8_t r4;                   \
    register vec_u8_t r5;                   \
    register vec_u8_t r6;                   \
    register vec_u8_t r7;                   \
                                            \
    r4 = vec_mergeh(r0, r2); /*0, 2 set 0*/ \
    r5 = vec_mergel(r0, r2); /*0, 2 set 1*/ \
    r6 = vec_mergeh(r1, r3); /*1, 3 set 0*/ \
    r7 = vec_mergel(r1, r3); /*1, 3 set 1*/ \
                                            \
    r0 = vec_mergeh(r4, r6); /*all set 0*/  \
    r1 = vec_mergel(r4, r6); /*all set 1*/  \
    r2 = vec_mergeh(r5, r7); /*all set 2*/  \
    r3 = vec_mergel(r5, r7); /*all set 3*/  \
}

static inline void write16x4( uint8_t *dst, int dst_stride,
                              register vec_u8_t r0, register vec_u8_t r1,
                              register vec_u8_t r2, register vec_u8_t r3 )
{
    ALIGNED_16(unsigned char result[64]);
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride >> 2;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!!
     * Each transposed row is only 4 bytes wide and the 16 destination rows
     * are separated by dst_stride, so the vectors are spilled to an aligned
     * scratch buffer and copied back one 32-bit word per row. */
    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}

/** \brief Reads 16 rows from src and transposes them, returning in r8..r13
 *  the six 16-byte vectors holding transposed columns 0..5 (p2..q2 at the
 *  call site below); the remaining columns are never fully computed. */
#define read_and_transpose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) \
{                                                                  \
    register vec_u8_t r0, r1, r2, r3, r4, r5, r6, r7, r14, r15;    \
    r0  = vec_vsx_ld(0, src);                                      \
    r1  = vec_vsx_ld(src_stride, src);                             \
    r2  = vec_vsx_ld(2*src_stride, src);                           \
    r3  = vec_vsx_ld(3*src_stride, src);                           \
    r4  = vec_vsx_ld(4*src_stride, src);                           \
    r5  = vec_vsx_ld(5*src_stride, src);                           \
    r6  = vec_vsx_ld(6*src_stride, src);                           \
    r7  = vec_vsx_ld(7*src_stride, src);                           \
    r8  = vec_vsx_ld(8*src_stride, src);                           \
    r9  = vec_vsx_ld(9*src_stride, src);                           \
    r10 = vec_vsx_ld(10*src_stride, src);                          \
    r11 = vec_vsx_ld(11*src_stride, src);                          \
    r12 = vec_vsx_ld(12*src_stride, src);                          \
    r13 = vec_vsx_ld(13*src_stride, src);                          \
    r14 = vec_vsx_ld(14*src_stride, src);                          \
    r15 = vec_vsx_ld(15*src_stride, src);                          \
                                                                   \
    /*Merge first pairs*/                                          \
    r0 = vec_mergeh(r0, r8);  /*0, 8*/                             \
    r1 = vec_mergeh(r1, r9);  /*1, 9*/                             \
    r2 = vec_mergeh(r2, r10); /*2,10*/                             \
    r3 = vec_mergeh(r3, r11); /*3,11*/                             \
    r4 = vec_mergeh(r4, r12); /*4,12*/                             \
    r5 = vec_mergeh(r5, r13); /*5,13*/                             \
    r6 = vec_mergeh(r6, r14); /*6,14*/                             \
    r7 = vec_mergeh(r7, r15); /*7,15*/                             \
                                                                   \
    /*Merge second pairs*/                                         \
    r8  = vec_mergeh(r0, r4); /*0,4, 8,12 set 0*/                  \
    r9  = vec_mergel(r0, r4); /*0,4, 8,12 set 1*/                  \
    r10 = vec_mergeh(r1, r5); /*1,5, 9,13 set 0*/                  \
    r11 = vec_mergel(r1, r5); /*1,5, 9,13 set 1*/                  \
    r12 = vec_mergeh(r2, r6); /*2,6,10,14 set 0*/                  \
    r13 = vec_mergel(r2, r6); /*2,6,10,14 set 1*/                  \
    r14 = vec_mergeh(r3, r7); /*3,7,11,15 set 0*/                  \
    r15 = vec_mergel(r3, r7); /*3,7,11,15 set 1*/                  \
                                                                   \
    /*Third merge*/                                                \
    r0 = vec_mergeh(r8, r12);  /*0,2,4,6,8,10,12,14 set 0*/        \
    r1 = vec_mergel(r8, r12);  /*0,2,4,6,8,10,12,14 set 1*/        \
    r2 = vec_mergeh(r9, r13);  /*0,2,4,6,8,10,12,14 set 2*/        \
    r4 = vec_mergeh(r10, r14); /*1,3,5,7,9,11,13,15 set 0*/        \
    r5 = vec_mergel(r10, r14); /*1,3,5,7,9,11,13,15 set 1*/        \
    r6 = vec_mergeh(r11, r15); /*1,3,5,7,9,11,13,15 set 2*/        \
    /* Don't need to compute 3 and 7*/                             \
                                                                   \
    /*Final merge*/                                                \
    r8  = vec_mergeh(r0, r4); /*all set 0*/                        \
    r9  = vec_mergel(r0, r4); /*all set 1*/                        \
    r10 = vec_mergeh(r1, r5); /*all set 2*/                        \
    r11 = vec_mergel(r1, r5); /*all set 3*/                        \
    r12 = vec_mergeh(r2, r6); /*all set 4*/                        \
    r13 = vec_mergel(r2, r6); /*all set 5*/                        \
    /* Don't need to compute 14 and 15*/                           \
}

// out: o = |x-y| < a
static inline vec_u8_t diff_lt_altivec( register vec_u8_t x, register vec_u8_t y, register vec_u8_t a )
{
    return (vec_u8_t)vec_cmplt(vec_absd(x, y), a);
}

static inline vec_u8_t h264_deblock_mask( register vec_u8_t p0, register vec_u8_t p1, register vec_u8_t q0,
                                          register vec_u8_t q1, register vec_u8_t alpha, register vec_u8_t beta )
{
    register vec_u8_t mask;
    register vec_u8_t tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}
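
/* Scalar equivalent of the mask above for one pixel position (reference
 * sketch only, not part of the build):
 *     filter = |p0 - q0| < alpha  &&  |p1 - p0| < beta  &&  |q1 - q0| < beta
 * The vector version evaluates this for all 16 positions of the edge at
 * once, yielding 0xFF in each lane where the condition holds. */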

// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8_t h264_deblock_q1( register vec_u8_t p0, register vec_u8_t p1, register vec_u8_t p2,
                                        register vec_u8_t q0, register vec_u8_t tc0 )
{
    register vec_u8_t average = vec_avg(p0, q0);
    register vec_u8_t temp;
    register vec_u8_t unclipped;
    register vec_u8_t ones;
    register vec_u8_t max;
    register vec_u8_t min;
    register vec_u8_t newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);      /*avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);          /*(p2^avg(p0, q0)) & 1 */
    unclipped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, unclipped);
    newp1 = vec_min(max, newp1);
    return newp1;
}
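
/* How the rounding works (reference notes, assuming 8-bit unsigned lanes):
 * vec_avg(a, b) computes (a + b + 1) >> 1, i.e. it rounds up.  A truncating
 * average can be recovered from it with the identity
 *     (a + b) >> 1  ==  vec_avg(a, b) - ((a ^ b) & 1)
 * which is what the temp/ones correction above applies to p2 and avg(p0, q0),
 * so that
 *     unclipped = (p2 + ((p0 + q0 + 1) >> 1)) >> 1
 * matches the formula in the comment without ever leaving unsigned bytes. */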

#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked)                                             \
{                                                                                                 \
    const vec_u8_t A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4));                               \
                                                                                                  \
    register vec_u8_t pq0bit = vec_xor(p0,q0);                                                    \
    register vec_u8_t q1minus;                                                                    \
    register vec_u8_t p0minus;                                                                    \
    register vec_u8_t stage1;                                                                     \
    register vec_u8_t stage2;                                                                     \
    register vec_u8_t vec160;                                                                     \
    register vec_u8_t delta;                                                                      \
    register vec_u8_t deltaneg;                                                                   \
                                                                                                  \
    q1minus = vec_nor(q1, q1);                /* 255 - q1 */                                      \
    stage1 = vec_avg(p1, q1minus);            /* (p1 - q1 + 256)>>1 */                            \
    stage2 = vec_sr(stage1, vec_splat_u8(1)); /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */      \
    p0minus = vec_nor(p0, p0);                /* 255 - p0 */                                      \
    stage1 = vec_avg(q0, p0minus);            /* (q0 - p0 + 256)>>1 */                            \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                                                    \
    stage2 = vec_avg(stage2, pq0bit);         /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */  \
    stage2 = vec_adds(stage2, stage1);        /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */   \
    vec160 = vec_ld(0, &A0v);                                                                     \
    deltaneg = vec_subs(vec160, stage2);      /* -d */                                            \
    delta = vec_subs(stage2, vec160);         /* d */                                             \
    deltaneg = vec_min(tc0masked, deltaneg);                                                      \
    delta = vec_min(tc0masked, delta);                                                            \
    p0 = vec_subs(p0, deltaneg);                                                                  \
    q0 = vec_subs(q0, delta);                                                                     \
    p0 = vec_adds(p0, delta);                                                                     \
    q0 = vec_adds(q0, deltaneg);                                                                  \
}
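
/* Scalar form of the p0/q0 update (standard H.264 normal-filter formula,
 * reference sketch only):
 *     delta = clip3( -tc, tc, (((q0 - p0) << 2) + (p1 - q1) + 4) >> 3 );
 *     p0'   = clip_uint8( p0 + delta );
 *     q0'   = clip_uint8( q0 - delta );
 * The macro above keeps everything in unsigned bytes by computing the term
 * inside clip3 with averages around a +160 bias (A0v = 10 << 4), then peels
 * off delta and -delta with saturating subtractions against 160, clamps both
 * to the per-segment tc, and applies them with saturating adds/subs, which
 * also provides the final clipping to [0,255] for free. */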

#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0)          \
{                                                                                        \
    ALIGNED_16(unsigned char temp[16]);                                                  \
    register vec_u8_t alphavec;                                                          \
    register vec_u8_t betavec;                                                           \
    register vec_u8_t mask;                                                              \
    register vec_u8_t p1mask;                                                            \
    register vec_u8_t q1mask;                                                            \
    register vec_s8_t tc0vec;                                                            \
    register vec_u8_t finaltc0;                                                          \
    register vec_u8_t tc0masked;                                                         \
    register vec_u8_t newp1;                                                             \
    register vec_u8_t newq1;                                                             \
                                                                                         \
    temp[0] = alpha;                                                                     \
    temp[1] = beta;                                                                      \
    alphavec = vec_ld(0, temp);                                                          \
    betavec = vec_splat(alphavec, 0x1);                                                  \
    alphavec = vec_splat(alphavec, 0x0);                                                 \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /* filter this edge? */ \
                                                                                         \
    M32( temp ) = M32( tc0 );                                                            \
    tc0vec = vec_ld(0, (signed char*)temp);                                              \
    tc0vec = vec_mergeh(tc0vec, tc0vec); /* replicate each tc0 byte across */            \
    tc0vec = vec_mergeh(tc0vec, tc0vec); /* its 4-pixel segment of the edge */           \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1))); /* if tc0[i] >= 0 */      \
    finaltc0 = vec_and((vec_u8_t)tc0vec, mask);                /* tc = tc0 */            \
                                                                                         \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                           \
    p1mask = vec_and(p1mask, mask);                /* if( |p2 - p0| < beta ) */          \
    tc0masked = vec_and(p1mask, (vec_u8_t)tc0vec);                                       \
    finaltc0 = vec_sub(finaltc0, p1mask);          /* tc++ */                            \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                                  \
    /*end if*/                                                                           \
                                                                                         \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                           \
    q1mask = vec_and(q1mask, mask);                /* if( |q2 - q0| < beta ) */          \
    tc0masked = vec_and(q1mask, (vec_u8_t)tc0vec);                                       \
    finaltc0 = vec_sub(finaltc0, q1mask);          /* tc++ */                            \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                                  \
    /*end if*/                                                                           \
                                                                                         \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                        \
    p1 = newp1;                                                                          \
    q1 = newq1;                                                                          \
}

void x264_deblock_v_luma_altivec( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
{
    /* The sign bit survives the AND only if every tc0 is negative, so the
     * edge is skipped only when all four segments have filtering disabled. */
    if( (tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0 )
    {
        register vec_u8_t p2 = vec_ld(-3*stride, pix);
        register vec_u8_t p1 = vec_ld(-2*stride, pix);
        register vec_u8_t p0 = vec_ld(-1*stride, pix);
        register vec_u8_t q0 = vec_ld(0, pix);
        register vec_u8_t q1 = vec_ld(stride, pix);
        register vec_u8_t q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}

void x264_deblock_h_luma_altivec( uint8_t *pix, intptr_t stride, int alpha, int beta, int8_t *tc0 )
{
    register vec_u8_t line0, line1, line2, line3, line4, line5;
    if( (tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0 )
        return;
    /* Load columns p2..q2, filter, then write back the p1,p0,q0,q1 columns. */
    read_and_transpose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}
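
#if 0
/* Usage sketch (illustrative values, not part of the build): filter across
 * one 16-pixel-wide horizontal luma edge.  `pix` points at the first row
 * below the edge (the q0 row); tc0 holds one threshold per 4-pixel segment,
 * with a negative value disabling filtering for that segment.  The function
 * name and the alpha/beta/tc0 values here are hypothetical examples. */
static void example_filter_horizontal_edge( uint8_t *pix, intptr_t stride )
{
    int8_t tc0[4] = { 1, 1, 2, -1 };                          /* hypothetical per-segment tc0 */
    x264_deblock_v_luma_altivec( pix, stride, 30, 28, tc0 );  /* alpha = 30, beta = 28 */
}
#endif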
#endif // !HIGH_BIT_DEPTH