/*
 *  Based on node-yencode library by Anime Tosho:
 *  https://github.com/animetosho/node-yencode
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

// taken from zlib-ng / Intel's zlib patch, modified to remove zlib dependencies
/*
 * Compute the CRC32 using a parallelized folding approach with the PCLMULQDQ
 * instruction.
 *
 * A white paper describing this algorithm can be found at:
 * http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
 *
 * Copyright (C) 2013 Intel Corporation. All rights reserved.
 * Authors:
 *  Wajdi Feghali   <wajdi.k.feghali@intel.com>
 *  Jim Guilford    <james.guilford@intel.com>
 *  Vinodh Gopal    <vinodh.gopal@intel.com>
 *  Erdinc Ozturk   <erdinc.ozturk@intel.com>
 *  Jim Kukunas     <james.t.kukunas@linux.intel.com>
 *
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#include "nzbget.h"

#include "YEncode.h"

#ifdef __PCLMUL__
#include <immintrin.h>
#endif

namespace YEncode
{
#ifdef __PCLMUL__

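/*
 * Fold the state to make room for 16 new bytes: xmm_crc0 is folded with the
 * fold-by-4 constants into the xmm_crc3 slot while the other lanes rotate
 * down one position; the caller then XORs 16 fresh data bytes into xmm_crc3.
 */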
void fold_1(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
	const __m128i xmm_fold4 = _mm_set_epi32(
			0x00000001, 0x54442bd4,
			0x00000001, 0xc6e41596);

	__m128i x_tmp3;
	__m128 ps_crc0, ps_crc3, ps_res;

	x_tmp3 = *xmm_crc3;

	*xmm_crc3 = *xmm_crc0;
	*xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
	*xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
	ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
	ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
	ps_res = _mm_xor_ps(ps_crc0, ps_crc3);

	*xmm_crc0 = *xmm_crc1;
	*xmm_crc1 = *xmm_crc2;
	*xmm_crc2 = x_tmp3;
	*xmm_crc3 = _mm_castps_si128(ps_res);
}

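/*
 * As fold_1, but makes room for 32 bytes: xmm_crc0 and xmm_crc1 are folded
 * into the xmm_crc2/xmm_crc3 slots while the old xmm_crc2/xmm_crc3 move down
 * to xmm_crc0/xmm_crc1.
 */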
void fold_2(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
	const __m128i xmm_fold4 = _mm_set_epi32(
			0x00000001, 0x54442bd4,
			0x00000001, 0xc6e41596);

	__m128i x_tmp3, x_tmp2;
	__m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res31, ps_res20;

	x_tmp3 = *xmm_crc3;
	x_tmp2 = *xmm_crc2;

	*xmm_crc3 = *xmm_crc1;
	*xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
	*xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
	ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
	ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
	ps_res31 = _mm_xor_ps(ps_crc3, ps_crc1);

	*xmm_crc2 = *xmm_crc0;
	*xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
	*xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
	ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
	ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
	ps_res20 = _mm_xor_ps(ps_crc0, ps_crc2);

	*xmm_crc0 = x_tmp2;
	*xmm_crc1 = x_tmp3;
	*xmm_crc2 = _mm_castps_si128(ps_res20);
	*xmm_crc3 = _mm_castps_si128(ps_res31);
}

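/*
 * As fold_1, but makes room for 48 bytes: xmm_crc0..xmm_crc2 are folded into
 * the xmm_crc1..xmm_crc3 slots while the old xmm_crc3 moves to xmm_crc0.
 */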
void fold_3(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
	const __m128i xmm_fold4 = _mm_set_epi32(
			0x00000001, 0x54442bd4,
			0x00000001, 0xc6e41596);

	__m128i x_tmp3;
	__m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res32, ps_res21, ps_res10;

	x_tmp3 = *xmm_crc3;

	*xmm_crc3 = *xmm_crc2;
	*xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
	*xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
	ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
	ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
	ps_res32 = _mm_xor_ps(ps_crc2, ps_crc3);

	*xmm_crc2 = *xmm_crc1;
	*xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
	*xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
	ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
	ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
	ps_res21 = _mm_xor_ps(ps_crc1, ps_crc2);

	*xmm_crc1 = *xmm_crc0;
	*xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
	*xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x10);
	ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
	ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
	ps_res10 = _mm_xor_ps(ps_crc0, ps_crc1);

	*xmm_crc0 = x_tmp3;
	*xmm_crc1 = _mm_castps_si128(ps_res10);
	*xmm_crc2 = _mm_castps_si128(ps_res21);
	*xmm_crc3 = _mm_castps_si128(ps_res32);
}

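/*
 * Fold all four lanes in place; used by the main loop to advance the state
 * by a full 64-byte block before XORing in the next 64 bytes of input.
 */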
void fold_4(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
	const __m128i xmm_fold4 = _mm_set_epi32(
			0x00000001, 0x54442bd4,
			0x00000001, 0xc6e41596);

	__m128i x_tmp0, x_tmp1, x_tmp2, x_tmp3;
	__m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3;
	__m128 ps_t0, ps_t1, ps_t2, ps_t3;
	__m128 ps_res0, ps_res1, ps_res2, ps_res3;

	x_tmp0 = *xmm_crc0;
	x_tmp1 = *xmm_crc1;
	x_tmp2 = *xmm_crc2;
	x_tmp3 = *xmm_crc3;

	*xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
	x_tmp0 = _mm_clmulepi64_si128(x_tmp0, xmm_fold4, 0x10);
	ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
	ps_t0 = _mm_castsi128_ps(x_tmp0);
	ps_res0 = _mm_xor_ps(ps_crc0, ps_t0);

	*xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
	x_tmp1 = _mm_clmulepi64_si128(x_tmp1, xmm_fold4, 0x10);
	ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
	ps_t1 = _mm_castsi128_ps(x_tmp1);
	ps_res1 = _mm_xor_ps(ps_crc1, ps_t1);

	*xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
	x_tmp2 = _mm_clmulepi64_si128(x_tmp2, xmm_fold4, 0x10);
	ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
	ps_t2 = _mm_castsi128_ps(x_tmp2);
	ps_res2 = _mm_xor_ps(ps_crc2, ps_t2);

	*xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x01);
	x_tmp3 = _mm_clmulepi64_si128(x_tmp3, xmm_fold4, 0x10);
	ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
	ps_t3 = _mm_castsi128_ps(x_tmp3);
	ps_res3 = _mm_xor_ps(ps_crc3, ps_t3);

	*xmm_crc0 = _mm_castps_si128(ps_res0);
	*xmm_crc1 = _mm_castps_si128(ps_res1);
	*xmm_crc2 = _mm_castps_si128(ps_res2);
	*xmm_crc3 = _mm_castps_si128(ps_res3);
}

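/*
 * PSHUFB masks for shifting a 128-bit lane by 1..15 bytes, indexed by
 * (length - 1). Bytes with the high bit set (0x80..) are zeroed by PSHUFB;
 * the complementary shift mask is obtained by XORing with 0x80808080.
 */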
alignas(32) const unsigned pshufb_shf_table[60] = {
	0x84838281, 0x88878685, 0x8c8b8a89, 0x008f8e8d, /* shl 15 (16 - 1)/shr1 */
	0x85848382, 0x89888786, 0x8d8c8b8a, 0x01008f8e, /* shl 14 (16 - 2)/shr2 */
	0x86858483, 0x8a898887, 0x8e8d8c8b, 0x0201008f, /* shl 13 (16 - 3)/shr3 */
	0x87868584, 0x8b8a8988, 0x8f8e8d8c, 0x03020100, /* shl 12 (16 - 4)/shr4 */
	0x88878685, 0x8c8b8a89, 0x008f8e8d, 0x04030201, /* shl 11 (16 - 5)/shr5 */
	0x89888786, 0x8d8c8b8a, 0x01008f8e, 0x05040302, /* shl 10 (16 - 6)/shr6 */
	0x8a898887, 0x8e8d8c8b, 0x0201008f, 0x06050403, /* shl  9 (16 - 7)/shr7 */
	0x8b8a8988, 0x8f8e8d8c, 0x03020100, 0x07060504, /* shl  8 (16 - 8)/shr8 */
	0x8c8b8a89, 0x008f8e8d, 0x04030201, 0x08070605, /* shl  7 (16 - 9)/shr9 */
	0x8d8c8b8a, 0x01008f8e, 0x05040302, 0x09080706, /* shl  6 (16 -10)/shr10*/
	0x8e8d8c8b, 0x0201008f, 0x06050403, 0x0a090807, /* shl  5 (16 -11)/shr11*/
	0x8f8e8d8c, 0x03020100, 0x07060504, 0x0b0a0908, /* shl  4 (16 -12)/shr12*/
	0x008f8e8d, 0x04030201, 0x08070605, 0x0c0b0a09, /* shl  3 (16 -13)/shr13*/
	0x01008f8e, 0x05040302, 0x09080706, 0x0d0c0b0a, /* shl  2 (16 -14)/shr14*/
	0x0201008f, 0x06050403, 0x0a090807, 0x0e0d0c0b  /* shl  1 (16 -15)/shr15*/
};

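/*
 * Fold in a partial block of 'len' (1..15) bytes: shift the 512-bit state by
 * 'len' bytes across lane boundaries, merge the new bytes (in xmm_crc_part)
 * into xmm_crc3, and fold the bytes displaced from xmm_crc0 back into
 * xmm_crc3.
 */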
void partial_fold(const size_t len, __m128i *xmm_crc0, __m128i *xmm_crc1,
		__m128i *xmm_crc2, __m128i *xmm_crc3, __m128i *xmm_crc_part) {

	const __m128i xmm_fold4 = _mm_set_epi32(
			0x00000001, 0x54442bd4,
			0x00000001, 0xc6e41596);
	const __m128i xmm_mask3 = _mm_set1_epi32(0x80808080);

	__m128i xmm_shl, xmm_shr, xmm_tmp1, xmm_tmp2, xmm_tmp3;
	__m128i xmm_a0_0, xmm_a0_1;
	__m128 ps_crc3, psa0_0, psa0_1, ps_res;

	xmm_shl = _mm_load_si128((__m128i *)pshufb_shf_table + (len - 1));
	xmm_shr = xmm_shl;
	xmm_shr = _mm_xor_si128(xmm_shr, xmm_mask3);

	xmm_a0_0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shl);

	*xmm_crc0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shr);
	xmm_tmp1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shl);
	*xmm_crc0 = _mm_or_si128(*xmm_crc0, xmm_tmp1);

	*xmm_crc1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shr);
	xmm_tmp2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shl);
	*xmm_crc1 = _mm_or_si128(*xmm_crc1, xmm_tmp2);

	*xmm_crc2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shr);
	xmm_tmp3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shl);
	*xmm_crc2 = _mm_or_si128(*xmm_crc2, xmm_tmp3);

	*xmm_crc3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shr);
	*xmm_crc_part = _mm_shuffle_epi8(*xmm_crc_part, xmm_shl);
	*xmm_crc3 = _mm_or_si128(*xmm_crc3, *xmm_crc_part);

	xmm_a0_1 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x10);
	xmm_a0_0 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x01);

	ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
	psa0_0 = _mm_castsi128_ps(xmm_a0_0);
	psa0_1 = _mm_castsi128_ps(xmm_a0_1);

	ps_res = _mm_xor_ps(ps_crc3, psa0_0);
	ps_res = _mm_xor_ps(ps_res, psa0_1);

	*xmm_crc3 = _mm_castps_si128(ps_res);
}

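/*
 * Reduction constants (rk1/rk2 and rk5..rk8 in the Intel code): rk1/rk2 fold
 * the four lanes together, rk5..rk8 drive the final reduction to 32 bits in
 * crc_fold_512to32.
 */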
alignas(16) const unsigned crc_k[] = {
	0xccaa009e, 0x00000000, /* rk1 */
	0x751997d0, 0x00000001, /* rk2 */
	0xccaa009e, 0x00000000, /* rk5 */
	0x63cd6124, 0x00000001, /* rk6 */
	0xf7011640, 0x00000001, /* rk7 */
	0xdb710640, 0x00000001  /* rk8 */
};

alignas(16) const unsigned crc_mask[4] = {
	0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000
};

alignas(16) const unsigned crc_mask2[4] = {
	0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF
};

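/*
 * CRC_LOAD/CRC_SAVE move the five 128-bit lanes of the folding state between
 * the crc_state buffer and local __m128i variables. CRC_LOAD opens a
 * do { ... } block that the matching CRC_SAVE closes, so they must always be
 * used as a pair within a function.
 */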
#define CRC_LOAD(s) \
    do { \
        __m128i xmm_crc0 = _mm_loadu_si128((__m128i *)s->crc0 + 0);\
        __m128i xmm_crc1 = _mm_loadu_si128((__m128i *)s->crc0 + 1);\
        __m128i xmm_crc2 = _mm_loadu_si128((__m128i *)s->crc0 + 2);\
        __m128i xmm_crc3 = _mm_loadu_si128((__m128i *)s->crc0 + 3);\
        __m128i xmm_crc_part = _mm_loadu_si128((__m128i *)s->crc0 + 4);

#define CRC_SAVE(s) \
        _mm_storeu_si128((__m128i *)s->crc0 + 0, xmm_crc0);\
        _mm_storeu_si128((__m128i *)s->crc0 + 1, xmm_crc1);\
        _mm_storeu_si128((__m128i *)s->crc0 + 2, xmm_crc2);\
        _mm_storeu_si128((__m128i *)s->crc0 + 3, xmm_crc3);\
        _mm_storeu_si128((__m128i *)s->crc0 + 4, xmm_crc_part);\
    } while (0);

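/*
 * Reset the folding state: lane 0 is seeded with the constant 0x9db42487,
 * which stands in for the standard CRC-32 initial value (0xffffffff) in this
 * folded representation; the remaining lanes are cleared.
 */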
void crc_fold_init(crc_state *const s) {
    CRC_LOAD(s)

    xmm_crc0 = _mm_cvtsi32_si128(0x9db42487);
    xmm_crc1 = _mm_setzero_si128();
    xmm_crc2 = _mm_setzero_si128();
    xmm_crc3 = _mm_setzero_si128();

    CRC_SAVE(s)
}

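/*
 * Fold 'len' bytes from 'src' into the running state: inputs shorter than
 * 16 bytes are loaded as one full 16-byte vector and treated as a partial
 * block; otherwise 'src' is aligned to 16 bytes, 64 bytes are consumed per
 * fold_4 iteration, and the remaining 48/32/16 bytes plus any final partial
 * block are folded at the end.
 */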
void crc_fold(crc_state *const s, const unsigned char *src, long len) {
    unsigned long algn_diff;
    __m128i xmm_t0, xmm_t1, xmm_t2, xmm_t3;

    CRC_LOAD(s)

    if (len < 16) {
        if (len == 0)
            return;
        xmm_crc_part = _mm_loadu_si128((__m128i *)src);
        goto partial;
    }

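    /* Advance src to a 16-byte boundary; the skipped bytes are folded in as
     * a partial block so the main loop can use aligned loads. */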
    algn_diff = (0 - (uintptr_t)src) & 0xF;
    if (algn_diff) {
        xmm_crc_part = _mm_loadu_si128((__m128i *)src);
        src += algn_diff;
        len -= algn_diff;

        partial_fold(algn_diff, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3, &xmm_crc_part);
    }

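    /* Main loop: fold the state forward and absorb 64 aligned bytes per
     * iteration. */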
    while ((len -= 64) >= 0) {
        xmm_t0 = _mm_load_si128((__m128i *)src);
        xmm_t1 = _mm_load_si128((__m128i *)src + 1);
        xmm_t2 = _mm_load_si128((__m128i *)src + 2);
        xmm_t3 = _mm_load_si128((__m128i *)src + 3);

        fold_4(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        xmm_crc0 = _mm_xor_si128(xmm_crc0, xmm_t0);
        xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t1);
        xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t2);
        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t3);

        src += 64;
    }

    /*
     * Here len == (bytes remaining) - 64, i.e. it lies in [-64, -1]:
     * fold the remaining full 16-byte blocks, then any final partial block.
     */
    if (len + 16 >= 0) {
        len += 16;

        xmm_t0 = _mm_load_si128((__m128i *)src);
        xmm_t1 = _mm_load_si128((__m128i *)src + 1);
        xmm_t2 = _mm_load_si128((__m128i *)src + 2);

        fold_3(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t0);
        xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t1);
        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t2);

        if (len == 0)
            goto done;

        xmm_crc_part = _mm_load_si128((__m128i *)src + 3);
    } else if (len + 32 >= 0) {
        len += 32;

        xmm_t0 = _mm_load_si128((__m128i *)src);
        xmm_t1 = _mm_load_si128((__m128i *)src + 1);

        fold_2(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t0);
        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t1);

        if (len == 0)
            goto done;

        xmm_crc_part = _mm_load_si128((__m128i *)src + 2);
    } else if (len + 48 >= 0) {
        len += 48;

        xmm_t0 = _mm_load_si128((__m128i *)src);

        fold_1(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t0);

        if (len == 0)
            goto done;

        xmm_crc_part = _mm_load_si128((__m128i *)src + 1);
    } else {
        len += 64;
        if (len == 0)
            goto done;
        xmm_crc_part = _mm_load_si128((__m128i *)src);
    }

partial:
    partial_fold(len, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3, &xmm_crc_part);
done:
    CRC_SAVE(s)
}

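/*
 * Final reduction: fold the four 128-bit lanes into one with rk1/rk2, then
 * reduce 128 -> 64 -> 32 bits with rk5..rk8 and return the bit-inverted CRC.
 */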
uint32_t crc_fold_512to32(crc_state *const s) {
    const __m128i xmm_mask  = _mm_load_si128((__m128i *)crc_mask);
    const __m128i xmm_mask2 = _mm_load_si128((__m128i *)crc_mask2);

    uint32_t crc;
    __m128i x_tmp0, x_tmp1, x_tmp2, crc_fold;

    CRC_LOAD(s)

    /*
     * k1
     */
    crc_fold = _mm_load_si128((__m128i *)crc_k);

    x_tmp0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x10);
    xmm_crc0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x01);
    xmm_crc1 = _mm_xor_si128(xmm_crc1, x_tmp0);
    xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_crc0);

    x_tmp1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x10);
    xmm_crc1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x01);
    xmm_crc2 = _mm_xor_si128(xmm_crc2, x_tmp1);
    xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_crc1);

    x_tmp2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x10);
    xmm_crc2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x01);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, x_tmp2);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);

    /*
     * k5
     */
    crc_fold = _mm_load_si128((__m128i *)crc_k + 1);

    xmm_crc0 = xmm_crc3;
    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
    xmm_crc0 = _mm_srli_si128(xmm_crc0, 8);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);

    xmm_crc0 = xmm_crc3;
    xmm_crc3 = _mm_slli_si128(xmm_crc3, 4);
    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);
    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask2);

    /*
     * k7
     */
    xmm_crc1 = xmm_crc3;
    xmm_crc2 = xmm_crc3;
    crc_fold = _mm_load_si128((__m128i *)crc_k + 2);

    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask);

    xmm_crc2 = xmm_crc3;
    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc1);

    crc = _mm_extract_epi32(xmm_crc3, 2);
    return ~crc;
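    /* Unreachable after the return above, but CRC_SAVE also provides the
     * closing "} while (0);" for the block opened by CRC_LOAD. */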
    CRC_SAVE(s)
}
#endif

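/*
 * Install the PCLMUL-based implementation into the YEncode CRC hooks when
 * built with PCLMUL support; otherwise the hooks are left untouched.
 *
 * Minimal usage sketch (hypothetical caller; assumes the crc_state type and
 * the crc_init/crc_incr/crc_finish pointers declared in YEncode.h have been
 * set up, e.g. by this function):
 *
 *   YEncode::crc_state state;
 *   YEncode::crc_init(&state);
 *   YEncode::crc_incr(&state, dataPtr, dataLen); // may be called repeatedly
 *   uint32_t crc = YEncode::crc_finish(&state);
 */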
void init_crc_pclmul()
{
#ifdef __PCLMUL__
	crc_init = &crc_fold_init;
	crc_incr = &crc_fold;
	crc_finish = &crc_fold_512to32;
	crc_simd = true;
#endif
}

}