// speck128_simd.cpp - written and placed in the public domain by Jeffrey Walton
//
// This source file uses intrinsics and built-ins to gain access to
// SSSE3, ARM NEON and ARMv8a, and Altivec instructions. A separate
// source file is needed because additional CXXFLAGS are required to enable
// the appropriate instruction sets in some build configurations.

#include "pch.h"
#include "config.h"

#include "speck.h"
#include "misc.h"

// Uncomment for benchmarking C++ against SSE or NEON.
// Do so in both speck.cpp and speck128_simd.cpp.
// #undef CRYPTOPP_SSSE3_AVAILABLE
// #undef CRYPTOPP_ARM_NEON_AVAILABLE

#if (CRYPTOPP_SSSE3_AVAILABLE)
# include "adv_simd.h"
# include <pmmintrin.h>
# include <tmmintrin.h>
#endif

#if defined(__XOP__)
# include <ammintrin.h>
# if defined(__GNUC__)
#  include <x86intrin.h>
# endif
#endif

#if (CRYPTOPP_ARM_NEON_HEADER)
# include "adv_simd.h"
# include <arm_neon.h>
#endif

#if (CRYPTOPP_ARM_ACLE_HEADER)
# include <stdint.h>
# include <arm_acle.h>
#endif

#if defined(_M_ARM64)
# include "adv_simd.h"
#endif

#if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
# include "adv_simd.h"
# include "ppc_simd.h"
#endif

// Squash MS LNK4221 and libtool warnings
extern const char SPECK128_SIMD_FNAME[] = __FILE__;

ANONYMOUS_NAMESPACE_BEGIN

using CryptoPP::byte;
using CryptoPP::word32;
using CryptoPP::word64;

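// For reference, a scalar sketch of a single Speck128 round. This is an
// illustrative helper of our own (not called by the SIMD paths below):
// each vector routine applies exactly this dataflow, two or more 64-bit
// lanes at a time. Decryption runs the inverse steps in reverse order.
inline void SPECK128_RoundRef(word64& x, word64& y, word64 k)
{
    x = (x >> 8) | (x << 56);   // RotateRight64<8>
    x += y;                     // modular addition
    x ^= k;                     // mix in the round key
    y = (y << 3) | (y >> 61);   // RotateLeft64<3>
    y ^= x;                     // feed the new x forward
}
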
// *************************** ARM NEON ************************** //

#if (CRYPTOPP_ARM_NEON_AVAILABLE)

// Missing from Microsoft's ARM A-32 implementation
#if defined(_MSC_VER) && !defined(_M_ARM64)
inline uint64x2_t vld1q_dup_u64(const uint64_t* ptr)
{
    return vmovq_n_u64(*ptr);
}
#endif

template <class T>
inline T UnpackHigh64(const T& a, const T& b)
{
    const uint64x1_t x(vget_high_u64((uint64x2_t)a));
    const uint64x1_t y(vget_high_u64((uint64x2_t)b));
    return (T)vcombine_u64(x, y);
}

template <class T>
inline T UnpackLow64(const T& a, const T& b)
{
    const uint64x1_t x(vget_low_u64((uint64x2_t)a));
    const uint64x1_t y(vget_low_u64((uint64x2_t)b));
    return (T)vcombine_u64(x, y);
}

template <unsigned int R>
inline uint64x2_t RotateLeft64(const uint64x2_t& val)
{
    const uint64x2_t a(vshlq_n_u64(val, R));
    const uint64x2_t b(vshrq_n_u64(val, 64 - R));
    return vorrq_u64(a, b);
}

template <unsigned int R>
inline uint64x2_t RotateRight64(const uint64x2_t& val)
{
    const uint64x2_t a(vshlq_n_u64(val, 64 - R));
    const uint64x2_t b(vshrq_n_u64(val, R));
    return vorrq_u64(a, b);
}

#if defined(__aarch32__) || defined(__aarch64__)
// Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
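// A byte shuffle implements the rotate: rotating each 64-bit lane left by
// 8 bits moves every byte up one position, so result byte i is source byte
// (i+7) mod 8 within the lane. The mask below encodes that permutation for
// both lanes and vqtbl1q_u8 applies it in one instruction. RotateRight64<8>
// uses the complementary mask, where result byte i is source byte (i+1) mod 8.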
template <>
inline uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
{
    const uint8_t maskb[16] = { 7,0,1,2, 3,4,5,6, 15,8,9,10, 11,12,13,14 };
    const uint8x16_t mask = vld1q_u8(maskb);

    return vreinterpretq_u64_u8(
        vqtbl1q_u8(vreinterpretq_u8_u64(val), mask));
}

// Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
template <>
inline uint64x2_t RotateRight64<8>(const uint64x2_t& val)
{
    const uint8_t maskb[16] = { 1,2,3,4, 5,6,7,0, 9,10,11,12, 13,14,15,8 };
    const uint8x16_t mask = vld1q_u8(maskb);

    return vreinterpretq_u64_u8(
        vqtbl1q_u8(vreinterpretq_u8_u64(val), mask));
}
#endif

inline void SPECK128_Enc_Block(uint64x2_t &block0, uint64x2_t &block1,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);

    for (size_t i=0; i < static_cast<size_t>(rounds); ++i)
    {
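        // The NEON key schedule stores each 64-bit subkey once;
        // vld1q_dup_u64 broadcasts it into both lanes of the vector.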
        const uint64x2_t rk = vld1q_dup_u64(subkeys+i);

        x1 = RotateRight64<8>(x1);
        x1 = vaddq_u64(x1, y1);
        x1 = veorq_u64(x1, rk);
        y1 = RotateLeft64<3>(y1);
        y1 = veorq_u64(y1, x1);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
}

inline void SPECK128_Enc_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
    uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4, uint64x2_t &block5,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);
    uint64x2_t x2 = UnpackHigh64(block2, block3);
    uint64x2_t y2 = UnpackLow64(block2, block3);
    uint64x2_t x3 = UnpackHigh64(block4, block5);
    uint64x2_t y3 = UnpackLow64(block4, block5);

    for (size_t i=0; i < static_cast<size_t>(rounds); ++i)
    {
        const uint64x2_t rk = vld1q_dup_u64(subkeys+i);

        x1 = RotateRight64<8>(x1);
        x2 = RotateRight64<8>(x2);
        x3 = RotateRight64<8>(x3);
        x1 = vaddq_u64(x1, y1);
        x2 = vaddq_u64(x2, y2);
        x3 = vaddq_u64(x3, y3);
        x1 = veorq_u64(x1, rk);
        x2 = veorq_u64(x2, rk);
        x3 = veorq_u64(x3, rk);
        y1 = RotateLeft64<3>(y1);
        y2 = RotateLeft64<3>(y2);
        y3 = RotateLeft64<3>(y3);
        y1 = veorq_u64(y1, x1);
        y2 = veorq_u64(y2, x2);
        y3 = veorq_u64(y3, x3);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
    block2 = UnpackLow64(y2, x2);
    block3 = UnpackHigh64(y2, x2);
    block4 = UnpackLow64(y3, x3);
    block5 = UnpackHigh64(y3, x3);
}

inline void SPECK128_Dec_Block(uint64x2_t &block0, uint64x2_t &block1,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);

    for (int i = static_cast<int>(rounds-1); i >= 0; --i)
    {
        const uint64x2_t rk = vld1q_dup_u64(subkeys+i);

        y1 = veorq_u64(y1, x1);
        y1 = RotateRight64<3>(y1);
        x1 = veorq_u64(x1, rk);
        x1 = vsubq_u64(x1, y1);
        x1 = RotateLeft64<8>(x1);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
}

inline void SPECK128_Dec_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
    uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4, uint64x2_t &block5,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);
    uint64x2_t x2 = UnpackHigh64(block2, block3);
    uint64x2_t y2 = UnpackLow64(block2, block3);
    uint64x2_t x3 = UnpackHigh64(block4, block5);
    uint64x2_t y3 = UnpackLow64(block4, block5);

    for (int i = static_cast<int>(rounds-1); i >= 0; --i)
    {
        const uint64x2_t rk = vld1q_dup_u64(subkeys+i);

        y1 = veorq_u64(y1, x1);
        y2 = veorq_u64(y2, x2);
        y3 = veorq_u64(y3, x3);
        y1 = RotateRight64<3>(y1);
        y2 = RotateRight64<3>(y2);
        y3 = RotateRight64<3>(y3);
        x1 = veorq_u64(x1, rk);
        x2 = veorq_u64(x2, rk);
        x3 = veorq_u64(x3, rk);
        x1 = vsubq_u64(x1, y1);
        x2 = vsubq_u64(x2, y2);
        x3 = vsubq_u64(x3, y3);
        x1 = RotateLeft64<8>(x1);
        x2 = RotateLeft64<8>(x2);
        x3 = RotateLeft64<8>(x3);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
    block2 = UnpackLow64(y2, x2);
    block3 = UnpackHigh64(y2, x2);
    block4 = UnpackLow64(y3, x3);
    block5 = UnpackHigh64(y3, x3);
}

#endif // CRYPTOPP_ARM_NEON_AVAILABLE

// ***************************** IA-32 ***************************** //

#if defined(CRYPTOPP_SSSE3_AVAILABLE)

// Clang intrinsic casts, http://bugs.llvm.org/show_bug.cgi?id=20670
#ifndef M128_CAST
# define M128_CAST(x) ((__m128i *)(void *)(x))
#endif
#ifndef CONST_M128_CAST
# define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))
#endif

// GCC double casts, https://www.spinics.net/lists/gcchelp/msg47735.html
#ifndef DOUBLE_CAST
# define DOUBLE_CAST(x) ((double *)(void *)(x))
#endif
#ifndef CONST_DOUBLE_CAST
# define CONST_DOUBLE_CAST(x) ((const double *)(const void *)(x))
#endif

template <unsigned int R>
inline __m128i RotateLeft64(const __m128i& val)
{
#if defined(__XOP__)
    return _mm_roti_epi64(val, R);
#else
    return _mm_or_si128(
        _mm_slli_epi64(val, R), _mm_srli_epi64(val, 64-R));
#endif
}

template <unsigned int R>
inline __m128i RotateRight64(const __m128i& val)
{
#if defined(__XOP__)
    return _mm_roti_epi64(val, 64-R);
#else
    return _mm_or_si128(
        _mm_slli_epi64(val, 64-R), _mm_srli_epi64(val, R));
#endif
}

// Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
template <>
__m128i RotateLeft64<8>(const __m128i& val)
{
#if defined(__XOP__)
    return _mm_roti_epi64(val, 8);
#else
    const __m128i mask = _mm_set_epi8(14,13,12,11, 10,9,8,15, 6,5,4,3, 2,1,0,7);
    return _mm_shuffle_epi8(val, mask);
#endif
}

// Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
template <>
__m128i RotateRight64<8>(const __m128i& val)
{
#if defined(__XOP__)
    return _mm_roti_epi64(val, 64-8);
#else
    const __m128i mask = _mm_set_epi8(8,15,14,13, 12,11,10,9, 0,7,6,5, 4,3,2,1);
    return _mm_shuffle_epi8(val, mask);
#endif
}

inline void SPECK128_Enc_Block(__m128i &block0, __m128i &block1,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);

    for (size_t i=0; i < static_cast<size_t>(rounds); ++i)
    {
        // Round keys are pre-splatted in the forward direction
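        // Each 64-bit encryption subkey is stored twice, back to back, so a
        // single aligned 128-bit load broadcasts it into both lanes. That is
        // why the table is indexed with a stride of two (subkeys+i*2).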
        const __m128i rk = _mm_load_si128(CONST_M128_CAST(subkeys+i*2));

        x1 = RotateRight64<8>(x1);
        x1 = _mm_add_epi64(x1, y1);
        x1 = _mm_xor_si128(x1, rk);
        y1 = RotateLeft64<3>(y1);
        y1 = _mm_xor_si128(y1, x1);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
}

inline void SPECK128_Enc_6_Blocks(__m128i &block0, __m128i &block1,
    __m128i &block2, __m128i &block3, __m128i &block4, __m128i &block5,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);
    __m128i x2 = _mm_unpackhi_epi64(block2, block3);
    __m128i y2 = _mm_unpacklo_epi64(block2, block3);
    __m128i x3 = _mm_unpackhi_epi64(block4, block5);
    __m128i y3 = _mm_unpacklo_epi64(block4, block5);

    for (size_t i=0; i < static_cast<size_t>(rounds); ++i)
    {
        // Round keys are pre-splatted in the forward direction
        const __m128i rk = _mm_load_si128(CONST_M128_CAST(subkeys+i*2));

        x1 = RotateRight64<8>(x1);
        x2 = RotateRight64<8>(x2);
        x3 = RotateRight64<8>(x3);
        x1 = _mm_add_epi64(x1, y1);
        x2 = _mm_add_epi64(x2, y2);
        x3 = _mm_add_epi64(x3, y3);
        x1 = _mm_xor_si128(x1, rk);
        x2 = _mm_xor_si128(x2, rk);
        x3 = _mm_xor_si128(x3, rk);
        y1 = RotateLeft64<3>(y1);
        y2 = RotateLeft64<3>(y2);
        y3 = RotateLeft64<3>(y3);
        y1 = _mm_xor_si128(y1, x1);
        y2 = _mm_xor_si128(y2, x2);
        y3 = _mm_xor_si128(y3, x3);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
    block2 = _mm_unpacklo_epi64(y2, x2);
    block3 = _mm_unpackhi_epi64(y2, x2);
    block4 = _mm_unpacklo_epi64(y3, x3);
    block5 = _mm_unpackhi_epi64(y3, x3);
}

inline void SPECK128_Dec_Block(__m128i &block0, __m128i &block1,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);

    for (int i = static_cast<int>(rounds-1); i >= 0; --i)
    {
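        // Decryption subkeys are not pre-splatted, so each 64-bit key is
        // broadcast at load time; _mm_loaddup_pd duplicates it into both
        // lanes, and _mm_castpd_si128 reinterprets it as an integer vector.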
        const __m128i rk = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i)));

        y1 = _mm_xor_si128(y1, x1);
        y1 = RotateRight64<3>(y1);
        x1 = _mm_xor_si128(x1, rk);
        x1 = _mm_sub_epi64(x1, y1);
        x1 = RotateLeft64<8>(x1);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
}

inline void SPECK128_Dec_6_Blocks(__m128i &block0, __m128i &block1,
    __m128i &block2, __m128i &block3, __m128i &block4, __m128i &block5,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);
    __m128i x2 = _mm_unpackhi_epi64(block2, block3);
    __m128i y2 = _mm_unpacklo_epi64(block2, block3);
    __m128i x3 = _mm_unpackhi_epi64(block4, block5);
    __m128i y3 = _mm_unpacklo_epi64(block4, block5);

    for (int i = static_cast<int>(rounds-1); i >= 0; --i)
    {
        const __m128i rk = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i)));

        y1 = _mm_xor_si128(y1, x1);
        y2 = _mm_xor_si128(y2, x2);
        y3 = _mm_xor_si128(y3, x3);
        y1 = RotateRight64<3>(y1);
        y2 = RotateRight64<3>(y2);
        y3 = RotateRight64<3>(y3);
        x1 = _mm_xor_si128(x1, rk);
        x2 = _mm_xor_si128(x2, rk);
        x3 = _mm_xor_si128(x3, rk);
        x1 = _mm_sub_epi64(x1, y1);
        x2 = _mm_sub_epi64(x2, y2);
        x3 = _mm_sub_epi64(x3, y3);
        x1 = RotateLeft64<8>(x1);
        x2 = RotateLeft64<8>(x2);
        x3 = RotateLeft64<8>(x3);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
    block2 = _mm_unpacklo_epi64(y2, x2);
    block3 = _mm_unpackhi_epi64(y2, x2);
    block4 = _mm_unpacklo_epi64(y3, x3);
    block5 = _mm_unpackhi_epi64(y3, x3);
}

#endif // CRYPTOPP_SSSE3_AVAILABLE

// ***************************** Altivec ***************************** //

#if defined(CRYPTOPP_ALTIVEC_AVAILABLE)

// Altivec uses native 64-bit types in 64-bit environments, or 32-bit types
// in 32-bit environments. Speck128 will use the appropriate type for the
// environment. Functions like VecAdd64 have two overloads, one for each
// environment. The 32-bit overload treats uint32x4_p like a 64-bit type,
// and does things like perform an add with carry or a subtract with borrow.

// Speck128 on Power8 performed as expected because of the 64-bit environment.
// Performance sucked on old PowerPC machines because of the 32-bit environment.
// At Crypto++ 8.3 we added an implementation that operated on 32-bit words.
// Native 64-bit Speck128 performance dropped from about 4.1 to 6.3 cpb, but
// 32-bit Speck128 improved from 66.5 cpb to 10.4 cpb. Overall it was a
// good win even though we lost some performance in 64-bit environments.

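// The carry handling is easiest to see on scalars. Below is a minimal
// sketch of the idea behind the 32-bit VecAdd64 overload, written as a
// hypothetical scalar helper rather than the library's implementation:
// add the low 32-bit halves, detect the unsigned wrap-around, and
// propagate the carry into the high halves. The vector overload performs
// the same steps lane-wise.
inline void Add64Via32Ref(word32& hi, word32& lo, word32 bhi, word32 blo)
{
    lo += blo;               // low-half add; wraps on overflow
    hi += bhi + (lo < blo);  // wrap-around implies a carry out of the low half
}
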
using CryptoPP::uint8x16_p;
using CryptoPP::uint32x4_p;
#if defined(_ARCH_PWR8)
using CryptoPP::uint64x2_p;
#endif

using CryptoPP::VecAdd64;
using CryptoPP::VecSub64;
using CryptoPP::VecAnd64;
using CryptoPP::VecOr64;
using CryptoPP::VecXor64;
using CryptoPP::VecSplatWord64;
using CryptoPP::VecRotateLeft64;
using CryptoPP::VecRotateRight64;
using CryptoPP::VecLoad;
using CryptoPP::VecLoadAligned;
using CryptoPP::VecPermute;

#if defined(_ARCH_PWR8)
#define speck128_t uint64x2_p
#else
#define speck128_t uint32x4_p
#endif

void SPECK128_Enc_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    speck128_t x1 = (speck128_t)VecPermute(block, block, m1);
    speck128_t y1 = (speck128_t)VecPermute(block, block, m2);

    for (size_t i=0; i < static_cast<size_t>(rounds); ++i)
    {
        // Round keys are pre-splatted in the forward direction
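        // VecLoadAligned is given a word32 pointer here, so the 16-byte
        // subkey pair is loaded as a uint32x4_p and cast to the working type.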
        const word32* ptr = reinterpret_cast<const word32*>(subkeys+i*2);
        const speck128_t rk = (speck128_t)VecLoadAligned(ptr);

        x1 = (speck128_t)VecRotateRight64<8>(x1);
        x1 = (speck128_t)VecAdd64(x1, y1);
        x1 = (speck128_t)VecXor64(x1, rk);

        y1 = (speck128_t)VecRotateLeft64<3>(y1);
        y1 = (speck128_t)VecXor64(y1, x1);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    //const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    //const uint8x16_p m4 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block = (uint32x4_p)VecPermute(x1, y1, m3);
}

void SPECK128_Dec_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    speck128_t x1 = (speck128_t)VecPermute(block, block, m1);
    speck128_t y1 = (speck128_t)VecPermute(block, block, m2);

    for (int i = static_cast<int>(rounds-1); i >= 0; --i)
    {
        const speck128_t rk = (speck128_t)VecSplatWord64(subkeys[i]);

        y1 = (speck128_t)VecXor64(y1, x1);
        y1 = (speck128_t)VecRotateRight64<3>(y1);
        x1 = (speck128_t)VecXor64(x1, rk);
        x1 = (speck128_t)VecSub64(x1, y1);
        x1 = (speck128_t)VecRotateLeft64<8>(x1);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    //const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    //const uint8x16_p m4 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block = (uint32x4_p)VecPermute(x1, y1, m3);
}

void SPECK128_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
    uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
    uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    speck128_t x1 = (speck128_t)VecPermute(block0, block1, m1);
    speck128_t y1 = (speck128_t)VecPermute(block0, block1, m2);
    speck128_t x2 = (speck128_t)VecPermute(block2, block3, m1);
    speck128_t y2 = (speck128_t)VecPermute(block2, block3, m2);
    speck128_t x3 = (speck128_t)VecPermute(block4, block5, m1);
    speck128_t y3 = (speck128_t)VecPermute(block4, block5, m2);

    for (size_t i=0; i < static_cast<size_t>(rounds); ++i)
    {
        // Round keys are pre-splatted in the forward direction
        const word32* ptr = reinterpret_cast<const word32*>(subkeys+i*2);
        const speck128_t rk = (speck128_t)VecLoadAligned(ptr);

        x1 = (speck128_t)VecRotateRight64<8>(x1);
        x2 = (speck128_t)VecRotateRight64<8>(x2);
        x3 = (speck128_t)VecRotateRight64<8>(x3);
        x1 = (speck128_t)VecAdd64(x1, y1);
        x2 = (speck128_t)VecAdd64(x2, y2);
        x3 = (speck128_t)VecAdd64(x3, y3);
        x1 = (speck128_t)VecXor64(x1, rk);
        x2 = (speck128_t)VecXor64(x2, rk);
        x3 = (speck128_t)VecXor64(x3, rk);

        y1 = (speck128_t)VecRotateLeft64<3>(y1);
        y2 = (speck128_t)VecRotateLeft64<3>(y2);
        y3 = (speck128_t)VecRotateLeft64<3>(y3);
        y1 = (speck128_t)VecXor64(y1, x1);
        y2 = (speck128_t)VecXor64(y2, x2);
        y3 = (speck128_t)VecXor64(y3, x3);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m4 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = (uint32x4_p)VecPermute(x1, y1, m3);
    block1 = (uint32x4_p)VecPermute(x1, y1, m4);
    block2 = (uint32x4_p)VecPermute(x2, y2, m3);
    block3 = (uint32x4_p)VecPermute(x2, y2, m4);
    block4 = (uint32x4_p)VecPermute(x3, y3, m3);
    block5 = (uint32x4_p)VecPermute(x3, y3, m4);
}

void SPECK128_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
    uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
    uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    speck128_t x1 = (speck128_t)VecPermute(block0, block1, m1);
    speck128_t y1 = (speck128_t)VecPermute(block0, block1, m2);
    speck128_t x2 = (speck128_t)VecPermute(block2, block3, m1);
    speck128_t y2 = (speck128_t)VecPermute(block2, block3, m2);
    speck128_t x3 = (speck128_t)VecPermute(block4, block5, m1);
    speck128_t y3 = (speck128_t)VecPermute(block4, block5, m2);

    for (int i = static_cast<int>(rounds-1); i >= 0; --i)
    {
        const speck128_t rk = (speck128_t)VecSplatWord64(subkeys[i]);

        y1 = (speck128_t)VecXor64(y1, x1);
        y2 = (speck128_t)VecXor64(y2, x2);
        y3 = (speck128_t)VecXor64(y3, x3);
        y1 = (speck128_t)VecRotateRight64<3>(y1);
        y2 = (speck128_t)VecRotateRight64<3>(y2);
        y3 = (speck128_t)VecRotateRight64<3>(y3);

        x1 = (speck128_t)VecXor64(x1, rk);
        x2 = (speck128_t)VecXor64(x2, rk);
        x3 = (speck128_t)VecXor64(x3, rk);
        x1 = (speck128_t)VecSub64(x1, y1);
        x2 = (speck128_t)VecSub64(x2, y2);
        x3 = (speck128_t)VecSub64(x3, y3);
        x1 = (speck128_t)VecRotateLeft64<8>(x1);
        x2 = (speck128_t)VecRotateLeft64<8>(x2);
        x3 = (speck128_t)VecRotateLeft64<8>(x3);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m4 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = (uint32x4_p)VecPermute(x1, y1, m3);
    block1 = (uint32x4_p)VecPermute(x1, y1, m4);
    block2 = (uint32x4_p)VecPermute(x2, y2, m3);
    block3 = (uint32x4_p)VecPermute(x2, y2, m4);
    block4 = (uint32x4_p)VecPermute(x3, y3, m3);
    block5 = (uint32x4_p)VecPermute(x3, y3, m4);
}

#endif // CRYPTOPP_ALTIVEC_AVAILABLE

ANONYMOUS_NAMESPACE_END

///////////////////////////////////////////////////////////////////////

NAMESPACE_BEGIN(CryptoPP)

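// The thunks below are the entry points used by speck.cpp: when runtime CPU
// feature detection selects a SIMD path, the cipher forwards its
// AdvancedProcessBlocks call to one of these functions, which bind the
// single-block and 6-block kernels above to the shared adv_simd.h drivers.
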
// *************************** ARM NEON **************************** //

#if (CRYPTOPP_ARM_NEON_AVAILABLE)
size_t SPECK128_Enc_AdvancedProcessBlocks_NEON(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_NEON(SPECK128_Enc_Block, SPECK128_Enc_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}

size_t SPECK128_Dec_AdvancedProcessBlocks_NEON(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_NEON(SPECK128_Dec_Block, SPECK128_Dec_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif // CRYPTOPP_ARM_NEON_AVAILABLE

// ***************************** IA-32 ***************************** //

#if (CRYPTOPP_SSSE3_AVAILABLE)
size_t SPECK128_Enc_AdvancedProcessBlocks_SSSE3(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_SSE(SPECK128_Enc_Block, SPECK128_Enc_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}

size_t SPECK128_Dec_AdvancedProcessBlocks_SSSE3(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_SSE(SPECK128_Dec_Block, SPECK128_Dec_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif // CRYPTOPP_SSSE3_AVAILABLE

// ***************************** Altivec ***************************** //

#if (CRYPTOPP_ALTIVEC_AVAILABLE)
size_t SPECK128_Enc_AdvancedProcessBlocks_ALTIVEC(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x1_ALTIVEC(SPECK128_Enc_Block, SPECK128_Enc_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}

size_t SPECK128_Dec_AdvancedProcessBlocks_ALTIVEC(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x1_ALTIVEC(SPECK128_Dec_Block, SPECK128_Dec_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif // CRYPTOPP_ALTIVEC_AVAILABLE

NAMESPACE_END