// blake2_simd.cpp - written and placed in the public domain by
//                   Samuel Neves, Jeffrey Walton, Uri Blumenthal
//                   and Marcel Raad.
//
//    This source file uses intrinsics to gain access to ARMv7a/ARMv8a
//    NEON, Power7 and SSE4.1 instructions. A separate source file is
//    needed because additional CXXFLAGS are required to enable the
//    appropriate instruction sets in some build configurations.

// The BLAKE2b and BLAKE2s numbers are consistent with the BLAKE2 team's
// numbers. However, we have an Altivec implementation of BLAKE2s,
// and a POWER8 implementation of BLAKE2b (the BLAKE2 team lacks both).
// The Altivec code is about 2x faster than C++ when using GCC 5.0 or
// above, and the POWER8 code is about 2.5x faster than C++ when using
// GCC 5.0 or above. With GCC 4.0 (PowerMac) or GCC 4.8 (GCC Compile
// Farm) the PowerPC code is slower than C++. Be sure to use GCC 5.0 or
// above for PowerPC builds, or disable Altivec for BLAKE2b and BLAKE2s
// when using the older compilers.

#include "pch.h"
#include "config.h"
#include "misc.h"
#include "blake2.h"

// Uncomment for benchmarking C++ against SSE2 or NEON.
// Do so in both blake2.cpp and blake2_simd.cpp.
// #undef CRYPTOPP_SSE41_AVAILABLE
// #undef CRYPTOPP_ARM_NEON_AVAILABLE
// #undef CRYPTOPP_ALTIVEC_AVAILABLE

// Disable NEON/ASIMD for Cortex-A53 and A57. The shifts are too slow and C/C++ is about
// 3 cpb faster than NEON/ASIMD. Also see http://github.com/weidai11/cryptopp/issues/367.
#if (defined(__aarch32__) || defined(__aarch64__)) && defined(CRYPTOPP_SLOW_ARMV8_SHIFT)
# undef CRYPTOPP_ARM_NEON_AVAILABLE
#endif

// BLAKE2s bug on AIX 7.1 (POWER7) with XLC 12.01
// https://github.com/weidai11/cryptopp/issues/743
#if defined(__xlC__) && (__xlC__ < 0x0d01)
# define CRYPTOPP_DISABLE_ALTIVEC 1
# undef CRYPTOPP_POWER7_AVAILABLE
# undef CRYPTOPP_ALTIVEC_AVAILABLE
#endif

#if defined(__XOP__)
# include <ammintrin.h>
# if defined(__GNUC__)
#  include <x86intrin.h>
# endif
#endif

#if (CRYPTOPP_SSE41_AVAILABLE)
# include <emmintrin.h>
# include <tmmintrin.h>
# include <smmintrin.h>
#endif

#if (CRYPTOPP_ARM_NEON_HEADER)
# include <arm_neon.h>
#endif

#if (CRYPTOPP_ARM_ACLE_HEADER)
# include <stdint.h>
# include <arm_acle.h>
#endif

#if (CRYPTOPP_ALTIVEC_AVAILABLE)
# include "ppc_simd.h"
#endif

#if defined(CRYPTOPP_GCC_DIAGNOSTIC_AVAILABLE)
/* Ignore "warning: vec_lvsl is deprecated..." */
# pragma GCC diagnostic ignored "-Wdeprecated"
#endif

// Squash MS LNK4221 and libtool warnings
extern const char BLAKE2S_SIMD_FNAME[] = __FILE__;

NAMESPACE_BEGIN(CryptoPP)

// Exported by blake2.cpp
extern const word32 BLAKE2S_IV[8];
extern const word64 BLAKE2B_IV[8];

#if CRYPTOPP_SSE41_AVAILABLE

#define LOADU(p)  _mm_loadu_si128((const __m128i *)(const void*)(p))
#define STOREU(p,r) _mm_storeu_si128((__m128i *)(void*)(p), r)
#define TOF(reg) _mm_castsi128_ps((reg))
#define TOI(reg) _mm_castps_si128((reg))
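// TOF/TOI reinterpret a register between integer and float views so that
// _mm_shuffle_ps can serve as a two-source 32-bit shuffle; the integer
// side of SSE4.1 has no shuffle that draws elements from both operands.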

void BLAKE2_Compress32_SSE4(const byte* input, BLAKE2s_State& state)
{
    #define BLAKE2S_LOAD_MSG_0_1(buf) \
    buf = TOI(_mm_shuffle_ps(TOF(m0), TOF(m1), _MM_SHUFFLE(2,0,2,0)));

    #define BLAKE2S_LOAD_MSG_0_2(buf) \
    buf = TOI(_mm_shuffle_ps(TOF(m0), TOF(m1), _MM_SHUFFLE(3,1,3,1)));

    #define BLAKE2S_LOAD_MSG_0_3(buf) \
    t0 = _mm_shuffle_epi32(m2, _MM_SHUFFLE(3,2,0,1)); \
    t1 = _mm_shuffle_epi32(m3, _MM_SHUFFLE(0,1,3,2)); \
    buf = _mm_blend_epi16(t0, t1, 0xC3);

    #define BLAKE2S_LOAD_MSG_0_4(buf) \
    t0 = _mm_blend_epi16(t0, t1, 0x3C); \
    buf = _mm_shuffle_epi32(t0, _MM_SHUFFLE(2,3,0,1));

    #define BLAKE2S_LOAD_MSG_1_1(buf) \
    t0 = _mm_blend_epi16(m1, m2, 0x0C); \
    t1 = _mm_slli_si128(m3, 4); \
    t2 = _mm_blend_epi16(t0, t1, 0xF0); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,1,0,3));

    #define BLAKE2S_LOAD_MSG_1_2(buf) \
    t0 = _mm_shuffle_epi32(m2,_MM_SHUFFLE(0,0,2,0)); \
    t1 = _mm_blend_epi16(m1,m3,0xC0); \
    t2 = _mm_blend_epi16(t0, t1, 0xF0); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,3,0,1));

    #define BLAKE2S_LOAD_MSG_1_3(buf) \
    t0 = _mm_slli_si128(m1, 4); \
    t1 = _mm_blend_epi16(m2, t0, 0x30); \
    t2 = _mm_blend_epi16(m0, t1, 0xF0); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(3,0,1,2));

    #define BLAKE2S_LOAD_MSG_1_4(buf) \
    t0 = _mm_unpackhi_epi32(m0,m1); \
    t1 = _mm_slli_si128(m3, 4); \
    t2 = _mm_blend_epi16(t0, t1, 0x0C); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(3,0,1,2));

    #define BLAKE2S_LOAD_MSG_2_1(buf) \
    t0 = _mm_unpackhi_epi32(m2,m3); \
    t1 = _mm_blend_epi16(m3,m1,0x0C); \
    t2 = _mm_blend_epi16(t0, t1, 0x0F); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(3,1,0,2));

    #define BLAKE2S_LOAD_MSG_2_2(buf) \
    t0 = _mm_unpacklo_epi32(m2,m0); \
    t1 = _mm_blend_epi16(t0, m0, 0xF0); \
    t2 = _mm_slli_si128(m3, 8); \
    buf = _mm_blend_epi16(t1, t2, 0xC0);

    #define BLAKE2S_LOAD_MSG_2_3(buf) \
    t0 = _mm_blend_epi16(m0, m2, 0x3C); \
    t1 = _mm_srli_si128(m1, 12); \
    t2 = _mm_blend_epi16(t0,t1,0x03); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(0,3,2,1));

    #define BLAKE2S_LOAD_MSG_2_4(buf) \
    t0 = _mm_slli_si128(m3, 4); \
    t1 = _mm_blend_epi16(m0, m1, 0x33); \
    t2 = _mm_blend_epi16(t1, t0, 0xC0); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,2,3,0));

    #define BLAKE2S_LOAD_MSG_3_1(buf) \
    t0 = _mm_unpackhi_epi32(m0,m1); \
    t1 = _mm_unpackhi_epi32(t0, m2); \
    t2 = _mm_blend_epi16(t1, m3, 0x0C); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(3,1,0,2));

    #define BLAKE2S_LOAD_MSG_3_2(buf) \
    t0 = _mm_slli_si128(m2, 8); \
    t1 = _mm_blend_epi16(m3,m0,0x0C); \
    t2 = _mm_blend_epi16(t1, t0, 0xC0); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,0,1,3));

    #define BLAKE2S_LOAD_MSG_3_3(buf) \
    t0 = _mm_blend_epi16(m0,m1,0x0F); \
    t1 = _mm_blend_epi16(t0, m3, 0xC0); \
    buf = _mm_shuffle_epi32(t1, _MM_SHUFFLE(0,1,2,3));

    #define BLAKE2S_LOAD_MSG_3_4(buf) \
    t0 = _mm_alignr_epi8(m0, m1, 4); \
    buf = _mm_blend_epi16(t0, m2, 0x33);

    #define BLAKE2S_LOAD_MSG_4_1(buf) \
    t0 = _mm_unpacklo_epi64(m1,m2); \
    t1 = _mm_unpackhi_epi64(m0,m2); \
    t2 = _mm_blend_epi16(t0,t1,0x33); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,0,1,3));

    #define BLAKE2S_LOAD_MSG_4_2(buf) \
    t0 = _mm_unpackhi_epi64(m1,m3); \
    t1 = _mm_unpacklo_epi64(m0,m1); \
    buf = _mm_blend_epi16(t0,t1,0x33);

    #define BLAKE2S_LOAD_MSG_4_3(buf) \
    t0 = _mm_unpackhi_epi64(m3,m1); \
    t1 = _mm_unpackhi_epi64(m2,m0); \
    t2 = _mm_blend_epi16(t1,t0,0x33); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,1,0,3));

    #define BLAKE2S_LOAD_MSG_4_4(buf) \
    t0 = _mm_blend_epi16(m0,m2,0x03); \
    t1 = _mm_slli_si128(t0, 8); \
    t2 = _mm_blend_epi16(t1,m3,0x0F); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,0,3,1));

    #define BLAKE2S_LOAD_MSG_5_1(buf) \
    t0 = _mm_unpackhi_epi32(m0,m1); \
    t1 = _mm_unpacklo_epi32(m0,m2); \
    buf = _mm_unpacklo_epi64(t0,t1);

    #define BLAKE2S_LOAD_MSG_5_2(buf) \
    t0 = _mm_srli_si128(m2, 4); \
    t1 = _mm_blend_epi16(m0,m3,0x03); \
    buf = _mm_blend_epi16(t1,t0,0x3C);

    #define BLAKE2S_LOAD_MSG_5_3(buf) \
    t0 = _mm_blend_epi16(m1,m0,0x0C); \
    t1 = _mm_srli_si128(m3, 4); \
    t2 = _mm_blend_epi16(t0,t1,0x30); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,3,0,1));

    #define BLAKE2S_LOAD_MSG_5_4(buf) \
    t0 = _mm_unpacklo_epi64(m2,m1); \
    t1 = _mm_shuffle_epi32(m3, _MM_SHUFFLE(2,0,1,0)); \
    t2 = _mm_srli_si128(t0, 4); \
    buf = _mm_blend_epi16(t1,t2,0x33);

    #define BLAKE2S_LOAD_MSG_6_1(buf) \
    t0 = _mm_slli_si128(m1, 12); \
    t1 = _mm_blend_epi16(m0,m3,0x33); \
    buf = _mm_blend_epi16(t1,t0,0xC0);

    #define BLAKE2S_LOAD_MSG_6_2(buf) \
    t0 = _mm_blend_epi16(m3,m2,0x30); \
    t1 = _mm_srli_si128(m1, 4); \
    t2 = _mm_blend_epi16(t0,t1,0x03); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,1,3,0));

    #define BLAKE2S_LOAD_MSG_6_3(buf) \
    t0 = _mm_unpacklo_epi64(m0,m2); \
    t1 = _mm_srli_si128(m1, 4); \
    t2 = _mm_blend_epi16(t0,t1,0x0C); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(3,1,0,2));

    #define BLAKE2S_LOAD_MSG_6_4(buf) \
    t0 = _mm_unpackhi_epi32(m1,m2); \
    t1 = _mm_unpackhi_epi64(m0,t0); \
    buf = _mm_shuffle_epi32(t1, _MM_SHUFFLE(0,1,2,3));

    #define BLAKE2S_LOAD_MSG_7_1(buf) \
    t0 = _mm_unpackhi_epi32(m0,m1); \
    t1 = _mm_blend_epi16(t0,m3,0x0F); \
    buf = _mm_shuffle_epi32(t1,_MM_SHUFFLE(2,0,3,1));

    #define BLAKE2S_LOAD_MSG_7_2(buf) \
    t0 = _mm_blend_epi16(m2,m3,0x30); \
    t1 = _mm_srli_si128(m0,4); \
    t2 = _mm_blend_epi16(t0,t1,0x03); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,0,2,3));

    #define BLAKE2S_LOAD_MSG_7_3(buf) \
    t0 = _mm_unpackhi_epi64(m0,m3); \
    t1 = _mm_unpacklo_epi64(m1,m2); \
    t2 = _mm_blend_epi16(t0,t1,0x3C); \
    buf = _mm_shuffle_epi32(t2,_MM_SHUFFLE(2,3,1,0));

    #define BLAKE2S_LOAD_MSG_7_4(buf) \
    t0 = _mm_unpacklo_epi32(m0,m1); \
    t1 = _mm_unpackhi_epi32(m1,m2); \
    t2 = _mm_unpacklo_epi64(t0,t1); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,1,0,3));

    #define BLAKE2S_LOAD_MSG_8_1(buf) \
    t0 = _mm_unpackhi_epi32(m1,m3); \
    t1 = _mm_unpacklo_epi64(t0,m0); \
    t2 = _mm_blend_epi16(t1,m2,0xC0); \
    buf = _mm_shufflehi_epi16(t2,_MM_SHUFFLE(1,0,3,2));

    #define BLAKE2S_LOAD_MSG_8_2(buf) \
    t0 = _mm_unpackhi_epi32(m0,m3); \
    t1 = _mm_blend_epi16(m2,t0,0xF0); \
    buf = _mm_shuffle_epi32(t1,_MM_SHUFFLE(0,2,1,3));

    #define BLAKE2S_LOAD_MSG_8_3(buf) \
    t0 = _mm_unpacklo_epi64(m0,m3); \
    t1 = _mm_srli_si128(m2,8); \
    t2 = _mm_blend_epi16(t0,t1,0x03); \
    buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,3,2,0));

    #define BLAKE2S_LOAD_MSG_8_4(buf) \
    t0 = _mm_blend_epi16(m1,m0,0x30); \
    buf = _mm_shuffle_epi32(t0,_MM_SHUFFLE(0,3,2,1));

    #define BLAKE2S_LOAD_MSG_9_1(buf) \
    t0 = _mm_blend_epi16(m0,m2,0x03); \
    t1 = _mm_blend_epi16(m1,m2,0x30); \
    t2 = _mm_blend_epi16(t1,t0,0x0F); \
    buf = _mm_shuffle_epi32(t2,_MM_SHUFFLE(1,3,0,2));

    #define BLAKE2S_LOAD_MSG_9_2(buf) \
    t0 = _mm_slli_si128(m0,4); \
    t1 = _mm_blend_epi16(m1,t0,0xC0); \
    buf = _mm_shuffle_epi32(t1,_MM_SHUFFLE(1,2,0,3));

    #define BLAKE2S_LOAD_MSG_9_3(buf) \
    t0 = _mm_unpackhi_epi32(m0,m3); \
    t1 = _mm_unpacklo_epi32(m2,m3); \
    t2 = _mm_unpackhi_epi64(t0,t1); \
    buf = _mm_shuffle_epi32(t2,_MM_SHUFFLE(0,2,1,3));

    #define BLAKE2S_LOAD_MSG_9_4(buf) \
    t0 = _mm_blend_epi16(m3,m2,0xC0); \
    t1 = _mm_unpacklo_epi32(m0,m3); \
    t2 = _mm_blend_epi16(t0,t1,0x0F); \
    buf = _mm_shuffle_epi32(t2,_MM_SHUFFLE(1,2,3,0));

#ifdef __XOP__
# define MM_ROTI_EPI32(r, c) \
    _mm_roti_epi32(r, c)
#else
# define MM_ROTI_EPI32(r, c) ( \
      (8==-(c)) ? _mm_shuffle_epi8(r,r8) \
    : (16==-(c)) ? _mm_shuffle_epi8(r,r16) \
    : _mm_xor_si128(_mm_srli_epi32((r), -(c)), \
      _mm_slli_epi32((r), 32-(-(c)))))
#endif
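
// On non-XOP targets, the byte-aligned rotations (by 16 and by 8) are a
// single pshufb with the r16/r8 masks defined below; other amounts fall
// back to the shift-and-combine identity. A minimal scalar sketch of that
// identity (hypothetical helper, not part of this file):
//
//   inline word32 rotr32(word32 x, unsigned int c) {
//       return (x >> c) | (x << (32 - c));
//   }
//
// The SSE form XORs the two shifted halves rather than ORing them, which
// is equivalent here because the halves have no overlapping bits.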

#define BLAKE2S_G1(row1,row2,row3,row4,buf) \
    row1 = _mm_add_epi32( _mm_add_epi32( row1, buf), row2 ); \
    row4 = _mm_xor_si128( row4, row1 ); \
    row4 = MM_ROTI_EPI32(row4, -16); \
    row3 = _mm_add_epi32( row3, row4 );   \
    row2 = _mm_xor_si128( row2, row3 ); \
    row2 = MM_ROTI_EPI32(row2, -12);

#define BLAKE2S_G2(row1,row2,row3,row4,buf) \
    row1 = _mm_add_epi32( _mm_add_epi32( row1, buf), row2 ); \
    row4 = _mm_xor_si128( row4, row1 ); \
    row4 = MM_ROTI_EPI32(row4, -8); \
    row3 = _mm_add_epi32( row3, row4 );   \
    row2 = _mm_xor_si128( row2, row3 ); \
    row2 = MM_ROTI_EPI32(row2, -7);

#define DIAGONALIZE(row1,row2,row3,row4) \
    row1 = _mm_shuffle_epi32( row1, _MM_SHUFFLE(2,1,0,3) ); \
    row4 = _mm_shuffle_epi32( row4, _MM_SHUFFLE(1,0,3,2) ); \
    row3 = _mm_shuffle_epi32( row3, _MM_SHUFFLE(0,3,2,1) );

#define UNDIAGONALIZE(row1,row2,row3,row4) \
    row1 = _mm_shuffle_epi32( row1, _MM_SHUFFLE(0,3,2,1) ); \
    row4 = _mm_shuffle_epi32( row4, _MM_SHUFFLE(1,0,3,2) ); \
    row3 = _mm_shuffle_epi32( row3, _MM_SHUFFLE(2,1,0,3) );

#define BLAKE2S_ROUND(r)  \
    BLAKE2S_LOAD_MSG_ ##r ##_1(buf1); \
    BLAKE2S_G1(row1,row2,row3,row4,buf1); \
    BLAKE2S_LOAD_MSG_ ##r ##_2(buf2); \
    BLAKE2S_G2(row1,row2,row3,row4,buf2); \
    DIAGONALIZE(row1,row2,row3,row4); \
    BLAKE2S_LOAD_MSG_ ##r ##_3(buf3); \
    BLAKE2S_G1(row1,row2,row3,row4,buf3); \
    BLAKE2S_LOAD_MSG_ ##r ##_4(buf4); \
    BLAKE2S_G2(row1,row2,row3,row4,buf4); \
    UNDIAGONALIZE(row1,row2,row3,row4);
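
// Each BLAKE2S_ROUND is one round of the BLAKE2s permutation: G1/G2 are
// the two halves of BLAKE2's G function applied to all four columns of
// the 4x4 state at once, DIAGONALIZE rotates three of the rows so the
// same code then mixes the diagonals, and UNDIAGONALIZE restores the
// column layout for the next round.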

    __m128i row1, row2, row3, row4;
    __m128i buf1, buf2, buf3, buf4;
    __m128i t0, t1, t2, ff0, ff1;

    const __m128i r8 = _mm_set_epi8(12, 15, 14, 13, 8, 11, 10, 9, 4, 7, 6, 5, 0, 3, 2, 1);
    const __m128i r16 = _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
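    // pshufb masks: r8 rotates each 32-bit lane right by 8 bits, and r16
    // rotates each lane right by 16 bits.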

    const __m128i m0 = LOADU(input + 00);
    const __m128i m1 = LOADU(input + 16);
    const __m128i m2 = LOADU(input + 32);
    const __m128i m3 = LOADU(input + 48);

    row1 = ff0 = LOADU(state.h()+0);
    row2 = ff1 = LOADU(state.h()+4);
    row3 = LOADU(BLAKE2S_IV+0);
    row4 = _mm_xor_si128(LOADU(BLAKE2S_IV+4), LOADU(state.t()+0));

    BLAKE2S_ROUND(0);
    BLAKE2S_ROUND(1);
    BLAKE2S_ROUND(2);
    BLAKE2S_ROUND(3);
    BLAKE2S_ROUND(4);
    BLAKE2S_ROUND(5);
    BLAKE2S_ROUND(6);
    BLAKE2S_ROUND(7);
    BLAKE2S_ROUND(8);
    BLAKE2S_ROUND(9);

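    // Feed-forward: fold the working state back into the chaining value,
    // h[i] ^= v[i] ^ v[i+8] in the BLAKE2 specification (RFC 7693).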
    STOREU(state.h()+0, _mm_xor_si128(ff0, _mm_xor_si128(row1, row3)));
    STOREU(state.h()+4, _mm_xor_si128(ff1, _mm_xor_si128(row2, row4)));
}
#endif  // CRYPTOPP_SSE41_AVAILABLE

#if CRYPTOPP_ARM_NEON_AVAILABLE
void BLAKE2_Compress32_NEON(const byte* input, BLAKE2s_State& state)
{
    #define BLAKE2S_LOAD_MSG_0_1(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_low_u32(m0), vget_high_u32(m0)).val[0]; \
    t1 = vzip_u32(vget_low_u32(m1), vget_high_u32(m1)).val[0]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_0_2(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_low_u32(m0), vget_high_u32(m0)).val[1]; \
    t1 = vzip_u32(vget_low_u32(m1), vget_high_u32(m1)).val[1]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_0_3(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_low_u32(m2), vget_high_u32(m2)).val[0]; \
    t1 = vzip_u32(vget_low_u32(m3), vget_high_u32(m3)).val[0]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_0_4(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_low_u32(m2), vget_high_u32(m2)).val[1]; \
    t1 = vzip_u32(vget_low_u32(m3), vget_high_u32(m3)).val[1]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_1_1(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_high_u32(m3), vget_low_u32(m1)).val[0]; \
    t1 = vzip_u32(vget_low_u32(m2), vget_low_u32(m3)).val[1]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_1_2(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_high_u32(m2), vget_low_u32(m2)).val[0]; \
    t1 = vext_u32(vget_high_u32(m3), vget_high_u32(m1), 1); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_1_3(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vext_u32(vget_low_u32(m0), vget_low_u32(m0), 1); \
    t1 = vzip_u32(vget_high_u32(m2), vget_low_u32(m1)).val[1]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_1_4(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_low_u32(m3), vget_high_u32(m0)).val[0]; \
    t1 = vzip_u32(vget_high_u32(m1), vget_high_u32(m0)).val[1]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_2_1(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vext_u32(vget_high_u32(m2), vget_low_u32(m3), 1); \
    t1 = vzip_u32(vget_low_u32(m1), vget_high_u32(m3)).val[1]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_2_2(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_low_u32(m2), vget_low_u32(m0)).val[0]; \
    t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m0), vget_low_u32(m3)); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_2_3(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m2), vget_high_u32(m0)); \
    t1 = vzip_u32(vget_high_u32(m1), vget_low_u32(m2)).val[1]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_2_4(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_high_u32(m3), vget_high_u32(m1)).val[0]; \
    t1 = vext_u32(vget_low_u32(m0), vget_low_u32(m1), 1); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_3_1(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_high_u32(m1), vget_high_u32(m0)).val[1]; \
    t1 = vzip_u32(vget_low_u32(m3), vget_high_u32(m2)).val[1]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_3_2(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_low_u32(m2), vget_low_u32(m0)).val[1]; \
    t1 = vzip_u32(vget_low_u32(m3), vget_high_u32(m3)).val[0]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_3_3(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m0), vget_low_u32(m1)); \
    t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m1), vget_high_u32(m3)); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_3_4(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_high_u32(m1), vget_high_u32(m2)).val[0]; \
    t1 = vzip_u32(vget_low_u32(m0), vget_low_u32(m2)).val[0]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_4_1(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_low_u32(m2), vget_low_u32(m1)).val[1]; \
    t1 = vzip_u32((vget_high_u32(m0)), vget_high_u32(m2)).val[0]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_4_2(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m0), vget_high_u32(m1)); \
    t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m1), vget_high_u32(m3)); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_4_3(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m3), vget_high_u32(m2)); \
    t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m1), vget_high_u32(m0)); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_4_4(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vext_u32(vget_low_u32(m0), vget_low_u32(m3), 1); \
    t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m2), vget_low_u32(m3)); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_5_1(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32((vget_high_u32(m0)), vget_high_u32(m1)).val[0]; \
    t1 = vzip_u32(vget_low_u32(m0), vget_low_u32(m2)).val[0]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_5_2(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_low_u32(m3), vget_high_u32(m2)).val[0]; \
    t1 = vzip_u32(vget_high_u32(m2), vget_high_u32(m0)).val[1]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_5_3(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m1), vget_high_u32(m1)); \
    t1 = vzip_u32(vget_high_u32(m3), vget_low_u32(m0)).val[1]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_5_4(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_low_u32(m3), vget_low_u32(m1)).val[1]; \
    t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m3), vget_low_u32(m2)); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_6_1(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m3), vget_low_u32(m0)); \
    t1 = vzip_u32(vget_high_u32(m3), vget_low_u32(m1)).val[0]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_6_2(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_low_u32(m1), vget_high_u32(m3)).val[1]; \
    t1 = vext_u32(vget_low_u32(m3), vget_high_u32(m2), 1); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_6_3(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_low_u32(m0), vget_high_u32(m1)).val[0]; \
    t1 = vext_u32(vget_low_u32(m2), vget_low_u32(m2), 1); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_6_4(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_high_u32(m1), vget_high_u32(m0)).val[1]; \
    t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m0), vget_high_u32(m2)); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_7_1(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_low_u32(m3), vget_high_u32(m1)).val[1]; \
    t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m3), vget_high_u32(m0)); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_7_2(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vext_u32(vget_high_u32(m2), vget_high_u32(m3), 1); \
    t1 = vzip_u32(vget_low_u32(m0), vget_low_u32(m2)).val[1]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_7_3(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_low_u32(m1), vget_high_u32(m3)).val[1]; \
    t1 = vzip_u32(vget_low_u32(m2), vget_high_u32(m0)).val[0]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_7_4(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_low_u32(m0), vget_low_u32(m1)).val[0]; \
    t1 = vzip_u32(vget_high_u32(m1), vget_high_u32(m2)).val[0]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_8_1(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_high_u32(m1), vget_high_u32(m3)).val[0]; \
    t1 = vext_u32(vget_high_u32(m2), vget_low_u32(m0), 1); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_8_2(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_high_u32(m3), vget_low_u32(m2)).val[1]; \
    t1 = vext_u32(vget_high_u32(m0), vget_low_u32(m2), 1); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_8_3(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m3), vget_low_u32(m3)); \
    t1 = vext_u32(vget_low_u32(m0), vget_high_u32(m2), 1); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_8_4(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m0), vget_high_u32(m1)); \
    t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m1), vget_low_u32(m1)); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_9_1(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_high_u32(m2), vget_low_u32(m2)).val[0]; \
    t1 = vzip_u32(vget_high_u32(m1), vget_low_u32(m0)).val[1]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_9_2(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32((vget_high_u32(m0)), vget_low_u32(m1)).val[0]; \
    t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m1), vget_low_u32(m1)); \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_9_3(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vzip_u32(vget_high_u32(m3), vget_low_u32(m2)).val[1]; \
    t1 = vzip_u32((vget_high_u32(m0)), vget_low_u32(m3)).val[1]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define BLAKE2S_LOAD_MSG_9_4(buf) \
    do { uint32x2_t t0, t1; \
    t0 = vext_u32(vget_high_u32(m2), vget_high_u32(m3), 1); \
    t1 = vzip_u32(vget_low_u32(m3), vget_low_u32(m0)).val[0]; \
    buf = vcombine_u32(t0, t1); } while(0)

    #define vrorq_n_u32_16(x) vreinterpretq_u32_u16(vrev32q_u16(vreinterpretq_u16_u32(x)))

    #define vrorq_n_u32_8(x) vsriq_n_u32(vshlq_n_u32((x), 24), (x), 8)

    #define vrorq_n_u32(x, c) vsriq_n_u32(vshlq_n_u32((x), 32-(c)), (x), (c))
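
    // vrev32q_u16 swaps the 16-bit halves of each 32-bit lane, so the
    // rotation by 16 costs one instruction. The general form pre-shifts
    // left by 32-c and uses vsriq_n_u32 (shift right and insert) to merge
    // in the remaining bits; the rotation by 8 is the same trick with
    // fixed constants.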

    #define BLAKE2S_G1(row1,row2,row3,row4,buf) \
    do { \
      row1 = vaddq_u32(vaddq_u32(row1, buf), row2); row4 = veorq_u32(row4, row1); \
      row4 = vrorq_n_u32_16(row4); row3 = vaddq_u32(row3, row4); \
      row2 = veorq_u32(row2, row3); row2 = vrorq_n_u32(row2, 12); \
    } while(0)

    #define BLAKE2S_G2(row1,row2,row3,row4,buf) \
    do { \
      row1 = vaddq_u32(vaddq_u32(row1, buf), row2); row4 = veorq_u32(row4, row1); \
      row4 = vrorq_n_u32_8(row4); row3 = vaddq_u32(row3, row4); \
      row2 = veorq_u32(row2, row3); row2 = vrorq_n_u32(row2, 7); \
    } while(0)

    #define BLAKE2S_DIAGONALIZE(row1,row2,row3,row4) \
    do { \
      row4 = vextq_u32(row4, row4, 3); row3 = vextq_u32(row3, row3, 2); row2 = vextq_u32(row2, row2, 1); \
    } while(0)

    #define BLAKE2S_UNDIAGONALIZE(row1,row2,row3,row4) \
    do { \
      row4 = vextq_u32(row4, row4, 1); \
      row3 = vextq_u32(row3, row3, 2); \
      row2 = vextq_u32(row2, row2, 3); \
    } while(0)

    #define BLAKE2S_ROUND(r)  \
    do { \
      uint32x4_t buf1, buf2, buf3, buf4; \
      BLAKE2S_LOAD_MSG_ ##r ##_1(buf1); \
      BLAKE2S_G1(row1,row2,row3,row4,buf1); \
      BLAKE2S_LOAD_MSG_ ##r ##_2(buf2); \
      BLAKE2S_G2(row1,row2,row3,row4,buf2); \
      BLAKE2S_DIAGONALIZE(row1,row2,row3,row4); \
      BLAKE2S_LOAD_MSG_ ##r ##_3(buf3); \
      BLAKE2S_G1(row1,row2,row3,row4,buf3); \
      BLAKE2S_LOAD_MSG_ ##r ##_4(buf4); \
      BLAKE2S_G2(row1,row2,row3,row4,buf4); \
      BLAKE2S_UNDIAGONALIZE(row1,row2,row3,row4); \
    } while(0)

    const uint32x4_t m0 = vreinterpretq_u32_u8(vld1q_u8(input + 00));
    const uint32x4_t m1 = vreinterpretq_u32_u8(vld1q_u8(input + 16));
    const uint32x4_t m2 = vreinterpretq_u32_u8(vld1q_u8(input + 32));
    const uint32x4_t m3 = vreinterpretq_u32_u8(vld1q_u8(input + 48));

    uint32x4_t row1, row2, row3, row4;

    const uint32x4_t f0 = row1 = vld1q_u32(state.h()+0);
    const uint32x4_t f1 = row2 = vld1q_u32(state.h()+4);
    row3 = vld1q_u32(BLAKE2S_IV+0);
    row4 = veorq_u32(vld1q_u32(BLAKE2S_IV+4), vld1q_u32(state.t()+0));

    BLAKE2S_ROUND(0);
    BLAKE2S_ROUND(1);
    BLAKE2S_ROUND(2);
    BLAKE2S_ROUND(3);
    BLAKE2S_ROUND(4);
    BLAKE2S_ROUND(5);
    BLAKE2S_ROUND(6);
    BLAKE2S_ROUND(7);
    BLAKE2S_ROUND(8);
    BLAKE2S_ROUND(9);

    vst1q_u32(state.h()+0, veorq_u32(f0, veorq_u32(row1, row3)));
    vst1q_u32(state.h()+4, veorq_u32(f1, veorq_u32(row2, row4)));
}
#endif  // CRYPTOPP_ARM_NEON_AVAILABLE

#if (CRYPTOPP_ALTIVEC_AVAILABLE)

template <class T>
inline uint32x4_p VecLoad32(const T* p)
{
    return VecLoad(p);
}

template <class T>
inline uint32x4_p VecLoad32LE(const T* p, const uint8x16_p le_mask)
{
#if defined(CRYPTOPP_BIG_ENDIAN)
    const uint32x4_p v = VecLoad(p);
    return VecPermute(v, v, le_mask);
#else
    CRYPTOPP_UNUSED(le_mask);
    return VecLoad(p);
#endif
}

template <class T>
inline void VecStore32(T* p, const uint32x4_p x)
{
    VecStore(x, p);
}

template <class T>
inline void VecStore32LE(T* p, const uint32x4_p x, const uint8x16_p le_mask)
{
#if defined(CRYPTOPP_BIG_ENDIAN)
    const uint32x4_p v = VecPermute(x, x, le_mask);
    VecStore(v, p);
#else
    CRYPTOPP_UNUSED(le_mask);
    VecStore(x, p);
#endif
}
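
// le_mask byte-reverses each 32-bit word. BLAKE2 state words are stored
// little-endian, so big-endian PowerPC swaps on load and store; on
// little-endian targets the mask goes unused.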

template <unsigned int E1, unsigned int E2>
inline uint32x4_p VectorSet32(const uint32x4_p a, const uint32x4_p b)
{
    // Re-index. I'd like to use something like Z=Y*4 and then
    // VecShiftLeftOctet<Z>(b) but it crashes early Red Hat
    // GCC compilers.
    enum {X=E1&3, Y=E2&3};

    // Don't care element
    const unsigned int DC = 31;

    // Element 0 combinations
    if (X == 0 && Y == 0)
    {
        const uint8x16_p mask = {0,1,2,3, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, b, mask);
    }
    else if (X == 0 && Y == 1)
    {
        const uint8x16_p mask = {0,1,2,3, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, VecShiftLeftOctet<4>(b), mask);
    }
    else if (X == 0 && Y == 2)
    {
        const uint8x16_p mask = {0,1,2,3, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, VecShiftLeftOctet<8>(b), mask);
    }
    else if (X == 0 && Y == 3)
    {
        const uint8x16_p mask = {0,1,2,3, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, VecShiftLeftOctet<12>(b), mask);
    }

    // Element 1 combinations
    else if (X == 1 && Y == 0)
    {
        const uint8x16_p mask = {4,5,6,7, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, b, mask);
    }
    else if (X == 1 && Y == 1)
    {
        const uint8x16_p mask = {4,5,6,7, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, VecShiftLeftOctet<4>(b), mask);
    }
    else if (X == 1 && Y == 2)
    {
        const uint8x16_p mask = {4,5,6,7, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, VecShiftLeftOctet<8>(b), mask);
    }
    else if (X == 1 && Y == 3)
    {
        const uint8x16_p mask = {4,5,6,7, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, VecShiftLeftOctet<12>(b), mask);
    }

    // Element 2 combinations
    else if (X == 2 && Y == 0)
    {
        const uint8x16_p mask = {8,9,10,11, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, b, mask);
    }
    else if (X == 2 && Y == 1)
    {
        const uint8x16_p mask = {8,9,10,11, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, VecShiftLeftOctet<4>(b), mask);
    }
    else if (X == 2 && Y == 2)
    {
        const uint8x16_p mask = {8,9,10,11, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, VecShiftLeftOctet<8>(b), mask);
    }
    else if (X == 2 && Y == 3)
    {
        const uint8x16_p mask = {8,9,10,11, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, VecShiftLeftOctet<12>(b), mask);
    }

    // Element 3 combinations
    else if (X == 3 && Y == 0)
    {
        const uint8x16_p mask = {12,13,14,15, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, b, mask);
    }
    else if (X == 3 && Y == 1)
    {
        const uint8x16_p mask = {12,13,14,15, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, VecShiftLeftOctet<4>(b), mask);
    }
    else if (X == 3 && Y == 2)
    {
        const uint8x16_p mask = {12,13,14,15, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, VecShiftLeftOctet<8>(b), mask);
    }
    else if (X == 3 && Y == 3)
    {
        const uint8x16_p mask = {12,13,14,15, 16,17,18,19, DC,DC,DC,DC, DC,DC,DC,DC};
        return VecPermute(a, VecShiftLeftOctet<12>(b), mask);
    }

    // Quiet IBM XLC warning
    return VecXor(a, a);
}

template <unsigned int E1, unsigned int E2, unsigned int E3, unsigned int E4>
inline uint32x4_p VectorSet32(const uint32x4_p a, const uint32x4_p b,
                              const uint32x4_p c, const uint32x4_p d)
{
    // Re-index
    enum {W=E1&3, X=E2&3, Y=E3&3, Z=E4&3};

    const uint32x4_p t0 = VectorSet32<W,X>(a, b);
    const uint32x4_p t1 = VectorSet32<Y,Z>(c, d);

    // PowerPC follows SSE2's implementation, and this is _mm_set_epi32.
    const uint8x16_p mask = {20,21,22,23, 16,17,18,19, 4,5,6,7, 0,1,2,3};
    return VecPermute(t0, t1, mask);
}

template<>
uint32x4_p VectorSet32<2,0,2,0>(const uint32x4_p a, const uint32x4_p b,
                                const uint32x4_p c, const uint32x4_p d)
{
    // a=b, c=d, mask is {2,0, 2,0}
    CRYPTOPP_UNUSED(b); CRYPTOPP_UNUSED(d);
    const uint8x16_p mask = {16,17,18,19, 24,25,26,27, 0,1,2,3, 8,9,10,11};
    return VecPermute(a, c, mask);
}

template<>
uint32x4_p VectorSet32<3,1,3,1>(const uint32x4_p a, const uint32x4_p b,
                                const uint32x4_p c, const uint32x4_p d)
{
    // a=b, c=d, mask is {3,1, 3,1}
    CRYPTOPP_UNUSED(b); CRYPTOPP_UNUSED(d);
    const uint8x16_p mask = {20,21,22,23, 28,29,30,31, 4,5,6,7, 12,13,14,15};
    return VecPermute(a, c, mask);
}

void BLAKE2_Compress32_ALTIVEC(const byte* input, BLAKE2s_State& state)
{
    # define m1 m0
    # define m2 m0
    # define m3 m0

    # define m5 m4
    # define m6 m4
    # define m7 m4

    # define m9 m8
    # define m10 m8
    # define m11 m8

    # define m13 m12
    # define m14 m12
    # define m15 m12
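
    // Each of m0, m4, m8 and m12 holds four message words. The aliases
    // preserve the original word numbering m0..m15 in the load macros;
    // VectorSet32 reduces its element indices mod 4 to pick the word
    // within the owning vector.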

    // #define BLAKE2S_LOAD_MSG_0_1(buf) buf = VectorSet32<6,4,2,0>(m6,m4,m2,m0);
    #define BLAKE2S_LOAD_MSG_0_1(buf) buf = VectorSet32<2,0,2,0>(m6,m4,m2,m0);
    // #define BLAKE2S_LOAD_MSG_0_2(buf) buf = VectorSet32<7,5,3,1>(m7,m5,m3,m1);
    #define BLAKE2S_LOAD_MSG_0_2(buf) buf = VectorSet32<3,1,3,1>(m7,m5,m3,m1);
    // #define BLAKE2S_LOAD_MSG_0_3(buf) buf = VectorSet32<14,12,10,8>(m14,m12,m10,m8);
    #define BLAKE2S_LOAD_MSG_0_3(buf) buf = VectorSet32<2,0,2,0>(m14,m12,m10,m8);
    // #define BLAKE2S_LOAD_MSG_0_4(buf) buf = VectorSet32<15,13,11,9>(m15,m13,m11,m9);
    #define BLAKE2S_LOAD_MSG_0_4(buf) buf = VectorSet32<3,1,3,1>(m15,m13,m11,m9);
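
    // For example, BLAKE2S_LOAD_MSG_0_2 assembles message words
    // {w1, w3, w5, w7} (low element first): the <3,1,3,1> specialization
    // pulls the odd elements out of m3 (words 0..3) and m7 (words 4..7),
    // matching _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(3,1,3,1)) in the
    // SSE4.1 path.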

    #define BLAKE2S_LOAD_MSG_1_1(buf) buf = VectorSet32<13,9,4,14>(m13,m9,m4,m14);
    #define BLAKE2S_LOAD_MSG_1_2(buf) buf = VectorSet32<6,15,8,10>(m6,m15,m8,m10)
    #define BLAKE2S_LOAD_MSG_1_3(buf) buf = VectorSet32<5,11,0,1>(m5,m11,m0,m1)
    #define BLAKE2S_LOAD_MSG_1_4(buf) buf = VectorSet32<3,7,2,12>(m3,m7,m2,m12)

    #define BLAKE2S_LOAD_MSG_2_1(buf) buf = VectorSet32<15,5,12,11>(m15,m5,m12,m11)
    #define BLAKE2S_LOAD_MSG_2_2(buf) buf = VectorSet32<13,2,0,8>(m13,m2,m0,m8)
    #define BLAKE2S_LOAD_MSG_2_3(buf) buf = VectorSet32<9,7,3,10>(m9,m7,m3,m10)
    #define BLAKE2S_LOAD_MSG_2_4(buf) buf = VectorSet32<4,1,6,14>(m4,m1,m6,m14)

    #define BLAKE2S_LOAD_MSG_3_1(buf) buf = VectorSet32<11,13,3,7>(m11,m13,m3,m7)
    #define BLAKE2S_LOAD_MSG_3_2(buf) buf = VectorSet32<14,12,1,9>(m14,m12,m1,m9)
    #define BLAKE2S_LOAD_MSG_3_3(buf) buf = VectorSet32<15,4,5,2>(m15,m4,m5,m2)
    #define BLAKE2S_LOAD_MSG_3_4(buf) buf = VectorSet32<8,0,10,6>(m8,m0,m10,m6)

    #define BLAKE2S_LOAD_MSG_4_1(buf) buf = VectorSet32<10,2,5,9>(m10,m2,m5,m9)
    #define BLAKE2S_LOAD_MSG_4_2(buf) buf = VectorSet32<15,4,7,0>(m15,m4,m7,m0)
    #define BLAKE2S_LOAD_MSG_4_3(buf) buf = VectorSet32<3,6,11,14>(m3,m6,m11,m14)
    #define BLAKE2S_LOAD_MSG_4_4(buf) buf = VectorSet32<13,8,12,1>(m13,m8,m12,m1)

    #define BLAKE2S_LOAD_MSG_5_1(buf) buf = VectorSet32<8,0,6,2>(m8,m0,m6,m2)
    #define BLAKE2S_LOAD_MSG_5_2(buf) buf = VectorSet32<3,11,10,12>(m3,m11,m10,m12)
    #define BLAKE2S_LOAD_MSG_5_3(buf) buf = VectorSet32<1,15,7,4>(m1,m15,m7,m4)
    #define BLAKE2S_LOAD_MSG_5_4(buf) buf = VectorSet32<9,14,5,13>(m9,m14,m5,m13)

    #define BLAKE2S_LOAD_MSG_6_1(buf) buf = VectorSet32<4,14,1,12>(m4,m14,m1,m12)
    #define BLAKE2S_LOAD_MSG_6_2(buf) buf = VectorSet32<10,13,15,5>(m10,m13,m15,m5)
    #define BLAKE2S_LOAD_MSG_6_3(buf) buf = VectorSet32<8,9,6,0>(m8,m9,m6,m0)
    #define BLAKE2S_LOAD_MSG_6_4(buf) buf = VectorSet32<11,2,3,7>(m11,m2,m3,m7)

    #define BLAKE2S_LOAD_MSG_7_1(buf) buf = VectorSet32<3,12,7,13>(m3,m12,m7,m13)
    #define BLAKE2S_LOAD_MSG_7_2(buf) buf = VectorSet32<9,1,14,11>(m9,m1,m14,m11)
    #define BLAKE2S_LOAD_MSG_7_3(buf) buf = VectorSet32<2,8,15,5>(m2,m8,m15,m5)
    #define BLAKE2S_LOAD_MSG_7_4(buf) buf = VectorSet32<10,6,4,0>(m10,m6,m4,m0)

    #define BLAKE2S_LOAD_MSG_8_1(buf) buf = VectorSet32<0,11,14,6>(m0,m11,m14,m6)
    #define BLAKE2S_LOAD_MSG_8_2(buf) buf = VectorSet32<8,3,9,15>(m8,m3,m9,m15)
    #define BLAKE2S_LOAD_MSG_8_3(buf) buf = VectorSet32<10,1,13,12>(m10,m1,m13,m12)
    #define BLAKE2S_LOAD_MSG_8_4(buf) buf = VectorSet32<5,4,7,2>(m5,m4,m7,m2)

    #define BLAKE2S_LOAD_MSG_9_1(buf) buf = VectorSet32<1,7,8,10>(m1,m7,m8,m10)
    #define BLAKE2S_LOAD_MSG_9_2(buf) buf = VectorSet32<5,6,4,2>(m5,m6,m4,m2)
    #define BLAKE2S_LOAD_MSG_9_3(buf) buf = VectorSet32<13,3,9,15>(m13,m3,m9,m15)
    #define BLAKE2S_LOAD_MSG_9_4(buf) buf = VectorSet32<0,12,14,11>(m0,m12,m14,m11)

    #define vec_ror_16(x) VecRotateRight<16>(x)
    #define vec_ror_12(x) VecRotateRight<12>(x)
    #define vec_ror_8(x)  VecRotateRight<8>(x)
    #define vec_ror_7(x)  VecRotateRight<7>(x)
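
    // 16, 12, 8 and 7 are the BLAKE2s G-function rotation constants, the
    // same amounts used by the SSE4.1 and NEON paths above.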

    #define BLAKE2S_G1(row1,row2,row3,row4,buf) \
      row1 = VecAdd(VecAdd(row1, buf), row2); \
      row4 = VecXor(row4, row1); \
      row4 = vec_ror_16(row4); \
      row3 = VecAdd(row3, row4);   \
      row2 = VecXor(row2, row3); \
      row2 = vec_ror_12(row2);

    #define BLAKE2S_G2(row1,row2,row3,row4,buf) \
      row1 = VecAdd(VecAdd(row1, buf), row2); \
      row4 = VecXor(row4, row1); \
      row4 = vec_ror_8(row4); \
      row3 = VecAdd(row3, row4);   \
      row2 = VecXor(row2, row3); \
      row2 = vec_ror_7(row2);

    const uint8x16_p D2103_MASK = {12,13,14,15, 0,1,2,3, 4,5,6,7, 8,9,10,11};
    const uint8x16_p D1032_MASK = {8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7};
    const uint8x16_p D0321_MASK = {4,5,6,7, 8,9,10,11, 12,13,14,15, 0,1,2,3};

    #define BLAKE2S_DIAGONALIZE(row1,row2,row3,row4) \
      row4 = VecPermute(row4, row4, D2103_MASK); \
      row3 = VecPermute(row3, row3, D1032_MASK); \
      row2 = VecPermute(row2, row2, D0321_MASK);

    #define BLAKE2S_UNDIAGONALIZE(row1,row2,row3,row4) \
      row4 = VecPermute(row4, row4, D0321_MASK); \
      row3 = VecPermute(row3, row3, D1032_MASK); \
      row2 = VecPermute(row2, row2, D2103_MASK);

    #define BLAKE2S_ROUND(r)  \
      BLAKE2S_LOAD_MSG_ ##r ##_1(buf1); \
      BLAKE2S_G1(row1,row2,row3,row4,buf1); \
      BLAKE2S_LOAD_MSG_ ##r ##_2(buf2); \
      BLAKE2S_G2(row1,row2,row3,row4,buf2); \
      BLAKE2S_DIAGONALIZE(row1,row2,row3,row4); \
      BLAKE2S_LOAD_MSG_ ##r ##_3(buf3); \
      BLAKE2S_G1(row1,row2,row3,row4,buf3); \
      BLAKE2S_LOAD_MSG_ ##r ##_4(buf4); \
      BLAKE2S_G2(row1,row2,row3,row4,buf4); \
      BLAKE2S_UNDIAGONALIZE(row1,row2,row3,row4);

    // Possibly unaligned user messages
    uint32x4_p m0, m4, m8, m12;
    // Endian conversion mask
    const uint8x16_p le_mask = {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12};

#if defined(_ARCH_PWR9)
    // POWER9 provides loads for char's and short's
    m0 = (uint32x4_p) vec_xl(  0, CONST_V8_CAST( input ));
    m4 = (uint32x4_p) vec_xl( 16, CONST_V8_CAST( input ));
    m8 = (uint32x4_p) vec_xl( 32, CONST_V8_CAST( input ));
    m12 = (uint32x4_p) vec_xl( 48, CONST_V8_CAST( input ));

# if defined(CRYPTOPP_BIG_ENDIAN)
    m0 = vec_perm(m0, m0, le_mask);
    m4 = vec_perm(m4, m4, le_mask);
    m8 = vec_perm(m8, m8, le_mask);
    m12 = vec_perm(m12, m12, le_mask);
# endif
#else
    // Altivec only provides 16-byte aligned loads
    // http://www.nxp.com/docs/en/reference-manual/ALTIVECPEM.pdf
    m0 = (uint32x4_p) vec_ld(  0, CONST_V8_CAST( input ));
    m4 = (uint32x4_p) vec_ld( 16, CONST_V8_CAST( input ));
    m8 = (uint32x4_p) vec_ld( 32, CONST_V8_CAST( input ));
    m12 = (uint32x4_p) vec_ld( 48, CONST_V8_CAST( input ));

    // Alignment check for load of the message buffer
    const uintptr_t addr = (uintptr_t)input;
    if (addr%16 == 0)
    {
        // Already aligned. Perform a little-endian swap as required
# if defined(CRYPTOPP_BIG_ENDIAN)
        m0 = vec_perm(m0, m0, le_mask);
        m4 = vec_perm(m4, m4, le_mask);
        m8 = vec_perm(m8, m8, le_mask);
        m12 = vec_perm(m12, m12, le_mask);
# endif
    }
    else
    {
        // Not aligned. Fix vectors and perform a little-endian swap as required
        // http://mirror.informatimago.com/next/developer.apple.com/
        //        hardwaredrivers/ve/code_optimization.html
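        // vec_ld rounds its address down to a 16-byte boundary, so the
        // four loads above returned shifted copies of the message. The
        // extra load at 48+15 captures the tail block, and vec_lvsl
        // supplies the permute that shifts the data back into place.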
        uint32x4_p ex; uint8x16_p perm;
        ex = (uint32x4_p) vec_ld(48+15, CONST_V8_CAST( input ));
        perm = vec_lvsl(0, CONST_V8_CAST( addr ));

# if defined(CRYPTOPP_BIG_ENDIAN)
        // Combine the vector permute with the little-endian swap
        perm = vec_perm(perm, perm, le_mask);
# endif

        m0 = vec_perm(m0, m4, perm);
        m4 = vec_perm(m4, m8, perm);
        m8 = vec_perm(m8, m12, perm);
        m12 = vec_perm(m12, ex, perm);
    }
#endif

    uint32x4_p row1, row2, row3, row4;
    uint32x4_p buf1, buf2, buf3, buf4;
    uint32x4_p  ff0,  ff1;

    row1 = ff0 = VecLoad32LE(state.h()+0, le_mask);
    row2 = ff1 = VecLoad32LE(state.h()+4, le_mask);
    row3 = VecLoad32(BLAKE2S_IV+0);
    row4 = VecXor(VecLoad32(BLAKE2S_IV+4), VecLoad32(state.t()+0));

    BLAKE2S_ROUND(0);
    BLAKE2S_ROUND(1);
    BLAKE2S_ROUND(2);
    BLAKE2S_ROUND(3);
    BLAKE2S_ROUND(4);
    BLAKE2S_ROUND(5);
    BLAKE2S_ROUND(6);
    BLAKE2S_ROUND(7);
    BLAKE2S_ROUND(8);
    BLAKE2S_ROUND(9);

    VecStore32LE(state.h()+0, VecXor(ff0, VecXor(row1, row3)), le_mask);
    VecStore32LE(state.h()+4, VecXor(ff1, VecXor(row2, row4)), le_mask);
}
#endif  // CRYPTOPP_ALTIVEC_AVAILABLE

NAMESPACE_END