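/* rotate each 32-bit lane of A left by IMM bits using shift/shift/or */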
#define VEC4_ROT(A, IMM) \
    _mm_or_si128(_mm_slli_epi32(A, IMM), _mm_srli_epi32(A, (32 - IMM)))

/* ChaCha quarter-round on four blocks at once; the 16- and 8-bit rotations
 * are done with byte shuffles (rot16/rot8) instead of shift/shift/or, which
 * is faster */
#define VEC4_QUARTERROUND_SHUFFLE(A, B, C, D) \
    x_##A = _mm_add_epi32(x_##A, x_##B);      \
    t_##A = _mm_xor_si128(x_##D, x_##A);      \
    x_##D = _mm_shuffle_epi8(t_##A, rot16);   \
    x_##C = _mm_add_epi32(x_##C, x_##D);      \
    t_##C = _mm_xor_si128(x_##B, x_##C);      \
    x_##B = VEC4_ROT(t_##C, 12);              \
    x_##A = _mm_add_epi32(x_##A, x_##B);      \
    t_##A = _mm_xor_si128(x_##D, x_##A);      \
    x_##D = _mm_shuffle_epi8(t_##A, rot8);    \
    x_##C = _mm_add_epi32(x_##C, x_##D);      \
    t_##C = _mm_xor_si128(x_##B, x_##C);      \
    x_##B = VEC4_ROT(t_##C, 7)

#define VEC4_QUARTERROUND(A, B, C, D) VEC4_QUARTERROUND_SHUFFLE(A, B, C, D)

if (bytes >= 256) {
    /* constants for shuffling bytes (replacing multiple-of-8 rotates) */
    __m128i rot16 =
        _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
    __m128i rot8 =
        _mm_set_epi8(14, 13, 12, 15, 10, 9, 8, 11, 6, 5, 4, 7, 2, 1, 0, 3);
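    /* with these masks, _mm_shuffle_epi8 maps each little-endian lane
     * [b0 b1 b2 b3] to [b2 b3 b0 b1] (rot16) or [b3 b0 b1 b2] (rot8),
     * i.e. a left rotation of the 32-bit word by 16 or 8 bits */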

    __m128i x_0  = _mm_set1_epi32(x[0]);
    __m128i x_1  = _mm_set1_epi32(x[1]);
    __m128i x_2  = _mm_set1_epi32(x[2]);
    __m128i x_3  = _mm_set1_epi32(x[3]);
    __m128i x_4  = _mm_set1_epi32(x[4]);
    __m128i x_5  = _mm_set1_epi32(x[5]);
    __m128i x_6  = _mm_set1_epi32(x[6]);
    __m128i x_7  = _mm_set1_epi32(x[7]);
    __m128i x_8  = _mm_set1_epi32(x[8]);
    __m128i x_9  = _mm_set1_epi32(x[9]);
    __m128i x_10 = _mm_set1_epi32(x[10]);
    __m128i x_11 = _mm_set1_epi32(x[11]);
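    /* x_12/x_13 (and orig12/orig13 below) hold the per-block counters;
     * they are rebuilt from x[12]/x[13] at the top of each loop iteration */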
    __m128i x_12;
    __m128i x_13;
    __m128i x_14   = _mm_set1_epi32(x[14]);
    __m128i x_15   = _mm_set1_epi32(x[15]);
    __m128i orig0  = x_0;
    __m128i orig1  = x_1;
    __m128i orig2  = x_2;
    __m128i orig3  = x_3;
    __m128i orig4  = x_4;
    __m128i orig5  = x_5;
    __m128i orig6  = x_6;
    __m128i orig7  = x_7;
    __m128i orig8  = x_8;
    __m128i orig9  = x_9;
    __m128i orig10 = x_10;
    __m128i orig11 = x_11;
    __m128i orig12;
    __m128i orig13;
    __m128i orig14 = x_14;
    __m128i orig15 = x_15;
    __m128i t_0, t_1, t_2, t_3, t_4, t_5, t_6, t_7, t_8, t_9, t_10, t_11, t_12,
        t_13, t_14, t_15;

    uint32_t in12, in13;
    int      i;

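    /* process four ChaCha blocks (256 bytes) per iteration: each state word
     * lives in its own register, with the four 32-bit lanes belonging to
     * four different blocks */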
    while (bytes >= 256) {
        const __m128i addv12 = _mm_set_epi64x(1, 0);
        const __m128i addv13 = _mm_set_epi64x(3, 2);
        __m128i       t12, t13;
        uint64_t      in1213;

        x_0  = orig0;
        x_1  = orig1;
        x_2  = orig2;
        x_3  = orig3;
        x_4  = orig4;
        x_5  = orig5;
        x_6  = orig6;
        x_7  = orig7;
        x_8  = orig8;
        x_9  = orig9;
        x_10 = orig10;
        x_11 = orig11;
        x_14 = orig14;
        x_15 = orig15;

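        /* build four consecutive 64-bit block counters (counter + 0..3) and
         * transpose them so that x_12 holds the four low 32-bit words and
         * x_13 the four high words, then advance the stored counter by 4 */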
        in12   = x[12];
        in13   = x[13];
        in1213 = ((uint64_t) in12) | (((uint64_t) in13) << 32);
        t12    = _mm_set1_epi64x(in1213);
        t13    = _mm_set1_epi64x(in1213);

        x_12 = _mm_add_epi64(addv12, t12);
        x_13 = _mm_add_epi64(addv13, t13);

        t12 = _mm_unpacklo_epi32(x_12, x_13);
        t13 = _mm_unpackhi_epi32(x_12, x_13);

        x_12 = _mm_unpacklo_epi32(t12, t13);
        x_13 = _mm_unpackhi_epi32(t12, t13);

        orig12 = x_12;
        orig13 = x_13;

        in1213 += 4;

        x[12] = in1213 & 0xFFFFFFFF;
        x[13] = (in1213 >> 32) & 0xFFFFFFFF;

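        /* ROUNDS/2 double rounds: 4 column quarter-rounds followed by
         * 4 diagonal quarter-rounds, applied to all four blocks at once */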
        for (i = 0; i < ROUNDS; i += 2) {
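            /* column quarter-rounds */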
            VEC4_QUARTERROUND(0, 4, 8, 12);
            VEC4_QUARTERROUND(1, 5, 9, 13);
            VEC4_QUARTERROUND(2, 6, 10, 14);
            VEC4_QUARTERROUND(3, 7, 11, 15);
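            /* diagonal quarter-rounds */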
            VEC4_QUARTERROUND(0, 5, 10, 15);
            VEC4_QUARTERROUND(1, 6, 11, 12);
            VEC4_QUARTERROUND(2, 7, 8, 13);
            VEC4_QUARTERROUND(3, 4, 9, 14);
        }

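/* add the original state back in, transpose so that each register holds four
 * consecutive words of one block, then XOR with the message at 64-byte
 * strides (one stride per block) and store the ciphertext */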
#define ONEQUAD_TRANSPOSE(A, B, C, D)                                     \
    {                                                                     \
        __m128i t0, t1, t2, t3;                                           \
                                                                          \
        x_##A = _mm_add_epi32(x_##A, orig##A);                            \
        x_##B = _mm_add_epi32(x_##B, orig##B);                            \
        x_##C = _mm_add_epi32(x_##C, orig##C);                            \
        x_##D = _mm_add_epi32(x_##D, orig##D);                            \
        t_##A = _mm_unpacklo_epi32(x_##A, x_##B);                         \
        t_##B = _mm_unpacklo_epi32(x_##C, x_##D);                         \
        t_##C = _mm_unpackhi_epi32(x_##A, x_##B);                         \
        t_##D = _mm_unpackhi_epi32(x_##C, x_##D);                         \
        x_##A = _mm_unpacklo_epi64(t_##A, t_##B);                         \
        x_##B = _mm_unpackhi_epi64(t_##A, t_##B);                         \
        x_##C = _mm_unpacklo_epi64(t_##C, t_##D);                         \
        x_##D = _mm_unpackhi_epi64(t_##C, t_##D);                         \
                                                                          \
        t0 = _mm_xor_si128(x_##A, _mm_loadu_si128((__m128i*) (m + 0)));   \
        _mm_storeu_si128((__m128i*) (c + 0), t0);                         \
        t1 = _mm_xor_si128(x_##B, _mm_loadu_si128((__m128i*) (m + 64)));  \
        _mm_storeu_si128((__m128i*) (c + 64), t1);                        \
        t2 = _mm_xor_si128(x_##C, _mm_loadu_si128((__m128i*) (m + 128))); \
        _mm_storeu_si128((__m128i*) (c + 128), t2);                       \
        t3 = _mm_xor_si128(x_##D, _mm_loadu_si128((__m128i*) (m + 192))); \
        _mm_storeu_si128((__m128i*) (c + 192), t3);                       \
    }

#define ONEQUAD(A, B, C, D) ONEQUAD_TRANSPOSE(A, B, C, D)

        ONEQUAD(0, 1, 2, 3);
        m += 16;
        c += 16;
        ONEQUAD(4, 5, 6, 7);
        m += 16;
        c += 16;
        ONEQUAD(8, 9, 10, 11);
        m += 16;
        c += 16;
        ONEQUAD(12, 13, 14, 15);
        m -= 48;
        c -= 48;
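        /* each ONEQUAD handled 16 bytes of all four blocks; the +16 steps
         * walked through the four 16-byte word groups and the -48 rewinds,
         * so the pointers can now advance by a full 256 bytes */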

#undef ONEQUAD
#undef ONEQUAD_TRANSPOSE

        bytes -= 256;
        c += 256;
        m += 256;
    }
}
#undef VEC4_ROT
#undef VEC4_QUARTERROUND
#undef VEC4_QUARTERROUND_SHUFFLE