#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

use crate::guts::{
    assemble_count, count_high, count_low, final_block, flag_word, input_debug_asserts, Finalize,
    Job, Stride,
};
use crate::{Word, BLOCKBYTES, IV, SIGMA};
use arrayref::{array_refs, mut_array_refs};
use core::cmp;
use core::mem;

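// This implementation compresses DEGREE = 2 inputs in parallel: each 128-bit
// SSE vector holds one 64-bit word from each of the two jobs.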
pub const DEGREE: usize = 2;

#[inline(always)]
unsafe fn loadu(src: *const [Word; DEGREE]) -> __m128i {
    // This is an unaligned load, so the pointer cast is allowed.
    _mm_loadu_si128(src as *const __m128i)
}

#[inline(always)]
unsafe fn storeu(src: __m128i, dest: *mut [Word; DEGREE]) {
    // This is an unaligned store, so the pointer cast is allowed.
    _mm_storeu_si128(dest as *mut __m128i, src)
}

#[inline(always)]
unsafe fn add(a: __m128i, b: __m128i) -> __m128i {
    _mm_add_epi64(a, b)
}

#[inline(always)]
unsafe fn eq(a: __m128i, b: __m128i) -> __m128i {
    _mm_cmpeq_epi64(a, b)
}

#[inline(always)]
unsafe fn and(a: __m128i, b: __m128i) -> __m128i {
    _mm_and_si128(a, b)
}

#[inline(always)]
unsafe fn negate_and(a: __m128i, b: __m128i) -> __m128i {
    // Note that "and not" implies the reverse of the actual arg order.
    _mm_andnot_si128(a, b)
}

#[inline(always)]
unsafe fn xor(a: __m128i, b: __m128i) -> __m128i {
    _mm_xor_si128(a, b)
}

#[inline(always)]
unsafe fn set1(x: u64) -> __m128i {
    _mm_set1_epi64x(x as i64)
}

#[inline(always)]
unsafe fn set2(a: u64, b: u64) -> __m128i {
    // There's no _mm_setr_epi64x, so note the arg order is backwards.
    _mm_set_epi64x(b as i64, a as i64)
}

// Adapted from https://github.com/rust-lang-nursery/stdsimd/pull/479.
macro_rules! _MM_SHUFFLE {
    ($z:expr, $y:expr, $x:expr, $w:expr) => {
        ($z << 6) | ($y << 4) | ($x << 2) | $w
    };
}
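
// For example, _MM_SHUFFLE!(2, 3, 0, 1) evaluates to 0b10_11_00_01, the
// _mm_shuffle_epi32 immediate that swaps the two 32-bit halves of each 64-bit
// lane, which is how rot32 below rotates each lane by 32 bits.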

#[inline(always)]
unsafe fn rot32(x: __m128i) -> __m128i {
    _mm_shuffle_epi32(x, _MM_SHUFFLE!(2, 3, 0, 1))
}

#[inline(always)]
unsafe fn rot24(x: __m128i) -> __m128i {
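    // Rotate each 64-bit lane right by 24 bits (3 bytes) with a single byte
    // shuffle: output byte i of each lane comes from input byte (i + 3) % 8,
    // which is what the rotate24 mask encodes.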
    let rotate24 = _mm_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10);
    _mm_shuffle_epi8(x, rotate24)
}

#[inline(always)]
unsafe fn rot16(x: __m128i) -> __m128i {
    let rotate16 = _mm_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9);
    _mm_shuffle_epi8(x, rotate16)
}

#[inline(always)]
unsafe fn rot63(x: __m128i) -> __m128i {
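    // Rotating right by 63 is the same as rotating left by 1, i.e.
    // (x >> 63) | (x << 1). add(x, x) stands in for the left shift by one,
    // presumably because the vector add is at least as cheap as a shift here.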
    _mm_or_si128(_mm_srli_epi64(x, 63), add(x, x))
}

#[inline(always)]
unsafe fn round(v: &mut [__m128i; 16], m: &[__m128i; 16], r: usize) {
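    // One BLAKE2b round over the transposed state: v[i] holds state word i for
    // both jobs at once. The first half applies the G function to the columns
    // (v0, v4, v8, v12) .. (v3, v7, v11, v15); the second half applies it to
    // the diagonals (v0, v5, v10, v15), (v1, v6, v11, v12), (v2, v7, v8, v13),
    // (v3, v4, v9, v14), using the usual 32/24/16/63-bit rotations.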
    v[0] = add(v[0], m[SIGMA[r][0] as usize]);
    v[1] = add(v[1], m[SIGMA[r][2] as usize]);
    v[2] = add(v[2], m[SIGMA[r][4] as usize]);
    v[3] = add(v[3], m[SIGMA[r][6] as usize]);
    v[0] = add(v[0], v[4]);
    v[1] = add(v[1], v[5]);
    v[2] = add(v[2], v[6]);
    v[3] = add(v[3], v[7]);
    v[12] = xor(v[12], v[0]);
    v[13] = xor(v[13], v[1]);
    v[14] = xor(v[14], v[2]);
    v[15] = xor(v[15], v[3]);
    v[12] = rot32(v[12]);
    v[13] = rot32(v[13]);
    v[14] = rot32(v[14]);
    v[15] = rot32(v[15]);
    v[8] = add(v[8], v[12]);
    v[9] = add(v[9], v[13]);
    v[10] = add(v[10], v[14]);
    v[11] = add(v[11], v[15]);
    v[4] = xor(v[4], v[8]);
    v[5] = xor(v[5], v[9]);
    v[6] = xor(v[6], v[10]);
    v[7] = xor(v[7], v[11]);
    v[4] = rot24(v[4]);
    v[5] = rot24(v[5]);
    v[6] = rot24(v[6]);
    v[7] = rot24(v[7]);
    v[0] = add(v[0], m[SIGMA[r][1] as usize]);
    v[1] = add(v[1], m[SIGMA[r][3] as usize]);
    v[2] = add(v[2], m[SIGMA[r][5] as usize]);
    v[3] = add(v[3], m[SIGMA[r][7] as usize]);
    v[0] = add(v[0], v[4]);
    v[1] = add(v[1], v[5]);
    v[2] = add(v[2], v[6]);
    v[3] = add(v[3], v[7]);
    v[12] = xor(v[12], v[0]);
    v[13] = xor(v[13], v[1]);
    v[14] = xor(v[14], v[2]);
    v[15] = xor(v[15], v[3]);
    v[12] = rot16(v[12]);
    v[13] = rot16(v[13]);
    v[14] = rot16(v[14]);
    v[15] = rot16(v[15]);
    v[8] = add(v[8], v[12]);
    v[9] = add(v[9], v[13]);
    v[10] = add(v[10], v[14]);
    v[11] = add(v[11], v[15]);
    v[4] = xor(v[4], v[8]);
    v[5] = xor(v[5], v[9]);
    v[6] = xor(v[6], v[10]);
    v[7] = xor(v[7], v[11]);
    v[4] = rot63(v[4]);
    v[5] = rot63(v[5]);
    v[6] = rot63(v[6]);
    v[7] = rot63(v[7]);

    v[0] = add(v[0], m[SIGMA[r][8] as usize]);
    v[1] = add(v[1], m[SIGMA[r][10] as usize]);
    v[2] = add(v[2], m[SIGMA[r][12] as usize]);
    v[3] = add(v[3], m[SIGMA[r][14] as usize]);
    v[0] = add(v[0], v[5]);
    v[1] = add(v[1], v[6]);
    v[2] = add(v[2], v[7]);
    v[3] = add(v[3], v[4]);
    v[15] = xor(v[15], v[0]);
    v[12] = xor(v[12], v[1]);
    v[13] = xor(v[13], v[2]);
    v[14] = xor(v[14], v[3]);
    v[15] = rot32(v[15]);
    v[12] = rot32(v[12]);
    v[13] = rot32(v[13]);
    v[14] = rot32(v[14]);
    v[10] = add(v[10], v[15]);
    v[11] = add(v[11], v[12]);
    v[8] = add(v[8], v[13]);
    v[9] = add(v[9], v[14]);
    v[5] = xor(v[5], v[10]);
    v[6] = xor(v[6], v[11]);
    v[7] = xor(v[7], v[8]);
    v[4] = xor(v[4], v[9]);
    v[5] = rot24(v[5]);
    v[6] = rot24(v[6]);
    v[7] = rot24(v[7]);
    v[4] = rot24(v[4]);
    v[0] = add(v[0], m[SIGMA[r][9] as usize]);
    v[1] = add(v[1], m[SIGMA[r][11] as usize]);
    v[2] = add(v[2], m[SIGMA[r][13] as usize]);
    v[3] = add(v[3], m[SIGMA[r][15] as usize]);
    v[0] = add(v[0], v[5]);
    v[1] = add(v[1], v[6]);
    v[2] = add(v[2], v[7]);
    v[3] = add(v[3], v[4]);
    v[15] = xor(v[15], v[0]);
    v[12] = xor(v[12], v[1]);
    v[13] = xor(v[13], v[2]);
    v[14] = xor(v[14], v[3]);
    v[15] = rot16(v[15]);
    v[12] = rot16(v[12]);
    v[13] = rot16(v[13]);
    v[14] = rot16(v[14]);
    v[10] = add(v[10], v[15]);
    v[11] = add(v[11], v[12]);
    v[8] = add(v[8], v[13]);
    v[9] = add(v[9], v[14]);
    v[5] = xor(v[5], v[10]);
    v[6] = xor(v[6], v[11]);
    v[7] = xor(v[7], v[8]);
    v[4] = xor(v[4], v[9]);
    v[5] = rot63(v[5]);
    v[6] = rot63(v[6]);
    v[7] = rot63(v[7]);
    v[4] = rot63(v[4]);
}

// We'd rather make this a regular function with #[inline(always)], but for
// some reason that blows up compile times by about 10 seconds, at least in
// some cases (BLAKE2b avx2.rs). This macro seems to get the same performance
// result, without the compile time issue.
macro_rules! compress2_transposed {
    (
        $h_vecs:expr,
        $msg_vecs:expr,
        $count_low:expr,
        $count_high:expr,
        $lastblock:expr,
        $lastnode:expr,
    ) => {
        let h_vecs: &mut [__m128i; 8] = $h_vecs;
        let msg_vecs: &[__m128i; 16] = $msg_vecs;
        let count_low: __m128i = $count_low;
        let count_high: __m128i = $count_high;
        let lastblock: __m128i = $lastblock;
        let lastnode: __m128i = $lastnode;
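        // Build the 16-word working state: the first eight vectors are the
        // chaining value, the rest are the IV with the message counter and the
        // finalization flags XORed into the last four words.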
        let mut v = [
            h_vecs[0],
            h_vecs[1],
            h_vecs[2],
            h_vecs[3],
            h_vecs[4],
            h_vecs[5],
            h_vecs[6],
            h_vecs[7],
            set1(IV[0]),
            set1(IV[1]),
            set1(IV[2]),
            set1(IV[3]),
            xor(set1(IV[4]), count_low),
            xor(set1(IV[5]), count_high),
            xor(set1(IV[6]), lastblock),
            xor(set1(IV[7]), lastnode),
        ];

        round(&mut v, &msg_vecs, 0);
        round(&mut v, &msg_vecs, 1);
        round(&mut v, &msg_vecs, 2);
        round(&mut v, &msg_vecs, 3);
        round(&mut v, &msg_vecs, 4);
        round(&mut v, &msg_vecs, 5);
        round(&mut v, &msg_vecs, 6);
        round(&mut v, &msg_vecs, 7);
        round(&mut v, &msg_vecs, 8);
        round(&mut v, &msg_vecs, 9);
        round(&mut v, &msg_vecs, 10);
        round(&mut v, &msg_vecs, 11);

        h_vecs[0] = xor(xor(h_vecs[0], v[0]), v[8]);
        h_vecs[1] = xor(xor(h_vecs[1], v[1]), v[9]);
        h_vecs[2] = xor(xor(h_vecs[2], v[2]), v[10]);
        h_vecs[3] = xor(xor(h_vecs[3], v[3]), v[11]);
        h_vecs[4] = xor(xor(h_vecs[4], v[4]), v[12]);
        h_vecs[5] = xor(xor(h_vecs[5], v[5]), v[13]);
        h_vecs[6] = xor(xor(h_vecs[6], v[6]), v[14]);
        h_vecs[7] = xor(xor(h_vecs[7], v[7]), v[15]);
    };
}

#[inline(always)]
unsafe fn transpose_vecs(a: __m128i, b: __m128i) -> [__m128i; DEGREE] {
    let a_words: [Word; DEGREE] = mem::transmute(a);
    let b_words: [Word; DEGREE] = mem::transmute(b);
    [set2(a_words[0], b_words[0]), set2(a_words[1], b_words[1])]
}

#[inline(always)]
unsafe fn transpose_state_vecs(jobs: &[Job; DEGREE]) -> [__m128i; 8] {
    // Load all the state words into transposed vectors, where the first vector
    // has the first word of each state, etc. Transposing once at the beginning
    // and once at the end is more efficient than repeating it for each block.
    let words0 = array_refs!(&jobs[0].words, DEGREE, DEGREE, DEGREE, DEGREE);
    let words1 = array_refs!(&jobs[1].words, DEGREE, DEGREE, DEGREE, DEGREE);
    let [h0, h1] = transpose_vecs(loadu(words0.0), loadu(words1.0));
    let [h2, h3] = transpose_vecs(loadu(words0.1), loadu(words1.1));
    let [h4, h5] = transpose_vecs(loadu(words0.2), loadu(words1.2));
    let [h6, h7] = transpose_vecs(loadu(words0.3), loadu(words1.3));
    [h0, h1, h2, h3, h4, h5, h6, h7]
}

#[inline(always)]
unsafe fn untranspose_state_vecs(h_vecs: &[__m128i; 8], jobs: &mut [Job; DEGREE]) {
    // Un-transpose the updated state vectors back into the caller's arrays.
    let [job0, job1] = jobs;
    let words0 = mut_array_refs!(&mut job0.words, DEGREE, DEGREE, DEGREE, DEGREE);
    let words1 = mut_array_refs!(&mut job1.words, DEGREE, DEGREE, DEGREE, DEGREE);

    let out = transpose_vecs(h_vecs[0], h_vecs[1]);
    storeu(out[0], words0.0);
    storeu(out[1], words1.0);
    let out = transpose_vecs(h_vecs[2], h_vecs[3]);
    storeu(out[0], words0.1);
    storeu(out[1], words1.1);
    let out = transpose_vecs(h_vecs[4], h_vecs[5]);
    storeu(out[0], words0.2);
    storeu(out[1], words1.2);
    let out = transpose_vecs(h_vecs[6], h_vecs[7]);
    storeu(out[0], words0.3);
    storeu(out[1], words1.3);
}

#[inline(always)]
unsafe fn transpose_msg_vecs(blocks: [*const [u8; BLOCKBYTES]; DEGREE]) -> [__m128i; 16] {
    // These input arrays have no particular alignment, so we use unaligned
    // loads to read from them.
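    // Each 128-byte block is sixteen 64-bit message words. Loading DEGREE (= 2)
    // words at a time from each block and transposing leaves m[i] holding
    // message word i from both jobs side by side.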
    let block0 = blocks[0] as *const [Word; DEGREE];
    let block1 = blocks[1] as *const [Word; DEGREE];
    let [m0, m1] = transpose_vecs(loadu(block0.add(0)), loadu(block1.add(0)));
    let [m2, m3] = transpose_vecs(loadu(block0.add(1)), loadu(block1.add(1)));
    let [m4, m5] = transpose_vecs(loadu(block0.add(2)), loadu(block1.add(2)));
    let [m6, m7] = transpose_vecs(loadu(block0.add(3)), loadu(block1.add(3)));
    let [m8, m9] = transpose_vecs(loadu(block0.add(4)), loadu(block1.add(4)));
    let [m10, m11] = transpose_vecs(loadu(block0.add(5)), loadu(block1.add(5)));
    let [m12, m13] = transpose_vecs(loadu(block0.add(6)), loadu(block1.add(6)));
    let [m14, m15] = transpose_vecs(loadu(block0.add(7)), loadu(block1.add(7)));
    [
        m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15,
    ]
}

#[inline(always)]
unsafe fn load_counts(jobs: &[Job; DEGREE]) -> (__m128i, __m128i) {
    (
        set2(count_low(jobs[0].count), count_low(jobs[1].count)),
        set2(count_high(jobs[0].count), count_high(jobs[1].count)),
    )
}

#[inline(always)]
unsafe fn store_counts(jobs: &mut [Job; DEGREE], low: __m128i, high: __m128i) {
    let low_ints: [Word; DEGREE] = mem::transmute(low);
    let high_ints: [Word; DEGREE] = mem::transmute(high);
    for i in 0..DEGREE {
        jobs[i].count = assemble_count(low_ints[i], high_ints[i]);
    }
}

#[inline(always)]
unsafe fn add_to_counts(lo: &mut __m128i, hi: &mut __m128i, delta: __m128i) {
    // If the low counts reach zero, that means they wrapped, unless the delta
    // was also zero.
    *lo = add(*lo, delta);
    let lo_reached_zero = eq(*lo, set1(0));
    let delta_was_zero = eq(delta, set1(0));
    let hi_inc = and(set1(1), negate_and(delta_was_zero, lo_reached_zero));
    *hi = add(*hi, hi_inc);
}

#[inline(always)]
unsafe fn flags_vec(flags: [bool; DEGREE]) -> __m128i {
    set2(flag_word(flags[0]), flag_word(flags[1]))
}

#[target_feature(enable = "sse4.1")]
pub unsafe fn compress2_loop(jobs: &mut [Job; DEGREE], finalize: Finalize, stride: Stride) {
    // If we're not finalizing, there can't be a partial block at the end.
    for job in jobs.iter() {
        input_debug_asserts(job.input, finalize);
    }

    let msg_ptrs = [jobs[0].input.as_ptr(), jobs[1].input.as_ptr()];
    let mut h_vecs = transpose_state_vecs(&jobs);
    let (mut counts_lo, mut counts_hi) = load_counts(&jobs);

    // Prepare the final blocks (which could be empty, if the input is empty).
    // Do all this before entering the main loop.
    let min_len = jobs.iter().map(|job| job.input.len()).min().unwrap();
    let mut fin_offset = min_len.saturating_sub(1);
    fin_offset -= fin_offset % stride.padded_blockbytes();
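    // fin_offset is now the offset of the last (possibly partial) block of the
    // shortest input; saturating_sub covers the empty-input case, where the
    // final block sits at offset 0.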
    // Performance note: making these buffers mem::uninitialized() seems to
    // cause problems in the optimizer.
    let mut buf0: [u8; BLOCKBYTES] = [0; BLOCKBYTES];
    let mut buf1: [u8; BLOCKBYTES] = [0; BLOCKBYTES];
    let (block0, len0, finalize0) = final_block(jobs[0].input, fin_offset, &mut buf0, stride);
    let (block1, len1, finalize1) = final_block(jobs[1].input, fin_offset, &mut buf1, stride);
    let fin_blocks: [*const [u8; BLOCKBYTES]; DEGREE] = [block0, block1];
    let fin_counts_delta = set2(len0 as Word, len1 as Word);
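    // flag_word turns each finalization bool into an all-ones or all-zero
    // word; these vectors get XORed into IV[6] and IV[7] inside
    // compress2_transposed! to mark the last block and the last node.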
    let fin_last_block;
    let fin_last_node;
    if finalize.yes() {
        fin_last_block = flags_vec([finalize0, finalize1]);
        fin_last_node = flags_vec([
            finalize0 && jobs[0].last_node.yes(),
            finalize1 && jobs[1].last_node.yes(),
        ]);
    } else {
        fin_last_block = set1(0);
        fin_last_node = set1(0);
    }

    // The main loop.
    let mut offset = 0;
    loop {
        let blocks;
        let counts_delta;
        let last_block;
        let last_node;
        if offset == fin_offset {
            blocks = fin_blocks;
            counts_delta = fin_counts_delta;
            last_block = fin_last_block;
            last_node = fin_last_node;
        } else {
            blocks = [
                msg_ptrs[0].add(offset) as *const [u8; BLOCKBYTES],
                msg_ptrs[1].add(offset) as *const [u8; BLOCKBYTES],
            ];
            counts_delta = set1(BLOCKBYTES as Word);
            last_block = set1(0);
            last_node = set1(0);
        };

        let m_vecs = transpose_msg_vecs(blocks);
        add_to_counts(&mut counts_lo, &mut counts_hi, counts_delta);
        compress2_transposed!(
            &mut h_vecs,
            &m_vecs,
            counts_lo,
            counts_hi,
            last_block,
            last_node,
        );

        // Check for termination before bumping the offset, to avoid overflow.
        if offset == fin_offset {
            break;
        }

        offset += stride.padded_blockbytes();
    }

    // Write out the results.
    untranspose_state_vecs(&h_vecs, &mut *jobs);
    store_counts(&mut *jobs, counts_lo, counts_hi);
    let max_consumed = offset.saturating_add(stride.padded_blockbytes());
    for job in jobs.iter_mut() {
        let consumed = cmp::min(max_consumed, job.input.len());
        job.input = &job.input[consumed..];
    }
}