#ifndef blamka_round_avx2_H
#define blamka_round_avx2_H

#include "private/common.h"
#include "private/sse2_64_32.h"

/* Right rotations of each 64-bit lane of a 256-bit vector. rotr24 and rotr16
   are byte shuffles; rotr63 is expressed as (x >> 63) ^ (x + x). */
#define rotr32(x) _mm256_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1))
#define rotr24(x) \
    _mm256_shuffle_epi8(x, \
        _mm256_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10, \
                         3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10))
#define rotr16(x) \
    _mm256_shuffle_epi8(x, \
        _mm256_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9, \
                         2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9))
#define rotr63(x) \
    _mm256_xor_si256(_mm256_srli_epi64((x), 63), _mm256_add_epi64((x), (x)))

/* First half of the BLAMKA G function (rotations by 32 and 24), applied to the
   two register sets (*0 and *1) in parallel. ml = 2 * lo32(x) * lo32(y) is the
   multiplicative term that distinguishes BLAMKA from BLAKE2b's G. */
#define G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
    do { \
        __m256i ml = _mm256_mul_epu32(A0, B0); \
        ml = _mm256_add_epi64(ml, ml); \
        A0 = _mm256_add_epi64(A0, _mm256_add_epi64(B0, ml)); \
        D0 = _mm256_xor_si256(D0, A0); \
        D0 = rotr32(D0); \
        \
        ml = _mm256_mul_epu32(C0, D0); \
        ml = _mm256_add_epi64(ml, ml); \
        C0 = _mm256_add_epi64(C0, _mm256_add_epi64(D0, ml)); \
        \
        B0 = _mm256_xor_si256(B0, C0); \
        B0 = rotr24(B0); \
        \
        ml = _mm256_mul_epu32(A1, B1); \
        ml = _mm256_add_epi64(ml, ml); \
        A1 = _mm256_add_epi64(A1, _mm256_add_epi64(B1, ml)); \
        D1 = _mm256_xor_si256(D1, A1); \
        D1 = rotr32(D1); \
        \
        ml = _mm256_mul_epu32(C1, D1); \
        ml = _mm256_add_epi64(ml, ml); \
        C1 = _mm256_add_epi64(C1, _mm256_add_epi64(D1, ml)); \
        \
        B1 = _mm256_xor_si256(B1, C1); \
        B1 = rotr24(B1); \
    } while((void)0, 0);

/* Second half of the G function (rotations by 16 and 63). */
#define G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
    do { \
        __m256i ml = _mm256_mul_epu32(A0, B0); \
        ml = _mm256_add_epi64(ml, ml); \
        A0 = _mm256_add_epi64(A0, _mm256_add_epi64(B0, ml)); \
        D0 = _mm256_xor_si256(D0, A0); \
        D0 = rotr16(D0); \
        \
        ml = _mm256_mul_epu32(C0, D0); \
        ml = _mm256_add_epi64(ml, ml); \
        C0 = _mm256_add_epi64(C0, _mm256_add_epi64(D0, ml)); \
        B0 = _mm256_xor_si256(B0, C0); \
        B0 = rotr63(B0); \
        \
        ml = _mm256_mul_epu32(A1, B1); \
        ml = _mm256_add_epi64(ml, ml); \
        A1 = _mm256_add_epi64(A1, _mm256_add_epi64(B1, ml)); \
        D1 = _mm256_xor_si256(D1, A1); \
        D1 = rotr16(D1); \
        \
        ml = _mm256_mul_epu32(C1, D1); \
        ml = _mm256_add_epi64(ml, ml); \
        C1 = _mm256_add_epi64(C1, _mm256_add_epi64(D1, ml)); \
        B1 = _mm256_xor_si256(B1, C1); \
        B1 = rotr63(B1); \
    } while((void)0, 0);

/* Rotate the B, C and D registers so that the next G pass operates on
   diagonals instead of columns. The *_1 variants permute whole registers
   (layout used by BLAKE2_ROUND_1); the *_2 variants blend across the 0/1
   register pairs (interleaved layout used by BLAKE2_ROUND_2). */
#define DIAGONALIZE_1(A0, B0, C0, D0, A1, B1, C1, D1) \
    do { \
        B0 = _mm256_permute4x64_epi64(B0, _MM_SHUFFLE(0, 3, 2, 1)); \
        C0 = _mm256_permute4x64_epi64(C0, _MM_SHUFFLE(1, 0, 3, 2)); \
        D0 = _mm256_permute4x64_epi64(D0, _MM_SHUFFLE(2, 1, 0, 3)); \
        \
        B1 = _mm256_permute4x64_epi64(B1, _MM_SHUFFLE(0, 3, 2, 1)); \
        C1 = _mm256_permute4x64_epi64(C1, _MM_SHUFFLE(1, 0, 3, 2)); \
        D1 = _mm256_permute4x64_epi64(D1, _MM_SHUFFLE(2, 1, 0, 3)); \
    } while((void)0, 0);

#define DIAGONALIZE_2(A0, A1, B0, B1, C0, C1, D0, D1) \
    do { \
        __m256i tmp1 = _mm256_blend_epi32(B0, B1, 0xCC); \
        __m256i tmp2 = _mm256_blend_epi32(B0, B1, 0x33); \
        B1 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2, 3, 0, 1)); \
        B0 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2, 3, 0, 1)); \
        \
        tmp1 = C0; \
        C0 = C1; \
        C1 = tmp1; \
        \
        tmp1 = _mm256_blend_epi32(D0, D1, 0xCC); \
        tmp2 = _mm256_blend_epi32(D0, D1, 0x33); \
        D0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2, 3, 0, 1)); \
        D1 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2, 3, 0, 1)); \
    } while(0);

#define UNDIAGONALIZE_1(A0, B0, C0, D0, A1, B1, C1, D1) \
    do { \
        B0 = _mm256_permute4x64_epi64(B0, _MM_SHUFFLE(2, 1, 0, 3)); \
        C0 = _mm256_permute4x64_epi64(C0, _MM_SHUFFLE(1, 0, 3, 2)); \
        D0 = _mm256_permute4x64_epi64(D0, _MM_SHUFFLE(0, 3, 2, 1)); \
        \
        B1 = _mm256_permute4x64_epi64(B1, _MM_SHUFFLE(2, 1, 0, 3)); \
        C1 = _mm256_permute4x64_epi64(C1, _MM_SHUFFLE(1, 0, 3, 2)); \
        D1 = _mm256_permute4x64_epi64(D1, _MM_SHUFFLE(0, 3, 2, 1)); \
    } while((void)0, 0);

#define UNDIAGONALIZE_2(A0, A1, B0, B1, C0, C1, D0, D1) \
    do { \
        __m256i tmp1 = _mm256_blend_epi32(B0, B1, 0xCC); \
        __m256i tmp2 = _mm256_blend_epi32(B0, B1, 0x33); \
        B0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2, 3, 0, 1)); \
        B1 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2, 3, 0, 1)); \
        \
        tmp1 = C0; \
        C0 = C1; \
        C1 = tmp1; \
        \
        tmp1 = _mm256_blend_epi32(D0, D1, 0x33); \
        tmp2 = _mm256_blend_epi32(D0, D1, 0xCC); \
        D0 = _mm256_permute4x64_epi64(tmp1, _MM_SHUFFLE(2, 3, 0, 1)); \
        D1 = _mm256_permute4x64_epi64(tmp2, _MM_SHUFFLE(2, 3, 0, 1)); \
    } while((void)0, 0);

/* A full BLAMKA round: one G pass on columns, diagonalize, one G pass on
   diagonals, undiagonalize. ROUND_1 and ROUND_2 differ only in how the state
   words are laid out across the eight registers. */
#define BLAKE2_ROUND_1(A0, A1, B0, B1, C0, C1, D0, D1) \
    do { \
        G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
        G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
        \
        DIAGONALIZE_1(A0, B0, C0, D0, A1, B1, C1, D1) \
        \
        G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
        G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
        \
        UNDIAGONALIZE_1(A0, B0, C0, D0, A1, B1, C1, D1) \
    } while((void)0, 0);

#define BLAKE2_ROUND_2(A0, A1, B0, B1, C0, C1, D0, D1) \
    do { \
        G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
        G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
        \
        DIAGONALIZE_2(A0, A1, B0, B1, C0, C1, D0, D1) \
        \
        G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
        G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
        \
        UNDIAGONALIZE_2(A0, A1, B0, B1, C0, C1, D0, D1) \
    } while((void)0, 0);

#endif
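
/*
 * Illustrative sketch only (not part of the original header, and disabled by
 * the #if 0 guard below): one way a caller such as an Argon2 block-fill
 * routine might apply these round macros to a 1 KiB block held in 32 AVX2
 * registers. The function name and the `state` array are assumptions made
 * here for demonstration.
 */
#if 0
static void blamka_round_avx2_example(__m256i state[32])
{
    int i;

    /* First pass: each BLAKE2_ROUND_1 mixes a consecutive group of eight
       vectors, i.e. 32 of the block's 64-bit words. */
    for (i = 0; i < 4; i++) {
        BLAKE2_ROUND_1(state[8 * i + 0], state[8 * i + 4],
                       state[8 * i + 1], state[8 * i + 5],
                       state[8 * i + 2], state[8 * i + 6],
                       state[8 * i + 3], state[8 * i + 7]);
    }

    /* Second pass: each BLAKE2_ROUND_2 mixes eight vectors strided by four,
       the "columns" of the previous grouping. */
    for (i = 0; i < 4; i++) {
        BLAKE2_ROUND_2(state[0 + i],  state[4 + i],  state[8 + i],
                       state[12 + i], state[16 + i], state[20 + i],
                       state[24 + i], state[28 + i]);
    }
}
#endif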