/*===----------------- gfniintrin.h - GFNI intrinsics ----------------------===
 *
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __IMMINTRIN_H
#error "Never use <gfniintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __GFNIINTRIN_H
#define __GFNIINTRIN_H

/* Default attributes for simple form (no masking). */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("gfni"), __min_vector_width__(128)))

/* Default attributes for YMM unmasked form. */
#define __DEFAULT_FN_ATTRS_Y __attribute__((__always_inline__, __nodebug__, __target__("avx,gfni"), __min_vector_width__(256)))

/* Default attributes for ZMM forms. */
#define __DEFAULT_FN_ATTRS_Z __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,gfni"), __min_vector_width__(512)))

/* Default attributes for VLX forms. */
#define __DEFAULT_FN_ATTRS_VL128 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni"), __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS_VL256 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni"), __min_vector_width__(256)))

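/* GF2P8AFFINEINVQB: for each byte of A, computes an affine transformation of
 * the byte's multiplicative inverse in GF(2^8) (reduction polynomial
 * x^8 + x^4 + x^3 + x + 1), using the 8x8 bit matrix held in the corresponding
 * 64-bit lane of B and XORing in the constant byte I. I must be a compile-time
 * constant. */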
#define _mm_gf2p8affineinv_epi64_epi8(A, B, I) \
  (__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A),          \
                                                  (__v16qi)(__m128i)(B),          \
                                                  (char)(I))

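/* GF2P8AFFINEQB: for each byte x of A, computes the affine transform
 * y = M*x XOR I over GF(2), where M is the 8x8 bit matrix held in the
 * corresponding 64-bit lane of B and I is an 8-bit constant. I must be a
 * compile-time constant. */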
#define _mm_gf2p8affine_epi64_epi8(A, B, I) \
  (__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A),             \
                                               (__v16qi)(__m128i)(B),             \
                                               (char)(I))

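/* GF2P8MULB: multiplies the bytes of __A and __B element-wise in GF(2^8),
 * reducing by the polynomial x^8 + x^4 + x^3 + x + 1 (0x11B). */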
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_gf2p8mul_epi8(__m128i __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi((__v16qi) __A,
              (__v16qi) __B);
}
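
/* Illustrative usage sketch (not part of this header): because the affine
 * transform applies an arbitrary 8x8 bit matrix to every byte, it can express
 * per-byte bit permutations. With the matrix constant 0x8040201008040201 and a
 * zero constant byte, each byte has its bits reversed. The helper name below is
 * hypothetical; compiling it requires GFNI (e.g. -mgfni):
 *
 *   __m128i reverse_bits_per_byte(__m128i v) {
 *     // Row i of the matrix selects source bit (7 - i) of each byte.
 *     return _mm_gf2p8affine_epi64_epi8(
 *         v, _mm_set1_epi64x(0x8040201008040201ULL), 0);
 *   }
 */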

#ifdef __AVXINTRIN_H
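/* 256-bit forms of the same operations; they require AVX and are only defined
 * once <avxintrin.h> has been included via <immintrin.h>. */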
#define _mm256_gf2p8affineinv_epi64_epi8(A, B, I) \
  (__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A),          \
                                                  (__v32qi)(__m256i)(B),          \
                                                  (char)(I))

#define _mm256_gf2p8affine_epi64_epi8(A, B, I) \
  (__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A),             \
                                               (__v32qi)(__m256i)(B),             \
                                               (char)(I))

static __inline__ __m256i __DEFAULT_FN_ATTRS_Y
_mm256_gf2p8mul_epi8(__m256i __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi((__v32qi) __A,
              (__v32qi) __B);
}
#endif /* __AVXINTRIN_H */

#ifdef __AVX512BWINTRIN_H
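/* 512-bit forms; they require AVX512BW and add merge-masking (_mask_, which
 * takes unselected bytes from S) and zero-masking (_maskz_) variants. Only
 * defined once <avx512bwintrin.h> has been included. */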
#define _mm512_gf2p8affineinv_epi64_epi8(A, B, I) \
  (__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)(__m512i)(A),          \
                                                  (__v64qi)(__m512i)(B),          \
                                                  (char)(I))

#define _mm512_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
   (__m512i)__builtin_ia32_selectb_512((__mmask64)(U),                            \
        (__v64qi)_mm512_gf2p8affineinv_epi64_epi8(A, B, I),                       \
        (__v64qi)(__m512i)(S))

#define _mm512_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  (__m512i)_mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_si512(), \
        U, A, B, I)

#define _mm512_gf2p8affine_epi64_epi8(A, B, I) \
  (__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A),             \
                                               (__v64qi)(__m512i)(B),             \
                                               (char)(I))

#define _mm512_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
   (__m512i)__builtin_ia32_selectb_512((__mmask64)(U),                            \
        (__v64qi)_mm512_gf2p8affine_epi64_epi8(A, B, I),                          \
        (__v64qi)(__m512i)(S))

#define _mm512_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  (__m512i)_mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(),    \
        U, A, B, I)

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
_mm512_gf2p8mul_epi8(__m512i __A, __m512i __B)
{
  return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi((__v64qi) __A,
              (__v64qi) __B);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
_mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B)
{
  return (__m512i) __builtin_ia32_selectb_512(__U,
              (__v64qi) _mm512_gf2p8mul_epi8(__A, __B),
              (__v64qi) __S);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
_mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B)
{
  return _mm512_mask_gf2p8mul_epi8((__m512i)_mm512_setzero_si512(),
              __U, __A, __B);
}
#endif /* __AVX512BWINTRIN_H */

#ifdef __AVX512VLBWINTRIN_H
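/* Masked 128-bit and 256-bit forms; they require AVX512BW and AVX512VL and are
 * only defined once <avx512vlbwintrin.h> has been included. */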
#define _mm_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  (__m128i)__builtin_ia32_selectb_128((__mmask16)(U),                             \
        (__v16qi)_mm_gf2p8affineinv_epi64_epi8(A, B, I),                          \
        (__v16qi)(__m128i)(S))

#define _mm_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  (__m128i)_mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(),       \
        U, A, B, I)

#define _mm256_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
   (__m256i)__builtin_ia32_selectb_256((__mmask32)(U),                            \
        (__v32qi)_mm256_gf2p8affineinv_epi64_epi8(A, B, I),                       \
        (__v32qi)(__m256i)(S))

#define _mm256_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  (__m256i)_mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \
        U, A, B, I)

#define _mm_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  (__m128i)__builtin_ia32_selectb_128((__mmask16)(U),                             \
        (__v16qi)_mm_gf2p8affine_epi64_epi8(A, B, I),                             \
        (__v16qi)(__m128i)(S))

#define _mm_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  (__m128i)_mm_mask_gf2p8affine_epi64_epi8((__m128i)_mm_setzero_si128(),          \
        U, A, B, I)

#define _mm256_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
   (__m256i)__builtin_ia32_selectb_256((__mmask32)(U),                            \
        (__v32qi)_mm256_gf2p8affine_epi64_epi8(A, B, I),                          \
        (__v32qi)(__m256i)(S))

#define _mm256_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  (__m256i)_mm256_mask_gf2p8affine_epi64_epi8((__m256i)_mm256_setzero_si256(),    \
        U, A, B, I)

static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_mask_gf2p8mul_epi8(__m128i __S, __mmask16 __U, __m128i __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_selectb_128(__U,
              (__v16qi) _mm_gf2p8mul_epi8(__A, __B),
              (__v16qi) __S);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_maskz_gf2p8mul_epi8(__mmask16 __U, __m128i __A, __m128i __B)
{
  return _mm_mask_gf2p8mul_epi8((__m128i)_mm_setzero_si128(),
              __U, __A, __B);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256
_mm256_mask_gf2p8mul_epi8(__m256i __S, __mmask32 __U, __m256i __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_selectb_256(__U,
              (__v32qi) _mm256_gf2p8mul_epi8(__A, __B),
              (__v32qi) __S);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256
_mm256_maskz_gf2p8mul_epi8(__mmask32 __U, __m256i __A, __m256i __B)
{
  return _mm256_mask_gf2p8mul_epi8((__m256i)_mm256_setzero_si256(),
              __U, __A, __B);
}
#endif /* __AVX512VLBWINTRIN_H */

#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_Y
#undef __DEFAULT_FN_ATTRS_Z
#undef __DEFAULT_FN_ATTRS_VL128
#undef __DEFAULT_FN_ATTRS_VL256

#endif /* __GFNIINTRIN_H */