/* SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Copyright:
 *   2020      Evan Nemerson <evan@nemerson.com>
 *   2020      Christopher Moore <moore@free.fr>
 */

#if !defined(SIMDE_X86_AVX512_SHUFFLE_H)
#define SIMDE_X86_AVX512_SHUFFLE_H

#include "types.h"
#include "../avx2.h"
#include "mov.h"

HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_

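/* simde_mm512_shuffle_epi8 emulates VPSHUFB on 512-bit vectors: each result
 * byte is picked from within the *same* 128-bit lane of 'a'.  For byte i,
 * the low four bits of b[i] select a byte inside that lane, and a set high
 * bit (0x80) forces the result byte to zero.  For example, with i == 20
 * (lane 1) and b[20] == 0x05, the result byte is a[16 + 5] == a[21]; with
 * b[20] == 0x85 it is 0. */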
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_shuffle_epi8 (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm512_shuffle_epi8(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    #if SIMDE_NATURAL_VECTOR_SIZE_LE(256)
      /* Split the 512-bit shuffle into two 256-bit shuffles. */
      for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) {
        r_.m256i[i] = simde_mm256_shuffle_epi8(a_.m256i[i], b_.m256i[i]);
      }
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
        /* (i & 0x30) is the base of byte i's 128-bit lane; the low nibble of
         * b selects within that lane, and the high bit zeroes the byte. */
        r_.u8[i] = (b_.u8[i] & 0x80) ? 0 : a_.u8[(b_.u8[i] & 0x0f) + (i & 0x30)];
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
  #undef _mm512_shuffle_epi8
  #define _mm512_shuffle_epi8(a, b) simde_mm512_shuffle_epi8(a, b)
#endif

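/* The mask/maskz variants follow the usual AVX-512 pattern: compute the full
 * shuffle, then merge with 'src' (mask) or with zero (maskz), one byte per
 * bit of 'k', via the corresponding mov helper. */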
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_shuffle_epi8 (simde__m512i src, simde__mmask64 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm512_mask_shuffle_epi8(src, k, a, b);
  #else
    return simde_mm512_mask_mov_epi8(src, k, simde_mm512_shuffle_epi8(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_shuffle_epi8
  #define _mm512_mask_shuffle_epi8(src, k, a, b) simde_mm512_mask_shuffle_epi8(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_shuffle_epi8 (simde__mmask64 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm512_maskz_shuffle_epi8(k, a, b);
  #else
    return simde_mm512_maskz_mov_epi8(k, simde_mm512_shuffle_epi8(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
  #undef _mm512_maskz_shuffle_epi8
  #define _mm512_maskz_shuffle_epi8(k, a, b) simde_mm512_maskz_shuffle_epi8(k, a, b)
#endif

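/* simde_mm256_shuffle_i32x4 selects whole 128-bit lanes: bit 0 of imm8 picks
 * which lane of 'a' becomes the low lane of the result, and bit 1 picks
 * which lane of 'b' becomes the high lane. */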
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_shuffle_i32x4 (simde__m256i a, simde__m256i b, const int imm8)
    SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
  simde__m256i_private
    r_,
    a_ = simde__m256i_to_private(a),
    b_ = simde__m256i_to_private(b);

  r_.m128i[0] = a_.m128i[ imm8       & 1];
  r_.m128i[1] = b_.m128i[(imm8 >> 1) & 1];

  return simde__m256i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE)
  #define simde_mm256_shuffle_i32x4(a, b, imm8) _mm256_shuffle_i32x4(a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) && defined(SIMDE_X86_AVX512VL_ENABLE_NATIVE_ALIASES)
  #undef _mm256_shuffle_i32x4
  #define _mm256_shuffle_i32x4(a, b, imm8) simde_mm256_shuffle_i32x4(a, b, imm8)
#endif

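/* Because these shuffles move whole 128-bit lanes, the element type is
 * irrelevant: the i64x2 forms simply alias the i32x4 shuffle, and the
 * f32x4 / f64x2 forms cast through the integer type.  Only the masked
 * merge step differs, using the mov helper matching the element width. */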
#define simde_mm256_maskz_shuffle_i32x4(k, a, b, imm8) simde_mm256_maskz_mov_epi32(k, simde_mm256_shuffle_i32x4(a, b, imm8))
#define simde_mm256_mask_shuffle_i32x4(src, k, a, b, imm8) simde_mm256_mask_mov_epi32(src, k, simde_mm256_shuffle_i32x4(a, b, imm8))

#define simde_mm256_shuffle_f32x4(a, b, imm8) simde_mm256_castsi256_ps(simde_mm256_shuffle_i32x4(simde_mm256_castps_si256(a), simde_mm256_castps_si256(b), imm8))
#define simde_mm256_maskz_shuffle_f32x4(k, a, b, imm8) simde_mm256_maskz_mov_ps(k, simde_mm256_shuffle_f32x4(a, b, imm8))
#define simde_mm256_mask_shuffle_f32x4(src, k, a, b, imm8) simde_mm256_mask_mov_ps(src, k, simde_mm256_shuffle_f32x4(a, b, imm8))

#define simde_mm256_shuffle_i64x2(a, b, imm8) simde_mm256_shuffle_i32x4(a, b, imm8)
#define simde_mm256_maskz_shuffle_i64x2(k, a, b, imm8) simde_mm256_maskz_mov_epi64(k, simde_mm256_shuffle_i64x2(a, b, imm8))
#define simde_mm256_mask_shuffle_i64x2(src, k, a, b, imm8) simde_mm256_mask_mov_epi64(src, k, simde_mm256_shuffle_i64x2(a, b, imm8))

#define simde_mm256_shuffle_f64x2(a, b, imm8) simde_mm256_castsi256_pd(simde_mm256_shuffle_i64x2(simde_mm256_castpd_si256(a), simde_mm256_castpd_si256(b), imm8))
#define simde_mm256_maskz_shuffle_f64x2(k, a, b, imm8) simde_mm256_maskz_mov_pd(k, simde_mm256_shuffle_f64x2(a, b, imm8))
#define simde_mm256_mask_shuffle_f64x2(src, k, a, b, imm8) simde_mm256_mask_mov_pd(src, k, simde_mm256_shuffle_f64x2(a, b, imm8))

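/* The 512-bit version consumes all eight bits of imm8, two per output lane:
 * lanes 0-1 of the result come from 'a', lanes 2-3 from 'b'.  For example,
 * imm8 == 0x4E (0b01001110) yields { a[2], a[3], b[0], b[1] }. */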
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_shuffle_i32x4 (simde__m512i a, simde__m512i b, const int imm8)
    SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
  simde__m512i_private
    r_,
    a_ = simde__m512i_to_private(a),
    b_ = simde__m512i_to_private(b);

  r_.m128i[0] = a_.m128i[ imm8       & 3];
  r_.m128i[1] = a_.m128i[(imm8 >> 2) & 3];
  r_.m128i[2] = b_.m128i[(imm8 >> 4) & 3];
  r_.m128i[3] = b_.m128i[(imm8 >> 6) & 3];

  return simde__m512i_from_private(r_);
}
#if defined(SIMDE_X86_AVX512F_NATIVE)
  #define simde_mm512_shuffle_i32x4(a, b, imm8) _mm512_shuffle_i32x4(a, b, imm8)
#endif
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_shuffle_i32x4
  #define _mm512_shuffle_i32x4(a, b, imm8) simde_mm512_shuffle_i32x4(a, b, imm8)
#endif

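/* As with the 256-bit forms above, the remaining 512-bit variants alias the
 * i32x4 shuffle and add only the width-appropriate masked move or cast. */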
#define simde_mm512_maskz_shuffle_i32x4(k, a, b, imm8) simde_mm512_maskz_mov_epi32(k, simde_mm512_shuffle_i32x4(a, b, imm8))
#define simde_mm512_mask_shuffle_i32x4(src, k, a, b, imm8) simde_mm512_mask_mov_epi32(src, k, simde_mm512_shuffle_i32x4(a, b, imm8))

#define simde_mm512_shuffle_f32x4(a, b, imm8) simde_mm512_castsi512_ps(simde_mm512_shuffle_i32x4(simde_mm512_castps_si512(a), simde_mm512_castps_si512(b), imm8))
#define simde_mm512_maskz_shuffle_f32x4(k, a, b, imm8) simde_mm512_maskz_mov_ps(k, simde_mm512_shuffle_f32x4(a, b, imm8))
#define simde_mm512_mask_shuffle_f32x4(src, k, a, b, imm8) simde_mm512_mask_mov_ps(src, k, simde_mm512_shuffle_f32x4(a, b, imm8))

#define simde_mm512_shuffle_i64x2(a, b, imm8) simde_mm512_shuffle_i32x4(a, b, imm8)
#define simde_mm512_maskz_shuffle_i64x2(k, a, b, imm8) simde_mm512_maskz_mov_epi64(k, simde_mm512_shuffle_i64x2(a, b, imm8))
#define simde_mm512_mask_shuffle_i64x2(src, k, a, b, imm8) simde_mm512_mask_mov_epi64(src, k, simde_mm512_shuffle_i64x2(a, b, imm8))

#define simde_mm512_shuffle_f64x2(a, b, imm8) simde_mm512_castsi512_pd(simde_mm512_shuffle_i64x2(simde_mm512_castpd_si512(a), simde_mm512_castpd_si512(b), imm8))
#define simde_mm512_maskz_shuffle_f64x2(k, a, b, imm8) simde_mm512_maskz_mov_pd(k, simde_mm512_shuffle_f64x2(a, b, imm8))
#define simde_mm512_mask_shuffle_f64x2(src, k, a, b, imm8) simde_mm512_mask_mov_pd(src, k, simde_mm512_shuffle_f64x2(a, b, imm8))

SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP

#endif /* !defined(SIMDE_X86_AVX512_SHUFFLE_H) */