/* SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Copyright:
 *   2020      Evan Nemerson <evan@nemerson.com>
 *   2020      Himanshi Mathur <himanshi18037@iiitd.ac.in>
 *   2020      Hidayat Khan <huk2209@gmail.com>
 */

#if !defined(SIMDE_X86_AVX512_MUL_H)
#define SIMDE_X86_AVX512_MUL_H

#include "types.h"
#include "mov.h"

HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_

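/* Multiplies the sixteen packed single-precision (32-bit) floating-point
 * elements of a and b, using the native _mm512_mul_ps when AVX-512F is
 * available and a portable fallback otherwise. */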
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mul_ps (simde__m512 a, simde__m512 b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mul_ps(a, b);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a),
      b_ = simde__m512_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.f32 = a_.f32 * b_.f32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.m256) / sizeof(r_.m256[0])) ; i++) {
        r_.m256[i] = simde_mm256_mul_ps(a_.m256[i], b_.m256[i]);
      }
    #endif

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mul_ps
  #define _mm512_mul_ps(a, b) simde_mm512_mul_ps(a, b)
#endif

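/* Masked variants: result elements whose bit in the mask k is clear are
 * copied from src (_mask_) or zeroed (_maskz_). The same convention
 * applies to every _mask_/_maskz_ function in this file. */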
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_mul_ps(simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_mul_ps(src, k, a, b);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_mul_ps(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_mul_ps
  #define _mm512_mask_mul_ps(src, k, a, b) simde_mm512_mask_mul_ps(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_maskz_mul_ps(simde__mmask16 k, simde__m512 a, simde__m512 b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_maskz_mul_ps(k, a, b);
  #else
    return simde_mm512_maskz_mov_ps(k, simde_mm512_mul_ps(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_maskz_mul_ps
  #define _mm512_maskz_mul_ps(k, a, b) simde_mm512_maskz_mul_ps(k, a, b)
#endif

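/* Double-precision counterpart of simde_mm512_mul_ps: multiplies the
 * eight packed 64-bit floating-point elements of a and b. */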
SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mul_pd (simde__m512d a, simde__m512d b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mul_pd(a, b);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a),
      b_ = simde__m512d_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.f64 = a_.f64 * b_.f64;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.m256d) / sizeof(r_.m256d[0])) ; i++) {
        r_.m256d[i] = simde_mm256_mul_pd(a_.m256d[i], b_.m256d[i]);
      }
    #endif

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mul_pd
  #define _mm512_mul_pd(a, b) simde_mm512_mul_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_mul_pd(simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_mul_pd(src, k, a, b);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_mul_pd(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_mul_pd
  #define _mm512_mask_mul_pd(src, k, a, b) simde_mm512_mask_mul_pd(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_maskz_mul_pd(simde__mmask8 k, simde__m512d a, simde__m512d b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_maskz_mul_pd(k, a, b);
  #else
    return simde_mm512_maskz_mov_pd(k, simde_mm512_mul_pd(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_maskz_mul_pd
  #define _mm512_maskz_mul_pd(k, a, b) simde_mm512_maskz_mul_pd(k, a, b)
#endif

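/* Multiplies the low signed 32-bit integer of each 64-bit element in a
 * and b and stores the full signed 64-bit products, matching the
 * semantics of _mm512_mul_epi32. */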
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mul_epi32 (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mul_epi32(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_SHUFFLE_VECTOR_)
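      /* Vector-extension fast path: the shuffle gathers the even-indexed
       * 32-bit lanes of a into the lower half of x and those of b into
       * the upper half; each half is then widened to 64 bits and the two
       * halves are multiplied lane-wise. */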
      simde__m512i_private x;
      __typeof__(r_.i64) ta, tb;

      /* Get even numbered 32-bit values */
      x.i32 = SIMDE_SHUFFLE_VECTOR_(32, 64, a_.i32, b_.i32, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
      /* Cast to 64 bits */
      SIMDE_CONVERT_VECTOR_(ta, x.m256i_private[0].i32);
      SIMDE_CONVERT_VECTOR_(tb, x.m256i_private[1].i32);
      r_.i64 = ta * tb;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, a_.i32[i << 1]) * HEDLEY_STATIC_CAST(int64_t, b_.i32[i << 1]);
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mul_epi32
  #define _mm512_mul_epi32(a, b) simde_mm512_mul_epi32(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_mul_epi32(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_mul_epi32(src, k, a, b);
  #else
    return simde_mm512_mask_mov_epi64(src, k, simde_mm512_mul_epi32(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_mul_epi32
  #define _mm512_mask_mul_epi32(src, k, a, b) simde_mm512_mask_mul_epi32(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_mul_epi32(simde__mmask8 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_maskz_mul_epi32(k, a, b);
  #else
    return simde_mm512_maskz_mov_epi64(k, simde_mm512_mul_epi32(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_maskz_mul_epi32
  #define _mm512_maskz_mul_epi32(k, a, b) simde_mm512_maskz_mul_epi32(k, a, b)
#endif

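/* Unsigned counterpart of simde_mm512_mul_epi32: multiplies the low
 * unsigned 32-bit integer of each 64-bit element and stores the full
 * unsigned 64-bit products. */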
SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mul_epu32 (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mul_epu32(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_SHUFFLE_VECTOR_)
      simde__m512i_private x;
      __typeof__(r_.u64) ta, tb;

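      /* Get even numbered 32-bit values */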
      x.u32 = SIMDE_SHUFFLE_VECTOR_(32, 64, a_.u32, b_.u32, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
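      /* Cast to 64 bits */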
      SIMDE_CONVERT_VECTOR_(ta, x.m256i_private[0].u32);
      SIMDE_CONVERT_VECTOR_(tb, x.m256i_private[1].u32);
      r_.u64 = ta * tb;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
        r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, a_.u32[i << 1]) * HEDLEY_STATIC_CAST(uint64_t, b_.u32[i << 1]);
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mul_epu32
  #define _mm512_mul_epu32(a, b) simde_mm512_mul_epu32(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_mul_epu32(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_mul_epu32(src, k, a, b);
  #else
    return simde_mm512_mask_mov_epi64(src, k, simde_mm512_mul_epu32(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_mul_epu32
  #define _mm512_mask_mul_epu32(src, k, a, b) simde_mm512_mask_mul_epu32(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_maskz_mul_epu32(simde__mmask8 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_maskz_mul_epu32(k, a, b);
  #else
    return simde_mm512_maskz_mov_epi64(k, simde_mm512_mul_epu32(a, b));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_maskz_mul_epu32
  #define _mm512_maskz_mul_epu32(k, a, b) simde_mm512_maskz_mul_epu32(k, a, b)
#endif

SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP

#endif /* !defined(SIMDE_X86_AVX512_MUL_H) */