/* SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Copyright:
 *   2020      Evan Nemerson <evan@nemerson.com>
 *   2020      Himanshi Mathur <himanshi18037@iiitd.ac.in>
 */

#if !defined(SIMDE_SVML_H)
#define SIMDE_SVML_H

#include "avx2.h"
#include "avx512f.h"

#if !defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_ENABLE_NATIVE_ALIASES)
#  define SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES
#endif

HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_

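/* Every SVML emulation below follows the same pattern: when both
 * SIMDE_X86_SVML_NATIVE and the relevant ISA macro are defined we call
 * Intel's intrinsic directly; otherwise we unpack the vector into its
 * private representation and apply the scalar simde_math_* wrapper
 * lane by lane.
 *
 * Arc-cosine (acos), computed element-wise.  A minimal usage sketch
 * with hypothetical values, assuming simde_mm_set1_ps is available via
 * the headers included above:
 *
 *   simde__m128 x = simde_mm_set1_ps(0.5f);
 *   simde__m128 y = simde_mm_acos_ps(x);  // each lane = acosf(0.5f) ~= 1.0471976f
 */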
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_acos_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_acos_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_acosf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_acos_ps
  #define _mm_acos_ps(a) simde_mm_acos_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_acos_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_acos_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_acos(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_acos_pd
  #define _mm_acos_pd(a) simde_mm_acos_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_acos_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_acos_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_acosf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_acos_ps
  #define _mm256_acos_ps(a) simde_mm256_acos_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_acos_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_acos_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_acos(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_acos_pd
  #define _mm256_acos_pd(a) simde_mm256_acos_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_acos_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_acos_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_acosf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_acos_ps
  #define _mm512_acos_ps(a) simde_mm512_acos_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_acos_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_acos_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_acos(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_acos_pd
  #define _mm512_acos_pd(a) simde_mm512_acos_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_acos_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_acos_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_acos_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_acos_ps
  #define _mm512_mask_acos_ps(src, k, a) simde_mm512_mask_acos_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_acos_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_acos_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_acos_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_acos_pd
  #define _mm512_mask_acos_pd(src, k, a) simde_mm512_mask_acos_pd(src, k, a)
#endif

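/* Inverse hyperbolic cosine (acosh), element-wise; real-valued only for
 * inputs >= 1, matching the scalar acosh/acoshf domain. */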
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_acosh_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_acosh_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_acoshf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_acosh_ps
  #define _mm_acosh_ps(a) simde_mm_acosh_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_acosh_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_acosh_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_acosh(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_acosh_pd
  #define _mm_acosh_pd(a) simde_mm_acosh_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_acosh_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_acosh_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_acoshf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_acosh_ps
  #define _mm256_acosh_ps(a) simde_mm256_acosh_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_acosh_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_acosh_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_acosh(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_acosh_pd
  #define _mm256_acosh_pd(a) simde_mm256_acosh_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_acosh_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_acosh_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_acoshf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_acosh_ps
  #define _mm512_acosh_ps(a) simde_mm512_acosh_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_acosh_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_acosh_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_acosh(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_acosh_pd
  #define _mm512_acosh_pd(a) simde_mm512_acosh_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_acosh_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_acosh_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_acosh_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_acosh_ps
  #define _mm512_mask_acosh_ps(src, k, a) simde_mm512_mask_acosh_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_acosh_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_acosh_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_acosh_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_acosh_pd
  #define _mm512_mask_acosh_pd(src, k, a) simde_mm512_mask_acosh_pd(src, k, a)
#endif

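/* Arc-sine (asin), element-wise; inputs outside [-1, 1] produce NaN,
 * as with the scalar asin/asinf. */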
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_asin_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_asin_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_asinf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_asin_ps
  #define _mm_asin_ps(a) simde_mm_asin_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_asin_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_asin_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_asin(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_asin_pd
  #define _mm_asin_pd(a) simde_mm_asin_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_asin_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_asin_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_asinf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_asin_ps
  #define _mm256_asin_ps(a) simde_mm256_asin_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_asin_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_asin_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_asin(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_asin_pd
  #define _mm256_asin_pd(a) simde_mm256_asin_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_asin_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_asin_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_asinf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_asin_ps
  #define _mm512_asin_ps(a) simde_mm512_asin_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_asin_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_asin_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_asin(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_asin_pd
  #define _mm512_asin_pd(a) simde_mm512_asin_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_asin_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_asin_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_asin_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_asin_ps
  #define _mm512_mask_asin_ps(src, k, a) simde_mm512_mask_asin_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_asin_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_asin_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_asin_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_asin_pd
  #define _mm512_mask_asin_pd(src, k, a) simde_mm512_mask_asin_pd(src, k, a)
#endif

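/* Inverse hyperbolic sine (asinh), element-wise; defined for all real
 * inputs. */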
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_asinh_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_asinh_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_asinhf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_asinh_ps
  #define _mm_asinh_ps(a) simde_mm_asinh_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_asinh_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_asinh_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_asinh(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_asinh_pd
  #define _mm_asinh_pd(a) simde_mm_asinh_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_asinh_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_asinh_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_asinhf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_asinh_ps
  #define _mm256_asinh_ps(a) simde_mm256_asinh_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_asinh_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_asinh_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_asinh(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_asinh_pd
  #define _mm256_asinh_pd(a) simde_mm256_asinh_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_asinh_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_asinh_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_asinhf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_asinh_ps
  #define _mm512_asinh_ps(a) simde_mm512_asinh_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_asinh_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_asinh_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_asinh(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_asinh_pd
  #define _mm512_asinh_pd(a) simde_mm512_asinh_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_asinh_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_asinh_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_asinh_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_asinh_ps
  #define _mm512_mask_asinh_ps(src, k, a) simde_mm512_mask_asinh_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_asinh_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_asinh_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_asinh_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_asinh_pd
  #define _mm512_mask_asinh_pd(src, k, a) simde_mm512_mask_asinh_pd(src, k, a)
#endif

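/* Arc-tangent (atan), element-wise; results lie in (-pi/2, pi/2). */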
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_atan_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_atan_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_atanf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_atan_ps
  #define _mm_atan_ps(a) simde_mm_atan_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_atan_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_atan_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_atan(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_atan_pd
  #define _mm_atan_pd(a) simde_mm_atan_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_atan_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_atan_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_atanf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_atan_ps
  #define _mm256_atan_ps(a) simde_mm256_atan_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_atan_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_atan_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_atan(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_atan_pd
  #define _mm256_atan_pd(a) simde_mm256_atan_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_atan_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_atan_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_atanf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_atan_ps
  #define _mm512_atan_ps(a) simde_mm512_atan_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_atan_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_atan_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_atan(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_atan_pd
  #define _mm512_atan_pd(a) simde_mm512_atan_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_atan_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_atan_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_atan_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_atan_ps
  #define _mm512_mask_atan_ps(src, k, a) simde_mm512_mask_atan_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_atan_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_atan_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_atan_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_atan_pd
  #define _mm512_mask_atan_pd(src, k, a) simde_mm512_mask_atan_pd(src, k, a)
#endif

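/* Two-argument arc-tangent: r[i] = atan2(a[i], b[i]), i.e. the angle of
 * the point (b[i], a[i]), with the quadrant determined by the signs of
 * both inputs. */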
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_atan2_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_atan2_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_atan2f(a_.f32[i], b_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_atan2_ps
  #define _mm_atan2_ps(a, b) simde_mm_atan2_ps(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_atan2_pd (simde__m128d a, simde__m128d b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_atan2_pd(a, b);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a),
      b_ = simde__m128d_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_atan2(a_.f64[i], b_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_atan2_pd
  #define _mm_atan2_pd(a, b) simde_mm_atan2_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_atan2_ps (simde__m256 a, simde__m256 b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_atan2_ps(a, b);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a),
      b_ = simde__m256_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_atan2f(a_.f32[i], b_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_atan2_ps
  #define _mm256_atan2_ps(a, b) simde_mm256_atan2_ps(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_atan2_pd (simde__m256d a, simde__m256d b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_atan2_pd(a, b);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a),
      b_ = simde__m256d_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_atan2(a_.f64[i], b_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_atan2_pd
  #define _mm256_atan2_pd(a, b) simde_mm256_atan2_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_atan2_ps (simde__m512 a, simde__m512 b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_atan2_ps(a, b);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a),
      b_ = simde__m512_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_atan2f(a_.f32[i], b_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_atan2_ps
  #define _mm512_atan2_ps(a, b) simde_mm512_atan2_ps(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_atan2_pd (simde__m512d a, simde__m512d b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_atan2_pd(a, b);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a),
      b_ = simde__m512d_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_atan2(a_.f64[i], b_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_atan2_pd
  #define _mm512_atan2_pd(a, b) simde_mm512_atan2_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_atan2_ps(simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_atan2_ps(src, k, a, b);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_atan2_ps(a, b));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_atan2_ps
  #define _mm512_mask_atan2_ps(src, k, a, b) simde_mm512_mask_atan2_ps(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_atan2_pd(simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_atan2_pd(src, k, a, b);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_atan2_pd(a, b));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_atan2_pd
  #define _mm512_mask_atan2_pd(src, k, a, b) simde_mm512_mask_atan2_pd(src, k, a, b)
#endif

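/* Inverse hyperbolic tangent (atanh), element-wise; real-valued only on
 * (-1, 1). */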
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_atanh_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_atanh_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_atanhf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_atanh_ps
  #define _mm_atanh_ps(a) simde_mm_atanh_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_atanh_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_atanh_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_atanh(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_atanh_pd
  #define _mm_atanh_pd(a) simde_mm_atanh_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_atanh_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_atanh_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_atanhf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_atanh_ps
  #define _mm256_atanh_ps(a) simde_mm256_atanh_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_atanh_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_atanh_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_atanh(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_atanh_pd
  #define _mm256_atanh_pd(a) simde_mm256_atanh_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_atanh_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_atanh_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_atanhf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_atanh_ps
  #define _mm512_atanh_ps(a) simde_mm512_atanh_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_atanh_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_atanh_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_atanh(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_atanh_pd
  #define _mm512_atanh_pd(a) simde_mm512_atanh_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_atanh_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_atanh_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_atanh_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_atanh_ps
  #define _mm512_mask_atanh_ps(src, k, a) simde_mm512_mask_atanh_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_atanh_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_atanh_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_atanh_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_atanh_pd
  #define _mm512_mask_atanh_pd(src, k, a) simde_mm512_mask_atanh_pd(src, k, a)
#endif

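/* Cube root (cbrt), element-wise; unlike sqrt it is well-defined for
 * negative inputs. */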
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cbrt_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cbrt_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_cbrtf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_cbrt_ps
  #define _mm_cbrt_ps(a) simde_mm_cbrt_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cbrt_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cbrt_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_cbrt(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_cbrt_pd
  #define _mm_cbrt_pd(a) simde_mm_cbrt_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_cbrt_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_cbrt_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_cbrtf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_cbrt_ps
  #define _mm256_cbrt_ps(a) simde_mm256_cbrt_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_cbrt_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_cbrt_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_cbrt(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_cbrt_pd
  #define _mm256_cbrt_pd(a) simde_mm256_cbrt_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_cbrt_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_cbrt_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_cbrtf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_cbrt_ps
  #define _mm512_cbrt_ps(a) simde_mm512_cbrt_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_cbrt_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_cbrt_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_cbrt(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_cbrt_pd
  #define _mm512_cbrt_pd(a) simde_mm512_cbrt_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_cbrt_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_cbrt_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_cbrt_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_cbrt_ps
  #define _mm512_mask_cbrt_ps(src, k, a) simde_mm512_mask_cbrt_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_cbrt_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_cbrt_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_cbrt_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_cbrt_pd
  #define _mm512_mask_cbrt_pd(src, k, a) simde_mm512_mask_cbrt_pd(src, k, a)
#endif

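/* cdfnorm: cumulative distribution function of the standard normal
 * distribution.  The scalar fallback, simde_math_cdfnorm(f), is SIMDe's
 * portable stand-in since libm provides no cdfnorm (conventionally
 * computed as 0.5 * (1 + erf(x / sqrt(2)))). */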
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cdfnorm_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cdfnorm_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_cdfnormf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_cdfnorm_ps
  #define _mm_cdfnorm_ps(a) simde_mm_cdfnorm_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_cdfnorm_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cdfnorm_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_cdfnorm(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_cdfnorm_pd
  #define _mm_cdfnorm_pd(a) simde_mm_cdfnorm_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_cdfnorm_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_cdfnorm_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_cdfnormf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_cdfnorm_ps
  #define _mm256_cdfnorm_ps(a) simde_mm256_cdfnorm_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_cdfnorm_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_cdfnorm_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_cdfnorm(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_cdfnorm_pd
  #define _mm256_cdfnorm_pd(a) simde_mm256_cdfnorm_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_cdfnorm_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_cdfnorm_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_cdfnormf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_cdfnorm_ps
  #define _mm512_cdfnorm_ps(a) simde_mm512_cdfnorm_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_cdfnorm_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_cdfnorm_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_cdfnorm(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_cdfnorm_pd
  #define _mm512_cdfnorm_pd(a) simde_mm512_cdfnorm_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_cdfnorm_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_cdfnorm_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_cdfnorm_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_cdfnorm_ps
  #define _mm512_mask_cdfnorm_ps(src, k, a) simde_mm512_mask_cdfnorm_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_cdfnorm_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_cdfnorm_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_cdfnorm_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_cdfnorm_pd
  #define _mm512_mask_cdfnorm_pd(src, k, a) simde_mm512_mask_cdfnorm_pd(src, k, a)
#endif

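/* Cosine (cos), element-wise; input is in radians. */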
1554 SIMDE_FUNCTION_ATTRIBUTES
1555 simde__m128
simde_mm_cos_ps(simde__m128 a)1556 simde_mm_cos_ps (simde__m128 a) {
1557   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
1558     return _mm_cos_ps(a);
1559   #else
1560     simde__m128_private
1561       r_,
1562       a_ = simde__m128_to_private(a);
1563 
1564     SIMDE_VECTORIZE
1565     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
1566       r_.f32[i] =  simde_math_cosf(a_.f32[i]);
1567     }
1568 
1569     return simde__m128_from_private(r_);
1570   #endif
1571 }
1572 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1573   #undef _mm_cos_ps
1574   #define _mm_cos_ps(a) simde_mm_cos_ps(a)
1575 #endif
1576 
1577 SIMDE_FUNCTION_ATTRIBUTES
1578 simde__m128d
simde_mm_cos_pd(simde__m128d a)1579 simde_mm_cos_pd (simde__m128d a) {
1580   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
1581     return _mm_cos_pd(a);
1582   #else
1583     simde__m128d_private
1584       r_,
1585       a_ = simde__m128d_to_private(a);
1586 
1587     SIMDE_VECTORIZE
1588     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
1589       r_.f64[i] =  simde_math_cos(a_.f64[i]);
1590     }
1591 
1592     return simde__m128d_from_private(r_);
1593   #endif
1594 }
1595 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1596   #undef _mm_cos_pd
1597   #define _mm_cos_pd(a) simde_mm_cos_pd(a)
1598 #endif
1599 
1600 SIMDE_FUNCTION_ATTRIBUTES
1601 simde__m256
simde_mm256_cos_ps(simde__m256 a)1602 simde_mm256_cos_ps (simde__m256 a) {
1603   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
1604     return _mm256_cos_ps(a);
1605   #else
1606     simde__m256_private
1607       r_,
1608       a_ = simde__m256_to_private(a);
1609 
1610     SIMDE_VECTORIZE
1611     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
1612       r_.f32[i] =  simde_math_cosf(a_.f32[i]);
1613     }
1614 
1615     return simde__m256_from_private(r_);
1616   #endif
1617 }
1618 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1619   #undef _mm256_cos_ps
1620   #define _mm256_cos_ps(a) simde_mm256_cos_ps(a)
1621 #endif
1622 
1623 
1624 SIMDE_FUNCTION_ATTRIBUTES
1625 simde__m256d
simde_mm256_cos_pd(simde__m256d a)1626 simde_mm256_cos_pd (simde__m256d a) {
1627   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
1628     return _mm256_cos_pd(a);
1629   #else
1630     simde__m256d_private
1631       r_,
1632       a_ = simde__m256d_to_private(a);
1633 
1634     SIMDE_VECTORIZE
1635     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
1636       r_.f64[i] =  simde_math_cos(a_.f64[i]);
1637     }
1638 
1639     return simde__m256d_from_private(r_);
1640   #endif
1641 }
1642 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1643   #undef _mm256_cos_pd
1644   #define _mm256_cos_pd(a) simde_mm256_cos_pd(a)
1645 #endif
1646 
1647 SIMDE_FUNCTION_ATTRIBUTES
1648 simde__m512
simde_mm512_cos_ps(simde__m512 a)1649 simde_mm512_cos_ps (simde__m512 a) {
1650   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
1651     return _mm512_cos_ps(a);
1652   #else
1653     simde__m512_private
1654       r_,
1655       a_ = simde__m512_to_private(a);
1656 
1657     SIMDE_VECTORIZE
1658     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
1659       r_.f32[i] =  simde_math_cosf(a_.f32[i]);
1660     }
1661 
1662     return simde__m512_from_private(r_);
1663   #endif
1664 }
1665 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1666   #undef _mm512_cos_ps
1667   #define _mm512_cos_ps(a) simde_mm512_cos_ps(a)
1668 #endif
1669 
1670 SIMDE_FUNCTION_ATTRIBUTES
1671 simde__m512d
simde_mm512_cos_pd(simde__m512d a)1672 simde_mm512_cos_pd (simde__m512d a) {
1673   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
1674     return _mm512_cos_pd(a);
1675   #else
1676     simde__m512d_private
1677       r_,
1678       a_ = simde__m512d_to_private(a);
1679 
1680     SIMDE_VECTORIZE
1681     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
1682       r_.f64[i] =  simde_math_cos(a_.f64[i]);
1683     }
1684 
1685     return simde__m512d_from_private(r_);
1686   #endif
1687 }
1688 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1689   #undef _mm512_cos_pd
1690   #define _mm512_cos_pd(a) simde_mm512_cos_pd(a)
1691 #endif
1692 
1693 SIMDE_FUNCTION_ATTRIBUTES
1694 simde__m512
1695 simde_mm512_mask_cos_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
1696   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
1697     return _mm512_mask_cos_ps(src, k, a);
1698   #else
1699     return simde_mm512_mask_mov_ps(src, k, simde_mm512_cos_ps(a));
1700   #endif
1701 }
1702 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1703   #undef _mm512_mask_cos_ps
1704   #define _mm512_mask_cos_ps(src, k, a) simde_mm512_mask_cos_ps(src, k, a)
1705 #endif
1706 
1707 SIMDE_FUNCTION_ATTRIBUTES
1708 simde__m512d
1709 simde_mm512_mask_cos_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
1710   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
1711     return _mm512_mask_cos_pd(src, k, a);
1712   #else
1713     return simde_mm512_mask_mov_pd(src, k, simde_mm512_cos_pd(a));
1714   #endif
1715 }
1716 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1717   #undef _mm512_mask_cos_pd
1718   #define _mm512_mask_cos_pd(src, k, a) simde_mm512_mask_cos_pd(src, k, a)
1719 #endif
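/* Mask semantics, as implemented via simde_mm512_mask_mov_*() above:
 * lanes whose bit in k is set receive cos(a); cleared lanes pass the
 * corresponding lane of src through unchanged. With k = 0x0F, for
 * example, lanes 0-3 hold cos(a) and lanes 4-7 hold src. */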
1720 
1721 SIMDE_FUNCTION_ATTRIBUTES
1722 simde__m128
1723 simde_mm_cosd_ps (simde__m128 a) {
1724   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
1725     return _mm_cosd_ps(a);
1726   #else
1727     simde__m128_private
1728       r_,
1729       a_ = simde__m128_to_private(a);
1730 
1731     SIMDE_VECTORIZE
1732     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
1733       r_.f32[i] =  simde_math_cosf(simde_math_deg2radf(a_.f32[i]));
1734     }
1735 
1736     return simde__m128_from_private(r_);
1737   #endif
1738 }
1739 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1740   #undef _mm_cosd_ps
1741   #define _mm_cosd_ps(a) simde_mm_cosd_ps(a)
1742 #endif
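/* The cosd family takes degrees: each lane is scaled by pi/180 through
 * simde_math_deg2radf() before the cosine, so, e.g.,
 * simde_mm_cosd_ps(simde_mm_set1_ps(60.0f)) yields lanes of ~0.5f. */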
1743 
1744 SIMDE_FUNCTION_ATTRIBUTES
1745 simde__m128d
1746 simde_mm_cosd_pd (simde__m128d a) {
1747   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
1748     return _mm_cosd_pd(a);
1749   #else
1750     simde__m128d_private
1751       r_,
1752       a_ = simde__m128d_to_private(a);
1753 
1754     SIMDE_VECTORIZE
1755     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
1756       r_.f64[i] =  simde_math_cos(simde_math_deg2rad(a_.f64[i]));
1757     }
1758 
1759     return simde__m128d_from_private(r_);
1760   #endif
1761 }
1762 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1763   #undef _mm_cosd_pd
1764   #define _mm_cosd_pd(a) simde_mm_cosd_pd(a)
1765 #endif
1766 
1767 SIMDE_FUNCTION_ATTRIBUTES
1768 simde__m256
1769 simde_mm256_cosd_ps (simde__m256 a) {
1770   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
1771     return _mm256_cosd_ps(a);
1772   #else
1773     simde__m256_private
1774       r_,
1775       a_ = simde__m256_to_private(a);
1776 
1777     SIMDE_VECTORIZE
1778     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
1779       r_.f32[i] =  simde_math_cosf(simde_math_deg2radf(a_.f32[i]));
1780     }
1781 
1782     return simde__m256_from_private(r_);
1783   #endif
1784 }
1785 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1786   #undef _mm256_cosd_ps
1787   #define _mm256_cosd_ps(a) simde_mm256_cosd_ps(a)
1788 #endif
1789 
1790 SIMDE_FUNCTION_ATTRIBUTES
1791 simde__m256d
1792 simde_mm256_cosd_pd (simde__m256d a) {
1793   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
1794     return _mm256_cosd_pd(a);
1795   #else
1796     simde__m256d_private
1797       r_,
1798       a_ = simde__m256d_to_private(a);
1799 
1800     SIMDE_VECTORIZE
1801     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
1802       r_.f64[i] =  simde_math_cos(simde_math_deg2rad(a_.f64[i]));
1803     }
1804 
1805     return simde__m256d_from_private(r_);
1806   #endif
1807 }
1808 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1809   #undef _mm256_cosd_pd
1810   #define _mm256_cosd_pd(a) simde_mm256_cosd_pd(a)
1811 #endif
1812
1814 SIMDE_FUNCTION_ATTRIBUTES
1815 simde__m512
1816 simde_mm512_cosd_ps (simde__m512 a) {
1817   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
1818     return _mm512_cosd_ps(a);
1819   #else
1820     simde__m512_private
1821       r_,
1822       a_ = simde__m512_to_private(a);
1823 
1824     SIMDE_VECTORIZE
1825     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
1826       r_.f32[i] =  simde_math_cosf(simde_math_deg2radf(a_.f32[i]));
1827     }
1828 
1829     return simde__m512_from_private(r_);
1830   #endif
1831 }
1832 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1833   #undef _mm512_cosd_ps
1834   #define _mm512_cosd_ps(a) simde_mm512_cosd_ps(a)
1835 #endif
1836 
1837 SIMDE_FUNCTION_ATTRIBUTES
1838 simde__m512d
1839 simde_mm512_cosd_pd (simde__m512d a) {
1840   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
1841     return _mm512_cosd_pd(a);
1842   #else
1843     simde__m512d_private
1844       r_,
1845       a_ = simde__m512d_to_private(a);
1846 
1847     SIMDE_VECTORIZE
1848     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
1849       r_.f64[i] =  simde_math_cos(simde_math_deg2rad(a_.f64[i]));
1850     }
1851 
1852     return simde__m512d_from_private(r_);
1853   #endif
1854 }
1855 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1856   #undef _mm512_cosd_pd
1857   #define _mm512_cosd_pd(a) simde_mm512_cosd_pd(a)
1858 #endif
1859 
1860 SIMDE_FUNCTION_ATTRIBUTES
1861 simde__m512
1862 simde_mm512_mask_cosd_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
1863   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
1864     return _mm512_mask_cosd_ps(src, k, a);
1865   #else
1866     return simde_mm512_mask_mov_ps(src, k, simde_mm512_cosd_ps(a));
1867   #endif
1868 }
1869 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1870   #undef _mm512_mask_cosd_ps
1871   #define _mm512_mask_cosd_ps(src, k, a) simde_mm512_mask_cosd_ps(src, k, a)
1872 #endif
1873 
1874 SIMDE_FUNCTION_ATTRIBUTES
1875 simde__m512d
1876 simde_mm512_mask_cosd_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
1877   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
1878     return _mm512_mask_cosd_pd(src, k, a);
1879   #else
1880     return simde_mm512_mask_mov_pd(src, k, simde_mm512_cosd_pd(a));
1881   #endif
1882 }
1883 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1884   #undef _mm512_mask_cosd_pd
1885   #define _mm512_mask_cosd_pd(src, k, a) simde_mm512_mask_cosd_pd(src, k, a)
1886 #endif
1887 
1888 SIMDE_FUNCTION_ATTRIBUTES
1889 simde__m128
1890 simde_mm_cosh_ps (simde__m128 a) {
1891   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
1892     return _mm_cosh_ps(a);
1893   #else
1894     simde__m128_private
1895       r_,
1896       a_ = simde__m128_to_private(a);
1897 
1898     SIMDE_VECTORIZE
1899     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
1900       r_.f32[i] =  simde_math_coshf(a_.f32[i]);
1901     }
1902 
1903     return simde__m128_from_private(r_);
1904   #endif
1905 }
1906 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1907   #undef _mm_cosh_ps
1908   #define _mm_cosh_ps(a) simde_mm_cosh_ps(a)
1909 #endif
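/* cosh(x) = (e^x + e^-x) / 2, so cosh(0) = 1 and the result is always
 * >= 1. Since it grows like e^|x| / 2, the fallback overflows to +inf
 * once |x| exceeds roughly 89 for float lanes (roughly 710 for double). */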
1910 
1911 SIMDE_FUNCTION_ATTRIBUTES
1912 simde__m128d
1913 simde_mm_cosh_pd (simde__m128d a) {
1914   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
1915     return _mm_cosh_pd(a);
1916   #else
1917     simde__m128d_private
1918       r_,
1919       a_ = simde__m128d_to_private(a);
1920 
1921     SIMDE_VECTORIZE
1922     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
1923       r_.f64[i] =  simde_math_cosh(a_.f64[i]);
1924     }
1925 
1926     return simde__m128d_from_private(r_);
1927   #endif
1928 }
1929 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1930   #undef _mm_cosh_pd
1931   #define _mm_cosh_pd(a) simde_mm_cosh_pd(a)
1932 #endif
1933 
1934 SIMDE_FUNCTION_ATTRIBUTES
1935 simde__m256
1936 simde_mm256_cosh_ps (simde__m256 a) {
1937   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
1938     return _mm256_cosh_ps(a);
1939   #else
1940     simde__m256_private
1941       r_,
1942       a_ = simde__m256_to_private(a);
1943 
1944     SIMDE_VECTORIZE
1945     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
1946       r_.f32[i] =  simde_math_coshf(a_.f32[i]);
1947     }
1948 
1949     return simde__m256_from_private(r_);
1950   #endif
1951 }
1952 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1953   #undef _mm256_cosh_ps
1954   #define _mm256_cosh_ps(a) simde_mm256_cosh_ps(a)
1955 #endif
1956
1958 SIMDE_FUNCTION_ATTRIBUTES
1959 simde__m256d
1960 simde_mm256_cosh_pd (simde__m256d a) {
1961   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
1962     return _mm256_cosh_pd(a);
1963   #else
1964     simde__m256d_private
1965       r_,
1966       a_ = simde__m256d_to_private(a);
1967 
1968     SIMDE_VECTORIZE
1969     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
1970       r_.f64[i] =  simde_math_cosh(a_.f64[i]);
1971     }
1972 
1973     return simde__m256d_from_private(r_);
1974   #endif
1975 }
1976 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
1977   #undef _mm256_cosh_pd
1978   #define _mm256_cosh_pd(a) simde_mm256_cosh_pd(a)
1979 #endif
1980 
1981 SIMDE_FUNCTION_ATTRIBUTES
1982 simde__m512
1983 simde_mm512_cosh_ps (simde__m512 a) {
1984   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
1985     return _mm512_cosh_ps(a);
1986   #else
1987     simde__m512_private
1988       r_,
1989       a_ = simde__m512_to_private(a);
1990 
1991     SIMDE_VECTORIZE
1992     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
1993       r_.f32[i] =  simde_math_coshf(a_.f32[i]);
1994     }
1995 
1996     return simde__m512_from_private(r_);
1997   #endif
1998 }
1999 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2000   #undef _mm512_cosh_ps
2001   #define _mm512_cosh_ps(a) simde_mm512_cosh_ps(a)
2002 #endif
2003 
2004 SIMDE_FUNCTION_ATTRIBUTES
2005 simde__m512d
2006 simde_mm512_cosh_pd (simde__m512d a) {
2007   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2008     return _mm512_cosh_pd(a);
2009   #else
2010     simde__m512d_private
2011       r_,
2012       a_ = simde__m512d_to_private(a);
2013 
2014     SIMDE_VECTORIZE
2015     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
2016       r_.f64[i] =  simde_math_cosh(a_.f64[i]);
2017     }
2018 
2019     return simde__m512d_from_private(r_);
2020   #endif
2021 }
2022 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2023   #undef _mm512_cosh_pd
2024   #define _mm512_cosh_pd(a) simde_mm512_cosh_pd(a)
2025 #endif
2026 
2027 SIMDE_FUNCTION_ATTRIBUTES
2028 simde__m512
2029 simde_mm512_mask_cosh_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
2030   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2031     return _mm512_mask_cosh_ps(src, k, a);
2032   #else
2033     return simde_mm512_mask_mov_ps(src, k, simde_mm512_cosh_ps(a));
2034   #endif
2035 }
2036 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2037   #undef _mm512_mask_cosh_ps
2038   #define _mm512_mask_cosh_ps(src, k, a) simde_mm512_mask_cosh_ps(src, k, a)
2039 #endif
2040 
2041 SIMDE_FUNCTION_ATTRIBUTES
2042 simde__m512d
2043 simde_mm512_mask_cosh_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
2044   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2045     return _mm512_mask_cosh_pd(src, k, a);
2046   #else
2047     return simde_mm512_mask_mov_pd(src, k, simde_mm512_cosh_pd(a));
2048   #endif
2049 }
2050 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2051   #undef _mm512_mask_cosh_pd
2052   #define _mm512_mask_cosh_pd(src, k, a) simde_mm512_mask_cosh_pd(src, k, a)
2053 #endif
2054 
2055 SIMDE_FUNCTION_ATTRIBUTES
2056 simde__m128i
2057 simde_mm_div_epi8 (simde__m128i a, simde__m128i b) {
2058   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
2059     return _mm_div_epi8(a, b);
2060   #else
2061     simde__m128i_private
2062       r_,
2063       a_ = simde__m128i_to_private(a),
2064       b_ = simde__m128i_to_private(b);
2065 
2066     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2067       r_.i8 = a_.i8 / b_.i8;
2070     #else
2071       SIMDE_VECTORIZE
2072       for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
2073         r_.i8[i] = a_.i8[i] / b_.i8[i];
2074       }
2075     #endif
2076 
2077     return simde__m128i_from_private(r_);
2078   #endif
2079 }
2080 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2081   #undef _mm_div_epi8
2082   #define _mm_div_epi8(a, b) simde_mm_div_epi8((a), (b))
2083 #endif
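/* Note on the div family: there is no SIMDE_WASM_SIMD128_NATIVE branch
 * because WASM SIMD128 defines division only for f32x4/f64x2, never for
 * integer lanes. Compilers with GNU-style vector extensions take the
 * SIMDE_VECTOR_SUBSCRIPT_OPS path, which divides the vector members
 * directly; the element loop is the portable fallback. Both compute the
 * same lane-wise C quotient. */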
2084 
2085 SIMDE_FUNCTION_ATTRIBUTES
2086 simde__m128i
2087 simde_mm_div_epi16 (simde__m128i a, simde__m128i b) {
2088   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
2089     return _mm_div_epi16(a, b);
2090   #else
2091     simde__m128i_private
2092       r_,
2093       a_ = simde__m128i_to_private(a),
2094       b_ = simde__m128i_to_private(b);
2095 
2096     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2097       r_.i16 = a_.i16 / b_.i16;
2100     #else
2101       SIMDE_VECTORIZE
2102       for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
2103         r_.i16[i] = a_.i16[i] / b_.i16[i];
2104       }
2105     #endif
2106 
2107     return simde__m128i_from_private(r_);
2108   #endif
2109 }
2110 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2111   #undef _mm_div_epi16
2112   #define _mm_div_epi16(a, b) simde_mm_div_epi16((a), (b))
2113 #endif
2114 
2115 SIMDE_FUNCTION_ATTRIBUTES
2116 simde__m128i
2117 simde_mm_div_epi32 (simde__m128i a, simde__m128i b) {
2118   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
2119     return _mm_div_epi32(a, b);
2120   #else
2121     simde__m128i_private
2122       r_,
2123       a_ = simde__m128i_to_private(a),
2124       b_ = simde__m128i_to_private(b);
2125 
2126     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2127       r_.i32 = a_.i32 / b_.i32;
2130     #else
2131       SIMDE_VECTORIZE
2132       for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
2133         r_.i32[i] = a_.i32[i] / b_.i32[i];
2134       }
2135     #endif
2136 
2137     return simde__m128i_from_private(r_);
2138   #endif
2139 }
2140 #define simde_mm_idiv_epi32(a, b) simde_mm_div_epi32(a, b)
2141 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2142   #undef _mm_div_epi32
2143   #define _mm_div_epi32(a, b) simde_mm_div_epi32(a, b)
2144   #undef _mm_idiv_epi32
2145   #define _mm_idiv_epi32(a, b) simde_mm_div_epi32(a, b)
2146 #endif
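/* Division follows C semantics: truncation toward zero, so -7 / 2 gives
 * -3 (not -4), and a zero divisor (or INT_MIN / -1) is undefined
 * behaviour exactly as in scalar C. _mm_idiv_epi32 is simply an alias
 * for _mm_div_epi32. */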
2147 
2148 SIMDE_FUNCTION_ATTRIBUTES
2149 simde__m128i
2150 simde_mm_div_epi64 (simde__m128i a, simde__m128i b) {
2151   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
2152     return _mm_div_epi64(a, b);
2153   #else
2154     simde__m128i_private
2155       r_,
2156       a_ = simde__m128i_to_private(a),
2157       b_ = simde__m128i_to_private(b);
2158 
2159     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2160       r_.i64 = a_.i64 / b_.i64;
2163     #else
2164       SIMDE_VECTORIZE
2165       for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
2166         r_.i64[i] = a_.i64[i] / b_.i64[i];
2167       }
2168     #endif
2169 
2170     return simde__m128i_from_private(r_);
2171   #endif
2172 }
2173 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2174   #undef _mm_div_epi64
2175   #define _mm_div_epi64(a, b) simde_mm_div_epi64((a), (b))
2176 #endif
2177 
2178 SIMDE_FUNCTION_ATTRIBUTES
2179 simde__m128i
2180 simde_mm_div_epu8 (simde__m128i a, simde__m128i b) {
2181   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
2182     return _mm_div_epu8(a, b);
2183   #else
2184     simde__m128i_private
2185       r_,
2186       a_ = simde__m128i_to_private(a),
2187       b_ = simde__m128i_to_private(b);
2188 
2189     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2190       r_.u8 = a_.u8 / b_.u8;
2193     #else
2194       SIMDE_VECTORIZE
2195       for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
2196         r_.u8[i] = a_.u8[i] / b_.u8[i];
2197       }
2198     #endif
2199 
2200     return simde__m128i_from_private(r_);
2201   #endif
2202 }
2203 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2204   #undef _mm_div_epu8
2205   #define _mm_div_epu8(a, b) simde_mm_div_epu8((a), (b))
2206 #endif
2207 
2208 SIMDE_FUNCTION_ATTRIBUTES
2209 simde__m128i
2210 simde_mm_div_epu16 (simde__m128i a, simde__m128i b) {
2211   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
2212     return _mm_div_epu16(a, b);
2213   #else
2214     simde__m128i_private
2215       r_,
2216       a_ = simde__m128i_to_private(a),
2217       b_ = simde__m128i_to_private(b);
2218 
2219     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2220       r_.u16 = a_.u16 / b_.u16;
2223     #else
2224       SIMDE_VECTORIZE
2225       for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
2226         r_.u16[i] = a_.u16[i] / b_.u16[i];
2227       }
2228     #endif
2229 
2230     return simde__m128i_from_private(r_);
2231   #endif
2232 }
2233 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2234   #undef _mm_div_epu16
2235   #define _mm_div_epu16(a, b) simde_mm_div_epu16((a), (b))
2236 #endif
2237 
2238 SIMDE_FUNCTION_ATTRIBUTES
2239 simde__m128i
2240 simde_mm_div_epu32 (simde__m128i a, simde__m128i b) {
2241   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
2242     return _mm_div_epu32(a, b);
2243   #else
2244     simde__m128i_private
2245       r_,
2246       a_ = simde__m128i_to_private(a),
2247       b_ = simde__m128i_to_private(b);
2248 
2249     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2250       r_.u32 = a_.u32 / b_.u32;
2253     #else
2254       SIMDE_VECTORIZE
2255       for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
2256         r_.u32[i] = a_.u32[i] / b_.u32[i];
2257       }
2258     #endif
2259 
2260     return simde__m128i_from_private(r_);
2261   #endif
2262 }
2263 #define simde_mm_udiv_epi32(a, b) simde_mm_div_epu32(a, b)
2264 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2265   #undef _mm_div_epu32
2266   #define _mm_div_epu32(a, b) simde_mm_div_epu32(a, b)
2267   #undef _mm_udiv_epi32
2268   #define _mm_udiv_epi32(a, b) simde_mm_div_epu32(a, b)
2269 #endif
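/* The epu variants divide as unsigned integers: 0xFFFFFFFFu / 2 yields
 * 0x7FFFFFFF here, where the signed epi32 version would compute
 * -1 / 2 == 0. _mm_udiv_epi32 aliases _mm_div_epu32, mirroring SVML. */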
2270 
2271 SIMDE_FUNCTION_ATTRIBUTES
2272 simde__m128i
2273 simde_mm_div_epu64 (simde__m128i a, simde__m128i b) {
2274   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
2275     return _mm_div_epu64(a, b);
2276   #else
2277     simde__m128i_private
2278       r_,
2279       a_ = simde__m128i_to_private(a),
2280       b_ = simde__m128i_to_private(b);
2281 
2282     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2283       r_.u64 = a_.u64 / b_.u64;
2286     #else
2287       SIMDE_VECTORIZE
2288       for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
2289         r_.u64[i] = a_.u64[i] / b_.u64[i];
2290       }
2291     #endif
2292 
2293     return simde__m128i_from_private(r_);
2294   #endif
2295 }
2296 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2297   #undef _mm_div_epu64
2298   #define _mm_div_epu64(a, b) simde_mm_div_epu64((a), (b))
2299 #endif
2300 
2301 SIMDE_FUNCTION_ATTRIBUTES
2302 simde__m256i
2303 simde_mm256_div_epi8 (simde__m256i a, simde__m256i b) {
2304   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
2305     return _mm256_div_epi8(a, b);
2306   #else
2307     simde__m256i_private
2308       r_,
2309       a_ = simde__m256i_to_private(a),
2310       b_ = simde__m256i_to_private(b);
2311 
2312     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2313       r_.i8 = a_.i8 / b_.i8;
2314     #else
2315       SIMDE_VECTORIZE
2316       for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
2317         r_.i8[i] = a_.i8[i] / b_.i8[i];
2318       }
2319     #endif
2320 
2321     return simde__m256i_from_private(r_);
2322   #endif
2323 }
2324 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2325   #undef _mm256_div_epi8
2326   #define _mm256_div_epi8(a, b) simde_mm256_div_epi8((a), (b))
2327 #endif
2328 
2329 SIMDE_FUNCTION_ATTRIBUTES
2330 simde__m256i
2331 simde_mm256_div_epi16 (simde__m256i a, simde__m256i b) {
2332   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
2333     return _mm256_div_epi16(a, b);
2334   #else
2335     simde__m256i_private
2336       r_,
2337       a_ = simde__m256i_to_private(a),
2338       b_ = simde__m256i_to_private(b);
2339 
2340     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2341       r_.i16 = a_.i16 / b_.i16;
2342     #else
2343       SIMDE_VECTORIZE
2344       for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
2345         r_.i16[i] = a_.i16[i] / b_.i16[i];
2346       }
2347     #endif
2348 
2349     return simde__m256i_from_private(r_);
2350   #endif
2351 }
2352 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2353   #undef _mm256_div_epi16
2354   #define _mm256_div_epi16(a, b) simde_mm256_div_epi16((a), (b))
2355 #endif
2356 
2357 SIMDE_FUNCTION_ATTRIBUTES
2358 simde__m256i
2359 simde_mm256_div_epi32 (simde__m256i a, simde__m256i b) {
2360   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
2361     return _mm256_div_epi32(a, b);
2362   #else
2363     simde__m256i_private
2364       r_,
2365       a_ = simde__m256i_to_private(a),
2366       b_ = simde__m256i_to_private(b);
2367 
2368     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2369       r_.i32 = a_.i32 / b_.i32;
2370     #else
2371       SIMDE_VECTORIZE
2372       for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
2373         r_.i32[i] = a_.i32[i] / b_.i32[i];
2374       }
2375     #endif
2376 
2377     return simde__m256i_from_private(r_);
2378   #endif
2379 }
2380 #define simde_mm256_idiv_epi32(a, b) simde_mm256_div_epi32(a, b)
2381 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2382   #undef _mm256_div_epi32
2383   #define _mm256_div_epi32(a, b) simde_mm256_div_epi32(a, b)
2384   #undef _mm256_idiv_epi32
2385   #define _mm256_idiv_epi32(a, b) simde_mm256_div_epi32(a, b)
2386 #endif
2387 
2388 SIMDE_FUNCTION_ATTRIBUTES
2389 simde__m256i
2390 simde_mm256_div_epi64 (simde__m256i a, simde__m256i b) {
2391   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
2392     return _mm256_div_epi64(a, b);
2393   #else
2394     simde__m256i_private
2395       r_,
2396       a_ = simde__m256i_to_private(a),
2397       b_ = simde__m256i_to_private(b);
2398 
2399     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2400       r_.i64 = a_.i64 / b_.i64;
2401     #else
2402       SIMDE_VECTORIZE
2403       for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
2404         r_.i64[i] = a_.i64[i] / b_.i64[i];
2405       }
2406     #endif
2407 
2408     return simde__m256i_from_private(r_);
2409   #endif
2410 }
2411 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2412   #undef _mm256_div_epi64
2413   #define _mm256_div_epi64(a, b) simde_mm256_div_epi64((a), (b))
2414 #endif
2415 
2416 SIMDE_FUNCTION_ATTRIBUTES
2417 simde__m256i
2418 simde_mm256_div_epu8 (simde__m256i a, simde__m256i b) {
2419   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
2420     return _mm256_div_epu8(a, b);
2421   #else
2422     simde__m256i_private
2423       r_,
2424       a_ = simde__m256i_to_private(a),
2425       b_ = simde__m256i_to_private(b);
2426 
2427     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2428       r_.u8 = a_.u8 / b_.u8;
2429     #else
2430       SIMDE_VECTORIZE
2431       for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
2432         r_.u8[i] = a_.u8[i] / b_.u8[i];
2433       }
2434     #endif
2435 
2436     return simde__m256i_from_private(r_);
2437   #endif
2438 }
2439 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2440   #undef _mm256_div_epu8
2441   #define _mm256_div_epu8(a, b) simde_mm256_div_epu8((a), (b))
2442 #endif
2443 
2444 SIMDE_FUNCTION_ATTRIBUTES
2445 simde__m256i
2446 simde_mm256_div_epu16 (simde__m256i a, simde__m256i b) {
2447   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
2448     return _mm256_div_epu16(a, b);
2449   #else
2450     simde__m256i_private
2451       r_,
2452       a_ = simde__m256i_to_private(a),
2453       b_ = simde__m256i_to_private(b);
2454 
2455     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2456       r_.u16 = a_.u16 / b_.u16;
2457     #else
2458       SIMDE_VECTORIZE
2459       for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
2460         r_.u16[i] = a_.u16[i] / b_.u16[i];
2461       }
2462     #endif
2463 
2464     return simde__m256i_from_private(r_);
2465   #endif
2466 }
2467 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2468   #undef _mm256_div_epu16
2469   #define _mm256_div_epu16(a, b) simde_mm256_div_epu16((a), (b))
2470 #endif
2471 
2472 SIMDE_FUNCTION_ATTRIBUTES
2473 simde__m256i
2474 simde_mm256_div_epu32 (simde__m256i a, simde__m256i b) {
2475   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
2476     return _mm256_div_epu32(a, b);
2477   #else
2478     simde__m256i_private
2479       r_,
2480       a_ = simde__m256i_to_private(a),
2481       b_ = simde__m256i_to_private(b);
2482 
2483     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2484       r_.u32 = a_.u32 / b_.u32;
2485     #else
2486       SIMDE_VECTORIZE
2487       for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
2488         r_.u32[i] = a_.u32[i] / b_.u32[i];
2489       }
2490     #endif
2491 
2492     return simde__m256i_from_private(r_);
2493   #endif
2494 }
2495 #define simde_mm256_udiv_epi32(a, b) simde_mm256_div_epu32(a, b)
2496 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2497   #undef _mm256_div_epu32
2498   #define _mm256_div_epu32(a, b) simde_mm256_div_epu32(a, b)
2499   #undef _mm256_udiv_epi32
2500   #define _mm256_udiv_epi32(a, b) simde_mm256_div_epu32(a, b)
2501 #endif
2502 
2503 SIMDE_FUNCTION_ATTRIBUTES
2504 simde__m256i
2505 simde_mm256_div_epu64 (simde__m256i a, simde__m256i b) {
2506   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
2507     return _mm256_div_epu64(a, b);
2508   #else
2509     simde__m256i_private
2510       r_,
2511       a_ = simde__m256i_to_private(a),
2512       b_ = simde__m256i_to_private(b);
2513 
2514     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2515       r_.u64 = a_.u64 / b_.u64;
2516     #else
2517       SIMDE_VECTORIZE
2518       for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
2519         r_.u64[i] = a_.u64[i] / b_.u64[i];
2520       }
2521     #endif
2522 
2523     return simde__m256i_from_private(r_);
2524   #endif
2525 }
2526 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2527   #undef _mm256_div_epu64
2528   #define _mm256_div_epu64(a, b) simde_mm256_div_epu64((a), (b))
2529 #endif
2530 
2531 SIMDE_FUNCTION_ATTRIBUTES
2532 simde__m512i
2533 simde_mm512_div_epi8 (simde__m512i a, simde__m512i b) {
2534   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2535     return _mm512_div_epi8(a, b);
2536   #else
2537     simde__m512i_private
2538       r_,
2539       a_ = simde__m512i_to_private(a),
2540       b_ = simde__m512i_to_private(b);
2541 
2542     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2543       r_.i8 = a_.i8 / b_.i8;
2544     #else
2545       SIMDE_VECTORIZE
2546       for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
2547         r_.i8[i] = a_.i8[i] / b_.i8[i];
2548       }
2549     #endif
2550 
2551     return simde__m512i_from_private(r_);
2552   #endif
2553 }
2554 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2555   #undef _mm512_div_epi8
2556   #define _mm512_div_epi8(a, b) simde_mm512_div_epi8((a), (b))
2557 #endif
2558 
2559 SIMDE_FUNCTION_ATTRIBUTES
2560 simde__m512i
2561 simde_mm512_div_epi16 (simde__m512i a, simde__m512i b) {
2562   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2563     return _mm512_div_epi16(a, b);
2564   #else
2565     simde__m512i_private
2566       r_,
2567       a_ = simde__m512i_to_private(a),
2568       b_ = simde__m512i_to_private(b);
2569 
2570     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2571       r_.i16 = a_.i16 / b_.i16;
2572     #else
2573       SIMDE_VECTORIZE
2574       for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
2575         r_.i16[i] = a_.i16[i] / b_.i16[i];
2576       }
2577     #endif
2578 
2579     return simde__m512i_from_private(r_);
2580   #endif
2581 }
2582 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2583   #undef _mm512_div_epi16
2584   #define _mm512_div_epi16(a, b) simde_mm512_div_epi16((a), (b))
2585 #endif
2586 
2587 SIMDE_FUNCTION_ATTRIBUTES
2588 simde__m512i
2589 simde_mm512_div_epi32 (simde__m512i a, simde__m512i b) {
2590   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2591     return _mm512_div_epi32(a, b);
2592   #else
2593     simde__m512i_private
2594       r_,
2595       a_ = simde__m512i_to_private(a),
2596       b_ = simde__m512i_to_private(b);
2597 
2598     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2599       r_.i32 = a_.i32 / b_.i32;
2600     #else
2601       SIMDE_VECTORIZE
2602       for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
2603         r_.i32[i] = a_.i32[i] / b_.i32[i];
2604       }
2605     #endif
2606 
2607     return simde__m512i_from_private(r_);
2608   #endif
2609 }
2610 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2611   #undef _mm512_div_epi32
2612   #define _mm512_div_epi32(a, b) simde_mm512_div_epi32((a), (b))
2613 #endif
2614 
2615 SIMDE_FUNCTION_ATTRIBUTES
2616 simde__m512i
2617 simde_mm512_mask_div_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
2618   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2619     return _mm512_mask_div_epi32(src, k, a, b);
2620   #else
2621     return simde_mm512_mask_mov_epi32(src, k, simde_mm512_div_epi32(a, b));
2622   #endif
2623 }
2624 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2625   #undef _mm512_mask_div_epi32
2626   #define _mm512_mask_div_epi32(src, k, a, b) simde_mm512_mask_div_epi32(src, k, a, b)
2627 #endif
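/* Caveat for the emulated path: simde_mm512_div_epi32() runs on every
 * lane before the blend, so a zero divisor in a masked-off lane still
 * invokes undefined behaviour; callers must not rely on the mask to
 * guard invalid divisors. */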
2628 
2629 SIMDE_FUNCTION_ATTRIBUTES
2630 simde__m512i
2631 simde_mm512_div_epi64 (simde__m512i a, simde__m512i b) {
2632   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2633     return _mm512_div_epi64(a, b);
2634   #else
2635     simde__m512i_private
2636       r_,
2637       a_ = simde__m512i_to_private(a),
2638       b_ = simde__m512i_to_private(b);
2639 
2640     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2641       r_.i64 = a_.i64 / b_.i64;
2642     #else
2643       SIMDE_VECTORIZE
2644       for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
2645         r_.i64[i] = a_.i64[i] / b_.i64[i];
2646       }
2647     #endif
2648 
2649     return simde__m512i_from_private(r_);
2650   #endif
2651 }
2652 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2653   #undef _mm512_div_epi64
2654   #define _mm512_div_epi64(a, b) simde_mm512_div_epi64((a), (b))
2655 #endif
2656 
2657 SIMDE_FUNCTION_ATTRIBUTES
2658 simde__m512i
2659 simde_mm512_div_epu8 (simde__m512i a, simde__m512i b) {
2660   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2661     return _mm512_div_epu8(a, b);
2662   #else
2663     simde__m512i_private
2664       r_,
2665       a_ = simde__m512i_to_private(a),
2666       b_ = simde__m512i_to_private(b);
2667 
2668     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2669       r_.u8 = a_.u8 / b_.u8;
2670     #else
2671       SIMDE_VECTORIZE
2672       for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
2673         r_.u8[i] = a_.u8[i] / b_.u8[i];
2674       }
2675     #endif
2676 
2677     return simde__m512i_from_private(r_);
2678   #endif
2679 }
2680 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2681   #undef _mm512_div_epu8
2682   #define _mm512_div_epu8(a, b) simde_mm512_div_epu8((a), (b))
2683 #endif
2684 
2685 SIMDE_FUNCTION_ATTRIBUTES
2686 simde__m512i
2687 simde_mm512_div_epu16 (simde__m512i a, simde__m512i b) {
2688   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2689     return _mm512_div_epu16(a, b);
2690   #else
2691     simde__m512i_private
2692       r_,
2693       a_ = simde__m512i_to_private(a),
2694       b_ = simde__m512i_to_private(b);
2695 
2696     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2697       r_.u16 = a_.u16 / b_.u16;
2698     #else
2699       SIMDE_VECTORIZE
2700       for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
2701         r_.u16[i] = a_.u16[i] / b_.u16[i];
2702       }
2703     #endif
2704 
2705     return simde__m512i_from_private(r_);
2706   #endif
2707 }
2708 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2709   #undef _mm512_div_epu16
2710   #define _mm512_div_epu16(a, b) simde_mm512_div_epu16((a), (b))
2711 #endif
2712 
2713 SIMDE_FUNCTION_ATTRIBUTES
2714 simde__m512i
2715 simde_mm512_div_epu32 (simde__m512i a, simde__m512i b) {
2716   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2717     return _mm512_div_epu32(a, b);
2718   #else
2719     simde__m512i_private
2720       r_,
2721       a_ = simde__m512i_to_private(a),
2722       b_ = simde__m512i_to_private(b);
2723 
2724     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2725       r_.u32 = a_.u32 / b_.u32;
2726     #else
2727       SIMDE_VECTORIZE
2728       for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
2729         r_.u32[i] = a_.u32[i] / b_.u32[i];
2730       }
2731     #endif
2732 
2733     return simde__m512i_from_private(r_);
2734   #endif
2735 }
2736 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2737   #undef _mm512_div_epu32
2738   #define _mm512_div_epu32(a, b) simde_mm512_div_epu32((a), (b))
2739 #endif
2740 
2741 SIMDE_FUNCTION_ATTRIBUTES
2742 simde__m512i
2743 simde_mm512_mask_div_epu32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
2744   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2745     return _mm512_mask_div_epu32(src, k, a, b);
2746   #else
2747     return simde_mm512_mask_mov_epi32(src, k, simde_mm512_div_epu32(a, b));
2748   #endif
2749 }
2750 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2751   #undef _mm512_mask_div_epu32
2752   #define _mm512_mask_div_epu32(src, k, a, b) simde_mm512_mask_div_epu32(src, k, a, b)
2753 #endif
2754 
2755 SIMDE_FUNCTION_ATTRIBUTES
2756 simde__m512i
2757 simde_mm512_div_epu64 (simde__m512i a, simde__m512i b) {
2758   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2759     return _mm512_div_epu64(a, b);
2760   #else
2761     simde__m512i_private
2762       r_,
2763       a_ = simde__m512i_to_private(a),
2764       b_ = simde__m512i_to_private(b);
2765 
2766     #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
2767       r_.u64 = a_.u64 / b_.u64;
2768     #else
2769       SIMDE_VECTORIZE
2770       for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
2771         r_.u64[i] = a_.u64[i] / b_.u64[i];
2772       }
2773     #endif
2774 
2775     return simde__m512i_from_private(r_);
2776   #endif
2777 }
2778 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2779   #undef _mm512_div_epu64
2780   #define _mm512_div_epu64(a, b) simde_mm512_div_epu64((a), (b))
2781 #endif
2782 
2783 SIMDE_FUNCTION_ATTRIBUTES
2784 simde__m128
2785 simde_mm_erf_ps (simde__m128 a) {
2786   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
2787     return _mm_erf_ps(a);
2788   #else
2789     simde__m128_private
2790       r_,
2791       a_ = simde__m128_to_private(a);
2792 
2793     SIMDE_VECTORIZE
2794     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
2795       r_.f32[i] =  simde_math_erff(a_.f32[i]);
2796     }
2797 
2798     return simde__m128_from_private(r_);
2799   #endif
2800 }
2801 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2802   #undef _mm_erf_ps
2803   #define _mm_erf_ps(a) simde_mm_erf_ps(a)
2804 #endif
2805 
2806 SIMDE_FUNCTION_ATTRIBUTES
2807 simde__m128d
2808 simde_mm_erf_pd (simde__m128d a) {
2809   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
2810     return _mm_erf_pd(a);
2811   #else
2812     simde__m128d_private
2813       r_,
2814       a_ = simde__m128d_to_private(a);
2815 
2816     SIMDE_VECTORIZE
2817     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
2818       r_.f64[i] =  simde_math_erf(a_.f64[i]);
2819     }
2820 
2821     return simde__m128d_from_private(r_);
2822   #endif
2823 }
2824 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2825   #undef _mm_erf_pd
2826   #define _mm_erf_pd(a) simde_mm_erf_pd(a)
2827 #endif
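/* erf is the Gauss error function,
 *   erf(x) = (2 / sqrt(pi)) * integral from 0 to x of exp(-t^2) dt,
 * an odd function with erf(0) = 0 and erf(x) -> +/-1 as x -> +/-inf, so
 * simde_mm_erf_ps(simde_mm_setzero_ps()) returns all-zero lanes. */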
2828 
2829 SIMDE_FUNCTION_ATTRIBUTES
2830 simde__m256
2831 simde_mm256_erf_ps (simde__m256 a) {
2832   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
2833     return _mm256_erf_ps(a);
2834   #else
2835     simde__m256_private
2836       r_,
2837       a_ = simde__m256_to_private(a);
2838 
2839     SIMDE_VECTORIZE
2840     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
2841       r_.f32[i] =  simde_math_erff(a_.f32[i]);
2842     }
2843 
2844     return simde__m256_from_private(r_);
2845   #endif
2846 }
2847 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2848   #undef _mm256_erf_ps
2849   #define _mm256_erf_ps(a) simde_mm256_erf_ps(a)
2850 #endif
2851
2853 SIMDE_FUNCTION_ATTRIBUTES
2854 simde__m256d
2855 simde_mm256_erf_pd (simde__m256d a) {
2856   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
2857     return _mm256_erf_pd(a);
2858   #else
2859     simde__m256d_private
2860       r_,
2861       a_ = simde__m256d_to_private(a);
2862 
2863     SIMDE_VECTORIZE
2864     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
2865       r_.f64[i] =  simde_math_erf(a_.f64[i]);
2866     }
2867 
2868     return simde__m256d_from_private(r_);
2869   #endif
2870 }
2871 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2872   #undef _mm256_erf_pd
2873   #define _mm256_erf_pd(a) simde_mm256_erf_pd(a)
2874 #endif
2875 
2876 SIMDE_FUNCTION_ATTRIBUTES
2877 simde__m512
2878 simde_mm512_erf_ps (simde__m512 a) {
2879   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2880     return _mm512_erf_ps(a);
2881   #else
2882     simde__m512_private
2883       r_,
2884       a_ = simde__m512_to_private(a);
2885 
2886     SIMDE_VECTORIZE
2887     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
2888       r_.f32[i] =  simde_math_erff(a_.f32[i]);
2889     }
2890 
2891     return simde__m512_from_private(r_);
2892   #endif
2893 }
2894 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2895   #undef _mm512_erf_ps
2896   #define _mm512_erf_ps(a) simde_mm512_erf_ps(a)
2897 #endif
2898 
2899 SIMDE_FUNCTION_ATTRIBUTES
2900 simde__m512d
2901 simde_mm512_erf_pd (simde__m512d a) {
2902   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2903     return _mm512_erf_pd(a);
2904   #else
2905     simde__m512d_private
2906       r_,
2907       a_ = simde__m512d_to_private(a);
2908 
2909     SIMDE_VECTORIZE
2910     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
2911       r_.f64[i] =  simde_math_erf(a_.f64[i]);
2912     }
2913 
2914     return simde__m512d_from_private(r_);
2915   #endif
2916 }
2917 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2918   #undef _mm512_erf_pd
2919   #define _mm512_erf_pd(a) simde_mm512_erf_pd(a)
2920 #endif
2921 
2922 SIMDE_FUNCTION_ATTRIBUTES
2923 simde__m512
2924 simde_mm512_mask_erf_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
2925   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2926     return _mm512_mask_erf_ps(src, k, a);
2927   #else
2928     return simde_mm512_mask_mov_ps(src, k, simde_mm512_erf_ps(a));
2929   #endif
2930 }
2931 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2932   #undef _mm512_mask_erf_ps
2933   #define _mm512_mask_erf_ps(src, k, a) simde_mm512_mask_erf_ps(src, k, a)
2934 #endif
2935 
2936 SIMDE_FUNCTION_ATTRIBUTES
2937 simde__m512d
2938 simde_mm512_mask_erf_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
2939   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
2940     return _mm512_mask_erf_pd(src, k, a);
2941   #else
2942     return simde_mm512_mask_mov_pd(src, k, simde_mm512_erf_pd(a));
2943   #endif
2944 }
2945 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2946   #undef _mm512_mask_erf_pd
2947   #define _mm512_mask_erf_pd(src, k, a) simde_mm512_mask_erf_pd(src, k, a)
2948 #endif
2949 
2950 SIMDE_FUNCTION_ATTRIBUTES
2951 simde__m128
2952 simde_mm_erfc_ps (simde__m128 a) {
2953   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
2954     return _mm_erfc_ps(a);
2955   #else
2956     simde__m128_private
2957       r_,
2958       a_ = simde__m128_to_private(a);
2959 
2960     SIMDE_VECTORIZE
2961     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
2962       r_.f32[i] =  simde_math_erfcf(a_.f32[i]);
2963     }
2964 
2965     return simde__m128_from_private(r_);
2966   #endif
2967 }
2968 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2969   #undef _mm_erfc_ps
2970   #define _mm_erfc_ps(a) simde_mm_erfc_ps(a)
2971 #endif
2972 
2973 SIMDE_FUNCTION_ATTRIBUTES
2974 simde__m128d
2975 simde_mm_erfc_pd (simde__m128d a) {
2976   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
2977     return _mm_erfc_pd(a);
2978   #else
2979     simde__m128d_private
2980       r_,
2981       a_ = simde__m128d_to_private(a);
2982 
2983     SIMDE_VECTORIZE
2984     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
2985       r_.f64[i] =  simde_math_erfc(a_.f64[i]);
2986     }
2987 
2988     return simde__m128d_from_private(r_);
2989   #endif
2990 }
2991 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
2992   #undef _mm_erfc_pd
2993   #define _mm_erfc_pd(a) simde_mm_erfc_pd(a)
2994 #endif
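/* erfc(x) = 1 - erf(x), kept as a separate entry point because the
 * subtraction cancels badly for large x: erf(10.0) rounds to exactly
 * 1.0 in double precision, while simde_math_erfc(10.0) still resolves
 * to ~2.1e-45. */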
2995 
2996 SIMDE_FUNCTION_ATTRIBUTES
2997 simde__m256
2998 simde_mm256_erfc_ps (simde__m256 a) {
2999   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
3000     return _mm256_erfc_ps(a);
3001   #else
3002     simde__m256_private
3003       r_,
3004       a_ = simde__m256_to_private(a);
3005 
3006     SIMDE_VECTORIZE
3007     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
3008       r_.f32[i] =  simde_math_erfcf(a_.f32[i]);
3009     }
3010 
3011     return simde__m256_from_private(r_);
3012   #endif
3013 }
3014 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3015   #undef _mm256_erfc_ps
3016   #define _mm256_erfc_ps(a) simde_mm256_erfc_ps(a)
3017 #endif
3018
3020 SIMDE_FUNCTION_ATTRIBUTES
3021 simde__m256d
3022 simde_mm256_erfc_pd (simde__m256d a) {
3023   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
3024     return _mm256_erfc_pd(a);
3025   #else
3026     simde__m256d_private
3027       r_,
3028       a_ = simde__m256d_to_private(a);
3029 
3030     SIMDE_VECTORIZE
3031     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
3032       r_.f64[i] =  simde_math_erfc(a_.f64[i]);
3033     }
3034 
3035     return simde__m256d_from_private(r_);
3036   #endif
3037 }
3038 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3039   #undef _mm256_erfc_pd
3040   #define _mm256_erfc_pd(a) simde_mm256_erfc_pd(a)
3041 #endif
3042 
3043 SIMDE_FUNCTION_ATTRIBUTES
3044 simde__m512
3045 simde_mm512_erfc_ps (simde__m512 a) {
3046   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
3047     return _mm512_erfc_ps(a);
3048   #else
3049     simde__m512_private
3050       r_,
3051       a_ = simde__m512_to_private(a);
3052 
3053     SIMDE_VECTORIZE
3054     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
3055       r_.f32[i] =  simde_math_erfcf(a_.f32[i]);
3056     }
3057 
3058     return simde__m512_from_private(r_);
3059   #endif
3060 }
3061 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3062   #undef _mm512_erfc_ps
3063   #define _mm512_erfc_ps(a) simde_mm512_erfc_ps(a)
3064 #endif
3065 
3066 SIMDE_FUNCTION_ATTRIBUTES
3067 simde__m512d
3068 simde_mm512_erfc_pd (simde__m512d a) {
3069   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
3070     return _mm512_erfc_pd(a);
3071   #else
3072     simde__m512d_private
3073       r_,
3074       a_ = simde__m512d_to_private(a);
3075 
3076     SIMDE_VECTORIZE
3077     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
3078       r_.f64[i] =  simde_math_erfc(a_.f64[i]);
3079     }
3080 
3081     return simde__m512d_from_private(r_);
3082   #endif
3083 }
3084 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3085   #undef _mm512_erfc_pd
3086   #define _mm512_erfc_pd(a) simde_mm512_erfc_pd(a)
3087 #endif
3088 
3089 SIMDE_FUNCTION_ATTRIBUTES
3090 simde__m512
3091 simde_mm512_mask_erfc_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
3092   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
3093     return _mm512_mask_erfc_ps(src, k, a);
3094   #else
3095     return simde_mm512_mask_mov_ps(src, k, simde_mm512_erfc_ps(a));
3096   #endif
3097 }
3098 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3099   #undef _mm512_mask_erfc_ps
3100   #define _mm512_mask_erfc_ps(src, k, a) simde_mm512_mask_erfc_ps(src, k, a)
3101 #endif
3102 
3103 SIMDE_FUNCTION_ATTRIBUTES
3104 simde__m512d
3105 simde_mm512_mask_erfc_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
3106   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
3107     return _mm512_mask_erfc_pd(src, k, a);
3108   #else
3109     return simde_mm512_mask_mov_pd(src, k, simde_mm512_erfc_pd(a));
3110   #endif
3111 }
3112 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3113   #undef _mm512_mask_erfc_pd
3114   #define _mm512_mask_erfc_pd(src, k, a) simde_mm512_mask_erfc_pd(src, k, a)
3115 #endif
3116 
3117 SIMDE_FUNCTION_ATTRIBUTES
3118 simde__m128
3119 simde_mm_exp_ps (simde__m128 a) {
3120   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
3121     return _mm_exp_ps(a);
3122   #else
3123     simde__m128_private
3124       r_,
3125       a_ = simde__m128_to_private(a);
3126 
3127     SIMDE_VECTORIZE
3128     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
3129       r_.f32[i] =  simde_math_expf(a_.f32[i]);
3130     }
3131 
3132     return simde__m128_from_private(r_);
3133   #endif
3134 }
3135 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3136   #undef _mm_exp_ps
3137   #define _mm_exp_ps(a) simde_mm_exp_ps(a)
3138 #endif
3139 
3140 SIMDE_FUNCTION_ATTRIBUTES
3141 simde__m128d
3142 simde_mm_exp_pd (simde__m128d a) {
3143   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
3144     return _mm_exp_pd(a);
3145   #else
3146     simde__m128d_private
3147       r_,
3148       a_ = simde__m128d_to_private(a);
3149 
3150     SIMDE_VECTORIZE
3151     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
3152       r_.f64[i] =  simde_math_exp(a_.f64[i]);
3153     }
3154 
3155     return simde__m128d_from_private(r_);
3156   #endif
3157 }
3158 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3159   #undef _mm_exp_pd
3160   #define _mm_exp_pd(a) simde_mm_exp_pd(a)
3161 #endif
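/* The fallback inherits IEEE saturation from the C library: expf()
 * overflows to +inf for inputs above ~88.72f (log of FLT_MAX) and
 * underflows to 0.0f somewhere below about -103.0f; the double variants
 * overflow past ~709.78. */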
3162 
3163 SIMDE_FUNCTION_ATTRIBUTES
3164 simde__m256
3165 simde_mm256_exp_ps (simde__m256 a) {
3166   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
3167     return _mm256_exp_ps(a);
3168   #else
3169     simde__m256_private
3170       r_,
3171       a_ = simde__m256_to_private(a);
3172 
3173     SIMDE_VECTORIZE
3174     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
3175       r_.f32[i] =  simde_math_expf(a_.f32[i]);
3176     }
3177 
3178     return simde__m256_from_private(r_);
3179   #endif
3180 }
3181 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3182   #undef _mm256_exp_ps
3183   #define _mm256_exp_ps(a) simde_mm256_exp_ps(a)
3184 #endif
3185
3187 SIMDE_FUNCTION_ATTRIBUTES
3188 simde__m256d
3189 simde_mm256_exp_pd (simde__m256d a) {
3190   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
3191     return _mm256_exp_pd(a);
3192   #else
3193     simde__m256d_private
3194       r_,
3195       a_ = simde__m256d_to_private(a);
3196 
3197     SIMDE_VECTORIZE
3198     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
3199       r_.f64[i] =  simde_math_exp(a_.f64[i]);
3200     }
3201 
3202     return simde__m256d_from_private(r_);
3203   #endif
3204 }
3205 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3206   #undef _mm256_exp_pd
3207   #define _mm256_exp_pd(a) simde_mm256_exp_pd(a)
3208 #endif
3209 
3210 SIMDE_FUNCTION_ATTRIBUTES
3211 simde__m512
3212 simde_mm512_exp_ps (simde__m512 a) {
3213   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
3214     return _mm512_exp_ps(a);
3215   #else
3216     simde__m512_private
3217       r_,
3218       a_ = simde__m512_to_private(a);
3219 
3220     SIMDE_VECTORIZE
3221     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
3222       r_.f32[i] =  simde_math_expf(a_.f32[i]);
3223     }
3224 
3225     return simde__m512_from_private(r_);
3226   #endif
3227 }
3228 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3229   #undef _mm512_exp_ps
3230   #define _mm512_exp_ps(a) simde_mm512_exp_ps(a)
3231 #endif
3232 
3233 SIMDE_FUNCTION_ATTRIBUTES
3234 simde__m512d
3235 simde_mm512_exp_pd (simde__m512d a) {
3236   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
3237     return _mm512_exp_pd(a);
3238   #else
3239     simde__m512d_private
3240       r_,
3241       a_ = simde__m512d_to_private(a);
3242 
3243     SIMDE_VECTORIZE
3244     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
3245       r_.f64[i] =  simde_math_exp(a_.f64[i]);
3246     }
3247 
3248     return simde__m512d_from_private(r_);
3249   #endif
3250 }
3251 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3252   #undef _mm512_exp_pd
3253   #define _mm512_exp_pd(a) simde_mm512_exp_pd(a)
3254 #endif
3255 
3256 SIMDE_FUNCTION_ATTRIBUTES
3257 simde__m512
3258 simde_mm512_mask_exp_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
3259   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
3260     return _mm512_mask_exp_ps(src, k, a);
3261   #else
3262     return simde_mm512_mask_mov_ps(src, k, simde_mm512_exp_ps(a));
3263   #endif
3264 }
3265 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3266   #undef _mm512_mask_exp_ps
3267   #define _mm512_mask_exp_ps(src, k, a) simde_mm512_mask_exp_ps(src, k, a)
3268 #endif
3269 
3270 SIMDE_FUNCTION_ATTRIBUTES
3271 simde__m512d
3272 simde_mm512_mask_exp_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
3273   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
3274     return _mm512_mask_exp_pd(src, k, a);
3275   #else
3276     return simde_mm512_mask_mov_pd(src, k, simde_mm512_exp_pd(a));
3277   #endif
3278 }
3279 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3280   #undef _mm512_mask_exp_pd
3281   #define _mm512_mask_exp_pd(src, k, a) simde_mm512_mask_exp_pd(src, k, a)
3282 #endif
3283 
3284 SIMDE_FUNCTION_ATTRIBUTES
3285 simde__m128
3286 simde_mm_expm1_ps (simde__m128 a) {
3287   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
3288     return _mm_expm1_ps(a);
3289   #else
3290     simde__m128_private
3291       r_,
3292       a_ = simde__m128_to_private(a);
3293 
3294     SIMDE_VECTORIZE
3295     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
3296       r_.f32[i] =  simde_math_expm1f(a_.f32[i]);
3297     }
3298 
3299     return simde__m128_from_private(r_);
3300   #endif
3301 }
3302 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3303   #undef _mm_expm1_ps
3304   #define _mm_expm1_ps(a) simde_mm_expm1_ps(a)
3305 #endif
3306 
3307 SIMDE_FUNCTION_ATTRIBUTES
3308 simde__m128d
3309 simde_mm_expm1_pd (simde__m128d a) {
3310   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
3311     return _mm_expm1_pd(a);
3312   #else
3313     simde__m128d_private
3314       r_,
3315       a_ = simde__m128d_to_private(a);
3316 
3317     SIMDE_VECTORIZE
3318     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
3319       r_.f64[i] =  simde_math_expm1(a_.f64[i]);
3320     }
3321 
3322     return simde__m128d_from_private(r_);
3323   #endif
3324 }
3325 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3326   #undef _mm_expm1_pd
3327   #define _mm_expm1_pd(a) simde_mm_expm1_pd(a)
3328 #endif
3329 
3330 SIMDE_FUNCTION_ATTRIBUTES
3331 simde__m256
simde_mm256_expm1_ps(simde__m256 a)3332 simde_mm256_expm1_ps (simde__m256 a) {
3333   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
3334     return _mm256_expm1_ps(a);
3335   #else
3336     simde__m256_private
3337       r_,
3338       a_ = simde__m256_to_private(a);
3339 
3340     SIMDE_VECTORIZE
3341     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
3342       r_.f32[i] =  simde_math_expm1f(a_.f32[i]);
3343     }
3344 
3345     return simde__m256_from_private(r_);
3346   #endif
3347 }
3348 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3349   #undef _mm256_expm1_ps
3350   #define _mm256_expm1_ps(a) simde_mm256_expm1_ps(a)
3351 #endif
3352 
3353 SIMDE_FUNCTION_ATTRIBUTES
3354 simde__m256d
simde_mm256_expm1_pd(simde__m256d a)3355 simde_mm256_expm1_pd (simde__m256d a) {
3356   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
3357     return _mm256_expm1_pd(a);
3358   #else
3359     simde__m256d_private
3360       r_,
3361       a_ = simde__m256d_to_private(a);
3362 
3363     SIMDE_VECTORIZE
3364     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
3365       r_.f64[i] =  simde_math_expm1(a_.f64[i]);
3366     }
3367 
3368     return simde__m256d_from_private(r_);
3369   #endif
3370 }
3371 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3372   #undef _mm256_expm1_pd
3373   #define _mm256_expm1_pd(a) simde_mm256_expm1_pd(a)
3374 #endif
3375 
3376 SIMDE_FUNCTION_ATTRIBUTES
3377 simde__m512
simde_mm512_expm1_ps(simde__m512 a)3378 simde_mm512_expm1_ps (simde__m512 a) {
3379   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
3380     return _mm512_expm1_ps(a);
3381   #else
3382     simde__m512_private
3383       r_,
3384       a_ = simde__m512_to_private(a);
3385 
3386     SIMDE_VECTORIZE
3387     for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
3388       r_.f32[i] =  simde_math_expm1f(a_.f32[i]);
3389     }
3390 
3391     return simde__m512_from_private(r_);
3392   #endif
3393 }
3394 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3395   #undef _mm512_expm1_ps
3396   #define _mm512_expm1_ps(a) simde_mm512_expm1_ps(a)
3397 #endif
3398 
3399 SIMDE_FUNCTION_ATTRIBUTES
3400 simde__m512d
simde_mm512_expm1_pd(simde__m512d a)3401 simde_mm512_expm1_pd (simde__m512d a) {
3402   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
3403     return _mm512_expm1_pd(a);
3404   #else
3405     simde__m512d_private
3406       r_,
3407       a_ = simde__m512d_to_private(a);
3408 
3409     SIMDE_VECTORIZE
3410     for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
3411       r_.f64[i] =  simde_math_expm1(a_.f64[i]);
3412     }
3413 
3414     return simde__m512d_from_private(r_);
3415   #endif
3416 }
3417 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3418   #undef _mm512_expm1_pd
3419   #define _mm512_expm1_pd(a) simde_mm512_expm1_pd(a)
3420 #endif
3421 
3422 SIMDE_FUNCTION_ATTRIBUTES
3423 simde__m512
simde_mm512_mask_expm1_ps(simde__m512 src,simde__mmask16 k,simde__m512 a)3424 simde_mm512_mask_expm1_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
3425   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
3426     return _mm512_mask_expm1_ps(src, k, a);
3427   #else
3428     return simde_mm512_mask_mov_ps(src, k, simde_mm512_expm1_ps(a));
3429   #endif
3430 }
3431 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3432   #undef _mm512_mask_expm1_ps
3433   #define _mm512_mask_expm1_ps(src, k, a) simde_mm512_mask_expm1_ps(src, k, a)
3434 #endif
3435 
3436 SIMDE_FUNCTION_ATTRIBUTES
3437 simde__m512d
simde_mm512_mask_expm1_pd(simde__m512d src,simde__mmask8 k,simde__m512d a)3438 simde_mm512_mask_expm1_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
3439   #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
3440     return _mm512_mask_expm1_pd(src, k, a);
3441   #else
3442     return simde_mm512_mask_mov_pd(src, k, simde_mm512_expm1_pd(a));
3443   #endif
3444 }
3445 #if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
3446   #undef _mm512_mask_expm1_pd
3447   #define _mm512_mask_expm1_pd(src, k, a) simde_mm512_mask_expm1_pd(src, k, a)
3448 #endif
3449 
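/* Why a separate expm1 (sketch, values approximate): for |x| near zero,
 * computing exp(x) - 1.0 cancels catastrophically, while expm1 stays
 * accurate because libm evaluates the difference directly.
 *
 *   simde__m128 tiny = simde_mm_set1_ps(SIMDE_FLOAT32_C(1e-8));
 *   simde__m128 good = simde_mm_expm1_ps(tiny);   // ~1e-8f, full precision
 *   // simde_mm_sub_ps(simde_mm_exp_ps(tiny), simde_mm_set1_ps(SIMDE_FLOAT32_C(1.0)))
 *   // rounds to 0.0f in single precision.
 */
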
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_exp2_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_exp2_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_exp2f(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_exp2_ps
  #define _mm_exp2_ps(a) simde_mm_exp2_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_exp2_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_exp2_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_exp2(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_exp2_pd
  #define _mm_exp2_pd(a) simde_mm_exp2_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_exp2_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_exp2_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_exp2f(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_exp2_ps
  #define _mm256_exp2_ps(a) simde_mm256_exp2_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_exp2_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_exp2_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_exp2(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_exp2_pd
  #define _mm256_exp2_pd(a) simde_mm256_exp2_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_exp2_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_exp2_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_exp2f(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_exp2_ps
  #define _mm512_exp2_ps(a) simde_mm512_exp2_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_exp2_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_exp2_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_exp2(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_exp2_pd
  #define _mm512_exp2_pd(a) simde_mm512_exp2_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_exp2_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_exp2_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_exp2_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_exp2_ps
  #define _mm512_mask_exp2_ps(src, k, a) simde_mm512_mask_exp2_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_exp2_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_exp2_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_exp2_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_exp2_pd
  #define _mm512_mask_exp2_pd(src, k, a) simde_mm512_mask_exp2_pd(src, k, a)
#endif

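/* Usage sketch (illustrative only): exp2 computes 2^x per lane, exact for
 * integral inputs. _mm_set_ps takes elements from high lane to low lane.
 *
 *   simde__m128 x = simde_mm_set_ps(SIMDE_FLOAT32_C(3.0), SIMDE_FLOAT32_C(2.0),
 *                                   SIMDE_FLOAT32_C(1.0), SIMDE_FLOAT32_C(0.0));
 *   simde__m128 y = simde_mm_exp2_ps(x);  // lanes 0..3 = 1.0f, 2.0f, 4.0f, 8.0f
 */
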
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_exp10_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_exp10_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_exp10f(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_exp10_ps
  #define _mm_exp10_ps(a) simde_mm_exp10_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_exp10_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_exp10_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_exp10(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_exp10_pd
  #define _mm_exp10_pd(a) simde_mm_exp10_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_exp10_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_exp10_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_exp10f(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_exp10_ps
  #define _mm256_exp10_ps(a) simde_mm256_exp10_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_exp10_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_exp10_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_exp10(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_exp10_pd
  #define _mm256_exp10_pd(a) simde_mm256_exp10_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_exp10_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_exp10_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_exp10f(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_exp10_ps
  #define _mm512_exp10_ps(a) simde_mm512_exp10_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_exp10_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_exp10_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_exp10(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_exp10_pd
  #define _mm512_exp10_pd(a) simde_mm512_exp10_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_exp10_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_exp10_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_exp10_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_exp10_ps
  #define _mm512_mask_exp10_ps(src, k, a) simde_mm512_mask_exp10_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_exp10_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_exp10_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_exp10_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_exp10_pd
  #define _mm512_mask_exp10_pd(src, k, a) simde_mm512_mask_exp10_pd(src, k, a)
#endif

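/* Usage sketch (illustrative only): exp10 computes 10^x per lane.
 *
 *   simde__m128d x = simde_mm_set1_pd(SIMDE_FLOAT64_C(3.0));
 *   simde__m128d y = simde_mm_exp10_pd(x);  // both lanes 1000.0
 */
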
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_idivrem_epi32 (simde__m128i* mem_addr, simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_idivrem_epi32(HEDLEY_REINTERPRET_CAST(__m128i*, mem_addr), a, b);
  #else
    simde__m128i r;

    r = simde_mm_div_epi32(a, b);
    *mem_addr = simde_mm_sub_epi32(a, simde_mm_mullo_epi32(r, b));

    return r;
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_idivrem_epi32
  #define _mm_idivrem_epi32(mem_addr, a, b) simde_mm_idivrem_epi32((mem_addr), (a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_idivrem_epi32 (simde__m256i* mem_addr, simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_idivrem_epi32(HEDLEY_REINTERPRET_CAST(__m256i*, mem_addr), a, b);
  #else
    simde__m256i r;

    r = simde_mm256_div_epi32(a, b);
    *mem_addr = simde_mm256_sub_epi32(a, simde_mm256_mullo_epi32(r, b));

    return r;
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_idivrem_epi32
  #define _mm256_idivrem_epi32(mem_addr, a, b) simde_mm256_idivrem_epi32((mem_addr), (a), (b))
#endif

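/* Usage sketch (illustrative only): idivrem returns the truncated quotient
 * and stores the remainder, so a == q * b + r holds lane-wise with C's
 * truncate-toward-zero semantics (the remainder takes the sign of a).
 *
 *   simde__m128i a = simde_mm_set1_epi32(7);
 *   simde__m128i b = simde_mm_set1_epi32(-2);
 *   simde__m128i rem;
 *   simde__m128i q = simde_mm_idivrem_epi32(&rem, a, b);  // q = -3, rem = 1 per lane
 */
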
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_hypot_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_hypot_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_hypotf(a_.f32[i], b_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_hypot_ps
  #define _mm_hypot_ps(a, b) simde_mm_hypot_ps(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_hypot_pd (simde__m128d a, simde__m128d b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_hypot_pd(a, b);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a),
      b_ = simde__m128d_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_hypot(a_.f64[i], b_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_hypot_pd
  #define _mm_hypot_pd(a, b) simde_mm_hypot_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_hypot_ps (simde__m256 a, simde__m256 b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_hypot_ps(a, b);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a),
      b_ = simde__m256_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_hypotf(a_.f32[i], b_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_hypot_ps
  #define _mm256_hypot_ps(a, b) simde_mm256_hypot_ps(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_hypot_pd (simde__m256d a, simde__m256d b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_hypot_pd(a, b);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a),
      b_ = simde__m256d_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_hypot(a_.f64[i], b_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_hypot_pd
  #define _mm256_hypot_pd(a, b) simde_mm256_hypot_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_hypot_ps (simde__m512 a, simde__m512 b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_hypot_ps(a, b);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a),
      b_ = simde__m512_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_hypotf(a_.f32[i], b_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_hypot_ps
  #define _mm512_hypot_ps(a, b) simde_mm512_hypot_ps(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_hypot_pd (simde__m512d a, simde__m512d b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_hypot_pd(a, b);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a),
      b_ = simde__m512d_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_hypot(a_.f64[i], b_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_hypot_pd
  #define _mm512_hypot_pd(a, b) simde_mm512_hypot_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_hypot_ps(simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_hypot_ps(src, k, a, b);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_hypot_ps(a, b));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_hypot_ps
  #define _mm512_mask_hypot_ps(src, k, a, b) simde_mm512_mask_hypot_ps(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_hypot_pd(simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_hypot_pd(src, k, a, b);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_hypot_pd(a, b));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_hypot_pd
  #define _mm512_mask_hypot_pd(src, k, a, b) simde_mm512_mask_hypot_pd(src, k, a, b)
#endif

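/* Usage sketch (illustrative only): hypot computes sqrt(a*a + b*b) per lane;
 * the fallback defers to libm's hypot family, which avoids the intermediate
 * overflow/underflow a naive square-and-add would suffer.
 *
 *   simde__m128 a = simde_mm_set1_ps(SIMDE_FLOAT32_C(3.0));
 *   simde__m128 b = simde_mm_set1_ps(SIMDE_FLOAT32_C(4.0));
 *   simde__m128 h = simde_mm_hypot_ps(a, b);  // every lane 5.0f
 */
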
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_invcbrt_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_invcbrt_ps(a);
  #else
    return simde_mm_rcp_ps(simde_mm_cbrt_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_invcbrt_ps
  #define _mm_invcbrt_ps(a) simde_mm_invcbrt_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_invcbrt_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_invcbrt_pd(a);
  #else
    return simde_mm_div_pd(simde_mm_set1_pd(SIMDE_FLOAT64_C(1.0)), simde_mm_cbrt_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_invcbrt_pd
  #define _mm_invcbrt_pd(a) simde_mm_invcbrt_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_invcbrt_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_invcbrt_ps(a);
  #else
    return simde_mm256_rcp_ps(simde_mm256_cbrt_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_invcbrt_ps
  #define _mm256_invcbrt_ps(a) simde_mm256_invcbrt_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_invcbrt_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_invcbrt_pd(a);
  #else
    return simde_mm256_div_pd(simde_mm256_set1_pd(SIMDE_FLOAT64_C(1.0)), simde_mm256_cbrt_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_invcbrt_pd
  #define _mm256_invcbrt_pd(a) simde_mm256_invcbrt_pd(a)
#endif

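/* Precision note (an observation about the fallbacks above, not a spec
 * guarantee): the _ps variants go through simde_mm_rcp_ps /
 * simde_mm256_rcp_ps, an approximate reciprocal good to roughly 12 bits,
 * while the _pd variants divide exactly by the cube root. Sketch:
 *
 *   simde__m128 x = simde_mm_set1_ps(SIMDE_FLOAT32_C(8.0));
 *   simde__m128 y = simde_mm_invcbrt_ps(x);  // ~0.5f, to about 12 bits
 */
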
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_invsqrt_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_invsqrt_ps(a);
  #else
    return simde_mm_rcp_ps(simde_mm_sqrt_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_invsqrt_ps
  #define _mm_invsqrt_ps(a) simde_mm_invsqrt_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_invsqrt_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_invsqrt_pd(a);
  #else
    return simde_mm_div_pd(simde_mm_set1_pd(SIMDE_FLOAT64_C(1.0)), simde_mm_sqrt_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_invsqrt_pd
  #define _mm_invsqrt_pd(a) simde_mm_invsqrt_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_invsqrt_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_invsqrt_ps(a);
  #else
    return simde_mm256_rcp_ps(simde_mm256_sqrt_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_invsqrt_ps
  #define _mm256_invsqrt_ps(a) simde_mm256_invsqrt_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_invsqrt_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_invsqrt_pd(a);
  #else
    return simde_mm256_div_pd(simde_mm256_set1_pd(SIMDE_FLOAT64_C(1.0)), simde_mm256_sqrt_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_invsqrt_pd
  #define _mm256_invsqrt_pd(a) simde_mm256_invsqrt_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_invsqrt_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_invsqrt_ps(a);
  #else
    return simde_mm512_div_ps(simde_mm512_set1_ps(SIMDE_FLOAT32_C(1.0)), simde_mm512_sqrt_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_invsqrt_ps
  #define _mm512_invsqrt_ps(a) simde_mm512_invsqrt_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_invsqrt_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_invsqrt_pd(a);
  #else
    return simde_mm512_div_pd(simde_mm512_set1_pd(SIMDE_FLOAT64_C(1.0)), simde_mm512_sqrt_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_invsqrt_pd
  #define _mm512_invsqrt_pd(a) simde_mm512_invsqrt_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_invsqrt_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_invsqrt_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_invsqrt_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_invsqrt_ps
  #define _mm512_mask_invsqrt_ps(src, k, a) simde_mm512_mask_invsqrt_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_invsqrt_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_invsqrt_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_invsqrt_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_invsqrt_pd
  #define _mm512_mask_invsqrt_pd(src, k, a) simde_mm512_mask_invsqrt_pd(src, k, a)
#endif

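/* Usage sketch (illustrative only): invsqrt computes 1/sqrt(a) per lane.
 * As with invcbrt, the 128/256-bit _ps fallbacks use the fast reciprocal
 * approximation while the _pd and 512-bit fallbacks divide exactly.
 *
 *   simde__m128d x = simde_mm_set1_pd(SIMDE_FLOAT64_C(4.0));
 *   simde__m128d y = simde_mm_invsqrt_pd(x);  // both lanes 0.5
 */
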
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_log_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_log_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_logf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_log_ps
  #define _mm_log_ps(a) simde_mm_log_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_log_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_log_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_log(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_log_pd
  #define _mm_log_pd(a) simde_mm_log_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_log_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_log_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_logf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_log_ps
  #define _mm256_log_ps(a) simde_mm256_log_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_log_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_log_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_log(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_log_pd
  #define _mm256_log_pd(a) simde_mm256_log_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_log_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_log_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_logf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_log_ps
  #define _mm512_log_ps(a) simde_mm512_log_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_log_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_log_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_log(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_log_pd
  #define _mm512_log_pd(a) simde_mm512_log_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_log_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_log_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_log_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_log_ps
  #define _mm512_mask_log_ps(src, k, a) simde_mm512_mask_log_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_log_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_log_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_log_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_log_pd
  #define _mm512_mask_log_pd(src, k, a) simde_mm512_mask_log_pd(src, k, a)
#endif

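/* Usage sketch (illustrative only): natural logarithm per lane; non-positive
 * inputs follow libm conventions (log(0) -> -inf, log(x < 0) -> NaN).
 *
 *   simde__m512d x = simde_mm512_set1_pd(SIMDE_FLOAT64_C(2.718281828459045));
 *   simde__m512d y = simde_mm512_log_pd(x);  // every lane ~1.0
 */
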
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_logb_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_logb_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_logbf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_logb_ps
  #define _mm_logb_ps(a) simde_mm_logb_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_logb_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_logb_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_logb(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_logb_pd
  #define _mm_logb_pd(a) simde_mm_logb_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_logb_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_logb_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_logbf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_logb_ps
  #define _mm256_logb_ps(a) simde_mm256_logb_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_logb_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_logb_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_logb(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_logb_pd
  #define _mm256_logb_pd(a) simde_mm256_logb_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_logb_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_logb_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_logbf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_logb_ps
  #define _mm512_logb_ps(a) simde_mm512_logb_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_logb_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_logb_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_logb(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_logb_pd
  #define _mm512_logb_pd(a) simde_mm512_logb_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_logb_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_logb_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_logb_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_logb_ps
  #define _mm512_mask_logb_ps(src, k, a) simde_mm512_mask_logb_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_logb_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_logb_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_logb_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_logb_pd
  #define _mm512_mask_logb_pd(src, k, a) simde_mm512_mask_logb_pd(src, k, a)
#endif

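/* Usage sketch (illustrative only): logb extracts the unbiased binary
 * exponent as a floating-point value, i.e. floor(log2|x|) for normal inputs.
 *
 *   simde__m128d x = simde_mm_set_pd(SIMDE_FLOAT64_C(10.0), SIMDE_FLOAT64_C(0.25));
 *   simde__m128d y = simde_mm_logb_pd(x);  // lane 0 = -2.0, lane 1 = 3.0
 */
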
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_log2_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_log2_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_log2f(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_log2_ps
  #define _mm_log2_ps(a) simde_mm_log2_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_log2_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_log2_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_log2(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_log2_pd
  #define _mm_log2_pd(a) simde_mm_log2_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_log2_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_log2_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_log2f(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_log2_ps
  #define _mm256_log2_ps(a) simde_mm256_log2_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_log1p_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_log1p_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_log1pf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_log1p_ps
  #define _mm_log1p_ps(a) simde_mm_log1p_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_log1p_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_log1p_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_log1p(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_log1p_pd
  #define _mm_log1p_pd(a) simde_mm_log1p_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_log1p_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_log1p_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_log1pf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_log1p_ps
  #define _mm256_log1p_ps(a) simde_mm256_log1p_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_log1p_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_log1p_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_log1p(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_log1p_pd
  #define _mm256_log1p_pd(a) simde_mm256_log1p_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_log1p_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_log1p_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_log1pf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_log1p_ps
  #define _mm512_log1p_ps(a) simde_mm512_log1p_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_log1p_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_log1p_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_log1p(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_log1p_pd
  #define _mm512_log1p_pd(a) simde_mm512_log1p_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_log1p_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_log1p_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_log1p_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_log1p_ps
  #define _mm512_mask_log1p_ps(src, k, a) simde_mm512_mask_log1p_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_log1p_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_log1p_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_log1p_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_log1p_pd
  #define _mm512_mask_log1p_pd(src, k, a) simde_mm512_mask_log1p_pd(src, k, a)
#endif

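/* Why log1p (sketch, values approximate): log1p(x) computes log(1 + x)
 * without first forming 1 + x, which for tiny x would round away all of
 * x's contribution before the logarithm is taken.
 *
 *   simde__m128d t = simde_mm_set1_pd(SIMDE_FLOAT64_C(1e-15));
 *   simde__m128d y = simde_mm_log1p_pd(t);  // ~1e-15; log(1.0 + 1e-15) loses most digits
 */
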
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_log2_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_log2_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_log2(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_log2_pd
  #define _mm256_log2_pd(a) simde_mm256_log2_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_log2_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_log2_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_log2f(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_log2_ps
  #define _mm512_log2_ps(a) simde_mm512_log2_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_log2_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_log2_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_log2(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_log2_pd
  #define _mm512_log2_pd(a) simde_mm512_log2_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_log2_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_log2_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_log2_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_log2_ps
  #define _mm512_mask_log2_ps(src, k, a) simde_mm512_mask_log2_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_log2_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_log2_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_log2_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_log2_pd
  #define _mm512_mask_log2_pd(src, k, a) simde_mm512_mask_log2_pd(src, k, a)
#endif

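/* Usage sketch (illustrative only): base-2 logarithm per lane, exact for
 * powers of two.
 *
 *   simde__m512 x = simde_mm512_set1_ps(SIMDE_FLOAT32_C(1024.0));
 *   simde__m512 y = simde_mm512_log2_ps(x);  // every lane 10.0f
 */
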
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_log10_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_log10_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_log10f(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_log10_ps
  #define _mm_log10_ps(a) simde_mm_log10_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_log10_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_log10_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_log10(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_log10_pd
  #define _mm_log10_pd(a) simde_mm_log10_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_log10_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_log10_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_log10f(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_log10_ps
  #define _mm256_log10_ps(a) simde_mm256_log10_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_log10_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_log10_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_log10(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_log10_pd
  #define _mm256_log10_pd(a) simde_mm256_log10_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_log10_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_log10_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_log10f(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_log10_ps
  #define _mm512_log10_ps(a) simde_mm512_log10_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_log10_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_log10_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_log10(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_log10_pd
  #define _mm512_log10_pd(a) simde_mm512_log10_pd(a)
#endif

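/* Masked variants: compute the full-width result, then blend it with src
 * under the writemask k via simde_mm512_mask_mov_ps/pd. Lanes whose mask
 * bit is 0 keep the corresponding element of src; lanes whose bit is 1
 * take the freshly computed value. */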
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_log10_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_log10_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_log10_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_log10_ps
  #define _mm512_mask_log10_ps(src, k, a) simde_mm512_mask_log10_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_log10_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_log10_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_log10_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_log10_pd
  #define _mm512_mask_log10_pd(src, k, a) simde_mm512_mask_log10_pd(src, k, a)
#endif

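/* Element-wise power: r[i] = a[i] raised to b[i], via simde_math_powf()/
 * simde_math_pow() on the fallback path, so special cases (negative bases,
 * zero to the zero, infinities) follow C's pow() rules.
 *
 * Illustrative usage (not part of this file; values arbitrary):
 *
 *   simde__m128 base = simde_mm_set1_ps(2.0f);
 *   simde__m128 expn = simde_mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
 *   simde__m128 r    = simde_mm_pow_ps(base, expn);  // lanes ~ {2, 4, 8, 16}
 */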
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_pow_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_pow_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_powf(a_.f32[i], b_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_pow_ps
  #define _mm_pow_ps(a, b) simde_mm_pow_ps(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_pow_pd (simde__m128d a, simde__m128d b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_pow_pd(a, b);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a),
      b_ = simde__m128d_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_pow(a_.f64[i], b_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_pow_pd
  #define _mm_pow_pd(a, b) simde_mm_pow_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_pow_ps (simde__m256 a, simde__m256 b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_pow_ps(a, b);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a),
      b_ = simde__m256_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_powf(a_.f32[i], b_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_pow_ps
  #define _mm256_pow_ps(a, b) simde_mm256_pow_ps(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_pow_pd (simde__m256d a, simde__m256d b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_pow_pd(a, b);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a),
      b_ = simde__m256d_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_pow(a_.f64[i], b_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_pow_pd
  #define _mm256_pow_pd(a, b) simde_mm256_pow_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_pow_ps (simde__m512 a, simde__m512 b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_pow_ps(a, b);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a),
      b_ = simde__m512_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_powf(a_.f32[i], b_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_pow_ps
  #define _mm512_pow_ps(a, b) simde_mm512_pow_ps(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_pow_pd (simde__m512d a, simde__m512d b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_pow_pd(a, b);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a),
      b_ = simde__m512d_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_pow(a_.f64[i], b_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_pow_pd
  #define _mm512_pow_pd(a, b) simde_mm512_pow_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_pow_ps(simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_pow_ps(src, k, a, b);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_pow_ps(a, b));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_pow_ps
  #define _mm512_mask_pow_ps(src, k, a, b) simde_mm512_mask_pow_ps(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_pow_pd(simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_pow_pd(src, k, a, b);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_pow_pd(a, b));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_pow_pd
  #define _mm512_mask_pow_pd(src, k, a, b) simde_mm512_mask_pow_pd(src, k, a, b)
#endif

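/* Integer remainder, element-wise. Semantics match C's % operator:
 * truncated division, so the result takes the sign of the dividend, and a
 * zero divisor (or INT_MIN % -1 for signed types) is undefined behaviour,
 * just as it is for the scalar operator. Where the compiler provides GNU
 * vector extensions (SIMDE_VECTOR_SUBSCRIPT_OPS) the remainder is computed
 * with a single vector % expression; otherwise a per-lane loop is used. */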
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_rem_epi8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_rem_epi8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i8 = a_.i8 % b_.i8;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
        r_.i8[i] = a_.i8[i] % b_.i8[i];
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_rem_epi8
  #define _mm_rem_epi8(a, b) simde_mm_rem_epi8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_rem_epi16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_rem_epi16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i16 = a_.i16 % b_.i16;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
        r_.i16[i] = a_.i16[i] % b_.i16[i];
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_rem_epi16
  #define _mm_rem_epi16(a, b) simde_mm_rem_epi16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_rem_epi32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_rem_epi32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = a_.i32 % b_.i32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = a_.i32[i] % b_.i32[i];
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
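/* SVML also spells the signed 32-bit remainder _mm_irem_epi32 (and the
 * unsigned one _mm_urem_epi32, below); these defines simply forward the
 * alternate names to the same implementation. */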
#define simde_mm_irem_epi32(a, b) simde_mm_rem_epi32(a, b)
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_rem_epi32
  #define _mm_rem_epi32(a, b) simde_mm_rem_epi32(a, b)
  #undef _mm_irem_epi32
  #define _mm_irem_epi32(a, b) simde_mm_rem_epi32(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_rem_epi64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_rem_epi64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i64 = a_.i64 % b_.i64;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        r_.i64[i] = a_.i64[i] % b_.i64[i];
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_rem_epi64
  #define _mm_rem_epi64(a, b) simde_mm_rem_epi64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_rem_epu8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_rem_epu8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u8 = a_.u8 % b_.u8;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
        r_.u8[i] = a_.u8[i] % b_.u8[i];
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_rem_epu8
  #define _mm_rem_epu8(a, b) simde_mm_rem_epu8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_rem_epu16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_rem_epu16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u16 = a_.u16 % b_.u16;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
        r_.u16[i] = a_.u16[i] % b_.u16[i];
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_rem_epu16
  #define _mm_rem_epu16(a, b) simde_mm_rem_epu16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_rem_epu32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_rem_epu32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u32 = a_.u32 % b_.u32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
        r_.u32[i] = a_.u32[i] % b_.u32[i];
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#define simde_mm_urem_epi32(a, b) simde_mm_rem_epu32(a, b)
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_rem_epu32
  #define _mm_rem_epu32(a, b) simde_mm_rem_epu32(a, b)
  #undef _mm_urem_epi32
  #define _mm_urem_epi32(a, b) simde_mm_rem_epu32(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_rem_epu64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_rem_epu64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u64 = a_.u64 % b_.u64;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
        r_.u64[i] = a_.u64[i] % b_.u64[i];
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_rem_epu64
  #define _mm_rem_epu64(a, b) simde_mm_rem_epu64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_rem_epi8 (simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_rem_epi8(a, b);
  #else
    simde__m256i_private
      r_,
      a_ = simde__m256i_to_private(a),
      b_ = simde__m256i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i8 = a_.i8 % b_.i8;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
        r_.i8[i] = a_.i8[i] % b_.i8[i];
      }
    #endif

    return simde__m256i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_rem_epi8
  #define _mm256_rem_epi8(a, b) simde_mm256_rem_epi8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_rem_epi16 (simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_rem_epi16(a, b);
  #else
    simde__m256i_private
      r_,
      a_ = simde__m256i_to_private(a),
      b_ = simde__m256i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i16 = a_.i16 % b_.i16;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
        r_.i16[i] = a_.i16[i] % b_.i16[i];
      }
    #endif

    return simde__m256i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_rem_epi16
  #define _mm256_rem_epi16(a, b) simde_mm256_rem_epi16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_rem_epi32 (simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_rem_epi32(a, b);
  #else
    simde__m256i_private
      r_,
      a_ = simde__m256i_to_private(a),
      b_ = simde__m256i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = a_.i32 % b_.i32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = a_.i32[i] % b_.i32[i];
      }
    #endif

    return simde__m256i_from_private(r_);
  #endif
}
#define simde_mm256_irem_epi32(a, b) simde_mm256_rem_epi32(a, b)
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_rem_epi32
  #define _mm256_rem_epi32(a, b) simde_mm256_rem_epi32(a, b)
  #undef _mm256_irem_epi32
  #define _mm256_irem_epi32(a, b) simde_mm256_rem_epi32(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_rem_epi64 (simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_rem_epi64(a, b);
  #else
    simde__m256i_private
      r_,
      a_ = simde__m256i_to_private(a),
      b_ = simde__m256i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i64 = a_.i64 % b_.i64;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        r_.i64[i] = a_.i64[i] % b_.i64[i];
      }
    #endif

    return simde__m256i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_rem_epi64
  #define _mm256_rem_epi64(a, b) simde_mm256_rem_epi64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_rem_epu8 (simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_rem_epu8(a, b);
  #else
    simde__m256i_private
      r_,
      a_ = simde__m256i_to_private(a),
      b_ = simde__m256i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u8 = a_.u8 % b_.u8;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
        r_.u8[i] = a_.u8[i] % b_.u8[i];
      }
    #endif

    return simde__m256i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_rem_epu8
  #define _mm256_rem_epu8(a, b) simde_mm256_rem_epu8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_rem_epu16 (simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_rem_epu16(a, b);
  #else
    simde__m256i_private
      r_,
      a_ = simde__m256i_to_private(a),
      b_ = simde__m256i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u16 = a_.u16 % b_.u16;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
        r_.u16[i] = a_.u16[i] % b_.u16[i];
      }
    #endif

    return simde__m256i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_rem_epu16
  #define _mm256_rem_epu16(a, b) simde_mm256_rem_epu16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_rem_epu32 (simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_rem_epu32(a, b);
  #else
    simde__m256i_private
      r_,
      a_ = simde__m256i_to_private(a),
      b_ = simde__m256i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u32 = a_.u32 % b_.u32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
        r_.u32[i] = a_.u32[i] % b_.u32[i];
      }
    #endif

    return simde__m256i_from_private(r_);
  #endif
}
#define simde_mm256_urem_epi32(a, b) simde_mm256_rem_epu32(a, b)
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_rem_epu32
  #define _mm256_rem_epu32(a, b) simde_mm256_rem_epu32(a, b)
  #undef _mm256_urem_epi32
  #define _mm256_urem_epi32(a, b) simde_mm256_rem_epu32(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_rem_epu64 (simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_rem_epu64(a, b);
  #else
    simde__m256i_private
      r_,
      a_ = simde__m256i_to_private(a),
      b_ = simde__m256i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u64 = a_.u64 % b_.u64;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
        r_.u64[i] = a_.u64[i] % b_.u64[i];
      }
    #endif

    return simde__m256i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_rem_epu64
  #define _mm256_rem_epu64(a, b) simde_mm256_rem_epu64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_rem_epi8 (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_rem_epi8(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i8 = a_.i8 % b_.i8;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
        r_.i8[i] = a_.i8[i] % b_.i8[i];
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_rem_epi8
  #define _mm512_rem_epi8(a, b) simde_mm512_rem_epi8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_rem_epi16 (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_rem_epi16(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i16 = a_.i16 % b_.i16;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
        r_.i16[i] = a_.i16[i] % b_.i16[i];
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_rem_epi16
  #define _mm512_rem_epi16(a, b) simde_mm512_rem_epi16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_rem_epi32 (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_rem_epi32(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = a_.i32 % b_.i32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = a_.i32[i] % b_.i32[i];
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_rem_epi32
  #define _mm512_rem_epi32(a, b) simde_mm512_rem_epi32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_rem_epi32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_rem_epi32(src, k, a, b);
  #else
    return simde_mm512_mask_mov_epi32(src, k, simde_mm512_rem_epi32(a, b));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_rem_epi32
  #define _mm512_mask_rem_epi32(src, k, a, b) simde_mm512_mask_rem_epi32(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_rem_epi64 (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_rem_epi64(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i64 = a_.i64 % b_.i64;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        r_.i64[i] = a_.i64[i] % b_.i64[i];
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_rem_epi64
  #define _mm512_rem_epi64(a, b) simde_mm512_rem_epi64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_rem_epu8 (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_rem_epu8(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u8 = a_.u8 % b_.u8;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
        r_.u8[i] = a_.u8[i] % b_.u8[i];
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_rem_epu8
  #define _mm512_rem_epu8(a, b) simde_mm512_rem_epu8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_rem_epu16 (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_rem_epu16(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u16 = a_.u16 % b_.u16;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
        r_.u16[i] = a_.u16[i] % b_.u16[i];
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_rem_epu16
  #define _mm512_rem_epu16(a, b) simde_mm512_rem_epu16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_rem_epu32 (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_rem_epu32(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u32 = a_.u32 % b_.u32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
        r_.u32[i] = a_.u32[i] % b_.u32[i];
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_rem_epu32
  #define _mm512_rem_epu32(a, b) simde_mm512_rem_epu32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_mask_rem_epu32(simde__m512i src, simde__mmask16 k, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_rem_epu32(src, k, a, b);
  #else
    return simde_mm512_mask_mov_epi32(src, k, simde_mm512_rem_epu32(a, b));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_rem_epu32
  #define _mm512_mask_rem_epu32(src, k, a, b) simde_mm512_mask_rem_epu32(src, k, a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_rem_epu64 (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_rem_epu64(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u64 = a_.u64 % b_.u64;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
        r_.u64[i] = a_.u64[i] % b_.u64[i];
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_rem_epu64
  #define _mm512_rem_epu64(a, b) simde_mm512_rem_epu64((a), (b))
#endif

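/* Sine, element-wise, operating in radians. The fallback defers to
 * simde_math_sinf()/simde_math_sin(), so accuracy and argument reduction
 * for large inputs follow the platform's libm and may differ slightly
 * from Intel's SVML.
 *
 * Illustrative usage (not part of this file; SIMDE_MATH_PI comes from
 * simde's math header):
 *
 *   simde__m128d v = simde_mm_set_pd(SIMDE_MATH_PI, 0.0);  // lane 0 = 0.0
 *   simde__m128d r = simde_mm_sin_pd(v);  // lanes ~ {0.0, 1.2e-16}
 */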
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sin_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_sin_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_sinf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_sin_ps
  #define _mm_sin_ps(a) simde_mm_sin_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_sin_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_sin_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_sin(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_sin_pd
  #define _mm_sin_pd(a) simde_mm_sin_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_sin_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_sin_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_sinf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_sin_ps
  #define _mm256_sin_ps(a) simde_mm256_sin_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_sin_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_sin_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_sin(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_sin_pd
  #define _mm256_sin_pd(a) simde_mm256_sin_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_sin_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_sin_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_sinf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_sin_ps
  #define _mm512_sin_ps(a) simde_mm512_sin_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_sin_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_sin_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_sin(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_sin_pd
  #define _mm512_sin_pd(a) simde_mm512_sin_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_sin_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_sin_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_sin_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_sin_ps
  #define _mm512_mask_sin_ps(src, k, a) simde_mm512_mask_sin_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_sin_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_sin_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_sin_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_sin_pd
  #define _mm512_mask_sin_pd(src, k, a) simde_mm512_mask_sin_pd(src, k, a)
#endif

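/* Degree variants: the _mm*_sind_* functions take their input in degrees.
 * The fallback converts each lane with simde_math_deg2radf()/
 * simde_math_deg2rad() (i.e. x * pi / 180) and then applies the radian
 * sine, so e.g. simde_mm_sind_ps(simde_mm_set1_ps(90.0f)) yields lanes of
 * approximately 1.0f. */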
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sind_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_sind_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_sinf(simde_math_deg2radf(a_.f32[i]));
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_sind_ps
  #define _mm_sind_ps(a) simde_mm_sind_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_sind_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_sind_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_sin(simde_math_deg2rad(a_.f64[i]));
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_sind_pd
  #define _mm_sind_pd(a) simde_mm_sind_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_sind_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_sind_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_sinf(simde_math_deg2radf(a_.f32[i]));
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_sind_ps
  #define _mm256_sind_ps(a) simde_mm256_sind_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_sind_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_sind_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_sin(simde_math_deg2rad(a_.f64[i]));
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_sind_pd
  #define _mm256_sind_pd(a) simde_mm256_sind_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_sind_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_sind_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_sinf(simde_math_deg2radf(a_.f32[i]));
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_sind_ps
  #define _mm512_sind_ps(a) simde_mm512_sind_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_sind_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_sind_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_sin(simde_math_deg2rad(a_.f64[i]));
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_sind_pd
  #define _mm512_sind_pd(a) simde_mm512_sind_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_sind_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_sind_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_sind_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_sind_ps
  #define _mm512_mask_sind_ps(src, k, a) simde_mm512_mask_sind_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_sind_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_sind_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_sind_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_sind_pd
  #define _mm512_mask_sind_pd(src, k, a) simde_mm512_mask_sind_pd(src, k, a)
#endif

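/* Hyperbolic sine, element-wise, via simde_math_sinhf()/simde_math_sinh().
 * Since sinh(x) grows like exp(|x|) / 2, single-precision lanes overflow
 * to +/-infinity once |x| exceeds roughly 89.5, mirroring scalar sinhf(). */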
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sinh_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_sinh_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_sinhf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_sinh_ps
  #define _mm_sinh_ps(a) simde_mm_sinh_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_sinh_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_sinh_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_sinh(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_sinh_pd
  #define _mm_sinh_pd(a) simde_mm_sinh_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_sinh_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_sinh_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_sinhf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_sinh_ps
  #define _mm256_sinh_ps(a) simde_mm256_sinh_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_sinh_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_sinh_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_sinh(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_sinh_pd
  #define _mm256_sinh_pd(a) simde_mm256_sinh_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_sinh_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_sinh_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = simde_math_sinhf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_sinh_ps
  #define _mm512_sinh_ps(a) simde_mm512_sinh_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_sinh_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_sinh_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] = simde_math_sinh(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_sinh_pd
  #define _mm512_sinh_pd(a) simde_mm512_sinh_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_sinh_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_sinh_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_sinh_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_sinh_ps
  #define _mm512_mask_sinh_ps(src, k, a) simde_mm512_mask_sinh_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_sinh_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_sinh_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_sinh_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_sinh_pd
  #define _mm512_mask_sinh_pd(src, k, a) simde_mm512_mask_sinh_pd(src, k, a)
#endif

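/* SVML exports square root under its own names (_mm_svml_sqrt_ps and
 * friends) in addition to the ordinary SSE/AVX sqrt intrinsics. The
 * operation is identical, so the portable path simply forwards to the
 * existing simde_mm*_sqrt_* implementations. */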
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_svml_sqrt_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_svml_sqrt_ps(a);
  #else
    return simde_mm_sqrt_ps(a);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_svml_sqrt_ps
  #define _mm_svml_sqrt_ps(a) simde_mm_svml_sqrt_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_svml_sqrt_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_svml_sqrt_pd(a);
  #else
    return simde_mm_sqrt_pd(a);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_svml_sqrt_pd
  #define _mm_svml_sqrt_pd(a) simde_mm_svml_sqrt_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_svml_sqrt_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_svml_sqrt_ps(a);
  #else
    return simde_mm256_sqrt_ps(a);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_svml_sqrt_ps
  #define _mm256_svml_sqrt_ps(a) simde_mm256_svml_sqrt_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_svml_sqrt_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_svml_sqrt_pd(a);
  #else
    return simde_mm256_sqrt_pd(a);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_svml_sqrt_pd
  #define _mm256_svml_sqrt_pd(a) simde_mm256_svml_sqrt_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_svml_sqrt_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_svml_sqrt_ps(a);
  #else
    return simde_mm512_sqrt_ps(a);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_svml_sqrt_ps
  #define _mm512_svml_sqrt_ps(a) simde_mm512_svml_sqrt_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_svml_sqrt_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_svml_sqrt_pd(a);
  #else
    return simde_mm512_sqrt_pd(a);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_svml_sqrt_pd
  #define _mm512_svml_sqrt_pd(a) simde_mm512_svml_sqrt_pd(a)
#endif

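/* Tangent, element-wise, in radians, via simde_math_tanf()/simde_math_tan().
 * Because odd multiples of pi/2 are not exactly representable, lanes near
 * the poles produce large finite values rather than infinities, as with
 * the scalar functions. */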
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_tan_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_tan_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] =  simde_math_tanf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_tan_ps
  #define _mm_tan_ps(a) simde_mm_tan_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_tan_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_tan_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] =  simde_math_tan(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_tan_pd
  #define _mm_tan_pd(a) simde_mm_tan_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_tan_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_tan_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] =  simde_math_tanf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_tan_ps
  #define _mm256_tan_ps(a) simde_mm256_tan_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_tan_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_tan_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] =  simde_math_tan(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_tan_pd
  #define _mm256_tan_pd(a) simde_mm256_tan_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_tan_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_tan_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] =  simde_math_tanf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_tan_ps
  #define _mm512_tan_ps(a) simde_mm512_tan_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_tan_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_tan_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] =  simde_math_tan(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_tan_pd
  #define _mm512_tan_pd(a) simde_mm512_tan_pd(a)
#endif

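/* Masked variants: lanes whose bit in k is set take the computed result;
 * the remaining lanes are copied from src. */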
SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_tan_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_tan_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_tan_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_tan_ps
  #define _mm512_mask_tan_ps(src, k, a) simde_mm512_mask_tan_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_tan_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_tan_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_tan_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_tan_pd
  #define _mm512_mask_tan_pd(src, k, a) simde_mm512_mask_tan_pd(src, k, a)
#endif

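/* tand: tangent of an angle expressed in degrees.  The portable path
 * converts each lane to radians before applying tan. */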
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_tand_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_tand_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] =  simde_math_tanf(simde_math_deg2radf(a_.f32[i]));
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_tand_ps
  #define _mm_tand_ps(a) simde_mm_tand_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_tand_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_tand_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] =  simde_math_tan(simde_math_deg2rad(a_.f64[i]));
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_tand_pd
  #define _mm_tand_pd(a) simde_mm_tand_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_tand_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_tand_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] =  simde_math_tanf(simde_math_deg2radf(a_.f32[i]));
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_tand_ps
  #define _mm256_tand_ps(a) simde_mm256_tand_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_tand_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_tand_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] =  simde_math_tan(simde_math_deg2rad(a_.f64[i]));
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_tand_pd
  #define _mm256_tand_pd(a) simde_mm256_tand_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_tand_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_tand_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] =  simde_math_tanf(simde_math_deg2radf(a_.f32[i]));
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_tand_ps
  #define _mm512_tand_ps(a) simde_mm512_tand_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_tand_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_tand_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] =  simde_math_tan(simde_math_deg2rad(a_.f64[i]));
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_tand_pd
  #define _mm512_tand_pd(a) simde_mm512_tand_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_tand_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_tand_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_tand_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_tand_ps
  #define _mm512_mask_tand_ps(src, k, a) simde_mm512_mask_tand_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_tand_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_tand_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_tand_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_tand_pd
  #define _mm512_mask_tand_pd(src, k, a) simde_mm512_mask_tand_pd(src, k, a)
#endif

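/* Element-wise hyperbolic tangent. */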
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_tanh_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_tanh_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] =  simde_math_tanhf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_tanh_ps
  #define _mm_tanh_ps(a) simde_mm_tanh_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_tanh_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_tanh_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] =  simde_math_tanh(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_tanh_pd
  #define _mm_tanh_pd(a) simde_mm_tanh_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_tanh_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_tanh_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] =  simde_math_tanhf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_tanh_ps
  #define _mm256_tanh_ps(a) simde_mm256_tanh_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_tanh_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_tanh_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] =  simde_math_tanh(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_tanh_pd
  #define _mm256_tanh_pd(a) simde_mm256_tanh_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_tanh_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_tanh_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] =  simde_math_tanhf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_tanh_ps
  #define _mm512_tanh_ps(a) simde_mm512_tanh_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_tanh_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_tanh_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] =  simde_math_tanh(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_tanh_pd
  #define _mm512_tanh_pd(a) simde_mm512_tanh_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_tanh_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_tanh_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_tanh_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_tanh_ps
  #define _mm512_mask_tanh_ps(src, k, a) simde_mm512_mask_tanh_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_tanh_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_tanh_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_tanh_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_tanh_pd
  #define _mm512_mask_tanh_pd(src, k, a) simde_mm512_mask_tanh_pd(src, k, a)
#endif

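/* Element-wise truncation: rounds each lane toward zero, discarding the
 * fractional part. */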
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_trunc_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_trunc_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] =  simde_math_truncf(a_.f32[i]);
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_trunc_ps
  #define _mm_trunc_ps(a) simde_mm_trunc_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_trunc_pd (simde__m128d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE_NATIVE)
    return _mm_trunc_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] =  simde_math_trunc(a_.f64[i]);
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_trunc_pd
  #define _mm_trunc_pd(a) simde_mm_trunc_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_trunc_ps (simde__m256 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_trunc_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] =  simde_math_truncf(a_.f32[i]);
    }

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_trunc_ps
  #define _mm256_trunc_ps(a) simde_mm256_trunc_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_trunc_pd (simde__m256d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_trunc_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] =  simde_math_trunc(a_.f64[i]);
    }

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_trunc_pd
  #define _mm256_trunc_pd(a) simde_mm256_trunc_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_trunc_ps (simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_trunc_ps(a);
  #else
    simde__m512_private
      r_,
      a_ = simde__m512_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] =  simde_math_truncf(a_.f32[i]);
    }

    return simde__m512_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_trunc_ps
  #define _mm512_trunc_ps(a) simde_mm512_trunc_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_trunc_pd (simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_trunc_pd(a);
  #else
    simde__m512d_private
      r_,
      a_ = simde__m512d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      r_.f64[i] =  simde_math_trunc(a_.f64[i]);
    }

    return simde__m512d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_trunc_pd
  #define _mm512_trunc_pd(a) simde_mm512_trunc_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512
simde_mm512_mask_trunc_ps(simde__m512 src, simde__mmask16 k, simde__m512 a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_trunc_ps(src, k, a);
  #else
    return simde_mm512_mask_mov_ps(src, k, simde_mm512_trunc_ps(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_trunc_ps
  #define _mm512_mask_trunc_ps(src, k, a) simde_mm512_mask_trunc_ps(src, k, a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m512d
simde_mm512_mask_trunc_pd(simde__m512d src, simde__mmask8 k, simde__m512d a) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_trunc_pd(src, k, a);
  #else
    return simde_mm512_mask_mov_pd(src, k, simde_mm512_trunc_pd(a));
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_trunc_pd
  #define _mm512_mask_trunc_pd(src, k, a) simde_mm512_mask_trunc_pd(src, k, a)
#endif

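/* Unsigned 32-bit division with remainder: the quotient is returned and the
 * remainder (a - quotient * b) is stored through mem_addr. */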
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_udivrem_epi32 (simde__m128i * mem_addr, simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_udivrem_epi32(mem_addr, a, b);
  #else
    simde__m128i r;

    r = simde_mm_div_epu32(a, b);
    *mem_addr = simde_x_mm_sub_epu32(a, simde_x_mm_mullo_epu32(r, b));

    return r;
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm_udivrem_epi32
  #define _mm_udivrem_epi32(mem_addr, a, b) simde_mm_udivrem_epi32((mem_addr), (a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_udivrem_epi32 (simde__m256i * mem_addr, simde__m256i a, simde__m256i b) {
  #if defined(SIMDE_X86_SVML_NATIVE) && defined(SIMDE_X86_AVX_NATIVE)
    return _mm256_udivrem_epi32(HEDLEY_REINTERPRET_CAST(__m256i*, mem_addr), a, b);
  #else
    simde__m256i r;

    r = simde_mm256_div_epu32(a, b);
    *mem_addr = simde_x_mm256_sub_epu32(a, simde_x_mm256_mullo_epu32(r, b));

    return r;
  #endif
}
#if defined(SIMDE_X86_SVML_ENABLE_NATIVE_ALIASES)
  #undef _mm256_udivrem_epi32
  #define _mm256_udivrem_epi32(mem_addr, a, b) simde_mm256_udivrem_epi32((mem_addr), (a), (b))
#endif

SIMDE_END_DECLS_

HEDLEY_DIAGNOSTIC_POP

#endif /* !defined(SIMDE_SVML_H) */