/* SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Copyright:
 *   2020      Evan Nemerson <evan@nemerson.com>
 *   2020      Christopher Moore <moore@free.fr>
 */

#if !defined(SIMDE_X86_AVX512_CMPGT_H)
#define SIMDE_X86_AVX512_CMPGT_H

#include "types.h"
#include "../avx2.h"
#include "mov.h"
#include "mov_mask.h"

HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_cmpgt_epi8_mask (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm512_cmpgt_epi8_mask(a, b);
  #else
    simde__m512i_private
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);
    simde__mmask64 r;

    #if SIMDE_NATURAL_VECTOR_SIZE_LE(256) && !defined(HEDLEY_INTEL_VERSION)
      /* Assemble the 64-bit mask from the two 256-bit halves: compare
       * each half, movemask it to 32 bits, and shift into place. */
      r = 0;

      SIMDE_VECTORIZE_REDUCTION(|:r)
      for (size_t i = 0 ; i < (sizeof(a_.m256i) / sizeof(a_.m256i[0])) ; i++) {
        const uint32_t t = HEDLEY_STATIC_CAST(uint32_t, simde_mm256_movemask_epi8(simde_mm256_cmpgt_epi8(a_.m256i[i], b_.m256i[i])));
        r |= HEDLEY_STATIC_CAST(uint64_t, t) << HEDLEY_STATIC_CAST(uint64_t, i * 32);
      }
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* GCC-style vector extensions: the lane-wise compare yields
       * all-ones/all-zeros lanes, which movepi8_mask condenses into
       * the bitmask. */
      simde__m512i_private tmp;

      tmp.i8 = HEDLEY_STATIC_CAST(__typeof__(tmp.i8), a_.i8 > b_.i8);
      r = simde_mm512_movepi8_mask(simde__m512i_from_private(tmp));
    #else
      /* Portable scalar fallback: set bit i when a.i8[i] > b.i8[i]. */
      r = 0;

      SIMDE_VECTORIZE_REDUCTION(|:r)
      for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) {
        r |= (a_.i8[i] > b_.i8[i]) ? (UINT64_C(1) << i) : 0;
      }
    #endif

    return r;
  #endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
  #undef _mm512_cmpgt_epi8_mask
  #define _mm512_cmpgt_epi8_mask(a, b) simde_mm512_cmpgt_epi8_mask(a, b)
#endif

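/* Unsigned variant: greater-than on packed unsigned 8-bit integers,
 * returned as a 64-bit mask. */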
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask64
simde_mm512_cmpgt_epu8_mask (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512BW_NATIVE)
    return _mm512_cmpgt_epu8_mask(a, b);
  #else
    simde__m512i_private
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);
    simde__mmask64 r = 0;

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* The unsigned compare still produces signed all-ones/all-zeros
       * lanes, so the result casts to i8 for movepi8_mask. */
      simde__m512i_private tmp;

      tmp.i8 = HEDLEY_STATIC_CAST(__typeof__(tmp.i8), a_.u8 > b_.u8);
      r = simde_mm512_movepi8_mask(simde__m512i_from_private(tmp));
    #else
      SIMDE_VECTORIZE_REDUCTION(|:r)
      for (size_t i = 0 ; i < (sizeof(a_.u8) / sizeof(a_.u8[0])) ; i++) {
        r |= (a_.u8[i] > b_.u8[i]) ? (UINT64_C(1) << i) : 0;
      }
    #endif

    return r;
  #endif
}
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
  #undef _mm512_cmpgt_epu8_mask
  #define _mm512_cmpgt_epu8_mask(a, b) simde_mm512_cmpgt_epu8_mask(a, b)
#endif

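/* Compares packed signed 32-bit integers in a and b for greater-than
 * and returns the results as a 16-bit mask. */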
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_cmpgt_epi32_mask (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_cmpgt_epi32_mask(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    /* Compare the two 256-bit halves, then compress the full-width
     * results into a mask. */
    for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
      r_.m256i[i] = simde_mm256_cmpgt_epi32(a_.m256i[i], b_.m256i[i]);
    }

    return simde_mm512_movepi32_mask(simde__m512i_from_private(r_));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_cmpgt_epi32_mask
  #define _mm512_cmpgt_epi32_mask(a, b) simde_mm512_cmpgt_epi32_mask(a, b)
#endif

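/* As simde_mm512_cmpgt_epi32_mask, but lanes whose bit is clear in
 * k1 are zeroed in the result. */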
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask16
simde_mm512_mask_cmpgt_epi32_mask (simde__mmask16 k1, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_cmpgt_epi32_mask(k1, a, b);
  #else
    /* ANDing with k1 discards lanes excluded from the comparison. */
    return simde_mm512_cmpgt_epi32_mask(a, b) & k1;
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_cmpgt_epi32_mask
  #define _mm512_mask_cmpgt_epi32_mask(k1, a, b) simde_mm512_mask_cmpgt_epi32_mask(k1, a, b)
#endif

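/* Compares packed signed 64-bit integers in a and b for greater-than
 * and returns the results as an 8-bit mask. */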
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_cmpgt_epi64_mask (simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_cmpgt_epi64_mask(a, b);
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a),
      b_ = simde__m512i_to_private(b);

    for (size_t i = 0 ; i < (sizeof(r_.m256i) / sizeof(r_.m256i[0])) ; i++) {
      r_.m256i[i] = simde_mm256_cmpgt_epi64(a_.m256i[i], b_.m256i[i]);
    }

    return simde_mm512_movepi64_mask(simde__m512i_from_private(r_));
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_cmpgt_epi64_mask
  #define _mm512_cmpgt_epi64_mask(a, b) simde_mm512_cmpgt_epi64_mask(a, b)
#endif

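/* As simde_mm512_cmpgt_epi64_mask, but filtered through the mask k1. */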
SIMDE_FUNCTION_ATTRIBUTES
simde__mmask8
simde_mm512_mask_cmpgt_epi64_mask (simde__mmask8 k1, simde__m512i a, simde__m512i b) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_mask_cmpgt_epi64_mask(k1, a, b);
  #else
    return simde_mm512_cmpgt_epi64_mask(a, b) & k1;
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_mask_cmpgt_epi64_mask
  #define _mm512_mask_cmpgt_epi64_mask(k1, a, b) simde_mm512_mask_cmpgt_epi64_mask(k1, a, b)
#endif

SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP

#endif /* !defined(SIMDE_X86_AVX512_CMPGT_H) */