/* SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Copyright:
 *   2020      Evan Nemerson <evan@nemerson.com>
 *   2020      Hidayat Khan <huk2209@gmail.com>
 */

#if !defined(SIMDE_X86_AVX512_SRLI_H)
#define SIMDE_X86_AVX512_SRLI_H

#include "types.h"
#include "../avx2.h"
#include "mov.h"
#include "setzero.h"

HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_srli_epi16 (simde__m512i a, const unsigned int imm8)
    SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
  #if defined(SIMDE_X86_AVX512BW_NATIVE) && (defined(HEDLEY_GCC_VERSION) && ((__GNUC__ == 5 && __GNUC_MINOR__ == 5) || (__GNUC__ == 6 && __GNUC_MINOR__ >= 4)))
    simde__m512i r;

    SIMDE_CONSTIFY_16_(_mm512_srli_epi16, r, simde_mm512_setzero_si512(), imm8, a);

    return r;
  #elif defined(SIMDE_X86_AVX512BW_NATIVE)
    return SIMDE_BUG_IGNORE_SIGN_CONVERSION(_mm512_srli_epi16(a, imm8));
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a);

    if (HEDLEY_STATIC_CAST(unsigned int, imm8) > 15)
      return simde_mm512_setzero_si512();

    #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
      r_.u16 = a_.u16 >> SIMDE_CAST_VECTOR_SHIFT_COUNT(16, imm8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
        r_.u16[i] = a_.u16[i] >> imm8;
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_AVX512BW_NATIVE)
  #define simde_mm512_srli_epi16(a, imm8) _mm512_srli_epi16(a, imm8)
#endif
#if defined(SIMDE_X86_AVX512BW_ENABLE_NATIVE_ALIASES)
  #undef _mm512_srli_epi16
  #define _mm512_srli_epi16(a, imm8) simde_mm512_srli_epi16(a, imm8)
#endif
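
/* Usage sketch (illustrative, not part of the API): the shift count must be
 * a constant expression in the range 0..255, and any count above 15 zeroes
 * every lane, matching the native instruction.
 *
 *   simde__m512i v = simde_mm512_set1_epi16(0x0F00);
 *   simde__m512i r = simde_mm512_srli_epi16(v, 4);
 *   // each 16-bit lane of r is now 0x00F0
 */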

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_srli_epi32 (simde__m512i a, unsigned int imm8) {
  #if defined(SIMDE_X86_AVX512F_NATIVE) && (defined(HEDLEY_GCC_VERSION) && ((__GNUC__ == 5 && __GNUC_MINOR__ == 5) || (__GNUC__ == 6 && __GNUC_MINOR__ >= 4)))
    simde__m512i r;

    SIMDE_CONSTIFY_32_(_mm512_srli_epi32, r, simde_mm512_setzero_si512(), imm8, a);

    return r;
  #elif defined(SIMDE_X86_AVX512F_NATIVE)
    return SIMDE_BUG_IGNORE_SIGN_CONVERSION(_mm512_srli_epi32(a, imm8));
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a);

    #if defined(SIMDE_X86_AVX2_NATIVE)
      r_.m256i[0] = simde_mm256_srli_epi32(a_.m256i[0], HEDLEY_STATIC_CAST(int, imm8));
      r_.m256i[1] = simde_mm256_srli_epi32(a_.m256i[1], HEDLEY_STATIC_CAST(int, imm8));
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      r_.m128i[0] = simde_mm_srli_epi32(a_.m128i[0], HEDLEY_STATIC_CAST(int, imm8));
      r_.m128i[1] = simde_mm_srli_epi32(a_.m128i[1], HEDLEY_STATIC_CAST(int, imm8));
      r_.m128i[2] = simde_mm_srli_epi32(a_.m128i[2], HEDLEY_STATIC_CAST(int, imm8));
      r_.m128i[3] = simde_mm_srli_epi32(a_.m128i[3], HEDLEY_STATIC_CAST(int, imm8));
    #else
      if (imm8 > 31) {
        simde_memset(&r_, 0, sizeof(r_));
      } else {
        #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
          r_.u32 = a_.u32 >> imm8;
        #else
          SIMDE_VECTORIZE
          for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
            r_.u32[i] = a_.u32[i] >> imm8;
          }
        #endif
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_srli_epi32
  #define _mm512_srli_epi32(a, imm8) simde_mm512_srli_epi32(a, imm8)
#endif
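
/* Usage sketch (illustrative, not part of the API): unlike the epi16 variant
 * above there is no SIMDE_REQUIRE_CONSTANT_RANGE here, though some GCC
 * releases still insist on a constant count (hence the SIMDE_CONSTIFY_32_
 * path above).  Counts above 31 zero every lane.
 *
 *   simde__m512i v = simde_mm512_set1_epi32(0xF0);
 *   simde__m512i r = simde_mm512_srli_epi32(v, 4);
 *   // each 32-bit lane of r is now 0x0F
 */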

SIMDE_FUNCTION_ATTRIBUTES
simde__m512i
simde_mm512_srli_epi64 (simde__m512i a, unsigned int imm8) {
  #if defined(SIMDE_X86_AVX512F_NATIVE) && (defined(HEDLEY_GCC_VERSION) && ((__GNUC__ == 5 && __GNUC_MINOR__ == 5) || (__GNUC__ == 6 && __GNUC_MINOR__ >= 4)))
    simde__m512i r;

    SIMDE_CONSTIFY_64_(_mm512_srli_epi64, r, simde_mm512_setzero_si512(), imm8, a);

    return r;
  #elif defined(SIMDE_X86_AVX512F_NATIVE)
    return SIMDE_BUG_IGNORE_SIGN_CONVERSION(_mm512_srli_epi64(a, imm8));
  #else
    simde__m512i_private
      r_,
      a_ = simde__m512i_to_private(a);

    #if defined(SIMDE_X86_AVX2_NATIVE)
      r_.m256i[0] = simde_mm256_srli_epi64(a_.m256i[0], HEDLEY_STATIC_CAST(int, imm8));
      r_.m256i[1] = simde_mm256_srli_epi64(a_.m256i[1], HEDLEY_STATIC_CAST(int, imm8));
    #elif defined(SIMDE_X86_SSE2_NATIVE)
      r_.m128i[0] = simde_mm_srli_epi64(a_.m128i[0], HEDLEY_STATIC_CAST(int, imm8));
      r_.m128i[1] = simde_mm_srli_epi64(a_.m128i[1], HEDLEY_STATIC_CAST(int, imm8));
      r_.m128i[2] = simde_mm_srli_epi64(a_.m128i[2], HEDLEY_STATIC_CAST(int, imm8));
      r_.m128i[3] = simde_mm_srli_epi64(a_.m128i[3], HEDLEY_STATIC_CAST(int, imm8));
    #else
      /* The Intel Intrinsics Guide says that only the 8 LSBits of imm8 are
       * used.  If that were the case we would do "imm8 &= 0xff" here, but
       * in practice all of the bits are used. */
      if (imm8 > 63) {
        simde_memset(&r_, 0, sizeof(r_));
      } else {
        #if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
          r_.u64 = a_.u64 >> imm8;
        #else
          SIMDE_VECTORIZE
          for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
            r_.u64[i] = a_.u64[i] >> imm8;
          }
        #endif
      }
    #endif

    return simde__m512i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES)
  #undef _mm512_srli_epi64
  #define _mm512_srli_epi64(a, imm8) simde_mm512_srli_epi64(a, imm8)
#endif
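
/* Usage sketch (illustrative, not part of the API): as the note in the
 * fallback explains, the full value of imm8 is honored rather than only its
 * low 8 bits, so any count above 63 (even one like 256, which an 8-bit mask
 * would turn into a no-op) zeroes the result.
 *
 *   simde__m512i v = simde_mm512_set1_epi64(INT64_C(0xF0));
 *   simde__m512i r = simde_mm512_srli_epi64(v, 4);
 *   // each 64-bit lane of r is now 0x0F
 */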

SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP

#endif /* !defined(SIMDE_X86_AVX512_SRLI_H) */