/* SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Copyright:
 *   2020      Evan Nemerson <evan@nemerson.com>
 */

#if !defined(SIMDE_ARM_NEON_MOVN_H)
#define SIMDE_ARM_NEON_MOVN_H

#include "types.h"

HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_

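/* vmovn ("move narrow") halves the width of every lane: each element of the
 * 128-bit input is truncated to its low half and packed into a 64-bit result,
 * so simde_vmovn_s16 turns an int16x8 into an int8x8.  On A32v7+ the native
 * intrinsic is used; otherwise the portable fallback converts the whole
 * vector with SIMDE_CONVERT_VECTOR_ when available, or truncates lane by
 * lane in a scalar loop.  A minimal usage sketch (assuming the
 * simde_vdupq_n_s16 helper from this library is available and included):
 *
 *   simde_int16x8_t wide   = simde_vdupq_n_s16(0x1234);
 *   simde_int8x8_t  narrow = simde_vmovn_s16(wide);   // every lane becomes 0x34
 */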
SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vmovn_s16(simde_int16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmovn_s16(a);
  #else
    simde_int8x8_private r_;
    simde_int16x8_private a_ = simde_int16x8_to_private(a);

    #if defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = HEDLEY_STATIC_CAST(int8_t, a_.values[i]);
      }
    #endif

    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmovn_s16
  #define vmovn_s16(a) simde_vmovn_s16((a))
#endif

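/* Narrow each signed 32-bit lane to 16 bits, keeping only the low half. */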
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vmovn_s32(simde_int32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmovn_s32(a);
  #else
    simde_int16x4_private r_;
    simde_int32x4_private a_ = simde_int32x4_to_private(a);

    #if defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = HEDLEY_STATIC_CAST(int16_t, a_.values[i]);
      }
    #endif

    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmovn_s32
  #define vmovn_s32(a) simde_vmovn_s32((a))
#endif

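/* Narrow each signed 64-bit lane to 32 bits, keeping only the low half. */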
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vmovn_s64(simde_int64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmovn_s64(a);
  #else
    simde_int32x2_private r_;
    simde_int64x2_private a_ = simde_int64x2_to_private(a);

    #if defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = HEDLEY_STATIC_CAST(int32_t, a_.values[i]);
      }
    #endif

    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmovn_s64
  #define vmovn_s64(a) simde_vmovn_s64((a))
#endif

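/* Narrow each unsigned 16-bit lane to 8 bits, keeping only the low half. */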
SIMDE_FUNCTION_ATTRIBUTES
simde_uint8x8_t
simde_vmovn_u16(simde_uint16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmovn_u16(a);
  #else
    simde_uint8x8_private r_;
    simde_uint16x8_private a_ = simde_uint16x8_to_private(a);

    #if defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = HEDLEY_STATIC_CAST(uint8_t, a_.values[i]);
      }
    #endif

    return simde_uint8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmovn_u16
  #define vmovn_u16(a) simde_vmovn_u16((a))
#endif

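/* Narrow each unsigned 32-bit lane to 16 bits, keeping only the low half. */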
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x4_t
simde_vmovn_u32(simde_uint32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmovn_u32(a);
  #else
    simde_uint16x4_private r_;
    simde_uint32x4_private a_ = simde_uint32x4_to_private(a);

    #if defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, a_.values[i]);
      }
    #endif

    return simde_uint16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmovn_u32
  #define vmovn_u32(a) simde_vmovn_u32((a))
#endif

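/* Narrow each unsigned 64-bit lane to 32 bits, keeping only the low half. */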
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x2_t
simde_vmovn_u64(simde_uint64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmovn_u64(a);
  #else
    simde_uint32x2_private r_;
    simde_uint64x2_private a_ = simde_uint64x2_to_private(a);

    #if defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, a_.values[i]);
      }
    #endif

    return simde_uint32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmovn_u64
  #define vmovn_u64(a) simde_vmovn_u64((a))
#endif

SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP

#endif /* !defined(SIMDE_ARM_NEON_MOVN_H) */