/* SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Copyright:
 *   2020      Evan Nemerson <evan@nemerson.com>
 *   2020      Sean Maher <seanptmaher@gmail.com> (Copyright owned by Google, LLC)
 */

#if !defined(SIMDE_ARM_NEON_MOVL_H)
#define SIMDE_ARM_NEON_MOVL_H

#include "types.h"

HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_

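/* simde_vmovl_s8: widen each of the eight signed 8-bit lanes of a to
 * 16 bits via sign extension (portable equivalent of NEON vmovl_s8). */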
SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vmovl_s8(simde_int8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmovl_s8(a);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_i16x8_load_8x8(&a);
  #else
    simde_int16x8_private r_;
    simde_int8x8_private a_ = simde_int8x8_to_private(a);

    #if defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = HEDLEY_STATIC_CAST(int16_t, a_.values[i]);
      }
    #endif

    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmovl_s8
  #define vmovl_s8(a) simde_vmovl_s8((a))
#endif

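/* simde_vmovl_s16: widen each of the four signed 16-bit lanes of a to
 * 32 bits via sign extension. */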
SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vmovl_s16(simde_int16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmovl_s16(a);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_i32x4_load_16x4(&a);
  #else
    simde_int32x4_private r_;
    simde_int16x4_private a_ = simde_int16x4_to_private(a);

    #if defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = HEDLEY_STATIC_CAST(int32_t, a_.values[i]);
      }
    #endif

    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmovl_s16
  #define vmovl_s16(a) simde_vmovl_s16((a))
#endif

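/* simde_vmovl_s32: widen each of the two signed 32-bit lanes of a to
 * 64 bits via sign extension. */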
SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vmovl_s32(simde_int32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmovl_s32(a);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_i64x2_load_32x2(&a);
  #else
    simde_int64x2_private r_;
    simde_int32x2_private a_ = simde_int32x2_to_private(a);

    #if defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = HEDLEY_STATIC_CAST(int64_t, a_.values[i]);
      }
    #endif

    return simde_int64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmovl_s32
  #define vmovl_s32(a) simde_vmovl_s32((a))
#endif

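/* simde_vmovl_u8: widen each of the eight unsigned 8-bit lanes of a to
 * 16 bits via zero extension. */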
SIMDE_FUNCTION_ATTRIBUTES
simde_uint16x8_t
simde_vmovl_u8(simde_uint8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmovl_u8(a);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_u16x8_load_8x8(&a);
  #else
    simde_uint16x8_private r_;
    simde_uint8x8_private a_ = simde_uint8x8_to_private(a);

    #if defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = HEDLEY_STATIC_CAST(uint16_t, a_.values[i]);
      }
    #endif

    return simde_uint16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmovl_u8
  #define vmovl_u8(a) simde_vmovl_u8((a))
#endif

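/* simde_vmovl_u16: widen each of the four unsigned 16-bit lanes of a to
 * 32 bits via zero extension. */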
SIMDE_FUNCTION_ATTRIBUTES
simde_uint32x4_t
simde_vmovl_u16(simde_uint16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmovl_u16(a);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_u32x4_load_16x4(&a);
  #else
    simde_uint32x4_private r_;
    simde_uint16x4_private a_ = simde_uint16x4_to_private(a);

    #if defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = HEDLEY_STATIC_CAST(uint32_t, a_.values[i]);
      }
    #endif

    return simde_uint32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmovl_u16
  #define vmovl_u16(a) simde_vmovl_u16((a))
#endif

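/* simde_vmovl_u32: widen each of the two unsigned 32-bit lanes of a to
 * 64 bits via zero extension. */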
SIMDE_FUNCTION_ATTRIBUTES
simde_uint64x2_t
simde_vmovl_u32(simde_uint32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vmovl_u32(a);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_u64x2_load_32x2(&a);
  #else
    simde_uint64x2_private r_;
    simde_uint32x2_private a_ = simde_uint32x2_to_private(a);

    #if defined(SIMDE_CONVERT_VECTOR_)
      SIMDE_CONVERT_VECTOR_(r_.values, a_.values);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = HEDLEY_STATIC_CAST(uint64_t, a_.values[i]);
      }
    #endif

    return simde_uint64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vmovl_u32
  #define vmovl_u32(a) simde_vmovl_u32((a))
#endif

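/* Illustrative usage sketch (not part of this header's API; assumes the
 * caller loads vectors elsewhere, e.g. with simde_vld1_s8 from "ld1.h"):
 *
 *   int8_t data[8] = { -1, 2, -3, 4, -5, 6, -7, 8 };
 *   simde_int8x8_t narrow = simde_vld1_s8(data);
 *   simde_int16x8_t wide = simde_vmovl_s8(narrow);
 *
 * Each lane of narrow is sign-extended, so wide holds
 * { -1, 2, -3, 4, -5, 6, -7, 8 } as 16-bit values. */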
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP

#endif /* !defined(SIMDE_ARM_NEON_MOVL_H) */