/* SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Copyright:
 *   2020      Evan Nemerson <evan@nemerson.com>
 */

#if !defined(SIMDE_ARM_NEON_NEG_H)
#define SIMDE_ARM_NEON_NEG_H

#include "types.h"
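
/* Implementations of the NEON "negate" family: vneg_f32, vneg_f64,
 * vneg_s8/s16/s32/s64, and their 128-bit "q" counterparts. Each
 * function returns the lane-wise negation of its argument. When a
 * native implementation is available (ARM NEON, and for the q forms
 * also POWER AltiVec or WebAssembly SIMD128) it is used directly;
 * otherwise the fallback negates via GCC-style vector extensions
 * (SIMDE_VECTOR_SUBSCRIPT_OPS) or a plain scalar loop
 * (SIMDE_VECTORIZE).
 */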

HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_

SIMDE_FUNCTION_ATTRIBUTES
simde_float32x2_t
simde_vneg_f32(simde_float32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vneg_f32(a);
  #else
    simde_float32x2_private
      r_,
      a_ = simde_float32x2_to_private(a);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif

    return simde_float32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vneg_f32
  #define vneg_f32(a) simde_vneg_f32(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_float64x1_t
simde_vneg_f64(simde_float64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vneg_f64(a);
  #else
    simde_float64x1_private
      r_,
      a_ = simde_float64x1_to_private(a);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif

    return simde_float64x1_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vneg_f64
  #define vneg_f64(a) simde_vneg_f64(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_int8x8_t
simde_vneg_s8(simde_int8x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vneg_s8(a);
  #else
    simde_int8x8_private
      r_,
      a_ = simde_int8x8_to_private(a);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif

    return simde_int8x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vneg_s8
  #define vneg_s8(a) simde_vneg_s8(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_int16x4_t
simde_vneg_s16(simde_int16x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vneg_s16(a);
  #else
    simde_int16x4_private
      r_,
      a_ = simde_int16x4_to_private(a);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif

    return simde_int16x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vneg_s16
  #define vneg_s16(a) simde_vneg_s16(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_int32x2_t
simde_vneg_s32(simde_int32x2_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vneg_s32(a);
  #else
    simde_int32x2_private
      r_,
      a_ = simde_int32x2_to_private(a);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif

    return simde_int32x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vneg_s32
  #define vneg_s32(a) simde_vneg_s32(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_int64x1_t
simde_vneg_s64(simde_int64x1_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vneg_s64(a);
  #else
    simde_int64x1_private
      r_,
      a_ = simde_int64x1_to_private(a);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif

    return simde_int64x1_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vneg_s64
  #define vneg_s64(a) simde_vneg_s64(a)
#endif
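
/* 128-bit ("q") variants follow. In addition to the NEON and portable
 * paths, these can use vec_neg on POWER8+ AltiVec (guarded to GCC 8.1+,
 * where that builtin exists) and the wasm_*_neg intrinsics under
 * WebAssembly SIMD128.
 */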

SIMDE_FUNCTION_ATTRIBUTES
simde_float32x4_t
simde_vnegq_f32(simde_float32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vnegq_f32(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
    return vec_neg(a);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_f32x4_neg(a);
  #else
    simde_float32x4_private
      r_,
      a_ = simde_float32x4_to_private(a);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif

    return simde_float32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vnegq_f32
  #define vnegq_f32(a) simde_vnegq_f32(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_float64x2_t
simde_vnegq_f64(simde_float64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vnegq_f64(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
    return vec_neg(a);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_f64x2_neg(a);
  #else
    simde_float64x2_private
      r_,
      a_ = simde_float64x2_to_private(a);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif

    return simde_float64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vnegq_f64
  #define vnegq_f64(a) simde_vnegq_f64(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_int8x16_t
simde_vnegq_s8(simde_int8x16_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vnegq_s8(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
    return vec_neg(a);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_i8x16_neg(a);
  #else
    simde_int8x16_private
      r_,
      a_ = simde_int8x16_to_private(a);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif

    return simde_int8x16_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vnegq_s8
  #define vnegq_s8(a) simde_vnegq_s8(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_int16x8_t
simde_vnegq_s16(simde_int16x8_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vnegq_s16(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
    return vec_neg(a);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_i16x8_neg(a);
  #else
    simde_int16x8_private
      r_,
      a_ = simde_int16x8_to_private(a);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif

    return simde_int16x8_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vnegq_s16
  #define vnegq_s16(a) simde_vnegq_s16(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_int32x4_t
simde_vnegq_s32(simde_int32x4_t a) {
  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    return vnegq_s32(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
    return vec_neg(a);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_i32x4_neg(a);
  #else
    simde_int32x4_private
      r_,
      a_ = simde_int32x4_to_private(a);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif

    return simde_int32x4_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
  #undef vnegq_s32
  #define vnegq_s32(a) simde_vnegq_s32(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde_int64x2_t
simde_vnegq_s64(simde_int64x2_t a) {
  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    return vnegq_s64(a);
  #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
      (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
    return vec_neg(a);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    return wasm_i64x2_neg(a);
  #else
    simde_int64x2_private
      r_,
      a_ = simde_int64x2_to_private(a);

    #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.values = -a_.values;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.values) / sizeof(r_.values[0])) ; i++) {
        r_.values[i] = -(a_.values[i]);
      }
    #endif

    return simde_int64x2_from_private(r_);
  #endif
}
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
  #undef vnegq_s64
  #define vnegq_s64(a) simde_vnegq_s64(a)
#endif
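
/* Usage sketch (illustrative only; not part of the API defined here):
 *
 *   simde_float32x2_t v = simde_vld1_f32(ptr);  // load two floats ("ld1.h")
 *   simde_float32x2_t n = simde_vneg_f32(v);    // n = { -v[0], -v[1] }
 *
 * When SIMDE_ENABLE_NATIVE_ALIASES is defined, the unprefixed NEON
 * names (vneg_f32, vnegq_s32, ...) resolve to these implementations
 * on non-ARM targets as well.
 */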

SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP

#endif /* !defined(SIMDE_ARM_NEON_NEG_H) */