/*
 * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VPX_VPX_DSP_ARM_MEM_NEON_H_
#define VPX_VPX_DSP_ARM_MEM_NEON_H_

#include <arm_neon.h>
#include <assert.h>
#include <string.h>

#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/vpx_dsp_common.h"

// Support for these xN intrinsics is lacking in older versions of GCC.
#if defined(__GNUC__) && !defined(__clang__)
#if __GNUC__ < 8 || defined(__arm__)
static INLINE uint8x16x2_t vld1q_u8_x2(uint8_t const *ptr) {
  uint8x16x2_t res = { { vld1q_u8(ptr + 0 * 16), vld1q_u8(ptr + 1 * 16) } };
  return res;
}
#endif

#if __GNUC__ < 9 || defined(__arm__)
static INLINE uint8x16x3_t vld1q_u8_x3(uint8_t const *ptr) {
  uint8x16x3_t res = { { vld1q_u8(ptr + 0 * 16), vld1q_u8(ptr + 1 * 16),
                         vld1q_u8(ptr + 2 * 16) } };
  return res;
}
#endif
#endif

static INLINE int16x4_t create_s16x4_neon(const int16_t c0, const int16_t c1,
                                          const int16_t c2, const int16_t c3) {
  return vcreate_s16((uint16_t)c0 | ((uint32_t)c1 << 16) |
                     ((int64_t)(uint16_t)c2 << 32) | ((int64_t)c3 << 48));
}
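
// Illustrative example: create_s16x4_neon(1, -2, 3, -4) packs c0 into the
// lowest 16 bits of the 64-bit value passed to vcreate_s16, so the resulting
// vector is { 1, -2, 3, -4 } with c0 in lane 0.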

static INLINE int32x2_t create_s32x2_neon(const int32_t c0, const int32_t c1) {
  return vcreate_s32((uint32_t)c0 | ((int64_t)(uint32_t)c1 << 32));
}

static INLINE int32x4_t create_s32x4_neon(const int32_t c0, const int32_t c1,
                                          const int32_t c2, const int32_t c3) {
  return vcombine_s32(create_s32x2_neon(c0, c1), create_s32x2_neon(c2, c3));
}

// Helper functions used to load tran_low_t into int16, narrowing if necessary.
static INLINE int16x8x2_t load_tran_low_to_s16x2q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4x2_t v0 = vld2q_s32(buf);
  const int32x4x2_t v1 = vld2q_s32(buf + 8);
  const int16x4_t s0 = vmovn_s32(v0.val[0]);
  const int16x4_t s1 = vmovn_s32(v0.val[1]);
  const int16x4_t s2 = vmovn_s32(v1.val[0]);
  const int16x4_t s3 = vmovn_s32(v1.val[1]);
  int16x8x2_t res;
  res.val[0] = vcombine_s16(s0, s2);
  res.val[1] = vcombine_s16(s1, s3);
  return res;
#else
  return vld2q_s16(buf);
#endif
}
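
// Note that vld2q de-interleaves: loading the 16 coefficients { 0, 1, ..., 15 }
// yields res.val[0] = { 0, 2, ..., 14 } and res.val[1] = { 1, 3, ..., 15 } in
// both the high-bit-depth and the plain int16 paths.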

static INLINE int16x8_t load_tran_low_to_s16q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vld1q_s32(buf);
  const int32x4_t v1 = vld1q_s32(buf + 4);
  const int16x4_t s0 = vmovn_s32(v0);
  const int16x4_t s1 = vmovn_s32(v1);
  return vcombine_s16(s0, s1);
#else
  return vld1q_s16(buf);
#endif
}

static INLINE int16x4_t load_tran_low_to_s16d(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vld1q_s32(buf);
  return vmovn_s32(v0);
#else
  return vld1_s16(buf);
#endif
}

static INLINE void store_s16q_to_tran_low(tran_low_t *buf, const int16x8_t a) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vmovl_s16(vget_low_s16(a));
  const int32x4_t v1 = vmovl_s16(vget_high_s16(a));
  vst1q_s32(buf, v0);
  vst1q_s32(buf + 4, v1);
#else
  vst1q_s16(buf, a);
#endif
}

// Propagate type information to the compiler. Without this the compiler may
// assume the required alignment of uint32_t (4 bytes) and add alignment hints
// to the memory access.
//
// This is used for functions operating on uint8_t which wish to load or store
// 4 values at a time but which may not be on 4-byte boundaries.
static INLINE void uint32_to_mem(uint8_t *buf, uint32_t a) {
  memcpy(buf, &a, 4);
}
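
// For example, writing "*(uint32_t *)buf = a;" instead would both violate
// strict aliasing and let the compiler assume a 4-byte-aligned destination;
// the 4-byte memcpy above typically compiles to a single unaligned store.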

// Load 2 sets of 4 bytes when alignment is not guaranteed.
static INLINE uint8x8_t load_unaligned_u8(const uint8_t *buf,
                                          ptrdiff_t stride) {
  uint32_t a;
  uint32x2_t a_u32 = vdup_n_u32(0);
  if (stride == 4) return vld1_u8(buf);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vset_lane_u32(a, a_u32, 0);
  memcpy(&a, buf, 4);
  a_u32 = vset_lane_u32(a, a_u32, 1);
  return vreinterpret_u8_u32(a_u32);
}
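
// Illustrative example: on a little-endian target, load_unaligned_u8(buf,
// stride) returns a vector whose lanes 0-3 hold buf[0..3] and whose lanes 4-7
// hold buf[stride..stride + 3].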

// Store 2 sets of 4 bytes when alignment is not guaranteed.
static INLINE void store_unaligned_u8(uint8_t *buf, ptrdiff_t stride,
                                      const uint8x8_t a) {
  const uint32x2_t a_u32 = vreinterpret_u32_u8(a);
  if (stride == 4) {
    vst1_u8(buf, a);
    return;
  }
  uint32_to_mem(buf, vget_lane_u32(a_u32, 0));
  buf += stride;
  uint32_to_mem(buf, vget_lane_u32(a_u32, 1));
}

// Load 4 sets of 4 bytes when alignment is not guaranteed.
static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf,
                                            ptrdiff_t stride) {
  uint32_t a;
  uint32x4_t a_u32 = vdupq_n_u32(0);
  if (stride == 4) return vld1q_u8(buf);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 0);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 1);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 2);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 3);
  return vreinterpretq_u8_u32(a_u32);
}

// Store 4 sets of 4 bytes when alignment is not guaranteed.
static INLINE void store_unaligned_u8q(uint8_t *buf, ptrdiff_t stride,
                                       const uint8x16_t a) {
  const uint32x4_t a_u32 = vreinterpretq_u32_u8(a);
  if (stride == 4) {
    vst1q_u8(buf, a);
    return;
  }
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 0));
  buf += stride;
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 1));
  buf += stride;
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 2));
  buf += stride;
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 3));
}

// Load 2 sets of 4 bytes when alignment is guaranteed.
static INLINE uint8x8_t load_u8(const uint8_t *buf, ptrdiff_t stride) {
  uint32x2_t a = vdup_n_u32(0);

  assert(!((intptr_t)buf % sizeof(uint32_t)));
  assert(!(stride % sizeof(uint32_t)));

  a = vld1_lane_u32((const uint32_t *)buf, a, 0);
  buf += stride;
  a = vld1_lane_u32((const uint32_t *)buf, a, 1);
  return vreinterpret_u8_u32(a);
}
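
// The casts to uint32_t * above let vld1_lane_u32 assume a 4-byte-aligned
// address, so callers must pass a buf and stride that are multiples of 4; the
// asserts document that contract. The same applies to store_u8() below.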

// Store 2 sets of 4 bytes when alignment is guaranteed.
static INLINE void store_u8(uint8_t *buf, ptrdiff_t stride, const uint8x8_t a) {
  uint32x2_t a_u32 = vreinterpret_u32_u8(a);

  assert(!((intptr_t)buf % sizeof(uint32_t)));
  assert(!(stride % sizeof(uint32_t)));

  vst1_lane_u32((uint32_t *)buf, a_u32, 0);
  buf += stride;
  vst1_lane_u32((uint32_t *)buf, a_u32, 1);
}

#endif  // VPX_VPX_DSP_ARM_MEM_NEON_H_