/*
 * Copyright (c), Recep Aslantas.
 *
 * MIT License (MIT), http://opensource.org/licenses/MIT
 * Full license can be found in the LICENSE file
 */

#ifndef cglm_mat4_neon_h
#define cglm_mat4_neon_h
#if defined(__ARM_NEON_FP)

#include "../../common.h"
#include "../intrin.h"

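/*
 * Scales every element of m in place: s is broadcast into a vector once and
 * each of the four columns is multiplied by it.  Illustrative scalar
 * equivalent (for reference only, not part of this header):
 *
 *   for (int i = 0; i < 4; i++)
 *     for (int j = 0; j < 4; j++)
 *       m[i][j] *= s;
 */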
CGLM_INLINE
void
glm_mat4_scale_neon(mat4 m, float s) {
  float32x4_t v0;

  v0 = vdupq_n_f32(s);

  vst1q_f32(m[0], vmulq_f32(vld1q_f32(m[0]), v0));
  vst1q_f32(m[1], vmulq_f32(vld1q_f32(m[1]), v0));
  vst1q_f32(m[2], vmulq_f32(vld1q_f32(m[2]), v0));
  vst1q_f32(m[3], vmulq_f32(vld1q_f32(m[3]), v0));
}

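/*
 * Transposes m into dest.  vld4q_f32 is a de-interleaving load with a stride
 * of four floats, so vmat.val[k] receives elements k, k+4, k+8, k+12 of m,
 * i.e. row k of the column-major matrix, which is exactly column k of the
 * transpose.
 */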
CGLM_INLINE
void
glm_mat4_transp_neon(mat4 m, mat4 dest) {
  float32x4x4_t vmat;

  vmat = vld4q_f32(m[0]);

  vst1q_f32(dest[0], vmat.val[0]);
  vst1q_f32(dest[1], vmat.val[1]);
  vst1q_f32(dest[2], vmat.val[2]);
  vst1q_f32(dest[3], vmat.val[3]);
}

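/*
 * Computes dest = m1 * m2 in column-major storage: column j of dest is the
 * linear combination of the columns of m1 weighted by column j of m2, so
 * each scalar of m2 is splatted across a vector and folded in with a fused
 * multiply-add.  Illustrative scalar equivalent (for reference only,
 * assumes dest aliases neither m1 nor m2):
 *
 *   for (int j = 0; j < 4; j++)
 *     for (int i = 0; i < 4; i++)
 *       dest[j][i] = m1[0][i] * m2[j][0] + m1[1][i] * m2[j][1]
 *                  + m1[2][i] * m2[j][2] + m1[3][i] * m2[j][3];
 */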
CGLM_INLINE
void
glm_mat4_mul_neon(mat4 m1, mat4 m2, mat4 dest) {
  /* D = R * L (Column-Major) */

  glmm_128 l, r0, r1, r2, r3, v0, v1, v2, v3;

  l  = glmm_load(m1[0]);
  r0 = glmm_load(m2[0]);
  r1 = glmm_load(m2[1]);
  r2 = glmm_load(m2[2]);
  r3 = glmm_load(m2[3]);

  v0 = vmulq_f32(glmm_splat_x(r0), l);
  v1 = vmulq_f32(glmm_splat_x(r1), l);
  v2 = vmulq_f32(glmm_splat_x(r2), l);
  v3 = vmulq_f32(glmm_splat_x(r3), l);

  l  = glmm_load(m1[1]);
  v0 = glmm_fmadd(glmm_splat_y(r0), l, v0);
  v1 = glmm_fmadd(glmm_splat_y(r1), l, v1);
  v2 = glmm_fmadd(glmm_splat_y(r2), l, v2);
  v3 = glmm_fmadd(glmm_splat_y(r3), l, v3);

  l  = glmm_load(m1[2]);
  v0 = glmm_fmadd(glmm_splat_z(r0), l, v0);
  v1 = glmm_fmadd(glmm_splat_z(r1), l, v1);
  v2 = glmm_fmadd(glmm_splat_z(r2), l, v2);
  v3 = glmm_fmadd(glmm_splat_z(r3), l, v3);

  l  = glmm_load(m1[3]);
  v0 = glmm_fmadd(glmm_splat_w(r0), l, v0);
  v1 = glmm_fmadd(glmm_splat_w(r1), l, v1);
  v2 = glmm_fmadd(glmm_splat_w(r2), l, v2);
  v3 = glmm_fmadd(glmm_splat_w(r3), l, v3);

  glmm_store(dest[0], v0);
  glmm_store(dest[1], v1);
  glmm_store(dest[2], v2);
  glmm_store(dest[3], v3);
}

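/*
 * Computes dest = m * v for a column vector v: the columns of m are weighted
 * by the components of v.  vmulq_lane_f32 / vmlaq_lane_f32 multiply a whole
 * column by a single lane of v, so no separate splat is needed.
 * Illustrative scalar equivalent (for reference only, assumes dest does not
 * alias v):
 *
 *   for (int i = 0; i < 4; i++)
 *     dest[i] = m[0][i] * v[0] + m[1][i] * v[1]
 *             + m[2][i] * v[2] + m[3][i] * v[3];
 */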
CGLM_INLINE
void
glm_mat4_mulv_neon(mat4 m, vec4 v, vec4 dest) {
  float32x4_t l0, l1, l2, l3;
  float32x2_t vlo, vhi;

  l0  = vld1q_f32(m[0]);
  l1  = vld1q_f32(m[1]);
  l2  = vld1q_f32(m[2]);
  l3  = vld1q_f32(m[3]);

  vlo = vld1_f32(&v[0]);
  vhi = vld1_f32(&v[2]);

  l0  = vmulq_lane_f32(l0, vlo, 0);
  l0  = vmlaq_lane_f32(l0, l1, vlo, 1);
  l0  = vmlaq_lane_f32(l0, l2, vhi, 0);
  l0  = vmlaq_lane_f32(l0, l3, vhi, 1);

  vst1q_f32(dest, l0);
}

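/*
 * Computes the determinant via the cofactor expansion spelled out in the
 * comments inside the function: since det(A) = det(At), the column-major
 * data is read as its transpose, the six shared 2x2 sub-determinants
 * t[0]..t[5] are packed into two vectors (x0 and x1), the four cofactor
 * terms get their alternating signs from the x3 XOR mask, and a final
 * horizontal add produces the scalar result.
 */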
CGLM_INLINE
float
glm_mat4_det_neon(mat4 mat) {
  float32x4_t   r0, r1, r2, r3, x0, x1, x2;
  float32x2_t   ij, op, mn, kl, nn, mm, jj, ii, gh, ef, t12, t34;
  float32x4x2_t a1;
  float32x4_t   x3 = { 0.f, -0.f, 0.f, -0.f };

  /* 127 <- 0, [square] det(A) = det(At) */
  r0 = glmm_load(mat[0]);              /* d c b a */
  r1 = vrev64q_f32(glmm_load(mat[1])); /* g h e f */
  r2 = vrev64q_f32(glmm_load(mat[2])); /* l k i j */
  r3 = vrev64q_f32(glmm_load(mat[3])); /* o p m n */

  gh = vget_high_f32(r1);
  ef = vget_low_f32(r1);
  kl = vget_high_f32(r2);
  ij = vget_low_f32(r2);
  op = vget_high_f32(r3);
  mn = vget_low_f32(r3);
  mm = vdup_lane_f32(mn, 1);
  nn = vdup_lane_f32(mn, 0);
  ii = vdup_lane_f32(ij, 1);
  jj = vdup_lane_f32(ij, 0);

  /*
   t[1] = j * p - n * l;
   t[2] = j * o - n * k;
   t[3] = i * p - m * l;
   t[4] = i * o - m * k;
   */
  x0 = glmm_fnmadd(vcombine_f32(kl, kl), vcombine_f32(nn, mm),
                   vmulq_f32(vcombine_f32(op, op), vcombine_f32(jj, ii)));

  t12 = vget_low_f32(x0);
  t34 = vget_high_f32(x0);

  /* 1 3 1 3 2 4 2 4 */
  a1 = vuzpq_f32(x0, x0);

  /*
   t[0] = k * p - o * l;
   t[0] = k * p - o * l;
   t[5] = i * n - m * j;
   t[5] = i * n - m * j;
   */
  x1 = glmm_fnmadd(vcombine_f32(vdup_lane_f32(kl, 0), jj),
                   vcombine_f32(vdup_lane_f32(op, 1), mm),
                   vmulq_f32(vcombine_f32(vdup_lane_f32(op, 0), nn),
                             vcombine_f32(vdup_lane_f32(kl, 1), ii)));

  /*
     a * (f * t[0] - g * t[1] + h * t[2])
   - b * (e * t[0] - g * t[3] + h * t[4])
   + c * (e * t[1] - f * t[3] + h * t[5])
   - d * (e * t[2] - f * t[4] + g * t[5])
   */
  x2 = glmm_fnmadd(vcombine_f32(vdup_lane_f32(gh, 1), vdup_lane_f32(ef, 0)),
                   vcombine_f32(vget_low_f32(a1.val[0]), t34),
                   vmulq_f32(vcombine_f32(ef, vdup_lane_f32(ef, 1)),
                             vcombine_f32(vget_low_f32(x1), t12)));

  x2 = glmm_fmadd(vcombine_f32(vdup_lane_f32(gh, 0), gh),
                  vcombine_f32(vget_low_f32(a1.val[1]), vget_high_f32(x1)), x2);

  x2 = glmm_xor(x2, x3);

  return glmm_hadd(vmulq_f32(x2, r0));
}

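/*
 * Inverts mat into dest with the classical cofactor (adjugate) expansion:
 * each tN vector packs the matching 2x2 sub-determinant of the cofactor
 * groups t1/t2/t3 named in the comments below, the cofactor columns v0..v3
 * get their alternating signs from the x8/x9 XOR masks, the determinant is
 * recovered by dotting the first lanes of v0..v3 with the first column of
 * mat, and every column is divided by it.  There is no singularity check:
 * a zero determinant produces inf/nan entries.
 */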
CGLM_INLINE
void
glm_mat4_inv_neon(mat4 mat, mat4 dest) {
  float32x4_t   r0, r1, r2, r3,
                v0, v1, v2, v3,
                t0, t1, t2, t3, t4, t5,
                x0, x1, x2, x3, x4, x5, x6, x7, x8;
  float32x4x2_t a1;
  float32x2_t   lp, ko, hg, jn, im, fe, ae, bf, cg, dh;
  float32x4_t   x9 = { -0.f, 0.f, -0.f, 0.f };

  x8 = vrev64q_f32(x9);

  /* 127 <- 0 */
  r0 = glmm_load(mat[0]); /* d c b a */
  r1 = glmm_load(mat[1]); /* h g f e */
  r2 = glmm_load(mat[2]); /* l k j i */
  r3 = glmm_load(mat[3]); /* p o n m */

  /* l p k o, j n i m */
  a1  = vzipq_f32(r3, r2);

  jn  = vget_high_f32(a1.val[0]);
  im  = vget_low_f32(a1.val[0]);
  lp  = vget_high_f32(a1.val[1]);
  ko  = vget_low_f32(a1.val[1]);
  hg  = vget_high_f32(r1);

  x1  = vcombine_f32(vdup_lane_f32(lp, 0), lp);                   /* l p p p */
  x2  = vcombine_f32(vdup_lane_f32(ko, 0), ko);                   /* k o o o */
  x0  = vcombine_f32(vdup_lane_f32(lp, 1), vdup_lane_f32(hg, 1)); /* h h l l */
  x3  = vcombine_f32(vdup_lane_f32(ko, 1), vdup_lane_f32(hg, 0)); /* g g k k */

  /* t1[0] = k * p - o * l;
     t1[0] = k * p - o * l;
     t2[0] = g * p - o * h;
     t3[0] = g * l - k * h; */
  t0 = glmm_fnmadd(x2, x0, vmulq_f32(x3, x1));

  fe = vget_low_f32(r1);
  x4 = vcombine_f32(vdup_lane_f32(jn, 0), jn);                   /* j n n n */
  x5 = vcombine_f32(vdup_lane_f32(jn, 1), vdup_lane_f32(fe, 1)); /* f f j j */

  /* t1[1] = j * p - n * l;
     t1[1] = j * p - n * l;
     t2[1] = f * p - n * h;
     t3[1] = f * l - j * h; */
  t1 = glmm_fnmadd(x4, x0, vmulq_f32(x5, x1));

  /* t1[2] = j * o - n * k;
     t1[2] = j * o - n * k;
     t2[2] = f * o - n * g;
     t3[2] = f * k - j * g; */
  t2 = glmm_fnmadd(x4, x3, vmulq_f32(x5, x2));

  x6 = vcombine_f32(vdup_lane_f32(im, 1), vdup_lane_f32(fe, 0)); /* e e i i */
  x7 = vcombine_f32(vdup_lane_f32(im, 0), im);                   /* i m m m */

  /* t1[3] = i * p - m * l;
     t1[3] = i * p - m * l;
     t2[3] = e * p - m * h;
     t3[3] = e * l - i * h; */
  t3 = glmm_fnmadd(x7, x0, vmulq_f32(x6, x1));

  /* t1[4] = i * o - m * k;
     t1[4] = i * o - m * k;
     t2[4] = e * o - m * g;
     t3[4] = e * k - i * g; */
  t4 = glmm_fnmadd(x7, x3, vmulq_f32(x6, x2));

  /* t1[5] = i * n - m * j;
     t1[5] = i * n - m * j;
     t2[5] = e * n - m * f;
     t3[5] = e * j - i * f; */
  t5 = glmm_fnmadd(x7, x5, vmulq_f32(x6, x4));

  /* h d f b, g c e a */
  a1 = vtrnq_f32(r0, r1);

  x4 = vrev64q_f32(a1.val[0]); /* c g a e */
  x5 = vrev64q_f32(a1.val[1]); /* d h b f */

  ae = vget_low_f32(x4);
  cg = vget_high_f32(x4);
  bf = vget_low_f32(x5);
  dh = vget_high_f32(x5);

  x0 = vcombine_f32(ae, vdup_lane_f32(ae, 1)); /* a a a e */
  x1 = vcombine_f32(bf, vdup_lane_f32(bf, 1)); /* b b b f */
  x2 = vcombine_f32(cg, vdup_lane_f32(cg, 1)); /* c c c g */
  x3 = vcombine_f32(dh, vdup_lane_f32(dh, 1)); /* d d d h */

  /*
   dest[0][0] =  f * t1[0] - g * t1[1] + h * t1[2];
   dest[0][1] =-(b * t1[0] - c * t1[1] + d * t1[2]);
   dest[0][2] =  b * t2[0] - c * t2[1] + d * t2[2];
   dest[0][3] =-(b * t3[0] - c * t3[1] + d * t3[2]); */
  v0 = glmm_xor(glmm_fmadd(x3, t2, glmm_fnmadd(x2, t1, vmulq_f32(x1, t0))), x8);

  /*
   dest[2][0] =  e * t1[1] - f * t1[3] + h * t1[5];
   dest[2][1] =-(a * t1[1] - b * t1[3] + d * t1[5]);
   dest[2][2] =  a * t2[1] - b * t2[3] + d * t2[5];
   dest[2][3] =-(a * t3[1] - b * t3[3] + d * t3[5]); */
  v2 = glmm_xor(glmm_fmadd(x3, t5, glmm_fnmadd(x1, t3, vmulq_f32(x0, t1))), x8);

  /*
   dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]);
   dest[1][1] =  a * t1[0] - c * t1[3] + d * t1[4];
   dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]);
   dest[1][3] =  a * t3[0] - c * t3[3] + d * t3[4]; */
  v1 = glmm_xor(glmm_fmadd(x3, t4, glmm_fnmadd(x2, t3, vmulq_f32(x0, t0))), x9);

  /*
   dest[3][0] =-(e * t1[2] - f * t1[4] + g * t1[5]);
   dest[3][1] =  a * t1[2] - b * t1[4] + c * t1[5];
   dest[3][2] =-(a * t2[2] - b * t2[4] + c * t2[5]);
   dest[3][3] =  a * t3[2] - b * t3[4] + c * t3[5]; */
  v3 = glmm_xor(glmm_fmadd(x2, t5, glmm_fnmadd(x1, t4, vmulq_f32(x0, t2))), x9);

  /* determinant */
  x0 = vcombine_f32(vget_low_f32(vzipq_f32(v0, v1).val[0]),
                    vget_low_f32(vzipq_f32(v2, v3).val[0]));

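  /* alternative kept below (commented out): broadcast 1.0f / det once and
     multiply each column, instead of dividing every column by det */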
  /*
  x0 = glmm_div(glmm_set1(1.0f), glmm_vhadd(vmulq_f32(x0, r0)));

  glmm_store(dest[0], vmulq_f32(v0, x0));
  glmm_store(dest[1], vmulq_f32(v1, x0));
  glmm_store(dest[2], vmulq_f32(v2, x0));
  glmm_store(dest[3], vmulq_f32(v3, x0));
  */

  x0 = glmm_vhadd(vmulq_f32(x0, r0));

  glmm_store(dest[0], glmm_div(v0, x0));
  glmm_store(dest[1], glmm_div(v1, x0));
  glmm_store(dest[2], glmm_div(v2, x0));
  glmm_store(dest[3], glmm_div(v3, x0));
}

#endif
#endif /* cglm_mat4_neon_h */
318