Home
last modified time | relevance | path

Searched refs:glmm_fmadd (Results 1 – 13 of 13) sorted by relevance

/dports/math/cglm/cglm-0.8.4/include/cglm/simd/neon/
affine.h:34  v0 = glmm_fmadd(glmm_splat_y(r0), l, v0); in glm_mul_neon()
35 v1 = glmm_fmadd(glmm_splat_y(r1), l, v1); in glm_mul_neon()
36 v2 = glmm_fmadd(glmm_splat_y(r2), l, v2); in glm_mul_neon()
37 v3 = glmm_fmadd(glmm_splat_y(r3), l, v3); in glm_mul_neon()
40 v0 = glmm_fmadd(glmm_splat_z(r0), l, v0); in glm_mul_neon()
41 v1 = glmm_fmadd(glmm_splat_z(r1), l, v1); in glm_mul_neon()
42 v2 = glmm_fmadd(glmm_splat_z(r2), l, v2); in glm_mul_neon()
43 v3 = glmm_fmadd(glmm_splat_z(r3), l, v3); in glm_mul_neon()
70 v0 = glmm_fmadd(glmm_splat_y(r0), l, v0); in glm_mul_rot_neon()
71 v1 = glmm_fmadd(glmm_splat_y(r1), l, v1); in glm_mul_rot_neon()
[all …]
mat4.h:60  v0 = glmm_fmadd(glmm_splat_y(r0), l, v0); in glm_mat4_mul_neon()
61 v1 = glmm_fmadd(glmm_splat_y(r1), l, v1); in glm_mat4_mul_neon()
62 v2 = glmm_fmadd(glmm_splat_y(r2), l, v2); in glm_mat4_mul_neon()
63 v3 = glmm_fmadd(glmm_splat_y(r3), l, v3); in glm_mat4_mul_neon()
66 v0 = glmm_fmadd(glmm_splat_z(r0), l, v0); in glm_mat4_mul_neon()
67 v1 = glmm_fmadd(glmm_splat_z(r1), l, v1); in glm_mat4_mul_neon()
68 v2 = glmm_fmadd(glmm_splat_z(r2), l, v2); in glm_mat4_mul_neon()
69 v3 = glmm_fmadd(glmm_splat_z(r3), l, v3); in glm_mat4_mul_neon()
72 v0 = glmm_fmadd(glmm_splat_w(r0), l, v0); in glm_mat4_mul_neon()
73 v1 = glmm_fmadd(glmm_splat_w(r1), l, v1); in glm_mat4_mul_neon()
[all …]
quat.h:45  r = glmm_fmadd(glmm_xor(x, s3), vcombine_f32(qh, ql), r); in glm_quat_mul_neon()
47 r = glmm_fmadd(glmm_xor(y, s2), vcombine_f32(vget_high_f32(xq), in glm_quat_mul_neon()
50 r = glmm_fmadd(glmm_xor(z, s1), vcombine_f32(ql, qh), r); in glm_quat_mul_neon()
mat2.h:37  x0 = glmm_fmadd(vcombine_f32(ba, ba), a1.val[0], in glm_mat2_mul_neon()
/dports/math/cglm/cglm-0.8.4/include/cglm/simd/sse2/
affine.h:33  v0 = glmm_fmadd(glmm_splat_y(r0), l, v0); in glm_mul_sse2()
34 v1 = glmm_fmadd(glmm_splat_y(r1), l, v1); in glm_mul_sse2()
35 v2 = glmm_fmadd(glmm_splat_y(r2), l, v2); in glm_mul_sse2()
36 v3 = glmm_fmadd(glmm_splat_y(r3), l, v3); in glm_mul_sse2()
39 v0 = glmm_fmadd(glmm_splat_z(r0), l, v0); in glm_mul_sse2()
40 v1 = glmm_fmadd(glmm_splat_z(r1), l, v1); in glm_mul_sse2()
41 v2 = glmm_fmadd(glmm_splat_z(r2), l, v2); in glm_mul_sse2()
42 v3 = glmm_fmadd(glmm_splat_z(r3), l, v3); in glm_mul_sse2()
45 v3 = glmm_fmadd(glmm_splat_w(r3), l, v3); in glm_mul_sse2()
70 v0 = glmm_fmadd(glmm_splat_y(r0), l, v0); in glm_mul_rot_sse2()
[all …]
mat4.h:106  x1 = glmm_fmadd(m2, v2, x1); in glm_mat4_mulv_sse2()
107 x1 = glmm_fmadd(m1, v1, x1); in glm_mat4_mulv_sse2()
108 x1 = glmm_fmadd(m0, v0, x1); in glm_mat4_mulv_sse2()
251 v3 = glmm_fmadd(x2, t5, v3); in glm_mat4_inv_fast_sse2()
252 v0 = glmm_fmadd(x3, t2, v0); in glm_mat4_inv_fast_sse2()
253 v2 = glmm_fmadd(x3, t5, v2); in glm_mat4_inv_fast_sse2()
254 v1 = glmm_fmadd(x3, t4, v1); in glm_mat4_inv_fast_sse2()
387 v3 = glmm_fmadd(x2, t5, v3); in glm_mat4_inv_sse2()
388 v0 = glmm_fmadd(x3, t2, v0); in glm_mat4_inv_sse2()
389 v2 = glmm_fmadd(x3, t5, v2); in glm_mat4_inv_sse2()
[all …]
mat3.h:48  x0 = glmm_fmadd(x4, x6, x0); in glm_mat3_mul_sse2()
49 x1 = glmm_fmadd(x5, x2, x1); in glm_mat3_mul_sse2()
58 x0 = glmm_fmadd(x3, x5, x0); in glm_mat3_mul_sse2()
59 x1 = glmm_fmadd(x2, x4, x1); in glm_mat3_mul_sse2()
quat.h:46  r = glmm_fmadd(x, x1, r); in glm_quat_mul_sse2()
47 r = glmm_fmadd(y, x2, r); in glm_quat_mul_sse2()
48 r = glmm_fmadd(z, x3, r); in glm_quat_mul_sse2()
mat2.h:34  x0 = glmm_fmadd(x0, x3, _mm_mul_ps(x2, x4)); in glm_mat2_mul_sse2()
/dports/math/cglm/cglm-0.8.4/include/cglm/
affine.h:62  glmm_fmadd(m0, glmm_set1(v[0]), in glm_translate()
63 glmm_fmadd(m1, glmm_set1(v[1]), in glm_translate()
64 glmm_fmadd(m2, glmm_set1(v[2]), m3)))); in glm_translate()
99 glmm_store(m[3], glmm_fmadd(glmm_load(m[0]), glmm_set1(x), glmm_load(m[3]))); in glm_translate_x()
117 glmm_store(m[3], glmm_fmadd(glmm_load(m[1]), glmm_set1(y), glmm_load(m[3]))); in glm_translate_y()
135 glmm_store(m[3], glmm_fmadd(glmm_load(m[2]), glmm_set1(z), glmm_load(m[3]))); in glm_translate_z()
vec4.h:572  glmm_store(dest, glmm_fmadd(glmm_load(a), glmm_load(b), glmm_load(dest))); in glm_vec4_muladd()
594 glmm_store(dest, glmm_fmadd(glmm_load(a), glmm_set1(s), glmm_load(dest))); in glm_vec4_muladds()
/dports/math/cglm/cglm-0.8.4/include/cglm/simd/
arm.h:138  glmm_fmadd(float32x4_t a, float32x4_t b, float32x4_t c) { in glmm_fmadd() — function definition (NEON)
169 return vsubq_f32(vdupq_n_f32(0.0f), glmm_fmadd(a, b, c)); in glmm_fnmsub()
x86.h:225  glmm_fmadd(__m128 a, __m128 b, __m128 c) { in glmm_fmadd() — function definition (SSE2)