first commit

2024-08-24 00:47:58 -04:00
commit f6ef842a28
400 changed files with 43479 additions and 0 deletions

include/cglm/simd/arm.h (new file)

@@ -0,0 +1,173 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_simd_arm_h
#define cglm_simd_arm_h
#include "intrin.h"
#ifdef CGLM_SIMD_ARM
#if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || defined(_M_ARM64EC) || defined(__aarch64__)
# define CGLM_ARM64 1
#endif
#define glmm_load(p) vld1q_f32(p)
#define glmm_store(p, a) vst1q_f32(p, a)
#define glmm_set1(x) vdupq_n_f32(x)
#define glmm_128 float32x4_t
#define glmm_splat_x(x) vdupq_lane_f32(vget_low_f32(x), 0)
#define glmm_splat_y(x) vdupq_lane_f32(vget_low_f32(x), 1)
#define glmm_splat_z(x) vdupq_lane_f32(vget_high_f32(x), 0)
#define glmm_splat_w(x) vdupq_lane_f32(vget_high_f32(x), 1)
#define glmm_xor(a, b) \
vreinterpretq_f32_s32(veorq_s32(vreinterpretq_s32_f32(a), \
vreinterpretq_s32_f32(b)))
#define glmm_swplane(v) vextq_f32(v, v, 2)
#define glmm_low(x) vget_low_f32(x)
#define glmm_high(x) vget_high_f32(x)
#define glmm_combine_ll(x, y) vcombine_f32(vget_low_f32(x), vget_low_f32(y))
#define glmm_combine_hl(x, y) vcombine_f32(vget_high_f32(x), vget_low_f32(y))
#define glmm_combine_lh(x, y) vcombine_f32(vget_low_f32(x), vget_high_f32(y))
#define glmm_combine_hh(x, y) vcombine_f32(vget_high_f32(x), vget_high_f32(y))
static inline
float32x4_t
glmm_abs(float32x4_t v) {
return vabsq_f32(v);
}
static inline
float32x4_t
glmm_vhadd(float32x4_t v) {
return vaddq_f32(vaddq_f32(glmm_splat_x(v), glmm_splat_y(v)),
vaddq_f32(glmm_splat_z(v), glmm_splat_w(v)));
/*
this seems slower:
v = vaddq_f32(v, vrev64q_f32(v));
return vaddq_f32(v, vcombine_f32(vget_high_f32(v), vget_low_f32(v)));
*/
}
static inline
float
glmm_hadd(float32x4_t v) {
#if CGLM_ARM64
return vaddvq_f32(v);
#else
v = vaddq_f32(v, vrev64q_f32(v));
v = vaddq_f32(v, vcombine_f32(vget_high_f32(v), vget_low_f32(v)));
return vgetq_lane_f32(v, 0);
#endif
}
static inline
float
glmm_hmin(float32x4_t v) {
float32x2_t t;
t = vpmin_f32(vget_low_f32(v), vget_high_f32(v));
t = vpmin_f32(t, t);
return vget_lane_f32(t, 0);
}
static inline
float
glmm_hmax(float32x4_t v) {
float32x2_t t;
t = vpmax_f32(vget_low_f32(v), vget_high_f32(v));
t = vpmax_f32(t, t);
return vget_lane_f32(t, 0);
}
static inline
float
glmm_dot(float32x4_t a, float32x4_t b) {
return glmm_hadd(vmulq_f32(a, b));
}
static inline
float
glmm_norm(float32x4_t a) {
return sqrtf(glmm_dot(a, a));
}
static inline
float
glmm_norm2(float32x4_t a) {
return glmm_dot(a, a);
}
static inline
float
glmm_norm_one(float32x4_t a) {
return glmm_hadd(glmm_abs(a));
}
static inline
float
glmm_norm_inf(float32x4_t a) {
return glmm_hmax(glmm_abs(a));
}
static inline
float32x4_t
glmm_div(float32x4_t a, float32x4_t b) {
#if CGLM_ARM64
return vdivq_f32(a, b);
#else
/* 2 iterations of Newton-Raphson refinement of reciprocal */
float32x4_t r0, r1;
r0 = vrecpeq_f32(b);
r1 = vrecpsq_f32(r0, b);
r0 = vmulq_f32(r1, r0);
r1 = vrecpsq_f32(r0, b);
r0 = vmulq_f32(r1, r0);
return vmulq_f32(a, r0);
#endif
}
static inline
float32x4_t
glmm_fmadd(float32x4_t a, float32x4_t b, float32x4_t c) {
#if CGLM_ARM64
  return vfmaq_f32(c, a, b); /* TODO: vfmaq_f32 measured slower than vmlaq_f32 here; investigate */
#else
return vmlaq_f32(c, a, b);
#endif
}
static inline
float32x4_t
glmm_fnmadd(float32x4_t a, float32x4_t b, float32x4_t c) {
#if CGLM_ARM64
return vfmsq_f32(c, a, b);
#else
return vmlsq_f32(c, a, b);
#endif
}
static inline
float32x4_t
glmm_fmsub(float32x4_t a, float32x4_t b, float32x4_t c) {
  /* a * b - c, consistent with the x86 glmm_fmsub: negated fnmadd */
  return vnegq_f32(glmm_fnmadd(a, b, c));
}
static inline
float32x4_t
glmm_fnmsub(float32x4_t a, float32x4_t b, float32x4_t c) {
return vsubq_f32(vdupq_n_f32(0.0f), glmm_fmadd(a, b, c));
}
#endif
#endif /* cglm_simd_arm_h */
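
A quick way to sanity-check the NEON helpers above is a tiny standalone driver. A minimal sketch, assuming a NEON-enabled (or AArch64) build with cglm's include/ directory on the include path; the test values are arbitrary:

#include <stdio.h>
#include <cglm/common.h>      /* CGLM_* helper macros */
#include <cglm/simd/intrin.h> /* glmm_* helpers and path selection */

int main(void) {
#ifdef CGLM_SIMD_ARM
  float a[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  float b[4] = {5.0f, 6.0f, 7.0f, 8.0f};
  /* 1*5 + 2*6 + 3*7 + 4*8 = 70 */
  printf("dot  = %f\n", glmm_dot(glmm_load(a), glmm_load(b)));
  printf("hmax = %f\n", glmm_hmax(glmm_load(b))); /* 8 */
#endif
  return 0;
}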


@@ -0,0 +1,66 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_affine_mat_avx_h
#define cglm_affine_mat_avx_h
#ifdef __AVX__
#include "../../common.h"
#include "../intrin.h"
#include <immintrin.h>
CGLM_INLINE
void
glm_mul_avx(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */
__m256 y0, y1, y2, y3, y4, y5, y6, y7, y8, y9;
y0 = glmm_load256(m2[0]); /* h g f e d c b a */
y1 = glmm_load256(m2[2]); /* p o n m l k j i */
y2 = glmm_load256(m1[0]); /* h g f e d c b a */
y3 = glmm_load256(m1[2]); /* p o n m l k j i */
/* 0x03: 0b00000011 */
y4 = _mm256_permute2f128_ps(y2, y2, 0x03); /* d c b a h g f e */
y5 = _mm256_permute2f128_ps(y3, y3, 0x03); /* l k j i p o n m */
/* f f f f a a a a */
/* h h h h c c c c */
/* e e e e b b b b */
/* g g g g d d d d */
y6 = _mm256_permutevar_ps(y0, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
y7 = _mm256_permutevar_ps(y0, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2));
y8 = _mm256_permutevar_ps(y0, _mm256_set_epi32(0, 0, 0, 0, 1, 1, 1, 1));
y9 = _mm256_permutevar_ps(y0, _mm256_set_epi32(2, 2, 2, 2, 3, 3, 3, 3));
glmm_store256(dest[0],
_mm256_add_ps(_mm256_add_ps(_mm256_mul_ps(y2, y6),
_mm256_mul_ps(y3, y7)),
_mm256_add_ps(_mm256_mul_ps(y4, y8),
_mm256_mul_ps(y5, y9))));
/* n n n n i i i i */
/* p p p p k k k k */
/* m m m m j j j j */
/* o o o o l l l l */
y6 = _mm256_permutevar_ps(y1, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
y7 = _mm256_permutevar_ps(y1, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2));
y8 = _mm256_permutevar_ps(y1, _mm256_set_epi32(0, 0, 0, 0, 1, 1, 1, 1));
y9 = _mm256_permutevar_ps(y1, _mm256_set_epi32(2, 2, 2, 2, 3, 3, 3, 3));
glmm_store256(dest[2],
_mm256_add_ps(_mm256_add_ps(_mm256_mul_ps(y2, y6),
_mm256_mul_ps(y3, y7)),
_mm256_add_ps(_mm256_mul_ps(y4, y8),
_mm256_mul_ps(y5, y9))));
}
#endif
#endif /* cglm_affine_mat_avx_h */


@@ -0,0 +1,76 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_mat_simd_avx_h
#define cglm_mat_simd_avx_h
#ifdef __AVX__
#include "../../common.h"
#include "../intrin.h"
#include <immintrin.h>
CGLM_INLINE
void
glm_mat4_scale_avx(mat4 m, float s) {
__m256 y0;
y0 = _mm256_set1_ps(s);
glmm_store256(m[0], _mm256_mul_ps(y0, glmm_load256(m[0])));
glmm_store256(m[2], _mm256_mul_ps(y0, glmm_load256(m[2])));
}
CGLM_INLINE
void
glm_mat4_mul_avx(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */
__m256 y0, y1, y2, y3, y4, y5, y6, y7, y8, y9;
y0 = glmm_load256(m2[0]); /* h g f e d c b a */
y1 = glmm_load256(m2[2]); /* p o n m l k j i */
y2 = glmm_load256(m1[0]); /* h g f e d c b a */
y3 = glmm_load256(m1[2]); /* p o n m l k j i */
/* 0x03: 0b00000011 */
y4 = _mm256_permute2f128_ps(y2, y2, 0x03); /* d c b a h g f e */
y5 = _mm256_permute2f128_ps(y3, y3, 0x03); /* l k j i p o n m */
/* f f f f a a a a */
/* h h h h c c c c */
/* e e e e b b b b */
/* g g g g d d d d */
y6 = _mm256_permutevar_ps(y0, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
y7 = _mm256_permutevar_ps(y0, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2));
y8 = _mm256_permutevar_ps(y0, _mm256_set_epi32(0, 0, 0, 0, 1, 1, 1, 1));
y9 = _mm256_permutevar_ps(y0, _mm256_set_epi32(2, 2, 2, 2, 3, 3, 3, 3));
glmm_store256(dest[0],
_mm256_add_ps(_mm256_add_ps(_mm256_mul_ps(y2, y6),
_mm256_mul_ps(y3, y7)),
_mm256_add_ps(_mm256_mul_ps(y4, y8),
_mm256_mul_ps(y5, y9))));
/* n n n n i i i i */
/* p p p p k k k k */
/* m m m m j j j j */
/* o o o o l l l l */
y6 = _mm256_permutevar_ps(y1, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
y7 = _mm256_permutevar_ps(y1, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2));
y8 = _mm256_permutevar_ps(y1, _mm256_set_epi32(0, 0, 0, 0, 1, 1, 1, 1));
y9 = _mm256_permutevar_ps(y1, _mm256_set_epi32(2, 2, 2, 2, 3, 3, 3, 3));
glmm_store256(dest[2],
_mm256_add_ps(_mm256_add_ps(_mm256_mul_ps(y2, y6),
_mm256_mul_ps(y3, y7)),
_mm256_add_ps(_mm256_mul_ps(y4, y8),
_mm256_mul_ps(y5, y9))));
}
#endif
#endif /* cglm_mat_simd_avx_h */
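
Both AVX kernels above compute the plain column-major product dest = m1 * m2, two columns per 256-bit register. A scalar reference is handy for validating them; mat4_mul_ref below is a hypothetical helper written for this note, not part of the commit:

#include <stdio.h>

/* dest = m1 * m2, column-major: dest[c][r] = sum over k of m1[k][r] * m2[c][k] */
static void
mat4_mul_ref(const float m1[4][4], const float m2[4][4], float dest[4][4]) {
  int c, r, k;
  for (c = 0; c < 4; c++)
    for (r = 0; r < 4; r++) {
      dest[c][r] = 0.0f;
      for (k = 0; k < 4; k++)
        dest[c][r] += m1[k][r] * m2[c][k];
    }
}

int main(void) {
  float i4[4][4] = {{1,0,0,0},{0,1,0,0},{0,0,1,0},{0,0,0,1}};
  float d[4][4];
  mat4_mul_ref(i4, i4, d);
  printf("%f\n", d[0][0]); /* 1.0: identity * identity */
  return 0;
}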


@@ -0,0 +1,90 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_intrin_h
#define cglm_intrin_h
#if defined( _MSC_VER )
# if (defined(_M_AMD64) || defined(_M_X64)) || _M_IX86_FP == 2
# ifndef __SSE2__
# define __SSE2__
# endif
# elif _M_IX86_FP == 1
# ifndef __SSE__
# define __SSE__
# endif
# endif
/* do not use alignment for older visual studio versions */
# if _MSC_VER < 1913 /* Visual Studio 2017 version 15.6 */
# define CGLM_ALL_UNALIGNED
# endif
#endif
#if defined( __SSE__ ) || defined( __SSE2__ )
# include <xmmintrin.h>
# include <emmintrin.h>
# define CGLM_SSE_FP 1
# ifndef CGLM_SIMD_x86
# define CGLM_SIMD_x86
# endif
#endif
#if defined(__SSE3__)
# include <pmmintrin.h>
# ifndef CGLM_SIMD_x86
# define CGLM_SIMD_x86
# endif
#endif
#if defined(__SSE4_1__)
# include <smmintrin.h>
# ifndef CGLM_SIMD_x86
# define CGLM_SIMD_x86
# endif
#endif
#if defined(__SSE4_2__)
# include <nmmintrin.h>
# ifndef CGLM_SIMD_x86
# define CGLM_SIMD_x86
# endif
#endif
#ifdef __AVX__
# include <immintrin.h>
# define CGLM_AVX_FP 1
# ifndef CGLM_SIMD_x86
# define CGLM_SIMD_x86
# endif
#endif
/* ARM Neon */
#if defined(__ARM_NEON)
# include <arm_neon.h>
# if defined(__ARM_NEON_FP)
# define CGLM_NEON_FP 1
# ifndef CGLM_SIMD_ARM
# define CGLM_SIMD_ARM
# endif
# endif
#endif
#if defined(CGLM_SIMD_x86) || defined(CGLM_NEON_FP)
# ifndef CGLM_SIMD
# define CGLM_SIMD
# endif
#endif
#if defined(CGLM_SIMD_x86)
# include "x86.h"
#endif
#if defined(CGLM_SIMD_ARM)
# include "arm.h"
#endif
#endif /* cglm_intrin_h */
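
Because the dispatch above happens entirely at compile time, the path a given build takes can be reported with a few preprocessor checks. A minimal sketch using only macros defined in this header, assuming cglm's include/ is on the include path:

#include <stdio.h>
#include <cglm/common.h>
#include <cglm/simd/intrin.h>

int main(void) {
#if defined(CGLM_AVX_FP)
  puts("x86 AVX path (SSE also enabled)");
#elif defined(CGLM_SSE_FP)
  puts("x86 SSE/SSE2 path");
#elif defined(CGLM_NEON_FP)
  puts("ARM NEON path");
#else
  puts("no SIMD path: scalar fallback");
#endif
  return 0;
}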


@@ -0,0 +1,121 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_affine_neon_h
#define cglm_affine_neon_h
#if defined(__ARM_NEON_FP)
#include "../../common.h"
#include "../intrin.h"
CGLM_INLINE
void
glm_mul_neon(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */
glmm_128 l, r0, r1, r2, r3, v0, v1, v2, v3;
l = glmm_load(m1[0]);
r0 = glmm_load(m2[0]);
r1 = glmm_load(m2[1]);
r2 = glmm_load(m2[2]);
r3 = glmm_load(m2[3]);
v0 = vmulq_f32(glmm_splat_x(r0), l);
v1 = vmulq_f32(glmm_splat_x(r1), l);
v2 = vmulq_f32(glmm_splat_x(r2), l);
v3 = vmulq_f32(glmm_splat_x(r3), l);
l = glmm_load(m1[1]);
v0 = glmm_fmadd(glmm_splat_y(r0), l, v0);
v1 = glmm_fmadd(glmm_splat_y(r1), l, v1);
v2 = glmm_fmadd(glmm_splat_y(r2), l, v2);
v3 = glmm_fmadd(glmm_splat_y(r3), l, v3);
l = glmm_load(m1[2]);
v0 = glmm_fmadd(glmm_splat_z(r0), l, v0);
v1 = glmm_fmadd(glmm_splat_z(r1), l, v1);
v2 = glmm_fmadd(glmm_splat_z(r2), l, v2);
v3 = glmm_fmadd(glmm_splat_z(r3), l, v3);
v3 = glmm_fmadd(glmm_splat_w(r3), glmm_load(m1[3]), v3);
glmm_store(dest[0], v0);
glmm_store(dest[1], v1);
glmm_store(dest[2], v2);
glmm_store(dest[3], v3);
}
CGLM_INLINE
void
glm_mul_rot_neon(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */
glmm_128 l, r0, r1, r2, v0, v1, v2;
l = glmm_load(m1[0]);
r0 = glmm_load(m2[0]);
r1 = glmm_load(m2[1]);
r2 = glmm_load(m2[2]);
v0 = vmulq_f32(glmm_splat_x(r0), l);
v1 = vmulq_f32(glmm_splat_x(r1), l);
v2 = vmulq_f32(glmm_splat_x(r2), l);
l = glmm_load(m1[1]);
v0 = glmm_fmadd(glmm_splat_y(r0), l, v0);
v1 = glmm_fmadd(glmm_splat_y(r1), l, v1);
v2 = glmm_fmadd(glmm_splat_y(r2), l, v2);
l = glmm_load(m1[2]);
v0 = glmm_fmadd(glmm_splat_z(r0), l, v0);
v1 = glmm_fmadd(glmm_splat_z(r1), l, v1);
v2 = glmm_fmadd(glmm_splat_z(r2), l, v2);
glmm_store(dest[0], v0);
glmm_store(dest[1], v1);
glmm_store(dest[2], v2);
glmm_store(dest[3], glmm_load(m1[3]));
}
CGLM_INLINE
void
glm_inv_tr_neon(mat4 mat) {
float32x4x4_t vmat;
glmm_128 r0, r1, r2, x0;
vmat = vld4q_f32(mat[0]);
r0 = vmat.val[0];
r1 = vmat.val[1];
r2 = vmat.val[2];
x0 = glmm_fmadd(r0, glmm_splat_w(r0),
glmm_fmadd(r1, glmm_splat_w(r1),
vmulq_f32(r2, glmm_splat_w(r2))));
x0 = vnegq_f32(x0);
glmm_store(mat[0], r0);
glmm_store(mat[1], r1);
glmm_store(mat[2], r2);
glmm_store(mat[3], x0);
mat[0][3] = 0.0f;
mat[1][3] = 0.0f;
mat[2][3] = 0.0f;
mat[3][3] = 1.0f;
/* TODO: ?
zo = vget_high_f32(r3);
vst1_lane_f32(&mat[0][3], zo, 0);
vst1_lane_f32(&mat[1][3], zo, 0);
vst1_lane_f32(&mat[2][3], zo, 0);
vst1_lane_f32(&mat[3][3], zo, 1);
*/
}
#endif
#endif /* cglm_affine_neon_h */
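
glm_inv_tr_neon relies on the input being a rigid transform: an orthonormal rotation R in the upper-left 3x3 block plus a translation t, whose inverse is [R^T | -R^T t]. A hedged scalar reference for validation; inv_tr_ref is a hypothetical helper, not part of the commit:

#include <stdio.h>
#include <string.h>

/* assumes column-major m[col][row] with an orthonormal 3x3 block */
static void
inv_tr_ref(float m[4][4]) {
  float r[4][4], t[3];
  int i, j;
  memcpy(r, m, sizeof(r));
  for (i = 0; i < 3; i++)
    t[i] = m[3][i];
  for (i = 0; i < 3; i++)          /* transpose the rotation block */
    for (j = 0; j < 3; j++)
      m[i][j] = r[j][i];
  for (i = 0; i < 3; i++)          /* translation becomes -R^T * t */
    m[3][i] = -(r[i][0] * t[0] + r[i][1] * t[1] + r[i][2] * t[2]);
}

int main(void) {
  /* pure translation by (1, 2, 3): the inverse translates by (-1, -2, -3) */
  float m[4][4] = {{1,0,0,0},{0,1,0,0},{0,0,1,0},{1,2,3,1}};
  inv_tr_ref(m);
  printf("%f %f %f\n", m[3][0], m[3][1], m[3][2]); /* -1 -2 -3 */
  return 0;
}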


@@ -0,0 +1,44 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_mat2_neon_h
#define cglm_mat2_neon_h
#if defined(__ARM_NEON_FP)
#include "../../common.h"
#include "../intrin.h"
CGLM_INLINE
void
glm_mat2_mul_neon(mat2 m1, mat2 m2, mat2 dest) {
float32x4x2_t a1;
glmm_128 x0, x1, x2;
float32x2_t dc, ba;
x1 = glmm_load(m1[0]); /* d c b a */
x2 = glmm_load(m2[0]); /* h g f e */
dc = vget_high_f32(x1);
ba = vget_low_f32(x1);
/* g g e e, h h f f */
a1 = vtrnq_f32(x2, x2);
/*
dest[0][0] = a * e + c * f;
dest[0][1] = b * e + d * f;
dest[1][0] = a * g + c * h;
dest[1][1] = b * g + d * h;
*/
x0 = glmm_fmadd(vcombine_f32(ba, ba), a1.val[0],
vmulq_f32(vcombine_f32(dc, dc), a1.val[1]));
glmm_store(dest[0], x0);
}
#endif
#endif /* cglm_mat2_neon_h */
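
Since a whole mat2 fits in one 128-bit register as {a, b, c, d} (column-major), the multiply above is a single fmadd. An illustrative driver through cglm's public glm_mat2_mul, which reaches this kernel when NEON is enabled; the values are arbitrary:

#include <stdio.h>
#include <cglm/cglm.h>

int main(void) {
  /* column-major: m[0] = {a, b}, m[1] = {c, d} */
  mat2 m1 = {{1.0f, 2.0f}, {3.0f, 4.0f}};
  mat2 m2 = {{5.0f, 6.0f}, {7.0f, 8.0f}};
  mat2 out;
  glm_mat2_mul(m1, m2, out);
  printf("%f %f %f %f\n",
         out[0][0], out[0][1], out[1][0], out[1][1]); /* 23 34 31 46 */
  return 0;
}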


@@ -0,0 +1,317 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_mat4_neon_h
#define cglm_mat4_neon_h
#if defined(__ARM_NEON_FP)
#include "../../common.h"
#include "../intrin.h"
CGLM_INLINE
void
glm_mat4_scale_neon(mat4 m, float s) {
float32x4_t v0;
v0 = vdupq_n_f32(s);
vst1q_f32(m[0], vmulq_f32(vld1q_f32(m[0]), v0));
vst1q_f32(m[1], vmulq_f32(vld1q_f32(m[1]), v0));
vst1q_f32(m[2], vmulq_f32(vld1q_f32(m[2]), v0));
vst1q_f32(m[3], vmulq_f32(vld1q_f32(m[3]), v0));
}
CGLM_INLINE
void
glm_mat4_transp_neon(mat4 m, mat4 dest) {
float32x4x4_t vmat;
vmat = vld4q_f32(m[0]);
vst1q_f32(dest[0], vmat.val[0]);
vst1q_f32(dest[1], vmat.val[1]);
vst1q_f32(dest[2], vmat.val[2]);
vst1q_f32(dest[3], vmat.val[3]);
}
CGLM_INLINE
void
glm_mat4_mul_neon(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */
glmm_128 l, r0, r1, r2, r3, v0, v1, v2, v3;
l = glmm_load(m1[0]);
r0 = glmm_load(m2[0]);
r1 = glmm_load(m2[1]);
r2 = glmm_load(m2[2]);
r3 = glmm_load(m2[3]);
v0 = vmulq_f32(glmm_splat_x(r0), l);
v1 = vmulq_f32(glmm_splat_x(r1), l);
v2 = vmulq_f32(glmm_splat_x(r2), l);
v3 = vmulq_f32(glmm_splat_x(r3), l);
l = glmm_load(m1[1]);
v0 = glmm_fmadd(glmm_splat_y(r0), l, v0);
v1 = glmm_fmadd(glmm_splat_y(r1), l, v1);
v2 = glmm_fmadd(glmm_splat_y(r2), l, v2);
v3 = glmm_fmadd(glmm_splat_y(r3), l, v3);
l = glmm_load(m1[2]);
v0 = glmm_fmadd(glmm_splat_z(r0), l, v0);
v1 = glmm_fmadd(glmm_splat_z(r1), l, v1);
v2 = glmm_fmadd(glmm_splat_z(r2), l, v2);
v3 = glmm_fmadd(glmm_splat_z(r3), l, v3);
l = glmm_load(m1[3]);
v0 = glmm_fmadd(glmm_splat_w(r0), l, v0);
v1 = glmm_fmadd(glmm_splat_w(r1), l, v1);
v2 = glmm_fmadd(glmm_splat_w(r2), l, v2);
v3 = glmm_fmadd(glmm_splat_w(r3), l, v3);
glmm_store(dest[0], v0);
glmm_store(dest[1], v1);
glmm_store(dest[2], v2);
glmm_store(dest[3], v3);
}
CGLM_INLINE
void
glm_mat4_mulv_neon(mat4 m, vec4 v, vec4 dest) {
float32x4_t l0, l1, l2, l3;
float32x2_t vlo, vhi;
l0 = vld1q_f32(m[0]);
l1 = vld1q_f32(m[1]);
l2 = vld1q_f32(m[2]);
l3 = vld1q_f32(m[3]);
vlo = vld1_f32(&v[0]);
vhi = vld1_f32(&v[2]);
l0 = vmulq_lane_f32(l0, vlo, 0);
l0 = vmlaq_lane_f32(l0, l1, vlo, 1);
l0 = vmlaq_lane_f32(l0, l2, vhi, 0);
l0 = vmlaq_lane_f32(l0, l3, vhi, 1);
vst1q_f32(dest, l0);
}
CGLM_INLINE
float
glm_mat4_det_neon(mat4 mat) {
float32x4_t r0, r1, r2, r3, x0, x1, x2;
float32x2_t ij, op, mn, kl, nn, mm, jj, ii, gh, ef, t12, t34;
float32x4x2_t a1;
float32x4_t x3 = { 0.f, -0.f, 0.f, -0.f };
/* 127 <- 0, [square] det(A) = det(At) */
r0 = glmm_load(mat[0]); /* d c b a */
r1 = vrev64q_f32(glmm_load(mat[1])); /* g h e f */
r2 = vrev64q_f32(glmm_load(mat[2])); /* l k i j */
r3 = vrev64q_f32(glmm_load(mat[3])); /* o p m n */
gh = vget_high_f32(r1);
ef = vget_low_f32(r1);
kl = vget_high_f32(r2);
ij = vget_low_f32(r2);
op = vget_high_f32(r3);
mn = vget_low_f32(r3);
mm = vdup_lane_f32(mn, 1);
nn = vdup_lane_f32(mn, 0);
ii = vdup_lane_f32(ij, 1);
jj = vdup_lane_f32(ij, 0);
/*
t[1] = j * p - n * l;
t[2] = j * o - n * k;
t[3] = i * p - m * l;
t[4] = i * o - m * k;
*/
x0 = glmm_fnmadd(vcombine_f32(kl, kl), vcombine_f32(nn, mm),
vmulq_f32(vcombine_f32(op, op), vcombine_f32(jj, ii)));
t12 = vget_low_f32(x0);
t34 = vget_high_f32(x0);
/* 1 3 1 3 2 4 2 4 */
a1 = vuzpq_f32(x0, x0);
/*
t[0] = k * p - o * l;
t[0] = k * p - o * l;
t[5] = i * n - m * j;
t[5] = i * n - m * j;
*/
x1 = glmm_fnmadd(vcombine_f32(vdup_lane_f32(kl, 0), jj),
vcombine_f32(vdup_lane_f32(op, 1), mm),
vmulq_f32(vcombine_f32(vdup_lane_f32(op, 0), nn),
vcombine_f32(vdup_lane_f32(kl, 1), ii)));
/*
a * (f * t[0] - g * t[1] + h * t[2])
- b * (e * t[0] - g * t[3] + h * t[4])
+ c * (e * t[1] - f * t[3] + h * t[5])
- d * (e * t[2] - f * t[4] + g * t[5])
*/
x2 = glmm_fnmadd(vcombine_f32(vdup_lane_f32(gh, 1), vdup_lane_f32(ef, 0)),
vcombine_f32(vget_low_f32(a1.val[0]), t34),
vmulq_f32(vcombine_f32(ef, vdup_lane_f32(ef, 1)),
vcombine_f32(vget_low_f32(x1), t12)));
x2 = glmm_fmadd(vcombine_f32(vdup_lane_f32(gh, 0), gh),
vcombine_f32(vget_low_f32(a1.val[1]), vget_high_f32(x1)), x2);
x2 = glmm_xor(x2, x3);
return glmm_hadd(vmulq_f32(x2, r0));
}
CGLM_INLINE
void
glm_mat4_inv_neon(mat4 mat, mat4 dest) {
float32x4_t r0, r1, r2, r3,
v0, v1, v2, v3,
t0, t1, t2, t3, t4, t5,
x0, x1, x2, x3, x4, x5, x6, x7, x8;
float32x4x2_t a1;
float32x2_t lp, ko, hg, jn, im, fe, ae, bf, cg, dh;
float32x4_t x9 = { -0.f, 0.f, -0.f, 0.f };
x8 = vrev64q_f32(x9);
/* 127 <- 0 */
r0 = glmm_load(mat[0]); /* d c b a */
r1 = glmm_load(mat[1]); /* h g f e */
r2 = glmm_load(mat[2]); /* l k j i */
r3 = glmm_load(mat[3]); /* p o n m */
/* l p k o, j n i m */
a1 = vzipq_f32(r3, r2);
jn = vget_high_f32(a1.val[0]);
im = vget_low_f32(a1.val[0]);
lp = vget_high_f32(a1.val[1]);
ko = vget_low_f32(a1.val[1]);
hg = vget_high_f32(r1);
x1 = vcombine_f32(vdup_lane_f32(lp, 0), lp); /* l p p p */
x2 = vcombine_f32(vdup_lane_f32(ko, 0), ko); /* k o o o */
x0 = vcombine_f32(vdup_lane_f32(lp, 1), vdup_lane_f32(hg, 1)); /* h h l l */
x3 = vcombine_f32(vdup_lane_f32(ko, 1), vdup_lane_f32(hg, 0)); /* g g k k */
/* t1[0] = k * p - o * l;
t1[0] = k * p - o * l;
t2[0] = g * p - o * h;
t3[0] = g * l - k * h; */
t0 = glmm_fnmadd(x2, x0, vmulq_f32(x3, x1));
fe = vget_low_f32(r1);
x4 = vcombine_f32(vdup_lane_f32(jn, 0), jn); /* j n n n */
x5 = vcombine_f32(vdup_lane_f32(jn, 1), vdup_lane_f32(fe, 1)); /* f f j j */
/* t1[1] = j * p - n * l;
t1[1] = j * p - n * l;
t2[1] = f * p - n * h;
t3[1] = f * l - j * h; */
t1 = glmm_fnmadd(x4, x0, vmulq_f32(x5, x1));
/* t1[2] = j * o - n * k
t1[2] = j * o - n * k;
t2[2] = f * o - n * g;
t3[2] = f * k - j * g; */
t2 = glmm_fnmadd(x4, x3, vmulq_f32(x5, x2));
x6 = vcombine_f32(vdup_lane_f32(im, 1), vdup_lane_f32(fe, 0)); /* e e i i */
x7 = vcombine_f32(vdup_lane_f32(im, 0), im); /* i m m m */
/* t1[3] = i * p - m * l;
t1[3] = i * p - m * l;
t2[3] = e * p - m * h;
t3[3] = e * l - i * h; */
t3 = glmm_fnmadd(x7, x0, vmulq_f32(x6, x1));
/* t1[4] = i * o - m * k;
t1[4] = i * o - m * k;
t2[4] = e * o - m * g;
t3[4] = e * k - i * g; */
t4 = glmm_fnmadd(x7, x3, vmulq_f32(x6, x2));
/* t1[5] = i * n - m * j;
t1[5] = i * n - m * j;
t2[5] = e * n - m * f;
t3[5] = e * j - i * f; */
t5 = glmm_fnmadd(x7, x5, vmulq_f32(x6, x4));
/* h d f b, g c e a */
a1 = vtrnq_f32(r0, r1);
x4 = vrev64q_f32(a1.val[0]); /* c g a e */
x5 = vrev64q_f32(a1.val[1]); /* d h b f */
ae = vget_low_f32(x4);
cg = vget_high_f32(x4);
bf = vget_low_f32(x5);
dh = vget_high_f32(x5);
x0 = vcombine_f32(ae, vdup_lane_f32(ae, 1)); /* a a a e */
x1 = vcombine_f32(bf, vdup_lane_f32(bf, 1)); /* b b b f */
x2 = vcombine_f32(cg, vdup_lane_f32(cg, 1)); /* c c c g */
x3 = vcombine_f32(dh, vdup_lane_f32(dh, 1)); /* d d d h */
/*
dest[0][0] = f * t1[0] - g * t1[1] + h * t1[2];
dest[0][1] =-(b * t1[0] - c * t1[1] + d * t1[2]);
dest[0][2] = b * t2[0] - c * t2[1] + d * t2[2];
dest[0][3] =-(b * t3[0] - c * t3[1] + d * t3[2]); */
v0 = glmm_xor(glmm_fmadd(x3, t2, glmm_fnmadd(x2, t1, vmulq_f32(x1, t0))), x8);
/*
dest[2][0] = e * t1[1] - f * t1[3] + h * t1[5];
dest[2][1] =-(a * t1[1] - b * t1[3] + d * t1[5]);
dest[2][2] = a * t2[1] - b * t2[3] + d * t2[5];
dest[2][3] =-(a * t3[1] - b * t3[3] + d * t3[5]);*/
v2 = glmm_xor(glmm_fmadd(x3, t5, glmm_fnmadd(x1, t3, vmulq_f32(x0, t1))), x8);
/*
dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]);
dest[1][1] = a * t1[0] - c * t1[3] + d * t1[4];
dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]);
dest[1][3] = a * t3[0] - c * t3[3] + d * t3[4]; */
v1 = glmm_xor(glmm_fmadd(x3, t4, glmm_fnmadd(x2, t3, vmulq_f32(x0, t0))), x9);
/*
dest[3][0] =-(e * t1[2] - f * t1[4] + g * t1[5]);
dest[3][1] = a * t1[2] - b * t1[4] + c * t1[5];
dest[3][2] =-(a * t2[2] - b * t2[4] + c * t2[5]);
dest[3][3] = a * t3[2] - b * t3[4] + c * t3[5]; */
v3 = glmm_xor(glmm_fmadd(x2, t5, glmm_fnmadd(x1, t4, vmulq_f32(x0, t2))), x9);
/* determinant */
x0 = vcombine_f32(vget_low_f32(vzipq_f32(v0, v1).val[0]),
vget_low_f32(vzipq_f32(v2, v3).val[0]));
/*
x0 = glmm_div(glmm_set1(1.0f), glmm_vhadd(vmulq_f32(x0, r0)));
glmm_store(dest[0], vmulq_f32(v0, x0));
glmm_store(dest[1], vmulq_f32(v1, x0));
glmm_store(dest[2], vmulq_f32(v2, x0));
glmm_store(dest[3], vmulq_f32(v3, x0));
*/
x0 = glmm_vhadd(vmulq_f32(x0, r0));
glmm_store(dest[0], glmm_div(v0, x0));
glmm_store(dest[1], glmm_div(v1, x0));
glmm_store(dest[2], glmm_div(v2, x0));
glmm_store(dest[3], glmm_div(v3, x0));
}
#endif
#endif /* cglm_mat4_neon_h */
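
A practical way to exercise the determinant and inverse kernels above is a round trip: m * inv(m) should come back as the identity to within float precision. An illustrative driver through cglm's public API; the transform values are arbitrary:

#include <stdio.h>
#include <cglm/cglm.h>

int main(void) {
  mat4 m = GLM_MAT4_IDENTITY_INIT, inv, r;
  glm_translate(m, (vec3){1.0f, 2.0f, 3.0f});
  glm_rotate(m, 0.785398f, (vec3){0.0f, 1.0f, 0.0f}); /* ~45 deg about Y */
  glm_mat4_inv(m, inv);
  glm_mat4_mul(m, inv, r);
  printf("r[0][0] = %f, r[3][3] = %f\n", r[0][0], r[3][3]); /* ~1, ~1 */
  return 0;
}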


@@ -0,0 +1,56 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_quat_neon_h
#define cglm_quat_neon_h
#if defined(__ARM_NEON_FP)
#include "../../common.h"
#include "../intrin.h"
CGLM_INLINE
void
glm_quat_mul_neon(versor p, versor q, versor dest) {
/*
+ (a1 b2 + b1 a2 + c1 d2 - d1 c2)i
+ (a1 c2 - b1 d2 + c1 a2 + d1 b2)j
+ (a1 d2 + b1 c2 - c1 b2 + d1 a2)k
  a1 a2 - b1 b2 - c1 c2 - d1 d2
*/
glmm_128 xp, xq, xqr, r, x, y, z, s2, s3;
glmm_128 s1 = {-0.f, 0.f, 0.f, -0.f};
float32x2_t qh, ql;
xp = glmm_load(p); /* 3 2 1 0 */
xq = glmm_load(q);
r = vmulq_f32(glmm_splat_w(xp), xq);
x = glmm_splat_x(xp);
y = glmm_splat_y(xp);
z = glmm_splat_z(xp);
ql = vget_high_f32(s1);
s3 = vcombine_f32(ql, ql);
s2 = vzipq_f32(s3, s3).val[0];
xqr = vrev64q_f32(xq);
qh = vget_high_f32(xqr);
ql = vget_low_f32(xqr);
r = glmm_fmadd(glmm_xor(x, s3), vcombine_f32(qh, ql), r);
r = glmm_fmadd(glmm_xor(y, s2), vcombine_f32(vget_high_f32(xq),
vget_low_f32(xq)), r);
r = glmm_fmadd(glmm_xor(z, s1), vcombine_f32(ql, qh), r);
glmm_store(dest, r);
}
#endif
#endif /* cglm_quat_neon_h */


@@ -0,0 +1,115 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_affine_mat_sse2_h
#define cglm_affine_mat_sse2_h
#if defined( __SSE__ ) || defined( __SSE2__ )
#include "../../common.h"
#include "../intrin.h"
CGLM_INLINE
void
glm_mul_sse2(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */
glmm_128 l, r0, r1, r2, r3, v0, v1, v2, v3;
l = glmm_load(m1[0]);
r0 = glmm_load(m2[0]);
r1 = glmm_load(m2[1]);
r2 = glmm_load(m2[2]);
r3 = glmm_load(m2[3]);
v0 = _mm_mul_ps(glmm_splat_x(r0), l);
v1 = _mm_mul_ps(glmm_splat_x(r1), l);
v2 = _mm_mul_ps(glmm_splat_x(r2), l);
v3 = _mm_mul_ps(glmm_splat_x(r3), l);
l = glmm_load(m1[1]);
v0 = glmm_fmadd(glmm_splat_y(r0), l, v0);
v1 = glmm_fmadd(glmm_splat_y(r1), l, v1);
v2 = glmm_fmadd(glmm_splat_y(r2), l, v2);
v3 = glmm_fmadd(glmm_splat_y(r3), l, v3);
l = glmm_load(m1[2]);
v0 = glmm_fmadd(glmm_splat_z(r0), l, v0);
v1 = glmm_fmadd(glmm_splat_z(r1), l, v1);
v2 = glmm_fmadd(glmm_splat_z(r2), l, v2);
v3 = glmm_fmadd(glmm_splat_z(r3), l, v3);
l = glmm_load(m1[3]);
v3 = glmm_fmadd(glmm_splat_w(r3), l, v3);
glmm_store(dest[0], v0);
glmm_store(dest[1], v1);
glmm_store(dest[2], v2);
glmm_store(dest[3], v3);
}
CGLM_INLINE
void
glm_mul_rot_sse2(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */
glmm_128 l, r0, r1, r2, v0, v1, v2;
l = glmm_load(m1[0]);
r0 = glmm_load(m2[0]);
r1 = glmm_load(m2[1]);
r2 = glmm_load(m2[2]);
v0 = _mm_mul_ps(glmm_splat_x(r0), l);
v1 = _mm_mul_ps(glmm_splat_x(r1), l);
v2 = _mm_mul_ps(glmm_splat_x(r2), l);
l = glmm_load(m1[1]);
v0 = glmm_fmadd(glmm_splat_y(r0), l, v0);
v1 = glmm_fmadd(glmm_splat_y(r1), l, v1);
v2 = glmm_fmadd(glmm_splat_y(r2), l, v2);
l = glmm_load(m1[2]);
v0 = glmm_fmadd(glmm_splat_z(r0), l, v0);
v1 = glmm_fmadd(glmm_splat_z(r1), l, v1);
v2 = glmm_fmadd(glmm_splat_z(r2), l, v2);
glmm_store(dest[0], v0);
glmm_store(dest[1], v1);
glmm_store(dest[2], v2);
glmm_store(dest[3], glmm_load(m1[3]));
}
CGLM_INLINE
void
glm_inv_tr_sse2(mat4 mat) {
__m128 r0, r1, r2, r3, x0, x1, x2, x3, x4, x5;
r0 = glmm_load(mat[0]);
r1 = glmm_load(mat[1]);
r2 = glmm_load(mat[2]);
r3 = glmm_load(mat[3]);
x1 = _mm_set_ps(1.0f, 0.0f, 0.0f, 0.0f);
_MM_TRANSPOSE4_PS(r0, r1, r2, x1);
x2 = glmm_shuff1(r3, 0, 0, 0, 0);
x3 = glmm_shuff1(r3, 1, 1, 1, 1);
x4 = glmm_shuff1(r3, 2, 2, 2, 2);
x5 = _mm_set1_ps(-0.f);
x0 = glmm_fmadd(r0, x2, glmm_fmadd(r1, x3, _mm_mul_ps(r2, x4)));
x0 = _mm_xor_ps(x0, x5);
x0 = _mm_add_ps(x0, x1);
glmm_store(mat[0], r0);
glmm_store(mat[1], r1);
glmm_store(mat[2], r2);
glmm_store(mat[3], x0);
}
#endif
#endif /* cglm_affine_mat_sse2_h */
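
Unlike the full glm_mat4_mul_sse2, glm_mul_sse2 above assumes both operands are affine (last row 0 0 0 1): columns 0-2 skip the w-term fmadd entirely, and only the translation column touches m1[3]. A hedged scalar sketch of that shortcut; mul_affine_ref is a hypothetical helper, and it additionally assumes m2[3][3] == 1:

#include <stdio.h>

static void
mul_affine_ref(const float m1[4][4], const float m2[4][4], float d[4][4]) {
  int c, r;
  for (c = 0; c < 4; c++)
    for (r = 0; r < 4; r++)
      d[c][r] = m1[0][r] * m2[c][0]
              + m1[1][r] * m2[c][1]
              + m1[2][r] * m2[c][2]
              + (c == 3 ? m1[3][r] : 0.0f); /* m2[c][3] is 1 only for c == 3 */
}

int main(void) {
  float t1[4][4] = {{1,0,0,0},{0,1,0,0},{0,0,1,0},{1,2,3,1}}; /* translate (1,2,3) */
  float t2[4][4] = {{1,0,0,0},{0,1,0,0},{0,0,1,0},{4,5,6,1}}; /* translate (4,5,6) */
  float d[4][4];
  mul_affine_ref(t1, t2, d);
  printf("%f %f %f\n", d[3][0], d[3][1], d[3][2]); /* 5 7 9 */
  return 0;
}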


@@ -0,0 +1,48 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_mat2_sse_h
#define cglm_mat2_sse_h
#if defined( __SSE__ ) || defined( __SSE2__ )
#include "../../common.h"
#include "../intrin.h"
CGLM_INLINE
void
glm_mat2_mul_sse2(mat2 m1, mat2 m2, mat2 dest) {
__m128 x0, x1, x2, x3, x4;
x1 = glmm_load(m1[0]); /* d c b a */
x2 = glmm_load(m2[0]); /* h g f e */
x3 = glmm_shuff1(x2, 2, 2, 0, 0);
x4 = glmm_shuff1(x2, 3, 3, 1, 1);
x0 = _mm_movelh_ps(x1, x1);
x2 = _mm_movehl_ps(x1, x1);
/*
dest[0][0] = a * e + c * f;
dest[0][1] = b * e + d * f;
dest[1][0] = a * g + c * h;
dest[1][1] = b * g + d * h;
*/
x0 = glmm_fmadd(x0, x3, _mm_mul_ps(x2, x4));
glmm_store(dest[0], x0);
}
CGLM_INLINE
void
glm_mat2_transp_sse2(mat2 m, mat2 dest) {
/* d c b a */
/* d b c a */
glmm_store(dest[0], glmm_shuff1(glmm_load(m[0]), 3, 1, 2, 0));
}
#endif
#endif /* cglm_mat2_sse_h */


@@ -0,0 +1,76 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_mat3_sse_h
#define cglm_mat3_sse_h
#if defined( __SSE__ ) || defined( __SSE2__ )
#include "../../common.h"
#include "../intrin.h"
CGLM_INLINE
void
glm_mat3_mul_sse2(mat3 m1, mat3 m2, mat3 dest) {
__m128 l0, l1, l2, r0, r1, r2, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9;
  l0 = _mm_loadu_ps(m1[0]);     /* a10 a02 a01 a00: floats 0..3 of m1 */
  l1 = _mm_loadu_ps(&m1[1][1]); /* a21 a20 a12 a11: floats 4..7 of m1 */
  r0 = _mm_loadu_ps(m2[0]);     /* b10 b02 b01 b00 */
  r1 = _mm_loadu_ps(&m2[1][1]); /* b21 b20 b12 b11 */
x8 = glmm_shuff1(l0, 0, 2, 1, 0); /* a00 a02 a01 a00 */
x1 = glmm_shuff1(r0, 3, 0, 0, 0); /* b10 b00 b00 b00 */
x2 = _mm_shuffle_ps(l0, l1, _MM_SHUFFLE(1, 0, 3, 3)); /* a12 a11 a10 a10 */
x3 = _mm_shuffle_ps(r0, r1, _MM_SHUFFLE(2, 0, 3, 1)); /* b20 b11 b10 b01 */
x0 = _mm_mul_ps(x8, x1);
x6 = glmm_shuff1(l0, 1, 0, 2, 1); /* a01 a00 a02 a01 */
x7 = glmm_shuff1(x3, 3, 3, 1, 1); /* b20 b20 b10 b10 */
l2 = _mm_load_ss(&m1[2][2]);
r2 = _mm_load_ss(&m2[2][2]);
x1 = _mm_mul_ps(x6, x7);
l2 = glmm_shuff1(l2, 0, 0, 1, 0); /* a22 a22 0.f a22 */
r2 = glmm_shuff1(r2, 0, 0, 1, 0); /* b22 b22 0.f b22 */
x4 = glmm_shuff1(x2, 0, 3, 2, 0); /* a10 a12 a11 a10 */
x5 = glmm_shuff1(x2, 2, 0, 3, 2); /* a11 a10 a12 a11 */
x6 = glmm_shuff1(x3, 2, 0, 0, 0); /* b11 b01 b01 b01 */
x2 = glmm_shuff1(r1, 3, 3, 0, 0); /* b21 b21 b11 b11 */
x8 = _mm_unpackhi_ps(x8, x4); /* a10 a00 a12 a02 */
x9 = _mm_unpackhi_ps(x7, x2); /* b21 b20 b21 b20 */
x0 = glmm_fmadd(x4, x6, x0);
x1 = glmm_fmadd(x5, x2, x1);
x2 = _mm_movehl_ps(l2, l1); /* a22 a22 a21 a20 */
x3 = glmm_shuff1(x2, 0, 2, 1, 0); /* a20 a22 a21 a20 */
x2 = glmm_shuff1(x2, 1, 0, 2, 1); /* a21 a20 a22 a21 */
x4 = _mm_shuffle_ps(r0, r1, _MM_SHUFFLE(1, 1, 2, 2)); /* b12 b12 b02 b02 */
x5 = glmm_shuff1(x4, 3, 0, 0, 0); /* b12 b02 b02 b02 */
x4 = _mm_movehl_ps(r2, x4); /* b22 b22 b12 b12 */
x0 = glmm_fmadd(x3, x5, x0);
x1 = glmm_fmadd(x2, x4, x1);
  /*
   Dot product: dest[2][2] = a02 * b20
                           + a12 * b21
                           + a22 * b22
                           + 0.f * 0.f */
x2 = _mm_movelh_ps(x8, l2); /* 0.f a22 a12 a02 */
x3 = _mm_movelh_ps(x9, r2); /* 0.f b22 b21 b20 */
x2 = glmm_vdots(x2, x3);
_mm_storeu_ps(&dest[0][0], x0);
_mm_storeu_ps(&dest[1][1], x1);
_mm_store_ss (&dest[2][2], x2);
}
#endif
#endif /* cglm_mat3_sse_h */
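
mat3 is 9 floats, so the kernel above works from two unaligned 4-float loads per operand (floats 0..3 and 4..7) plus one _mm_load_ss for the ninth element. An illustrative driver through cglm's public glm_mat3_mul, which reaches this kernel when SSE2 is enabled:

#include <stdio.h>
#include <cglm/cglm.h>

int main(void) {
  mat3 m1 = GLM_MAT3_IDENTITY_INIT, m2 = GLM_MAT3_IDENTITY_INIT, out;
  m1[0][0] = 2.0f;
  m2[2][2] = 3.0f;
  glm_mat3_mul(m1, m2, out);
  printf("%f %f\n", out[0][0], out[2][2]); /* 2 3 */
  return 0;
}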


@@ -0,0 +1,434 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_mat_sse_h
#define cglm_mat_sse_h
#if defined( __SSE__ ) || defined( __SSE2__ )
#include "../../common.h"
#include "../intrin.h"
#define glm_mat4_inv_precise_sse2(mat, dest) glm_mat4_inv_sse2(mat, dest)
CGLM_INLINE
void
glm_mat4_scale_sse2(mat4 m, float s) {
__m128 x0;
x0 = _mm_set1_ps(s);
glmm_store(m[0], _mm_mul_ps(glmm_load(m[0]), x0));
glmm_store(m[1], _mm_mul_ps(glmm_load(m[1]), x0));
glmm_store(m[2], _mm_mul_ps(glmm_load(m[2]), x0));
glmm_store(m[3], _mm_mul_ps(glmm_load(m[3]), x0));
}
CGLM_INLINE
void
glm_mat4_transp_sse2(mat4 m, mat4 dest) {
__m128 r0, r1, r2, r3;
r0 = glmm_load(m[0]);
r1 = glmm_load(m[1]);
r2 = glmm_load(m[2]);
r3 = glmm_load(m[3]);
_MM_TRANSPOSE4_PS(r0, r1, r2, r3);
glmm_store(dest[0], r0);
glmm_store(dest[1], r1);
glmm_store(dest[2], r2);
glmm_store(dest[3], r3);
}
CGLM_INLINE
void
glm_mat4_mul_sse2(mat4 m1, mat4 m2, mat4 dest) {
/* D = R * L (Column-Major) */
glmm_128 l, r0, r1, r2, r3, v0, v1, v2, v3;
l = glmm_load(m1[0]);
r0 = glmm_load(m2[0]);
r1 = glmm_load(m2[1]);
r2 = glmm_load(m2[2]);
r3 = glmm_load(m2[3]);
v0 = _mm_mul_ps(glmm_splat_x(r0), l);
v1 = _mm_mul_ps(glmm_splat_x(r1), l);
v2 = _mm_mul_ps(glmm_splat_x(r2), l);
v3 = _mm_mul_ps(glmm_splat_x(r3), l);
l = glmm_load(m1[1]);
v0 = glmm_fmadd(glmm_splat_y(r0), l, v0);
v1 = glmm_fmadd(glmm_splat_y(r1), l, v1);
v2 = glmm_fmadd(glmm_splat_y(r2), l, v2);
v3 = glmm_fmadd(glmm_splat_y(r3), l, v3);
l = glmm_load(m1[2]);
v0 = glmm_fmadd(glmm_splat_z(r0), l, v0);
v1 = glmm_fmadd(glmm_splat_z(r1), l, v1);
v2 = glmm_fmadd(glmm_splat_z(r2), l, v2);
v3 = glmm_fmadd(glmm_splat_z(r3), l, v3);
l = glmm_load(m1[3]);
v0 = glmm_fmadd(glmm_splat_w(r0), l, v0);
v1 = glmm_fmadd(glmm_splat_w(r1), l, v1);
v2 = glmm_fmadd(glmm_splat_w(r2), l, v2);
v3 = glmm_fmadd(glmm_splat_w(r3), l, v3);
glmm_store(dest[0], v0);
glmm_store(dest[1], v1);
glmm_store(dest[2], v2);
glmm_store(dest[3], v3);
}
CGLM_INLINE
void
glm_mat4_mulv_sse2(mat4 m, vec4 v, vec4 dest) {
__m128 x0, x1, m0, m1, m2, m3, v0, v1, v2, v3;
m0 = glmm_load(m[0]);
m1 = glmm_load(m[1]);
m2 = glmm_load(m[2]);
m3 = glmm_load(m[3]);
x0 = glmm_load(v);
v0 = glmm_splat_x(x0);
v1 = glmm_splat_y(x0);
v2 = glmm_splat_z(x0);
v3 = glmm_splat_w(x0);
x1 = _mm_mul_ps(m3, v3);
x1 = glmm_fmadd(m2, v2, x1);
x1 = glmm_fmadd(m1, v1, x1);
x1 = glmm_fmadd(m0, v0, x1);
glmm_store(dest, x1);
}
CGLM_INLINE
float
glm_mat4_det_sse2(mat4 mat) {
__m128 r0, r1, r2, r3, x0, x1, x2;
/* 127 <- 0, [square] det(A) = det(At) */
r0 = glmm_load(mat[0]); /* d c b a */
r1 = glmm_load(mat[1]); /* h g f e */
r2 = glmm_load(mat[2]); /* l k j i */
r3 = glmm_load(mat[3]); /* p o n m */
/*
t[1] = j * p - n * l;
t[2] = j * o - n * k;
t[3] = i * p - m * l;
t[4] = i * o - m * k;
*/
x0 = glmm_fnmadd(glmm_shuff1(r3, 0, 0, 1, 1), glmm_shuff1(r2, 2, 3, 2, 3),
_mm_mul_ps(glmm_shuff1(r2, 0, 0, 1, 1),
glmm_shuff1(r3, 2, 3, 2, 3)));
/*
t[0] = k * p - o * l;
t[0] = k * p - o * l;
t[5] = i * n - m * j;
t[5] = i * n - m * j;
*/
x1 = glmm_fnmadd(glmm_shuff1(r3, 0, 0, 2, 2), glmm_shuff1(r2, 1, 1, 3, 3),
_mm_mul_ps(glmm_shuff1(r2, 0, 0, 2, 2),
glmm_shuff1(r3, 1, 1, 3, 3)));
/*
a * (f * t[0] - g * t[1] + h * t[2])
- b * (e * t[0] - g * t[3] + h * t[4])
+ c * (e * t[1] - f * t[3] + h * t[5])
- d * (e * t[2] - f * t[4] + g * t[5])
*/
x2 = glmm_fnmadd(glmm_shuff1(r1, 1, 1, 2, 2), glmm_shuff1(x0, 3, 2, 2, 0),
_mm_mul_ps(glmm_shuff1(r1, 0, 0, 0, 1),
_mm_shuffle_ps(x1, x0, _MM_SHUFFLE(1, 0, 0, 0))));
x2 = glmm_fmadd(glmm_shuff1(r1, 2, 3, 3, 3),
_mm_shuffle_ps(x0, x1, _MM_SHUFFLE(2, 2, 3, 1)),
x2);
x2 = _mm_xor_ps(x2, _mm_set_ps(-0.f, 0.f, -0.f, 0.f));
return glmm_hadd(_mm_mul_ps(x2, r0));
}
CGLM_INLINE
void
glm_mat4_inv_fast_sse2(mat4 mat, mat4 dest) {
__m128 r0, r1, r2, r3,
v0, v1, v2, v3,
t0, t1, t2, t3, t4, t5,
x0, x1, x2, x3, x4, x5, x6, x7, x8, x9;
x8 = _mm_set_ps(-0.f, 0.f, -0.f, 0.f);
x9 = glmm_shuff1(x8, 2, 1, 2, 1);
/* 127 <- 0 */
r0 = glmm_load(mat[0]); /* d c b a */
r1 = glmm_load(mat[1]); /* h g f e */
r2 = glmm_load(mat[2]); /* l k j i */
r3 = glmm_load(mat[3]); /* p o n m */
x0 = _mm_movehl_ps(r3, r2); /* p o l k */
x3 = _mm_movelh_ps(r2, r3); /* n m j i */
x1 = glmm_shuff1(x0, 1, 3, 3, 3); /* l p p p */
x2 = glmm_shuff1(x0, 0, 2, 2, 2); /* k o o o */
x4 = glmm_shuff1(x3, 1, 3, 3, 3); /* j n n n */
x7 = glmm_shuff1(x3, 0, 2, 2, 2); /* i m m m */
x6 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(0, 0, 0, 0)); /* e e i i */
x5 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(1, 1, 1, 1)); /* f f j j */
x3 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(2, 2, 2, 2)); /* g g k k */
x0 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(3, 3, 3, 3)); /* h h l l */
t0 = _mm_mul_ps(x3, x1);
t1 = _mm_mul_ps(x5, x1);
t2 = _mm_mul_ps(x5, x2);
t3 = _mm_mul_ps(x6, x1);
t4 = _mm_mul_ps(x6, x2);
t5 = _mm_mul_ps(x6, x4);
/* t1[0] = k * p - o * l;
t1[0] = k * p - o * l;
t2[0] = g * p - o * h;
t3[0] = g * l - k * h; */
t0 = glmm_fnmadd(x2, x0, t0);
/* t1[1] = j * p - n * l;
t1[1] = j * p - n * l;
t2[1] = f * p - n * h;
t3[1] = f * l - j * h; */
t1 = glmm_fnmadd(x4, x0, t1);
/* t1[2] = j * o - n * k
t1[2] = j * o - n * k;
t2[2] = f * o - n * g;
t3[2] = f * k - j * g; */
t2 = glmm_fnmadd(x4, x3, t2);
/* t1[3] = i * p - m * l;
t1[3] = i * p - m * l;
t2[3] = e * p - m * h;
t3[3] = e * l - i * h; */
t3 = glmm_fnmadd(x7, x0, t3);
/* t1[4] = i * o - m * k;
t1[4] = i * o - m * k;
t2[4] = e * o - m * g;
t3[4] = e * k - i * g; */
t4 = glmm_fnmadd(x7, x3, t4);
/* t1[5] = i * n - m * j;
t1[5] = i * n - m * j;
t2[5] = e * n - m * f;
t3[5] = e * j - i * f; */
t5 = glmm_fnmadd(x7, x5, t5);
x4 = _mm_movelh_ps(r0, r1); /* f e b a */
x5 = _mm_movehl_ps(r1, r0); /* h g d c */
x0 = glmm_shuff1(x4, 0, 0, 0, 2); /* a a a e */
x1 = glmm_shuff1(x4, 1, 1, 1, 3); /* b b b f */
x2 = glmm_shuff1(x5, 0, 0, 0, 2); /* c c c g */
x3 = glmm_shuff1(x5, 1, 1, 1, 3); /* d d d h */
v2 = _mm_mul_ps(x0, t1);
v1 = _mm_mul_ps(x0, t0);
v3 = _mm_mul_ps(x0, t2);
v0 = _mm_mul_ps(x1, t0);
v2 = glmm_fnmadd(x1, t3, v2);
v3 = glmm_fnmadd(x1, t4, v3);
v0 = glmm_fnmadd(x2, t1, v0);
v1 = glmm_fnmadd(x2, t3, v1);
v3 = glmm_fmadd(x2, t5, v3);
v0 = glmm_fmadd(x3, t2, v0);
v2 = glmm_fmadd(x3, t5, v2);
v1 = glmm_fmadd(x3, t4, v1);
/*
dest[0][0] = f * t1[0] - g * t1[1] + h * t1[2];
dest[0][1] =-(b * t1[0] - c * t1[1] + d * t1[2]);
dest[0][2] = b * t2[0] - c * t2[1] + d * t2[2];
dest[0][3] =-(b * t3[0] - c * t3[1] + d * t3[2]); */
v0 = _mm_xor_ps(v0, x8);
/*
dest[2][0] = e * t1[1] - f * t1[3] + h * t1[5];
dest[2][1] =-(a * t1[1] - b * t1[3] + d * t1[5]);
dest[2][2] = a * t2[1] - b * t2[3] + d * t2[5];
dest[2][3] =-(a * t3[1] - b * t3[3] + d * t3[5]);*/
v2 = _mm_xor_ps(v2, x8);
/*
dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]);
dest[1][1] = a * t1[0] - c * t1[3] + d * t1[4];
dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]);
dest[1][3] = a * t3[0] - c * t3[3] + d * t3[4]; */
v1 = _mm_xor_ps(v1, x9);
/*
dest[3][0] =-(e * t1[2] - f * t1[4] + g * t1[5]);
dest[3][1] = a * t1[2] - b * t1[4] + c * t1[5];
dest[3][2] =-(a * t2[2] - b * t2[4] + c * t2[5]);
dest[3][3] = a * t3[2] - b * t3[4] + c * t3[5]; */
v3 = _mm_xor_ps(v3, x9);
/* determinant */
x0 = _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(0, 0, 0, 0));
x1 = _mm_shuffle_ps(v2, v3, _MM_SHUFFLE(0, 0, 0, 0));
x0 = _mm_shuffle_ps(x0, x1, _MM_SHUFFLE(2, 0, 2, 0));
x0 = _mm_rcp_ps(glmm_vhadd(_mm_mul_ps(x0, r0)));
glmm_store(dest[0], _mm_mul_ps(v0, x0));
glmm_store(dest[1], _mm_mul_ps(v1, x0));
glmm_store(dest[2], _mm_mul_ps(v2, x0));
glmm_store(dest[3], _mm_mul_ps(v3, x0));
}
CGLM_INLINE
void
glm_mat4_inv_sse2(mat4 mat, mat4 dest) {
__m128 r0, r1, r2, r3,
v0, v1, v2, v3,
t0, t1, t2, t3, t4, t5,
x0, x1, x2, x3, x4, x5, x6, x7, x8, x9;
x8 = _mm_set_ps(-0.f, 0.f, -0.f, 0.f);
x9 = glmm_shuff1(x8, 2, 1, 2, 1);
/* 127 <- 0 */
r0 = glmm_load(mat[0]); /* d c b a */
r1 = glmm_load(mat[1]); /* h g f e */
r2 = glmm_load(mat[2]); /* l k j i */
r3 = glmm_load(mat[3]); /* p o n m */
x0 = _mm_movehl_ps(r3, r2); /* p o l k */
x3 = _mm_movelh_ps(r2, r3); /* n m j i */
x1 = glmm_shuff1(x0, 1, 3, 3, 3); /* l p p p */
x2 = glmm_shuff1(x0, 0, 2, 2, 2); /* k o o o */
x4 = glmm_shuff1(x3, 1, 3, 3, 3); /* j n n n */
x7 = glmm_shuff1(x3, 0, 2, 2, 2); /* i m m m */
x6 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(0, 0, 0, 0)); /* e e i i */
x5 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(1, 1, 1, 1)); /* f f j j */
x3 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(2, 2, 2, 2)); /* g g k k */
x0 = _mm_shuffle_ps(r2, r1, _MM_SHUFFLE(3, 3, 3, 3)); /* h h l l */
t0 = _mm_mul_ps(x3, x1);
t1 = _mm_mul_ps(x5, x1);
t2 = _mm_mul_ps(x5, x2);
t3 = _mm_mul_ps(x6, x1);
t4 = _mm_mul_ps(x6, x2);
t5 = _mm_mul_ps(x6, x4);
/* t1[0] = k * p - o * l;
t1[0] = k * p - o * l;
t2[0] = g * p - o * h;
t3[0] = g * l - k * h; */
t0 = glmm_fnmadd(x2, x0, t0);
/* t1[1] = j * p - n * l;
t1[1] = j * p - n * l;
t2[1] = f * p - n * h;
t3[1] = f * l - j * h; */
t1 = glmm_fnmadd(x4, x0, t1);
/* t1[2] = j * o - n * k
t1[2] = j * o - n * k;
t2[2] = f * o - n * g;
t3[2] = f * k - j * g; */
t2 = glmm_fnmadd(x4, x3, t2);
/* t1[3] = i * p - m * l;
t1[3] = i * p - m * l;
t2[3] = e * p - m * h;
t3[3] = e * l - i * h; */
t3 = glmm_fnmadd(x7, x0, t3);
/* t1[4] = i * o - m * k;
t1[4] = i * o - m * k;
t2[4] = e * o - m * g;
t3[4] = e * k - i * g; */
t4 = glmm_fnmadd(x7, x3, t4);
/* t1[5] = i * n - m * j;
t1[5] = i * n - m * j;
t2[5] = e * n - m * f;
t3[5] = e * j - i * f; */
t5 = glmm_fnmadd(x7, x5, t5);
x4 = _mm_movelh_ps(r0, r1); /* f e b a */
x5 = _mm_movehl_ps(r1, r0); /* h g d c */
x0 = glmm_shuff1(x4, 0, 0, 0, 2); /* a a a e */
x1 = glmm_shuff1(x4, 1, 1, 1, 3); /* b b b f */
x2 = glmm_shuff1(x5, 0, 0, 0, 2); /* c c c g */
x3 = glmm_shuff1(x5, 1, 1, 1, 3); /* d d d h */
v2 = _mm_mul_ps(x0, t1);
v1 = _mm_mul_ps(x0, t0);
v3 = _mm_mul_ps(x0, t2);
v0 = _mm_mul_ps(x1, t0);
v2 = glmm_fnmadd(x1, t3, v2);
v3 = glmm_fnmadd(x1, t4, v3);
v0 = glmm_fnmadd(x2, t1, v0);
v1 = glmm_fnmadd(x2, t3, v1);
v3 = glmm_fmadd(x2, t5, v3);
v0 = glmm_fmadd(x3, t2, v0);
v2 = glmm_fmadd(x3, t5, v2);
v1 = glmm_fmadd(x3, t4, v1);
/*
dest[0][0] = f * t1[0] - g * t1[1] + h * t1[2];
dest[0][1] =-(b * t1[0] - c * t1[1] + d * t1[2]);
dest[0][2] = b * t2[0] - c * t2[1] + d * t2[2];
dest[0][3] =-(b * t3[0] - c * t3[1] + d * t3[2]); */
v0 = _mm_xor_ps(v0, x8);
/*
dest[2][0] = e * t1[1] - f * t1[3] + h * t1[5];
dest[2][1] =-(a * t1[1] - b * t1[3] + d * t1[5]);
dest[2][2] = a * t2[1] - b * t2[3] + d * t2[5];
dest[2][3] =-(a * t3[1] - b * t3[3] + d * t3[5]);*/
v2 = _mm_xor_ps(v2, x8);
/*
dest[1][0] =-(e * t1[0] - g * t1[3] + h * t1[4]);
dest[1][1] = a * t1[0] - c * t1[3] + d * t1[4];
dest[1][2] =-(a * t2[0] - c * t2[3] + d * t2[4]);
dest[1][3] = a * t3[0] - c * t3[3] + d * t3[4]; */
v1 = _mm_xor_ps(v1, x9);
/*
dest[3][0] =-(e * t1[2] - f * t1[4] + g * t1[5]);
dest[3][1] = a * t1[2] - b * t1[4] + c * t1[5];
dest[3][2] =-(a * t2[2] - b * t2[4] + c * t2[5]);
dest[3][3] = a * t3[2] - b * t3[4] + c * t3[5]; */
v3 = _mm_xor_ps(v3, x9);
/* determinant */
x0 = _mm_shuffle_ps(v0, v1, _MM_SHUFFLE(0, 0, 0, 0));
x1 = _mm_shuffle_ps(v2, v3, _MM_SHUFFLE(0, 0, 0, 0));
x0 = _mm_shuffle_ps(x0, x1, _MM_SHUFFLE(2, 0, 2, 0));
x0 = _mm_div_ps(_mm_set1_ps(1.0f), glmm_vhadd(_mm_mul_ps(x0, r0)));
glmm_store(dest[0], _mm_mul_ps(v0, x0));
glmm_store(dest[1], _mm_mul_ps(v1, x0));
glmm_store(dest[2], _mm_mul_ps(v2, x0));
glmm_store(dest[3], _mm_mul_ps(v3, x0));
}
#endif
#endif /* cglm_mat_sse_h */
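
The only difference between the two inverses above is the final step: glm_mat4_inv_fast_sse2 approximates 1/det with _mm_rcp_ps (roughly 12 bits of precision), while glm_mat4_inv_sse2 divides exactly. An illustrative comparison through cglm's public wrappers:

#include <stdio.h>
#include <cglm/cglm.h>

int main(void) {
  mat4 m = GLM_MAT4_IDENTITY_INIT, a, b;
  glm_scale(m, (vec3){3.0f, 5.0f, 7.0f});
  glm_mat4_inv(m, a);      /* exact division */
  glm_mat4_inv_fast(m, b); /* approximate reciprocal */
  printf("exact %.9f vs fast %.9f\n", a[0][0], b[0][0]); /* ~0.333333333 */
  return 0;
}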


@@ -0,0 +1,54 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_quat_simd_h
#define cglm_quat_simd_h
#if defined( __SSE__ ) || defined( __SSE2__ )
#include "../../common.h"
#include "../intrin.h"
CGLM_INLINE
void
glm_quat_mul_sse2(versor p, versor q, versor dest) {
/*
+ (a1 b2 + b1 a2 + c1 d2 - d1 c2)i
+ (a1 c2 - b1 d2 + c1 a2 + d1 b2)j
+ (a1 d2 + b1 c2 - c1 b2 + d1 a2)k
  a1 a2 - b1 b2 - c1 c2 - d1 d2
*/
__m128 xp, xq, x1, x2, x3, r, x, y, z;
xp = glmm_load(p); /* 3 2 1 0 */
xq = glmm_load(q);
x1 = _mm_set_ps(-0.f, 0.f, -0.f, 0.f); /* TODO: _mm_set_ss() + shuffle instead? */
r = _mm_mul_ps(glmm_splat_w(xp), xq);
x2 = _mm_unpackhi_ps(x1, x1);
x3 = glmm_shuff1(x1, 3, 2, 0, 1);
x = glmm_splat_x(xp);
y = glmm_splat_y(xp);
z = glmm_splat_z(xp);
x = _mm_xor_ps(x, x1);
y = _mm_xor_ps(y, x2);
z = _mm_xor_ps(z, x3);
x1 = glmm_shuff1(xq, 0, 1, 2, 3);
x2 = glmm_shuff1(xq, 1, 0, 3, 2);
x3 = glmm_shuff1(xq, 2, 3, 0, 1);
r = glmm_fmadd(x, x1, r);
r = glmm_fmadd(y, x2, r);
r = glmm_fmadd(z, x3, r);
glmm_store(dest, r);
}
#endif
#endif /* cglm_quat_simd_h */
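
cglm stores quaternions as {x, y, z, w}, so the identity is {0, 0, 0, 1} and multiplying by it should return q unchanged. An illustrative check through the public glm_quat_mul, which reaches this kernel (or the NEON one) when SIMD is enabled:

#include <stdio.h>
#include <cglm/cglm.h>

int main(void) {
  versor q = {0.5f, 0.5f, 0.5f, 0.5f}, id = GLM_QUAT_IDENTITY_INIT, r;
  glm_quat_mul(q, id, r);
  printf("%f %f %f %f\n", r[0], r[1], r[2], r[3]); /* 0.5 0.5 0.5 0.5 */
  return 0;
}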

include/cglm/simd/x86.h (new file)

@@ -0,0 +1,307 @@
/*
* Copyright (c), Recep Aslantas.
*
* MIT License (MIT), http://opensource.org/licenses/MIT
* Full license can be found in the LICENSE file
*/
#ifndef cglm_simd_x86_h
#define cglm_simd_x86_h
#include "intrin.h"
#ifdef CGLM_SIMD_x86
#ifdef CGLM_ALL_UNALIGNED
# define glmm_load(p) _mm_loadu_ps(p)
# define glmm_store(p, a) _mm_storeu_ps(p, a)
#else
# define glmm_load(p) _mm_load_ps(p)
# define glmm_store(p, a) _mm_store_ps(p, a)
#endif
#define glmm_set1(x) _mm_set1_ps(x)
#define glmm_128 __m128
#ifdef CGLM_USE_INT_DOMAIN
# define glmm_shuff1(xmm, z, y, x, w) \
_mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(xmm), \
_MM_SHUFFLE(z, y, x, w)))
#else
# define glmm_shuff1(xmm, z, y, x, w) \
_mm_shuffle_ps(xmm, xmm, _MM_SHUFFLE(z, y, x, w))
#endif
#define glmm_splat(x, lane) glmm_shuff1(x, lane, lane, lane, lane)
#define glmm_splat_x(x) glmm_splat(x, 0)
#define glmm_splat_y(x) glmm_splat(x, 1)
#define glmm_splat_z(x) glmm_splat(x, 2)
#define glmm_splat_w(x) glmm_splat(x, 3)
/* glmm_shuff1x() is DEPRECATED! Use glmm_splat() instead. */
#define glmm_shuff1x(xmm, x) glmm_shuff1(xmm, x, x, x, x)
#define glmm_shuff2(a, b, z0, y0, x0, w0, z1, y1, x1, w1) \
glmm_shuff1(_mm_shuffle_ps(a, b, _MM_SHUFFLE(z0, y0, x0, w0)), \
z1, y1, x1, w1)
#ifdef __AVX__
# ifdef CGLM_ALL_UNALIGNED
# define glmm_load256(p) _mm256_loadu_ps(p)
# define glmm_store256(p, a) _mm256_storeu_ps(p, a)
# else
# define glmm_load256(p) _mm256_load_ps(p)
# define glmm_store256(p, a) _mm256_store_ps(p, a)
# endif
#endif
static inline
__m128
glmm_abs(__m128 x) {
return _mm_andnot_ps(_mm_set1_ps(-0.0f), x);
}
static inline
__m128
glmm_vhadd(__m128 v) {
__m128 x0;
x0 = _mm_add_ps(v, glmm_shuff1(v, 0, 1, 2, 3));
x0 = _mm_add_ps(x0, glmm_shuff1(x0, 1, 0, 0, 1));
return x0;
}
static inline
__m128
glmm_vhadds(__m128 v) {
#if defined(__SSE3__)
__m128 shuf, sums;
shuf = _mm_movehdup_ps(v);
sums = _mm_add_ps(v, shuf);
shuf = _mm_movehl_ps(shuf, sums);
sums = _mm_add_ss(sums, shuf);
return sums;
#else
__m128 shuf, sums;
shuf = glmm_shuff1(v, 2, 3, 0, 1);
sums = _mm_add_ps(v, shuf);
shuf = _mm_movehl_ps(shuf, sums);
sums = _mm_add_ss(sums, shuf);
return sums;
#endif
}
static inline
float
glmm_hadd(__m128 v) {
return _mm_cvtss_f32(glmm_vhadds(v));
}
static inline
__m128
glmm_vhmin(__m128 v) {
__m128 x0, x1, x2;
x0 = _mm_movehl_ps(v, v); /* [2, 3, 2, 3] */
x1 = _mm_min_ps(x0, v); /* [0|2, 1|3, 2|2, 3|3] */
x2 = glmm_splat(x1, 1); /* [1|3, 1|3, 1|3, 1|3] */
return _mm_min_ss(x1, x2);
}
static inline
float
glmm_hmin(__m128 v) {
return _mm_cvtss_f32(glmm_vhmin(v));
}
static inline
__m128
glmm_vhmax(__m128 v) {
__m128 x0, x1, x2;
x0 = _mm_movehl_ps(v, v); /* [2, 3, 2, 3] */
x1 = _mm_max_ps(x0, v); /* [0|2, 1|3, 2|2, 3|3] */
x2 = glmm_splat(x1, 1); /* [1|3, 1|3, 1|3, 1|3] */
return _mm_max_ss(x1, x2);
}
static inline
float
glmm_hmax(__m128 v) {
return _mm_cvtss_f32(glmm_vhmax(v));
}
static inline
__m128
glmm_vdots(__m128 a, __m128 b) {
#if (defined(__SSE4_1__) || defined(__SSE4_2__)) && defined(CGLM_SSE4_DOT)
return _mm_dp_ps(a, b, 0xFF);
#elif defined(__SSE3__) && defined(CGLM_SSE3_DOT)
__m128 x0, x1;
x0 = _mm_mul_ps(a, b);
x1 = _mm_hadd_ps(x0, x0);
return _mm_hadd_ps(x1, x1);
#else
return glmm_vhadds(_mm_mul_ps(a, b));
#endif
}
static inline
__m128
glmm_vdot(__m128 a, __m128 b) {
#if (defined(__SSE4_1__) || defined(__SSE4_2__)) && defined(CGLM_SSE4_DOT)
return _mm_dp_ps(a, b, 0xFF);
#elif defined(__SSE3__) && defined(CGLM_SSE3_DOT)
__m128 x0, x1;
x0 = _mm_mul_ps(a, b);
x1 = _mm_hadd_ps(x0, x0);
return _mm_hadd_ps(x1, x1);
#else
__m128 x0;
x0 = _mm_mul_ps(a, b);
x0 = _mm_add_ps(x0, glmm_shuff1(x0, 1, 0, 3, 2));
return _mm_add_ps(x0, glmm_shuff1(x0, 0, 1, 0, 1));
#endif
}
static inline
float
glmm_dot(__m128 a, __m128 b) {
return _mm_cvtss_f32(glmm_vdots(a, b));
}
static inline
float
glmm_norm(__m128 a) {
return _mm_cvtss_f32(_mm_sqrt_ss(glmm_vhadds(_mm_mul_ps(a, a))));
}
static inline
float
glmm_norm2(__m128 a) {
return _mm_cvtss_f32(glmm_vhadds(_mm_mul_ps(a, a)));
}
static inline
float
glmm_norm_one(__m128 a) {
return _mm_cvtss_f32(glmm_vhadds(glmm_abs(a)));
}
static inline
float
glmm_norm_inf(__m128 a) {
return _mm_cvtss_f32(glmm_vhmax(glmm_abs(a)));
}
static inline
__m128
glmm_load3(float v[3]) {
__m128i xy;
__m128 z;
xy = _mm_loadl_epi64(CGLM_CASTPTR_ASSUME_ALIGNED(v, const __m128i));
z = _mm_load_ss(&v[2]);
return _mm_movelh_ps(_mm_castsi128_ps(xy), z);
}
static inline
void
glmm_store3(float v[3], __m128 vx) {
_mm_storel_pi(CGLM_CASTPTR_ASSUME_ALIGNED(v, __m64), vx);
_mm_store_ss(&v[2], glmm_shuff1(vx, 2, 2, 2, 2));
}
static inline
__m128
glmm_div(__m128 a, __m128 b) {
return _mm_div_ps(a, b);
}
/* MSVC does not define __FMA__; assume FMA is present when AVX2 is enabled */
#if defined(_MSC_VER) && !defined(__FMA__) && defined(__AVX2__)
# define __FMA__ 1
#endif
static inline
__m128
glmm_fmadd(__m128 a, __m128 b, __m128 c) {
#ifdef __FMA__
return _mm_fmadd_ps(a, b, c);
#else
return _mm_add_ps(c, _mm_mul_ps(a, b));
#endif
}
static inline
__m128
glmm_fnmadd(__m128 a, __m128 b, __m128 c) {
#ifdef __FMA__
return _mm_fnmadd_ps(a, b, c);
#else
return _mm_sub_ps(c, _mm_mul_ps(a, b));
#endif
}
static inline
__m128
glmm_fmsub(__m128 a, __m128 b, __m128 c) {
#ifdef __FMA__
return _mm_fmsub_ps(a, b, c);
#else
return _mm_sub_ps(_mm_mul_ps(a, b), c);
#endif
}
static inline
__m128
glmm_fnmsub(__m128 a, __m128 b, __m128 c) {
#ifdef __FMA__
return _mm_fnmsub_ps(a, b, c);
#else
return _mm_xor_ps(_mm_add_ps(_mm_mul_ps(a, b), c), _mm_set1_ps(-0.0f));
#endif
}
#if defined(__AVX__)
static inline
__m256
glmm256_fmadd(__m256 a, __m256 b, __m256 c) {
#ifdef __FMA__
return _mm256_fmadd_ps(a, b, c);
#else
return _mm256_add_ps(c, _mm256_mul_ps(a, b));
#endif
}
static inline
__m256
glmm256_fnmadd(__m256 a, __m256 b, __m256 c) {
#ifdef __FMA__
return _mm256_fnmadd_ps(a, b, c);
#else
return _mm256_sub_ps(c, _mm256_mul_ps(a, b));
#endif
}
static inline
__m256
glmm256_fmsub(__m256 a, __m256 b, __m256 c) {
#ifdef __FMA__
return _mm256_fmsub_ps(a, b, c);
#else
return _mm256_sub_ps(_mm256_mul_ps(a, b), c);
#endif
}
static inline
__m256
glmm256_fnmsub(__m256 a, __m256 b, __m256 c) {
#ifdef __FMA__
  return _mm256_fnmsub_ps(a, b, c);
#else
  /* -(a * b) - c, mirroring the 128-bit glmm_fnmsub above */
  return _mm256_xor_ps(_mm256_add_ps(_mm256_mul_ps(a, b), c),
                       _mm256_set1_ps(-0.0f));
#endif
}
#endif
#endif
#endif /* cglm_simd_x86_h */
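
For reference, the fused helpers above are meant to satisfy the same identities in both the FMA and the fallback branches: glmm_fmadd(a,b,c) = a*b + c, glmm_fnmadd = c - a*b, glmm_fmsub = a*b - c, and glmm_fnmsub = -(a*b) - c. A tiny illustrative check, assuming cglm's include/ is on the include path:

#include <stdio.h>
#include <cglm/common.h>
#include <cglm/simd/intrin.h>

int main(void) {
#ifdef CGLM_SIMD_x86
  /* expect 2*3 + 1 = 7 in lane 0 */
  printf("fmadd = %f\n",
         _mm_cvtss_f32(glmm_fmadd(glmm_set1(2.0f), glmm_set1(3.0f),
                                  glmm_set1(1.0f))));
#endif
  return 0;
}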