@@ -3,14 +3,27 @@

#pragma once

-static const __m128 GLM_VAR_USED glm_zero = _mm_setzero_ps();
-static const __m128 GLM_VAR_USED glm_one = _mm_set_ps1(1.0f);
-static const __m128 GLM_VAR_USED glm_half = _mm_set_ps1(0.5f);
-static const __m128 GLM_VAR_USED glm_minus_one = _mm_set_ps1(-1.0f);
-static const __m128 GLM_VAR_USED glm_two = _mm_set_ps1(2.0f);
-static const __m128 GLM_VAR_USED glm_three = _mm_set_ps1(3.0f);
+#if GLM_ARCH & GLM_ARCH_SSE2

-static const __m128 GLM_VAR_USED glm_ps_2pow23 = _mm_set_ps1(8388608.0f);
+//mad
+GLM_FUNC_QUALIFIER __m128 glm_f32v1_mad(__m128 a, __m128 b, __m128 c)
+{
+#	if GLM_ARCH & GLM_ARCH_AVX2
+		return _mm_fmadd_ss(a, b, c);
+#	else
+		return _mm_add_ss(_mm_mul_ss(a, b), c);
+#	endif
+}
+
+//mad
+GLM_FUNC_QUALIFIER __m128 glm_f32v4_mad(__m128 a, __m128 b, __m128 c)
+{
+#	if GLM_ARCH & GLM_ARCH_AVX2
+		return _mm_fmadd_ps(a, b, c);
+#	else
+		return _mm_add_ps(_mm_mul_ps(a, b), c);
+#	endif
+}
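+
+// glm_f32v1_mad/glm_f32v4_mad compute a * b + c: a single fused _mm_fmadd_ss/_mm_fmadd_ps
+// when GLM_ARCH includes GLM_ARCH_AVX2, otherwise a separate multiply and add. The fused
+// path skips the intermediate rounding, so results may differ in the last bit.
+// Rough usage sketch: glm_f32v4_mad(_mm_set1_ps(2.0f), _mm_set1_ps(3.0f), _mm_set1_ps(1.0f))
+// yields 7.0f in every lane.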

//abs
GLM_FUNC_QUALIFIER __m128 glm_f32v4_abs(__m128 x)
@@ -33,19 +46,21 @@ GLM_FUNC_QUALIFIER __m128i glm_i32v4_abs(__m128i x)
//sign
GLM_FUNC_QUALIFIER __m128 glm_f32v4_sgn(__m128 x)
{
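+	// branchless sign: mask -1.0f and +1.0f with the compare results and OR them;
+	// lanes where x == 0 (or x is NaN) match neither compare and stay 0.0f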
-	__m128 const Cmp0 = _mm_cmplt_ps(x, glm_zero);
-	__m128 const Cmp1 = _mm_cmpgt_ps(x, glm_zero);
-	__m128 const And0 = _mm_and_ps(Cmp0, glm_minus_one);
-	__m128 const And1 = _mm_and_ps(Cmp1, glm_one);
-	return _mm_or_ps(And0, And1);
+	__m128 const zro0 = _mm_setzero_ps();
+	__m128 const cmp0 = _mm_cmplt_ps(x, zro0);
+	__m128 const cmp1 = _mm_cmpgt_ps(x, zro0);
+	__m128 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(-1.0f));
+	__m128 const and1 = _mm_and_ps(cmp1, _mm_set1_ps(1.0f));
+	__m128 const or0 = _mm_or_ps(and0, and1);
+	return or0;
}

//round
GLM_FUNC_QUALIFIER __m128 glm_f32v4_rnd(__m128 x)
{
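+	// 2^23 trick: for |x| < 2^23, x + 2^23 has no fractional bits left, so adding and
+	// then subtracting 2^23 (with x's sign OR'd onto it) rounds x to a whole number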
-	__m128 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(static_cast<int>(0x80000000)));
+	__m128 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
	__m128 const and0 = _mm_and_ps(sgn0, x);
-	__m128 const or0 = _mm_or_ps(and0, glm_ps_2pow23);
+	__m128 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f));
	__m128 const add0 = _mm_add_ps(x, or0);
	__m128 const sub0 = _mm_sub_ps(add0, or0);
	return sub0;
@@ -70,9 +85,9 @@ GLM_FUNC_QUALIFIER __m128 glm_f32v4_flr(__m128 x)
//roundEven
GLM_FUNC_QUALIFIER __m128 glm_f32v4_rde(__m128 x)
{
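+	// same 2^23 trick as glm_f32v4_rnd; halfway cases land on even values because the
+	// default MXCSR rounding mode is round-to-nearest-even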
-	__m128 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(static_cast<int>(0x80000000)));
+	__m128 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
	__m128 const and0 = _mm_and_ps(sgn0, x);
-	__m128 const or0 = _mm_or_ps(and0, glm_ps_2pow23);
+	__m128 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f));
	__m128 const add0 = _mm_add_ps(x, or0);
	__m128 const sub0 = _mm_sub_ps(add0, or0);
	return sub0;
@@ -114,16 +129,15 @@ GLM_FUNC_QUALIFIER __m128 glm_f32v4_mix(__m128 v1, __m128 v2, __m128 a)
{
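+	// mix(v1, v2, a) = v1 * (1 - a) + v2 * a; the second product and the final add are
+	// folded into one glm_f32v4_mad call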
	__m128 const sub0 = _mm_sub_ps(_mm_set1_ps(1.0f), a);
	__m128 const mul0 = _mm_mul_ps(v1, sub0);
-	__m128 const mul1 = _mm_mul_ps(v2, a);
-	__m128 const add0 = _mm_add_ps(mul0, mul1);
-	return add0;
+	__m128 const mad0 = glm_f32v4_mad(v2, a, mul0);
+	return mad0;
}

//step
GLM_FUNC_QUALIFIER __m128 glm_f32v4_stp(__m128 edge, __m128 x)
{
	__m128 const cmp = _mm_cmple_ps(x, edge);
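+	// note: movemask collapses the comparison, so this returns 1.0f in every lane only
+	// when no component of x is less than or equal to edge, rather than a per-component step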
-	return _mm_movemask_ps(cmp) == 0 ? _mm_set1_ps(1.0f) : _mm_set1_ps(0.0f);
+	return _mm_movemask_ps(cmp) == 0 ? _mm_set1_ps(1.0f) : _mm_setzero_ps();
}

// smoothstep
@@ -132,9 +146,9 @@ GLM_FUNC_QUALIFIER __m128 glm_f32v4_ssp(__m128 edge0, __m128 edge1, __m128 x)
	__m128 const sub0 = _mm_sub_ps(x, edge0);
	__m128 const sub1 = _mm_sub_ps(edge1, edge0);
	__m128 const div0 = _mm_sub_ps(sub0, sub1);
-	__m128 const clp0 = glm_f32v4_clp(div0, _mm_set1_ps(0.0f), _mm_set1_ps(1.0f));
-	__m128 const mul0 = _mm_mul_ps(glm_two, clp0);
-	__m128 const sub2 = _mm_sub_ps(glm_three, mul0);
+	__m128 const clp0 = glm_f32v4_clp(div0, _mm_setzero_ps(), _mm_set1_ps(1.0f));
+	__m128 const mul0 = _mm_mul_ps(_mm_set1_ps(2.0f), clp0);
+	__m128 const sub2 = _mm_sub_ps(_mm_set1_ps(3.0f), mul0);
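+	// the remaining lines evaluate the Hermite polynomial t * t * (3 - 2 * t) on the clamped value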
	__m128 const mul1 = _mm_mul_ps(clp0, clp0);
	__m128 const mul2 = _mm_mul_ps(mul1, sub2);
	return mul2;
@@ -149,7 +163,7 @@ GLM_FUNC_QUALIFIER __m128 glm_f32v4_nan(__m128 x)
	__m128i const t4 = _mm_and_si128(t2, t3);		// exponent
	__m128i const t5 = _mm_andnot_si128(t3, t2);	// fraction
	__m128i const Equal = _mm_cmpeq_epi32(t3, t4);
-	__m128i const Nequal = _mm_cmpeq_epi32(t5, _mm_set1_epi32(0));
+	__m128i const Nequal = _mm_cmpeq_epi32(t5, _mm_setzero_si128());
	__m128i const And = _mm_and_si128(Equal, Nequal);
	return _mm_castsi128_ps(And);	// exponent = all 1s and fraction != 0
}
@@ -162,24 +176,6 @@ GLM_FUNC_QUALIFIER __m128 glm_f32v4_inf(__m128 x)
	return _mm_castsi128_ps(_mm_cmpeq_epi32(t2, _mm_set1_epi32(0xFF000000)));	// exponent is all 1s, fraction is 0
}

-GLM_FUNC_QUALIFIER __m128 glm_f32v1_fma(__m128 a, __m128 b, __m128 c)
-{
-#	if GLM_ARCH & GLM_ARCH_AVX2
-		return _mm_fmadd_ss(a, b, c);
-#	else
-		return _mm_add_ss(_mm_mul_ss(a, b), c);
-#	endif
-}
-
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_fma(__m128 a, __m128 b, __m128 c)
-{
-#	if GLM_ARCH & GLM_ARCH_AVX2
-		return _mm_fmadd_ps(a, b, c);
-#	else
-		return _mm_add_ps(_mm_mul_ps(a, b), c);
-#	endif
-}
-
// SSE scalar reciprocal sqrt using rsqrt op, plus one Newton-Raphson iteration
// By Elan Ruskin, http://assemblyrequired.crashworks.org/
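+// (one Newton-Raphson step for 1/sqrt(x): y1 = y0 * (1.5 - 0.5 * x * y0 * y0), with y0 from rsqrt)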
GLM_FUNC_QUALIFIER __m128 glm_f32v1_sqrt_wip(__m128 x)
@@ -206,4 +202,4 @@ GLM_FUNC_QUALIFIER __m128 glm_f32v4_sqrt_wip(__m128 x)
	return Mul3;
}

-
+#endif//GLM_ARCH & GLM_ARCH_SSE2