
Updated naming convention and namespace

Christophe Riccio, 15 years ago
Commit
1afc8012a2

+ 36 - 25
glm/core/intrinsic_common.hpp

@@ -7,49 +7,60 @@
 // File    : glm/core/intrinsic_common.hpp
 ///////////////////////////////////////////////////////////////////////////////////////////////////
 
-#ifndef GLM_DETAIL_INTRINSIC_COMMON_INCLUDED
-#define GLM_DETAIL_INTRINSIC_COMMON_INCLUDED
+#ifndef glm_detail_intrinsic_common
+#define glm_detail_intrinsic_common
 
-__m128 _mm_abs_ps(__m128 x);
+#include "../setup.hpp"
 
-__m128 _mm_sgn_ps(__m128 x);
+#if(GLM_ARCH != GLM_ARCH_PURE)
 
-//floor
-__m128 _mm_flr_ps(__m128 v);
+namespace glm{
+namespace detail
+{
+	__m128 sse_abs_ps(__m128 x);
 
-//trunc
-__m128 _mm_trc_ps(__m128 v);
+	__m128 sse_sgn_ps(__m128 x);
 
-//round
-__m128 _mm_rnd_ps(__m128 v);
+	//floor
+	__m128 sse_flr_ps(__m128 v);
 
-//roundEven
-__m128 _mm_rde_ps(__m128 v);
+	//trunc
+	__m128 sse_trc_ps(__m128 v);
 
-__m128 _mm_ceil_ps(__m128 v);
+	//round
+	__m128 sse_rnd_ps(__m128 v);
 
-__m128 _mm_frc_ps(__m128 x);
+	//roundEven
+	__m128 sse_rde_ps(__m128 v);
 
-__m128 _mm_mod_ps(__m128 x, __m128 y);
+	__m128 sse_ceil_ps(__m128 v);
 
-__m128 _mm_modf_ps(__m128 x, __m128i & i);
+	__m128 sse_frc_ps(__m128 x);
 
-//inline __m128 _mm_min_ps(__m128 x, __m128 y)
+	__m128 sse_mod_ps(__m128 x, __m128 y);
 
-//inline __m128 _mm_max_ps(__m128 x, __m128 y)
+	__m128 sse_modf_ps(__m128 x, __m128i & i);
 
-__m128 _mm_clp_ps(__m128 v, __m128 minVal, __m128 maxVal);
+	//inline __m128 sse_min_ps(__m128 x, __m128 y)
 
-__m128 _mm_mix_ps(__m128 v1, __m128 v2, __m128 a);
+	//inline __m128 sse_max_ps(__m128 x, __m128 y)
 
-__m128 _mm_stp_ps(__m128 edge, __m128 x);
+	__m128 sse_clp_ps(__m128 v, __m128 minVal, __m128 maxVal);
 
-__m128 _mm_ssp_ps(__m128 edge0, __m128 edge1, __m128 x);
+	__m128 sse_mix_ps(__m128 v1, __m128 v2, __m128 a);
 
-__m128 _mm_nan_ps(__m128 x);
+	__m128 sse_stp_ps(__m128 edge, __m128 x);
 
-__m128 _mm_inf_ps(__m128 x);
+	__m128 sse_ssp_ps(__m128 edge0, __m128 edge1, __m128 x);
+
+	__m128 sse_nan_ps(__m128 x);
+
+	__m128 sse_inf_ps(__m128 x);
+
+}//namespace detail
+}//namespace glm
 
 #include "intrinsic_common.inl"
 
-#endif//GLM_DETAIL_INTRINSIC_COMMON_INCLUDED
+#endif//(GLM_ARCH != GLM_ARCH_PURE)
+#endif//glm_detail_intrinsic_common
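
The header now hides the declarations behind both the glm::detail namespace and the GLM_ARCH guard, so callers have to mirror that guard. A minimal usage sketch, assuming the headers are reachable from the include path; clamp01 is a hypothetical helper, not part of this commit:

#include <xmmintrin.h>
#include "glm/setup.hpp"

#if(GLM_ARCH != GLM_ARCH_PURE)
#include "glm/core/intrinsic_common.hpp"

// Hypothetical caller: clamp all four lanes of v to [0, 1] through the renamed wrapper.
inline __m128 clamp01(__m128 v)
{
	return glm::detail::sse_clp_ps(v, _mm_setzero_ps(), _mm_set_ps1(1.0f));
}
#endif//(GLM_ARCH != GLM_ARCH_PURE)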

+ 19 - 19
glm/core/intrinsic_common.inl

@@ -123,15 +123,12 @@ namespace detail{
 	static const __m128 _ps_log_c0 = _mm_set_ps1(0.693147180559945f);
 	static const __m128 _ps_log2_c0 = _mm_set_ps1(1.44269504088896340735992f);
 
-}//namespace detail
-}//namespace glm
-
-inline __m128 _mm_abs_ps(__m128 x)
+inline __m128 sse_abs_ps(__m128 x)
 {
 	return _mm_and_ps(glm::detail::abs4Mask, x);
 } 
 
-inline __m128 _mm_sgn_ps(__m128 x)
+inline __m128 sse_sgn_ps(__m128 x)
 {
 	//__m128 cmp0 = _mm_cmpeq_ps(x, zero);
 	//__m128 cmp1 = _mm_cmple_ps(x, zero);
@@ -154,7 +151,7 @@ inline __m128 _mm_sgn_ps(__m128 x)
 }
 
 //floor
-inline __m128 _mm_flr_ps(__m128 x)
+inline __m128 sse_flr_ps(__m128 x)
 {
 	__m128 rnd0 = _mm_rnd_ps(x);
 	__m128 cmp0 = _mm_cmplt_ps(x, rnd0);
@@ -171,7 +168,7 @@ inline __m128 _mm_trc_ps(__m128 v)
 }
 */
 //round
-inline __m128 _mm_rnd_ps(__m128 x)
+inline __m128 sse_rnd_ps(__m128 x)
 {
 	__m128 and0;// = _mm_and_ps(glm::detail::_epi32_sign_mask, x);
 	__m128 or0 = _mm_or_ps(and0, glm::detail::_ps_2pow23);
@@ -181,12 +178,12 @@ inline __m128 _mm_rnd_ps(__m128 x)
 }
 
 //roundEven
-inline __m128 _mm_rde_ps(__m128 v)
+inline __m128 sse_rde_ps(__m128 v)
 {
 
 }
 
-inline __m128 _mm_ceil_ps(__m128 x)
+inline __m128 sse_ceil_ps(__m128 x)
 {
 	__m128 rnd0 = _mm_rnd_ps(x);
 	__m128 cmp0 = _mm_cmpgt_ps(x, rnd0);
@@ -195,14 +192,14 @@ inline __m128 _mm_ceil_ps(__m128 x)
 	return add0;
 }
 
-inline __m128 _mm_frc_ps(__m128 x)
+inline __m128 sse_frc_ps(__m128 x)
 {
 	__m128 flr0 = _mm_flr_ps(x);
 	__m128 sub0 = _mm_sub_ps(x, flr0);
 	return sub0;
 }
 
-inline __m128 _mm_mod_ps(__m128 x, __m128 y)
+inline __m128 sse_mod_ps(__m128 x, __m128 y)
 {
 	__m128 div0 = _mm_div_ps(x, y);
 	__m128 flr0 = _mm_flr_ps(div0);
@@ -211,7 +208,7 @@ inline __m128 _mm_mod_ps(__m128 x, __m128 y)
 	return sub0;
 }
 
-inline __m128 _mm_modf_ps(__m128 x, __m128i & i)
+inline __m128 sse_modf_ps(__m128 x, __m128i & i)
 {
 
 }
@@ -220,14 +217,14 @@ inline __m128 _mm_modf_ps(__m128 x, __m128i & i)
 
 //inline __m128 _mm_max_ps(__m128 x, __m128 y)
 
-inline __m128 _mm_clp_ps(__m128 v, __m128 minVal, __m128 maxVal)
+inline __m128 sse_clp_ps(__m128 v, __m128 minVal, __m128 maxVal)
 {
 	__m128 min0 = _mm_min_ps(v, maxVal);
 	__m128 max0 = _mm_max_ps(min0, minVal);
 	return max0;
 }
 
-inline __m128 _mm_mix_ps(__m128 v1, __m128 v2, __m128 a)
+inline __m128 sse_mix_ps(__m128 v1, __m128 v2, __m128 a)
 {
 	__m128 sub0 = _mm_sub_ps(glm::detail::one, a);
 	__m128 mul0 = _mm_mul_ps(v1, sub0);
@@ -236,7 +233,7 @@ inline __m128 _mm_mix_ps(__m128 v1, __m128 v2, __m128 a)
 	return add0;
 }
 
-inline __m128 _mm_stp_ps(__m128 edge, __m128 x)
+inline __m128 sse_stp_ps(__m128 edge, __m128 x)
 {
 	__m128 cmp = _mm_cmple_ps(x, edge);
 	if(_mm_movemask_ps(cmp) == 0)
@@ -245,7 +242,7 @@ inline __m128 _mm_stp_ps(__m128 edge, __m128 x)
 		return glm::detail::zero;
 }
 
-inline __m128 _mm_ssp_ps(__m128 edge0, __m128 edge1, __m128 x)
+inline __m128 sse_ssp_ps(__m128 edge0, __m128 edge1, __m128 x)
 {
 	__m128 sub0 = _mm_sub_ps(x, edge0);
 	__m128 sub1 = _mm_sub_ps(edge1, edge0);
@@ -258,19 +255,19 @@ inline __m128 _mm_ssp_ps(__m128 edge0, __m128 edge1, __m128 x)
 	return mul2;
 }
 
-inline __m128 _mm_nan_ps(__m128 x)
+inline __m128 sse_nan_ps(__m128 x)
 {
 
 }
 
-inline __m128 _mm_inf_ps(__m128 x)
+inline __m128 sse_inf_ps(__m128 x)
 {
 
 }
 
 // SSE scalar reciprocal sqrt using rsqrt op, plus one Newton-Raphson iteration
 // By Elan Ruskin, 
-inline __m128 _mm_sqrt_wip_ss(__m128 const & x)
+inline __m128 sse_sqrt_wip_ss(__m128 const & x)
 {
 	__m128 recip = _mm_rsqrt_ss(x);  // "estimate" opcode
 	const static __m128 three = {3, 3, 3, 3}; // aligned consts for fast load
@@ -279,3 +276,6 @@ inline __m128 _mm_sqrt_wip_ss(__m128 const & x)
 	__m128 threeminus_xrr = _mm_sub_ss(three, _mm_mul_ss(x, _mm_mul_ss (recip, recip)));
 	return _mm_mul_ss( halfrecip, threeminus_xrr);
 }
+
+}//namespace detail
+}//namespace glm
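
sse_flr_ps above builds floor from round: round to nearest, then subtract one in the lanes where the rounded value overshoots the input. A standalone sketch of that idea, assuming SSE2 and rounding through the int conversion pair rather than the 2^23 magic-number trick the library uses; sketch_floor_ps is an illustrative name, not part of the commit:

#include <emmintrin.h>

inline __m128 sketch_floor_ps(__m128 x)
{
	// Round to nearest via int conversion (valid for |x| < 2^31).
	__m128 rnd = _mm_cvtepi32_ps(_mm_cvtps_epi32(x));
	// Where rnd > x the rounding overshot, so subtract 1 in those lanes.
	__m128 cmp = _mm_cmplt_ps(x, rnd);
	return _mm_sub_ps(rnd, _mm_and_ps(cmp, _mm_set_ps1(1.0f)));
}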

+ 27 - 3
glm/core/intrinsic_exponential.hpp

@@ -1,5 +1,24 @@
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// OpenGL Mathematics Copyright (c) 2005 - 2010 G-Truc Creation (www.g-truc.net)
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// Created : 2009-05-11
+// Updated : 2009-05-11
+// Licence : This source is under MIT License
+// File    : glm/core/intrinsic_exponential.hpp
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifndef glm_detail_intrinsic_exponential
+#define glm_detail_intrinsic_exponential
+
+#include "../setup.hpp"
+
+#if(GLM_ARCH != GLM_ARCH_PURE)
+
+namespace glm{
+namespace detail
+{
 /*
-inline __m128 _mm_rsqrt_nr_ss(__m128 const x)
+inline __m128 sse_rsqrt_nr_ss(__m128 const x)
 {
 	__m128 recip = _mm_rsqrt_ss( x );  // "estimate" opcode
 	const static __m128 three = { 3, 3, 3, 3 }; // aligned consts for fast load
@@ -9,7 +28,7 @@ inline __m128 _mm_rsqrt_nr_ss(__m128 const x)
 	return _mm_mul_ss( halfrecip, threeminus_xrr );
 }
  
-inline __m128 __mm_normalize_fast_ps(  float * RESTRICT vOut, float * RESTRICT vIn )
+inline __m128 sse_normalize_fast_ps(  float * RESTRICT vOut, float * RESTRICT vIn )
 {
         __m128 x = _mm_load_ss(&vIn[0]);
         __m128 y = _mm_load_ss(&vIn[1]);
@@ -31,4 +50,9 @@ inline __m128 __mm_normalize_fast_ps(  float * RESTRICT vOut, float * RESTRICT v
  
         return _mm_mul_ss( l , rsqt );
 }
-*/
+*/
+}//namespace detail
+}//namespace glm
+
+#endif//(GLM_ARCH != GLM_ARCH_PURE)
+#endif//glm_detail_intrinsic_exponential
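
The commented-out sse_rsqrt_nr_ss follows the classic recipe: take the low-precision rsqrtss estimate y0 and refine it once with Newton-Raphson, y1 = 0.5 * y0 * (3 - x * y0 * y0). A minimal sketch of that single iteration; sketch_rsqrt_nr_ss is an illustrative name, not the committed body:

#include <xmmintrin.h>

inline __m128 sketch_rsqrt_nr_ss(__m128 x)
{
	__m128 y0  = _mm_rsqrt_ss(x);                    // ~12-bit estimate
	__m128 xyy = _mm_mul_ss(x, _mm_mul_ss(y0, y0));  // x * y0^2
	__m128 hy0 = _mm_mul_ss(_mm_set_ss(0.5f), y0);   // 0.5 * y0
	return _mm_mul_ss(hy0, _mm_sub_ss(_mm_set_ss(3.0f), xyy));
}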

+ 23 - 18
glm/core/intrinsic_geometric.hpp

@@ -12,33 +12,38 @@
 
 #include "intrinsic_common.hpp"
 
-//length
-__m128 _mm_len_ps(__m128 x);
+namespace glm{
+namespace detail
+{
+	//length
+	__m128 sse_len_ps(__m128 x);
 
-//distance
-__m128 _mm_dst_ps(__m128 p0, __m128 p1);
+	//distance
+	__m128 sse_dst_ps(__m128 p0, __m128 p1);
 
-//dot
-__m128 _mm_dot_ps(__m128 v1, __m128 v2);
+	//dot
+	__m128 sse_dot_ps(__m128 v1, __m128 v2);
 
-// SSE1
-__m128 _mm_dot_ss(__m128 v1, __m128 v2);
+	// SSE1
+	__m128 sse_dot_ss(__m128 v1, __m128 v2);
 
-//cross
-__m128 _mm_xpd_ps(__m128 v1, __m128 v2);
+	//cross
+	__m128 sse_xpd_ps(__m128 v1, __m128 v2);
 
-//normalize
-__m128 _mm_nrm_ps(__m128 v);
+	//normalize
+	__m128 sse_nrm_ps(__m128 v);
 
-//faceforward
-__m128 _mm_ffd_ps(__m128 N, __m128 I, __m128 Nref);
+	//faceforward
+	__m128 sse_ffd_ps(__m128 N, __m128 I, __m128 Nref);
 
-//reflect
-__m128 _mm_rfe_ps(__m128 I, __m128 N);
+	//reflect
+	__m128 sse_rfe_ps(__m128 I, __m128 N);
 
-//refract
-__m128 _mm_rfa_ps(__m128 I, __m128 N, __m128 eta);
+	//refract
+	__m128 sse_rfa_ps(__m128 I, __m128 N, __m128 eta);
 
+}//namespace detail
+}//namespace glm
 
 #include "intrinsic_geometric.inl"
 

+ 6 - 0
glm/core/intrinsic_geometric.inl

@@ -7,6 +7,9 @@
 // File    : glm/core/intrinsic_geometric.inl
 ///////////////////////////////////////////////////////////////////////////////////////////////////
 
+namespace glm{
+namespace detail{
+
 //length
 inline __m128 _mm_len_ps(__m128 x)
 {
@@ -115,3 +118,6 @@ inline __m128 _mm_rfa_ps(__m128 I, __m128 N, __m128 eta)
 
 	return sub2;
 }
+
+}//namespace detail
+}//namespace glm
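
The geometric header declares sse_dot_ps and an SSE1-only sse_dot_ss. A sketch of how a four-lane dot product is typically reduced with SSE1 shuffles alone; this is my illustration of the technique, not the committed implementation:

#include <xmmintrin.h>

inline __m128 sketch_dot_ps(__m128 a, __m128 b)
{
	__m128 mul = _mm_mul_ps(a, b);                                   // p0 p1 p2 p3
	__m128 swp = _mm_shuffle_ps(mul, mul, _MM_SHUFFLE(2, 3, 0, 1));  // p1 p0 p3 p2
	__m128 add = _mm_add_ps(mul, swp);                               // p0+p1 p0+p1 p2+p3 p2+p3
	__m128 rev = _mm_shuffle_ps(add, add, _MM_SHUFFLE(0, 1, 2, 3));  // lanes reversed
	return _mm_add_ps(add, rev);                                     // full sum in every lane
}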

+ 17 - 12
glm/core/intrinsic_matrix.hpp

@@ -7,28 +7,33 @@
 // File    : glm/core/intrinsic_common.hpp
 ///////////////////////////////////////////////////////////////////////////////////////////////////
 
-#ifndef GLM_DETAIL_INTRINSIC_MATRIX_INCLUDED
-#define GLM_DETAIL_INTRINSIC_MATRIX_INCLUDED
+#ifndef glm_detail_intrinsic_matrix
+#define glm_detail_intrinsic_matrix
 
-#include "../glm.hpp"
 #include "intrinsic_geometric.hpp"
 
-void _mm_add_ps(__m128 in1[4], __m128 in2[4], __m128 out[4]);
+namespace glm{
+namespace detail
+{
+	void sse_add_ps(__m128 in1[4], __m128 in2[4], __m128 out[4]);
 
-void _mm_sub_ps(__m128 in1[4], __m128 in2[4], __m128 out[4]);
+	void sse_sub_ps(__m128 in1[4], __m128 in2[4], __m128 out[4]);
 
-__m128 _mm_mul_ps(__m128 m[4], __m128 v);
+	__m128 sse_mul_ps(__m128 m[4], __m128 v);
 
-__m128 _mm_mul_ps(__m128 v, __m128 m[4]);
+	__m128 sse_mul_ps(__m128 v, __m128 m[4]);
 
-void _mm_mul_ps(__m128 const in1[4], __m128 const in2[4], __m128 out[4]);
+	void sse_mul_ps(__m128 const in1[4], __m128 const in2[4], __m128 out[4]);
 
-void _mm_transpose_ps(__m128 const in[4], __m128 out[4]);
+	void sse_transpose_ps(__m128 const in[4], __m128 out[4]);
 
-void _mm_inverse_ps(__m128 const in[4], __m128 out[4]);
+	void sse_inverse_ps(__m128 const in[4], __m128 out[4]);
 
-void _mm_rotate_ps(__m128 const in[4], float Angle, float const v[3], __m128 out[4]);
+	void sse_rotate_ps(__m128 const in[4], float Angle, float const v[3], __m128 out[4]);
+
+}//namespace detail
+}//namespace glm
 
 #include "intrinsic_matrix.inl"
 
-#endif//GLM_DETAIL_INTRINSIC_MATRIX_INCLUDED
+#endif//glm_detail_intrinsic_matrix
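
sse_transpose_ps takes four column registers and writes back the transposed columns. A minimal sketch of what that amounts to, using the stock _MM_TRANSPOSE4_PS macro from <xmmintrin.h> instead of the hand-written shuffles in the .inl below; sketch_transpose_ps is an illustrative name:

#include <xmmintrin.h>

inline void sketch_transpose_ps(__m128 const in[4], __m128 out[4])
{
	__m128 r0 = in[0], r1 = in[1], r2 = in[2], r3 = in[3];
	_MM_TRANSPOSE4_PS(r0, r1, r2, r3);   // in-register 4x4 transpose
	out[0] = r0; out[1] = r1; out[2] = r2; out[3] = r3;
}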

+ 41 - 54
glm/core/intrinsic_matrix.inl

@@ -7,6 +7,9 @@
 // File    : glm/core/intrinsic_common.inl
 ///////////////////////////////////////////////////////////////////////////////////////////////////
 
+namespace glm{
+namespace detail{
+
 static const __m128 one = _mm_set_ps1(1.0f);
 static const __m128 pi = _mm_set_ps1(3.141592653589793238462643383279f);
 static const __m128 _m128_rad_ps = _mm_set_ps1(3.141592653589793238462643383279f / 180.f);
@@ -181,7 +184,7 @@ inline void _mm_transpose_ps(__m128 const in[4], __m128 out[4])
     out[3] = _mm_shuffle_ps(tmp2, tmp3, 0xDD);
 }
 
-inline __m128 _mm_slow_det_ps(__m128 const in[4])
+inline __m128 _mm_det_ps(__m128 const in[4])
 {
 	__m128 Fac0;
 	{
@@ -407,29 +410,14 @@ inline __m128 _mm_slow_det_ps(__m128 const in[4])
 	return Det0;
 }
 
-/*
-float ssedot(const float * __restrict u, const float *  __restrict v){  
-    __m128 uv = _mm_mul_ps(_mm_load_ps(u), _mm_load_ps(v));  
-    uv = _mm_hadd_ps(uv, uv); // or shuffle like there's no tomorrow   
-    uv = _mm_hadd_ps(uv, uv); // if there ain't no haddps around.  
-    return __builtin_ia32_vec_ext_v4sf(uv, 0);  
-} 
-
-float dot_sse(float *u, float *v) 
-{  
-    __m128 mul  = _mm_mul_ps(_mm_load_ps(u), _mm_load_ps(v)); // { a,b,c,d }  
-    __m128 hi   = _mm_movehl_ps(mul, mul); // { c,d,c,d }  
-    __m128 add  = _mm_add_ps(mul, hi); // { a+c, b+d, c+c, d+d }  
-    __m128 half = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(add), _MM_SHUFFLE(1, 1, 1, 1))); // { b+d }  
-    __m128 join = _mm_add_ss(add, half); // { a+c+b+d }  
-    float f;  
-    _mm_store_ss(&f, join);  
-    return f;  
-}
-*/
-
-inline __m128 _mm_det_ps(__m128 const m[4])
+template <typename T>
+inline typename detail::tmat4x4<T>::value_type _mm_det2_ps
+(
+	__m128 const m[4]
+)
 {
+	GLM_STATIC_ASSERT(detail::type<T>::is_float, "'determinant' only accept floating-point inputs");
+
 	//T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
 	//T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
 	//T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
@@ -437,27 +425,24 @@ inline __m128 _mm_det_ps(__m128 const m[4])
 	//T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
 	//T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
 
-	__m128 const & m2 = m[2];
-	__m128 const & m3 = m[3];
-
 	// First 2 columns
 	__m128 Swp2A = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 1, 1, 2));
 	__m128 Swp3A = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(3, 2, 3, 3));
-	__m128 MulA = _mm_mul_ps(Swp2A, Swp3A);
+	__m128 MulA = _mm_mul_ps(Swp2A, Swp3A);
 
 	// Second 2 columns
-	__m128 Swp2B = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(3, 2, 3, 3));
-	__m128 Swp3B = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(0, 1, 1, 2));
-	__m128 MulB = _mm_mul_ps(Swp2B, Swp3B);
+	__m128 Swp2B = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 1, 1, 2));
+	__m128 Swp3B = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(3, 2, 3, 3));
+	__m128 MulB = _mm_mul_ps(Swp2A, Swp3A);
 
 	// Columns subtraction
-	__m128 SubE = _mm_sub_ps(MulA, MulB);
+	__m128 SubAB = _mm_sub_ps(MulA, MulB);
 
 	// Last 2 rows
-	__m128 Swp2C = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 0, 1, 2));
-	__m128 Swp3C = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(1, 2, 0, 0));
-	__m128 MulC = _mm_mul_ps(Swp2C, Swp3C);
-	__m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC);
+	__m128 Swp2C = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(1, 2, 0, 0));
+	__m128 Swp3C = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(0, 0, 1, 2));
+	__m128 MulC = _mm_mul_ps(Swp2C, Swp3C);
+	__m128 SwpD = __mm_hl_ps(MulC);
 
 	//detail::tvec4<T> DetCof(
 	//	+ (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02),
@@ -465,31 +450,29 @@ inline __m128 _mm_det_ps(__m128 const m[4])
 	//	+ (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05),
 	//	- (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05));
 
-	__m128 SubFacA = _mm_shuffle_ps(SubE, SubE, _MM_SHUFFLE(2, 1, 0, 0));
-	__m128 SwpFacA = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(0, 0, 0, 1));
-	__m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA);
+	__m128 SubFacA = _mm_shuffle_ps(SubAB, SubAB, _MM_SHUFFLE(2, 1, 0, 0));
+	__m128 SwpFacA = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(0, 0, 0, 1));
+	__m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA);
 
-	__m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1));
-	__m128 SubFacB = _mm_shuffle_ps(SubTmpB, SubTmpB, _MM_SHUFFLE(3, 1, 1, 0));//SubF[0], SubE[3], SubE[3], SubE[1];
-	__m128 SwpFacB = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(1, 1, 2, 2));
-	__m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB);
+	__m128 SubFacB = ;
+	__m128 SwpFacB = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(1, 1, 2, 2));
+	__m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB);
 
-	__m128 SubRes = _mm_sub_ps(MulFacA, MulFacB);
+	__m128 SubRes = _mm_sub_ps(MulFacA, MulFacA);
 
-	__m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2));
-	__m128 SubFacC = _mm_shuffle_ps(SubTmpC, SubTmpC, _MM_SHUFFLE(3, 3, 2, 0));
-	__m128 SwpFacC = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(2, 3, 3, 3));
-	__m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC);
+	__m128 SubFacC = ;
+	__m128 SwpFacC = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(1, 1, 2, 2));
+	__m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC);
 
-	__m128 AddRes = _mm_add_ps(SubRes, MulFacC);
-	__m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f));
+	__m128 AddRes = _mm_add_ps(SubRes, MulFacC);
+	__m128 DetCof = _mm_mul_ps(AddRes, _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f));
 
 	//return m[0][0] * DetCof[0]
 	//	 + m[0][1] * DetCof[1]
 	//	 + m[0][2] * DetCof[2]
 	//	 + m[0][3] * DetCof[3];
 
-	return _mm_dot_ps(m[0], DetCof);
+	return _mm_dot_ps(m[0], DetCof);
 }
 
 inline void _mm_inverse_ps(__m128 const in[4], __m128 out[4])
@@ -957,6 +940,7 @@ inline void _mm_inverse_fast_ps(__m128 const in[4], __m128 out[4])
 	out[3] = _mm_mul_ps(Inv3, Rcp0);
 }
 
+
 void _mm_rotate_ps(__m128 const in[4], float Angle, float const v[3], __m128 out[4])
 {
 	float a = glm::radians(Angle);
@@ -1029,8 +1013,11 @@ void _mm_rotate_ps(__m128 const in[4], float Angle, float const v[3], __m128 out
 
 void _mm_outer_ps(__m128 const & c, __m128 const & r, __m128 out[4])
 {
-	out[0] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(0, 0, 0, 0)));
-	out[1] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(1, 1, 1, 1)));
-	out[2] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(2, 2, 2, 2)));
-	out[3] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(3, 3, 3, 3)));
+	out[0] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(0, 0, 0, 0)));
+	out[1] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(1, 1, 1, 1)));
+	out[2] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(2, 2, 2, 2)));
+	out[3] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(3, 3, 3, 3)));
 }
+
+}//namespace detail
+}//namespace glm
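
For the matrix-vector overload sse_mul_ps(__m128 m[4], __m128 v), with GLM's column-major storage the usual pattern is to broadcast each component of v and accumulate the scaled columns. A hedged sketch of that technique; sketch_mul_mat4_vec4 is an illustrative name, not the committed body:

#include <xmmintrin.h>

inline __m128 sketch_mul_mat4_vec4(__m128 const m[4], __m128 v)
{
	__m128 r = _mm_mul_ps(m[0], _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0)));         // v.x * column 0
	r = _mm_add_ps(r, _mm_mul_ps(m[1], _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1)))); // + v.y * column 1
	r = _mm_add_ps(r, _mm_mul_ps(m[2], _mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 2, 2, 2)))); // + v.z * column 2
	r = _mm_add_ps(r, _mm_mul_ps(m[3], _mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 3)))); // + v.w * column 3
	return r;
}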

+ 24 - 0
glm/core/intrinsic_trigonometric.hpp

@@ -0,0 +1,24 @@
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// OpenGL Mathematics Copyright (c) 2005 - 2010 G-Truc Creation (www.g-truc.net)
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// Created : 2009-06-09
+// Updated : 2009-06-09
+// Licence : This source is under MIT License
+// File    : glm/core/intrinsic_trigonometric.hpp
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifndef glm_detail_intrinsic_trigonometric
+#define glm_detail_intrinsic_trigonometric
+
+#include "../setup.hpp"
+
+namespace glm{
+namespace detail
+{
+
+}//namespace detail
+}//namespace glm
+
+#include "intrinsic_trigonometric.inl"
+
+#endif//glm_detail_intrinsic_trigonometric

+ 12 - 3
glm/core/intrinsic_vector_relational.hpp

@@ -7,9 +7,18 @@
 // File    : glm/core/intrinsic_vector_relational.hpp
 ///////////////////////////////////////////////////////////////////////////////////////////////////
 
-#ifndef GLM_DETAIL_INTRINSIC_VECTOR_RELATIONAL_INCLUDED
-#define GLM_DETAIL_INTRINSIC_VECTOR_RELATIONAL_INCLUDED
+#ifndef glm_detail_intrinsic_vector_relational
+#define glm_detail_intrinsic_vector_relational
+
+#include "../setup.hpp"
+
+namespace glm{
+namespace detail
+{
+
+}//namespace detail
+}//namespace glm
 
 #include "intrinsic_vector_relational.inl"
 
-#endif//GLM_DETAIL_INTRINSIC_VECTOR_RELATIONAL_INCLUDED
+#endif//glm_detail_intrinsic_vector_relational