@@ -1,370 +1,348 @@
#if GLM_ARCH & GLM_ARCH_SSE2_BIT

-namespace glm{
-namespace detail
-{
-# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
- template<qualifier Q, int E0, int E1, int E2, int E3>
- struct _swizzle_base1<4, float, Q, E0,E1,E2,E3, true> : public _swizzle_base0<float, 4>
+namespace glm {
+ namespace detail
{
- GLM_FUNC_QUALIFIER vec<4, float, Q> operator ()() const
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR
+ template<qualifier Q, int E0, int E1, int E2, int E3>
+ struct _swizzle_base1<4, float, Q, E0, E1, E2, E3, true> : public _swizzle_base0<float, 4>
{
- __m128 data = *reinterpret_cast<__m128 const*>(&this->_buffer);
+ GLM_FUNC_QUALIFIER vec<4, float, Q> operator ()() const
+ {
+ __m128 data = *reinterpret_cast<__m128 const*>(&this->_buffer);

- vec<4, float, Q> Result;
+ vec<4, float, Q> Result;
# if GLM_ARCH & GLM_ARCH_AVX_BIT
Result.data = _mm_permute_ps(data, _MM_SHUFFLE(E3, E2, E1, E0));
# else
Result.data = _mm_shuffle_ps(data, data, _MM_SHUFFLE(E3, E2, E1, E0));
# endif
- return Result;
- }
- };
-
- template<qualifier Q, int E0, int E1, int E2, int E3>
- struct _swizzle_base1<4, int, Q, E0,E1,E2,E3, true> : public _swizzle_base0<int, 4>
- {
- GLM_FUNC_QUALIFIER vec<4, int, Q> operator ()() const
- {
- __m128i data = *reinterpret_cast<__m128i const*>(&this->_buffer);
-
- vec<4, int, Q> Result;
- Result.data = _mm_shuffle_epi32(data, _MM_SHUFFLE(E3, E2, E1, E0));
- return Result;
- }
- };
-
- template<qualifier Q, int E0, int E1, int E2, int E3>
- struct _swizzle_base1<4, uint, Q, E0,E1,E2,E3, true> : public _swizzle_base0<uint, 4>
- {
- GLM_FUNC_QUALIFIER vec<4, uint, Q> operator ()() const
- {
- __m128i data = *reinterpret_cast<__m128i const*>(&this->_buffer);
-
- vec<4, uint, Q> Result;
- Result.data = _mm_shuffle_epi32(data, _MM_SHUFFLE(E3, E2, E1, E0));
- return Result;
- }
- };
+ return Result;
+ }
+ };
+
+ template<qualifier Q, int E0, int E1, int E2, int E3>
+ struct _swizzle_base1<4, int, Q, E0, E1, E2, E3, true> : public _swizzle_base0<int, 4>
+ {
+ GLM_FUNC_QUALIFIER vec<4, int, Q> operator ()() const
+ {
+ __m128i data = *reinterpret_cast<__m128i const*>(&this->_buffer);
+
+ vec<4, int, Q> Result;
+ Result.data = _mm_shuffle_epi32(data, _MM_SHUFFLE(E3, E2, E1, E0));
+ return Result;
+ }
+ };
+
+ template<qualifier Q, int E0, int E1, int E2, int E3>
+ struct _swizzle_base1<4, uint, Q, E0, E1, E2, E3, true> : public _swizzle_base0<uint, 4>
+ {
+ GLM_FUNC_QUALIFIER vec<4, uint, Q> operator ()() const
+ {
+ __m128i data = *reinterpret_cast<__m128i const*>(&this->_buffer);
+
+ vec<4, uint, Q> Result;
+ Result.data = _mm_shuffle_epi32(data, _MM_SHUFFLE(E3, E2, E1, E0));
+ return Result;
+ }
+ };
# endif// GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR

- template<qualifier Q>
- struct compute_vec4_add<float, Q, true>
- {
- static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ template<qualifier Q>
+ struct compute_vec4_add<float, Q, true>
{
- vec<4, float, Q> Result;
- Result.data = _mm_add_ps(a.data, b.data);
- return Result;
- }
- };
+ static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
+ Result.data = _mm_add_ps(a.data, b.data);
+ return Result;
+ }
+ };

# if GLM_ARCH & GLM_ARCH_AVX_BIT
- template<qualifier Q>
- struct compute_vec4_add<double, Q, true>
- {
- static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b)
- {
- vec<4, double, Q> Result;
- Result.data = _mm256_add_pd(a.data, b.data);
- return Result;
- }
- };
+ template<qualifier Q>
+ struct compute_vec4_add<double, Q, true>
+ {
+ static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b)
+ {
+ vec<4, double, Q> Result;
+ Result.data = _mm256_add_pd(a.data, b.data);
+ return Result;
+ }
+ };
# endif

- template<qualifier Q>
- struct compute_vec4_sub<float, Q, true>
- {
- static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ template<qualifier Q>
+ struct compute_vec4_sub<float, Q, true>
{
- vec<4, float, Q> Result;
- Result.data = _mm_sub_ps(a.data, b.data);
- return Result;
- }
- };
+ static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
+ Result.data = _mm_sub_ps(a.data, b.data);
+ return Result;
+ }
+ };

# if GLM_ARCH & GLM_ARCH_AVX_BIT
- template<qualifier Q>
- struct compute_vec4_sub<double, Q, true>
- {
- static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b)
- {
- vec<4, double, Q> Result;
- Result.data = _mm256_sub_pd(a.data, b.data);
- return Result;
- }
- };
+ template<qualifier Q>
+ struct compute_vec4_sub<double, Q, true>
+ {
+ static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b)
+ {
+ vec<4, double, Q> Result;
+ Result.data = _mm256_sub_pd(a.data, b.data);
+ return Result;
+ }
+ };
# endif

- template<qualifier Q>
- struct compute_vec4_mul<float, Q, true>
- {
- static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ template<qualifier Q>
+ struct compute_vec4_mul<float, Q, true>
{
- vec<4, float, Q> Result;
- Result.data = _mm_mul_ps(a.data, b.data);
- return Result;
- }
- };
+ static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
+ Result.data = _mm_mul_ps(a.data, b.data);
+ return Result;
+ }
+ };

# if GLM_ARCH & GLM_ARCH_AVX_BIT
- template<qualifier Q>
- struct compute_vec4_mul<double, Q, true>
- {
- static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b)
- {
- vec<4, double, Q> Result;
- Result.data = _mm256_mul_pd(a.data, b.data);
- return Result;
- }
- };
+ template<qualifier Q>
+ struct compute_vec4_mul<double, Q, true>
+ {
+ static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b)
+ {
+ vec<4, double, Q> Result;
+ Result.data = _mm256_mul_pd(a.data, b.data);
+ return Result;
+ }
+ };
# endif

- template<qualifier Q>
- struct compute_vec4_div<float, Q, true>
- {
- static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ template<qualifier Q>
+ struct compute_vec4_div<float, Q, true>
{
- vec<4, float, Q> Result;
- Result.data = _mm_div_ps(a.data, b.data);
- return Result;
- }
- };
+ static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
+ Result.data = _mm_div_ps(a.data, b.data);
+ return Result;
+ }
+ };

# if GLM_ARCH & GLM_ARCH_AVX_BIT
- template<qualifier Q>
- struct compute_vec4_div<double, Q, true>
- {
- static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b)
- {
- vec<4, double, Q> Result;
- Result.data = _mm256_div_pd(a.data, b.data);
- return Result;
- }
- };
+ template<qualifier Q>
+ struct compute_vec4_div<double, Q, true>
+ {
+ static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b)
+ {
+ vec<4, double, Q> Result;
+ Result.data = _mm256_div_pd(a.data, b.data);
+ return Result;
+ }
+ };
# endif

- template<>
- struct compute_vec4_div<float, aligned_lowp, true>
- {
- static vec<4, float, aligned_lowp> call(vec<4, float, aligned_lowp> const& a, vec<4, float, aligned_lowp> const& b)
+ template<>
+ struct compute_vec4_div<float, aligned_lowp, true>
{
- vec<4, float, aligned_lowp> Result;
- Result.data = _mm_mul_ps(a.data, _mm_rcp_ps(b.data));
- return Result;
- }
- };
+ static vec<4, float, aligned_lowp> call(vec<4, float, aligned_lowp> const& a, vec<4, float, aligned_lowp> const& b)
+ {
+ vec<4, float, aligned_lowp> Result;
+ Result.data = _mm_mul_ps(a.data, _mm_rcp_ps(b.data));
+ return Result;
+ }
+ };

- template<typename T, qualifier Q>
- struct compute_vec4_and<uint, Q, true, 32, true>
- {
- static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ template<typename T, qualifier Q>
+ struct compute_vec4_and<T, Q, true, 32, true>
{
- vec<4, T, Q> Result;
- Result.data = _mm_and_si128(a.data, b.data);
- return Result;
- }
- };
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm_and_si128(a.data, b.data);
+ return Result;
+ }
+ };

# if GLM_ARCH & GLM_ARCH_AVX2_BIT
- template<typename T, qualifier Q>
- struct compute_vec4_and<T, Q, true, 64, true>
- {
- static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
- {
- vec<4, T, Q> Result;
- Result.data = _mm256_and_si256(a.data, b.data);
- return Result;
- }
- };
+ template<typename T, qualifier Q>
+ struct compute_vec4_and<T, Q, true, 64, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm256_and_si256(a.data, b.data);
+ return Result;
+ }
+ };
# endif

- template<typename T, qualifier Q>
- struct compute_vec4_or<int, Q, true, 32, true>
- {
- static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ template<typename T, qualifier Q>
+ struct compute_vec4_or<T, Q, true, 32, true>
{
- vec<4, T, Q> Result;
- Result.data = _mm_or_si128(a.data, b.data);
- return Result;
- }
- };
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm_or_si128(a.data, b.data);
+ return Result;
+ }
+ };

# if GLM_ARCH & GLM_ARCH_AVX2_BIT
- template<qualifier Q>
- struct compute_vec4_or<int64, Q, true, 64, true>
- {
- GLM_FUNC_QUALIFIER static vec<4, int64, Q> call(vec<4, int64, Q> const& a, vec<4, int64, Q> const& b)
- {
- vec<4, int64, Q> Result;
- Result.data = _mm256_or_si256(a.data, b.data);
- return Result;
- }
- };
-
- template<typename T, qualifier Q>
- struct compute_vec4_or<uint64, Q, true, 64, true>
- {
- static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
- {
- vec<4, T, Q> Result;
- Result.data = _mm256_or_si256(a.data, b.data);
- return Result;
- }
- };
+ template<typename T, qualifier Q>
+ struct compute_vec4_or<T, Q, true, 64, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm256_or_si256(a.data, b.data);
+ return Result;
+ }
+ };
# endif

- template<typename T, qualifier Q>
- struct compute_vec4_xor<int, Q, true, 32, true>
- {
- static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ template<typename T, qualifier Q>
+ struct compute_vec4_xor<T, Q, true, 32, true>
{
- vec<4, T, Q> Result;
- Result.data = _mm_xor_si128(a.data, b.data);
- return Result;
- }
- };
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm_xor_si128(a.data, b.data);
+ return Result;
+ }
+ };

# if GLM_ARCH & GLM_ARCH_AVX2_BIT
- template<typename T, qualifier Q>
- struct compute_vec4_xor<T, Q, true, 64, true>
- {
- static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
- {
- vec<4, T, Q> Result;
- Result.data = _mm256_xor_si256(a.data, b.data);
- return Result;
- }
- };
+ template<typename T, qualifier Q>
+ struct compute_vec4_xor<T, Q, true, 64, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm256_xor_si256(a.data, b.data);
+ return Result;
+ }
+ };
# endif

- template<typename T, qualifier Q>
- struct compute_vec4_shift_left<T, Q, true, 32, true>
- {
- static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ template<typename T, qualifier Q>
+ struct compute_vec4_shift_left<T, Q, true, 32, true>
{
- vec<4, T, Q> Result;
- Result.data = _mm_sll_epi32(a.data, b.data);
- return Result;
- }
- };
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm_sll_epi32(a.data, b.data);
+ return Result;
+ }
+ };

# if GLM_ARCH & GLM_ARCH_AVX2_BIT
- template<typename T, qualifier Q>
- struct compute_vec4_shift_left<T, Q, true, 64, true>
- {
- static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
- {
- vec<4, T, Q> Result;
- Result.data = _mm256_sll_epi64(a.data, b.data);
- return Result;
- }
- };
+ template<typename T, qualifier Q>
+ struct compute_vec4_shift_left<T, Q, true, 64, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm256_sll_epi64(a.data, b.data);
+ return Result;
+ }
+ };
# endif

- template<typename T, qualifier Q>
- struct compute_vec4_shift_right<T, Q, true, 32, true>
- {
- static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ template<typename T, qualifier Q>
+ struct compute_vec4_shift_right<T, Q, true, 32, true>
{
- vec<4, T, Q> Result;
- Result.data = _mm_srl_epi32(a.data, b.data);
- return Result;
- }
- };
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm_srl_epi32(a.data, b.data);
+ return Result;
+ }
+ };

# if GLM_ARCH & GLM_ARCH_AVX2_BIT
- template<typename T, qualifier Q>
- struct compute_vec4_shift_right<int64, Q, true, 64, true>
- {
- GLM_FUNC_QUALIFIER static vec<4, int64, Q> call(vec<4, int64, Q> const& a, vec<4, int64, Q> const& b)
- {
- vec<4, int64, Q> Result;
- Result.data = _mm256_srl_epi64(a.data, b.data);
- return Result;
- }
- };
-
- template<typename T, qualifier Q>
- struct compute_vec4_shift_right<T, Q, true, 64, true>
- {
- static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
- {
- vec<4, T, Q> Result;
- Result.data = _mm256_srl_epi64(a.data, b.data);
- return Result;
- }
- };
+ template<typename T, qualifier Q>
+ struct compute_vec4_shift_right<T, Q, true, 64, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm256_srl_epi64(a.data, b.data);
+ return Result;
+ }
+ };
# endif

- template<typename T, qualifier Q>
- struct compute_vec4_bitwise_not<T, Q, true, 32, true>
- {
- static vec<4, T, Q> call(vec<4, T, Q> const& v)
+ template<typename T, qualifier Q>
+ struct compute_vec4_bitwise_not<T, Q, true, 32, true>
{
- vec<4, T, Q> Result;
- Result.data = _mm_xor_si128(v.data, _mm_set1_epi32(-1));
- return Result;
- }
- };
+ static vec<4, T, Q> call(vec<4, T, Q> const& v)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm_xor_si128(v.data, _mm_set1_epi32(-1));
+ return Result;
+ }
+ };

# if GLM_ARCH & GLM_ARCH_AVX2_BIT
- template<typename T, qualifier Q>
- struct compute_vec4_bitwise_not<T, Q, true, 64, true>
- {
- static vec<4, T, Q> call(vec<4, T, Q> const& v)
- {
- vec<4, T, Q> Result;
- Result.data = _mm256_xor_si256(v.data, _mm_set1_epi32(-1));
- return Result;
- }
- };
+ template<typename T, qualifier Q>
+ struct compute_vec4_bitwise_not<T, Q, true, 64, true>
+ {
+ static vec<4, T, Q> call(vec<4, T, Q> const& v)
+ {
+ vec<4, T, Q> Result;
+ Result.data = _mm256_xor_si256(v.data, _mm_set1_epi32(-1));
+ return Result;
+ }
+ };
# endif

- template<qualifier Q>
- struct compute_vec4_equal<float, Q, false, 32, true>
- {
- static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+ template<qualifier Q>
+ struct compute_vec4_equal<float, Q, false, 32, true>
{
- return _mm_movemask_ps(_mm_cmpneq_ps(v1.data, v2.data)) == 0;
- }
- };
+ static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+ {
+ return _mm_movemask_ps(_mm_cmpneq_ps(v1.data, v2.data)) == 0;
+ }
+ };

# if GLM_ARCH & GLM_ARCH_SSE41_BIT
- template<qualifier Q>
- struct compute_vec4_equal<int, Q, true, 32, true>
- {
- static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
- {
- //return _mm_movemask_epi8(_mm_cmpeq_epi32(v1.data, v2.data)) != 0;
- __m128i neq = _mm_xor_si128(v1.data, v2.data);
- return _mm_test_all_zeros(neq, neq) == 0;
- }
- };
+ template<qualifier Q>
+ struct compute_vec4_equal<int, Q, true, 32, true>
+ {
+ static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
+ {
+ //return _mm_movemask_epi8(_mm_cmpeq_epi32(v1.data, v2.data)) != 0;
+ __m128i neq = _mm_xor_si128(v1.data, v2.data);
+ return _mm_test_all_zeros(neq, neq) == 0;
+ }
+ };
# endif

- template<qualifier Q>
- struct compute_vec4_nequal<float, Q, false, 32, true>
- {
- static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+ template<qualifier Q>
+ struct compute_vec4_nequal<float, Q, false, 32, true>
{
- return _mm_movemask_ps(_mm_cmpneq_ps(v1.data, v2.data)) != 0;
- }
- };
+ static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+ {
+ return _mm_movemask_ps(_mm_cmpneq_ps(v1.data, v2.data)) != 0;
+ }
+ };

# if GLM_ARCH & GLM_ARCH_SSE41_BIT
- template<qualifier Q>
- struct compute_vec4_nequal<int, Q, true, 32, true>
- {
- static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
- {
- //return _mm_movemask_epi8(_mm_cmpneq_epi32(v1.data, v2.data)) != 0;
- __m128i neq = _mm_xor_si128(v1.data, v2.data);
- return _mm_test_all_zeros(neq, neq) != 0;
- }
- };
+ template<qualifier Q>
+ struct compute_vec4_nequal<int, Q, true, 32, true>
+ {
+ static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
+ {
+ //return _mm_movemask_epi8(_mm_cmpneq_epi32(v1.data, v2.data)) != 0;
+ __m128i neq = _mm_xor_si128(v1.data, v2.data);
+ return _mm_test_all_zeros(neq, neq) != 0;
+ }
+ };
# endif
-}//namespace detail
+ }//namespace detail

template<>
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(float _s) :
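The swizzle specializations in the hunk above bake the element indices E0..E3 into a single shuffle immediate via _MM_SHUFFLE. A minimal standalone sketch of how that immediate selects lanes (illustrative values only; this snippet is not part of the patch):

// _MM_SHUFFLE(E3, E2, E1, E0) packs four 2-bit source-lane indices into one
// 8-bit immediate; result lane i receives source lane Ei, so the reversed
// index order (0, 1, 2, 3) produces the .wzyx swizzle.
#include <xmmintrin.h>
#include <cstdio>

int main()
{
    __m128 v = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f); // lanes: x=1, y=2, z=3, w=4
    __m128 r = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 1, 2, 3));
    float out[4];
    _mm_storeu_ps(out, r);
    std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); // 4 3 2 1
    return 0;
}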
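The SSE4.1 equality specializations rely on the XOR/PTEST idiom: two integer vectors are bit-identical exactly when their XOR is all zero bits, which _mm_test_all_zeros reports in a single test. A standalone sketch of that property (illustrative values only; not part of the patch):

// _mm_test_all_zeros(a, mask) returns 1 iff (a & mask) is all zero bits, so
// passing neq = v1 ^ v2 as both arguments asks "is v1 bit-identical to v2?".
#include <smmintrin.h> // SSE4.1
#include <cstdio>

int main()
{
    __m128i v1 = _mm_set_epi32(1, 2, 3, 4);
    __m128i v2 = _mm_set_epi32(1, 2, 3, 4);
    __m128i neq = _mm_xor_si128(v1, v2); // all zeros iff v1 == v2
    std::printf("%d\n", _mm_test_all_zeros(neq, neq)); // prints 1
    return 0;
}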
@@ -486,222 +464,222 @@ namespace detail

#if GLM_ARCH & GLM_ARCH_NEON_BIT
namespace glm {
-namespace detail {
-
- template<qualifier Q>
- struct compute_vec4_add<float, Q, true>
- {
- static
- vec<4, float, Q>
- call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
- {
- vec<4, float, Q> Result;
- Result.data = vaddq_f32(a.data, b.data);
- return Result;
- }
- };
-
- template<qualifier Q>
- struct compute_vec4_add<uint, Q, true>
- {
- static
- vec<4, uint, Q>
- call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b)
- {
- vec<4, uint, Q> Result;
- Result.data = vaddq_u32(a.data, b.data);
- return Result;
- }
- };
-
- template<qualifier Q>
- struct compute_vec4_add<int, Q, true>
- {
- static
- vec<4, int, Q>
- call(vec<4, int, Q> const& a, vec<4, int, Q> const& b)
- {
- vec<4, int, Q> Result;
- Result.data = vaddq_s32(a.data, b.data);
- return Result;
- }
- };
-
- template<qualifier Q>
- struct compute_vec4_sub<float, Q, true>
- {
- static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
- {
- vec<4, float, Q> Result;
- Result.data = vsubq_f32(a.data, b.data);
- return Result;
- }
- };
-
- template<qualifier Q>
- struct compute_vec4_sub<uint, Q, true>
- {
- static vec<4, uint, Q> call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b)
- {
- vec<4, uint, Q> Result;
- Result.data = vsubq_u32(a.data, b.data);
- return Result;
- }
- };
-
- template<qualifier Q>
- struct compute_vec4_sub<int, Q, true>
- {
- static vec<4, int, Q> call(vec<4, int, Q> const& a, vec<4, int, Q> const& b)
- {
- vec<4, int, Q> Result;
- Result.data = vsubq_s32(a.data, b.data);
- return Result;
- }
- };
-
- template<qualifier Q>
- struct compute_vec4_mul<float, Q, true>
- {
- static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
- {
- vec<4, float, Q> Result;
- Result.data = vmulq_f32(a.data, b.data);
- return Result;
- }
- };
-
- template<qualifier Q>
- struct compute_vec4_mul<uint, Q, true>
- {
- static vec<4, uint, Q> call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b)
- {
- vec<4, uint, Q> Result;
- Result.data = vmulq_u32(a.data, b.data);
- return Result;
- }
- };
-
- template<qualifier Q>
- struct compute_vec4_mul<int, Q, true>
- {
- static vec<4, int, Q> call(vec<4, int, Q> const& a, vec<4, int, Q> const& b)
- {
- vec<4, int, Q> Result;
- Result.data = vmulq_s32(a.data, b.data);
- return Result;
- }
- };
-
- template<qualifier Q>
- struct compute_vec4_div<float, Q, true>
- {
- static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
- {
- vec<4, float, Q> Result;
+ namespace detail {
+
+ template<qualifier Q>
+ struct compute_vec4_add<float, Q, true>
+ {
+ static
+ vec<4, float, Q>
+ call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
+ Result.data = vaddq_f32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_add<uint, Q, true>
+ {
+ static
+ vec<4, uint, Q>
+ call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b)
+ {
+ vec<4, uint, Q> Result;
+ Result.data = vaddq_u32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_add<int, Q, true>
+ {
+ static
+ vec<4, int, Q>
+ call(vec<4, int, Q> const& a, vec<4, int, Q> const& b)
+ {
+ vec<4, int, Q> Result;
+ Result.data = vaddq_s32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_sub<float, Q, true>
+ {
+ static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
+ Result.data = vsubq_f32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_sub<uint, Q, true>
+ {
+ static vec<4, uint, Q> call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b)
+ {
+ vec<4, uint, Q> Result;
+ Result.data = vsubq_u32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_sub<int, Q, true>
+ {
+ static vec<4, int, Q> call(vec<4, int, Q> const& a, vec<4, int, Q> const& b)
+ {
+ vec<4, int, Q> Result;
+ Result.data = vsubq_s32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_mul<float, Q, true>
+ {
+ static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
+ Result.data = vmulq_f32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_mul<uint, Q, true>
+ {
+ static vec<4, uint, Q> call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b)
+ {
+ vec<4, uint, Q> Result;
+ Result.data = vmulq_u32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_mul<int, Q, true>
+ {
+ static vec<4, int, Q> call(vec<4, int, Q> const& a, vec<4, int, Q> const& b)
+ {
+ vec<4, int, Q> Result;
+ Result.data = vmulq_s32(a.data, b.data);
+ return Result;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_div<float, Q, true>
+ {
+ static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b)
+ {
+ vec<4, float, Q> Result;
#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
- Result.data = vdivq_f32(a.data, b.data);
+ Result.data = vdivq_f32(a.data, b.data);
#else
- /* Arm assembler reference:
- *
- * The Newton-Raphson iteration: x[n+1] = x[n] * (2 - d * x[n])
- * converges to (1/d) if x0 is the result of VRECPE applied to d.
- *
- * Note: The precision usually improves with two interactions, but more than two iterations are not helpful. */
- float32x4_t x = vrecpeq_f32(b.data);
- x = vmulq_f32(vrecpsq_f32(b.data, x), x);
- x = vmulq_f32(vrecpsq_f32(b.data, x), x);
- Result.data = vmulq_f32(a.data, x);
+ /* Arm assembler reference:
+ *
+ * The Newton-Raphson iteration: x[n+1] = x[n] * (2 - d * x[n])
+ * converges to (1/d) if x0 is the result of VRECPE applied to d.
+ *
+ * Note: The precision usually improves with two interactions, but more than two iterations are not helpful. */
+ float32x4_t x = vrecpeq_f32(b.data);
+ x = vmulq_f32(vrecpsq_f32(b.data, x), x);
+ x = vmulq_f32(vrecpsq_f32(b.data, x), x);
+ Result.data = vmulq_f32(a.data, x);
#endif
- return Result;
- }
- };
+ return Result;
+ }
+ };

- template<qualifier Q>
- struct compute_vec4_equal<float, Q, false, 32, true>
- {
- static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+ template<qualifier Q>
+ struct compute_vec4_equal<float, Q, false, 32, true>
{
- uint32x4_t cmp = vceqq_f32(v1.data, v2.data);
+ static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+ {
+ uint32x4_t cmp = vceqq_f32(v1.data, v2.data);
#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
- cmp = vpminq_u32(cmp, cmp);
- cmp = vpminq_u32(cmp, cmp);
- uint32_t r = cmp[0];
+ cmp = vpminq_u32(cmp, cmp);
+ cmp = vpminq_u32(cmp, cmp);
+ uint32_t r = cmp[0];
#else
- uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp));
- cmpx2 = vpmin_u32(cmpx2, cmpx2);
- uint32_t r = cmpx2[0];
+ uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp));
+ cmpx2 = vpmin_u32(cmpx2, cmpx2);
+ uint32_t r = cmpx2[0];
#endif
- return r == ~0u;
- }
- };
+ return r == ~0u;
+ }
+ };

- template<qualifier Q>
- struct compute_vec4_equal<uint, Q, false, 32, true>
- {
- static bool call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2)
+ template<qualifier Q>
+ struct compute_vec4_equal<uint, Q, false, 32, true>
{
- uint32x4_t cmp = vceqq_u32(v1.data, v2.data);
+ static bool call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2)
+ {
+ uint32x4_t cmp = vceqq_u32(v1.data, v2.data);
#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
- cmp = vpminq_u32(cmp, cmp);
- cmp = vpminq_u32(cmp, cmp);
- uint32_t r = cmp[0];
+ cmp = vpminq_u32(cmp, cmp);
+ cmp = vpminq_u32(cmp, cmp);
+ uint32_t r = cmp[0];
#else
- uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp));
- cmpx2 = vpmin_u32(cmpx2, cmpx2);
- uint32_t r = cmpx2[0];
+ uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp));
+ cmpx2 = vpmin_u32(cmpx2, cmpx2);
+ uint32_t r = cmpx2[0];
#endif
- return r == ~0u;
- }
- };
+ return r == ~0u;
+ }
+ };

- template<qualifier Q>
- struct compute_vec4_equal<int, Q, false, 32, true>
- {
- static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
+ template<qualifier Q>
+ struct compute_vec4_equal<int, Q, false, 32, true>
{
- uint32x4_t cmp = vceqq_s32(v1.data, v2.data);
+ static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
+ {
+ uint32x4_t cmp = vceqq_s32(v1.data, v2.data);
#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
- cmp = vpminq_u32(cmp, cmp);
- cmp = vpminq_u32(cmp, cmp);
- uint32_t r = cmp[0];
+ cmp = vpminq_u32(cmp, cmp);
+ cmp = vpminq_u32(cmp, cmp);
+ uint32_t r = cmp[0];
#else
- uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp));
- cmpx2 = vpmin_u32(cmpx2, cmpx2);
- uint32_t r = cmpx2[0];
+ uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp));
+ cmpx2 = vpmin_u32(cmpx2, cmpx2);
+ uint32_t r = cmpx2[0];
#endif
- return r == ~0u;
- }
- };
-
- template<qualifier Q>
- struct compute_vec4_nequal<float, Q, false, 32, true>
- {
- static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
- {
- return !compute_vec4_equal<float, Q, false, 32, true>::call(v1, v2);
- }
- };
-
- template<qualifier Q>
- struct compute_vec4_nequal<uint, Q, false, 32, true>
- {
- static bool call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2)
- {
- return !compute_vec4_equal<uint, Q, false, 32, true>::call(v1, v2);
- }
- };
-
- template<qualifier Q>
- struct compute_vec4_nequal<int, Q, false, 32, true>
- {
- static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
- {
- return !compute_vec4_equal<int, Q, false, 32, true>::call(v1, v2);
- }
- };
-
-}//namespace detail
+ return r == ~0u;
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_nequal<float, Q, false, 32, true>
+ {
+ static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2)
+ {
+ return !compute_vec4_equal<float, Q, false, 32, true>::call(v1, v2);
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_nequal<uint, Q, false, 32, true>
+ {
+ static bool call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2)
+ {
+ return !compute_vec4_equal<uint, Q, false, 32, true>::call(v1, v2);
+ }
+ };
+
+ template<qualifier Q>
+ struct compute_vec4_nequal<int, Q, false, 32, true>
+ {
+ static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2)
+ {
+ return !compute_vec4_equal<int, Q, false, 32, true>::call(v1, v2);
+ }
+ };
+
+ }//namespace detail

#if !GLM_CONFIG_XYZW_ONLY
template<>
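The NEON equality specializations above reduce the 4-lane comparison mask with pairwise minimums: vceqq_* yields ~0u in each lane that compared equal, and two vpmin rounds fold the vector down to its minimum lane, which is ~0u only if every lane matched. A scalar sketch of that reduction (illustrative mask values; not part of the patch):

// Pairwise-min all-lanes reduction, mirroring the vpmin_u32 steps above.
#include <cstdint>
#include <cstdio>

int main()
{
    // Lane mask as produced by vceqq_u32: ~0u where lanes compared equal.
    uint32_t cmp[4] = {~0u, ~0u, ~0u, 0u};   // one lane differs
    uint32_t m01 = cmp[0] < cmp[1] ? cmp[0] : cmp[1];
    uint32_t m23 = cmp[2] < cmp[3] ? cmp[2] : cmp[3];
    uint32_t all = m01 < m23 ? m01 : m23;    // minimum over all four lanes
    std::printf("%d\n", all == ~0u);         // prints 0: not all lanes equal
    return 0;
}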
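The NEON division fallback in the second hunk refines the rough reciprocal estimate from vrecpeq_f32 with two Newton-Raphson steps, then multiplies by the numerator. A scalar sketch of the same iteration (the seed 0.34 is a made-up stand-in for the vrecpeq_f32 estimate; on hardware, vrecpsq_f32(d, x) computes 2 - d * x):

// Newton-Raphson reciprocal: x[n+1] = x[n] * (2 - d * x[n]) converges to 1/d.
#include <cstdio>

int main()
{
    float d = 3.0f;
    float x = 0.34f;          // coarse seed (roughly vrecpeq_f32 accuracy)
    x = x * (2.0f - d * x);   // first refinement step
    x = x * (2.0f - d * x);   // second step; further steps gain nothing in float
    std::printf("1/%g ~= %.7f\n", d, x); // ~0.3333333
    return 0;
}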