
Experiments to add bitfieldInterleave

Christophe Riccio, 13 years ago
commit 920ca0a242
3 changed files with 507 additions and 0 deletions
  1. glm/gtx/bit.hpp (+24, -0)
  2. glm/gtx/bit.inl (+42, -0)
  3. test/gtx/gtx_bit.cpp (+441, -0)

+ 24 - 0
glm/gtx/bit.hpp

@@ -132,6 +132,30 @@ namespace glm
 		int const & FromBit, 
 		int const & ToBit);
 
+	/// Interleaves the bits of x and y: bit i of x lands at bit 2i of the result, bit i of y at bit 2i + 1.
+	/// @see gtx_bit
+	int16 bitfieldInterleave(int8 x, int8 y);
+
+	/// Interleaves the bits of x and y: bit i of x lands at bit 2i of the result, bit i of y at bit 2i + 1.
+	/// @see gtx_bit
+	uint16 bitfieldInterleave(uint8 x, uint8 y);
+
+	/// Interleaves the bits of x and y: bit i of x lands at bit 2i of the result, bit i of y at bit 2i + 1.
+	/// @see gtx_bit
+	int32 bitfieldInterleave(int16 x, int16 y);
+
+	/// Interleaves the bits of x and y: bit i of x lands at bit 2i of the result, bit i of y at bit 2i + 1.
+	/// @see gtx_bit
+	uint32 bitfieldInterleave(uint16 x, uint16 y);
+
+	/// Interleaves the bits of x and y: bit i of x lands at bit 2i of the result, bit i of y at bit 2i + 1.
+	/// @see gtx_bit
+	int64 bitfieldInterleave(int32 x, int32 y);
+
+	/// Interleaves the bits of x and y: bit i of x lands at bit 2i of the result, bit i of y at bit 2i + 1.
+	/// @see gtx_bit
+	uint64 bitfieldInterleave(uint32 x, uint32 y);
+
 	/// @}
 } //namespace glm
 

+ 42 - 0
glm/gtx/bit.inl

@@ -597,4 +597,46 @@ namespace glm
 			Result &= ~(1 << i);
 		return Result;
 	}
+
+	namespace detail
+	{
+		template <typename PARAM, typename RET>
+		inline RET bitfieldInterleave(PARAM x, PARAM y)
+		{
+			// Bit i of x goes to bit 2i of the result, bit i of y to bit 2i + 1.
+			// The mask and the shifts are computed in the return type so that the
+			// upper half of the wide result is not truncated for 32-bit parameters.
+			RET Result = 0;
+			for(int i = 0; i < int(sizeof(PARAM) * 8); ++i)
+				Result |= ((RET(x) & (RET(1) << i)) << i) | ((RET(y) & (RET(1) << i)) << (i + 1));
+			return Result;
+		}
+	}//namespace detail
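+
+	// Example: interleaving x = 0xFF with y = 0x00 places the bits of x in the even
+	// result positions, so bitfieldInterleave(uint8(0xFF), uint8(0x00)) returns
+	// uint16(0x5555), while swapping the arguments returns uint16(0xAAAA).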
+
+	inline int16 bitfieldInterleave(int8 x, int8 y)
+	{
+		return detail::bitfieldInterleave<int8, int16>(x, y);
+	}
+
+	inline uint16 bitfieldInterleave(uint8 x, uint8 y)
+	{
+		return detail::bitfieldInterleave<uint8, uint16>(x, y);
+	}
+
+	inline int32 bitfieldInterleave(int16 x, int16 y)
+	{
+		return detail::bitfieldInterleave<int16, int32>(x, y);
+	}
+
+	inline uint32 bitfieldInterleave(uint16 x, uint16 y)
+	{
+		return detail::bitfieldInterleave<uint16, uint32>(x, y);
+	}
+
+	inline int64 bitfieldInterleave(int32 x, int32 y)
+	{
+		return detail::bitfieldInterleave<int32, int64>(x, y);
+	}
+
+	inline uint64 bitfieldInterleave(uint32 x, uint32 y)
+	{
+		return detail::bitfieldInterleave<uint32, uint64>(x, y);
+	}
 }//namespace glm

+ 441 - 0
test/gtx/gtx_bit.cpp

@@ -11,6 +11,10 @@
 #include <glm/gtc/type_precision.hpp>
 #include <glm/gtx/bit.hpp>
 #include <iostream>
+#include <vector>
+#include <ctime>
+#include <cassert>
+
+#include <emmintrin.h>
 
 enum result
 {
@@ -162,10 +166,447 @@ namespace bitRevert
 	}
 }//bitRevert
 
+inline glm::uint64 fastBitfieldInterleave(glm::uint32 x, glm::uint32 y)
+{
+	glm::uint64 REG1;
+	glm::uint64 REG2;
+
+	REG1 = x;
+	REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
+	REG1 = ((REG1 <<  8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
+	REG1 = ((REG1 <<  4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+	REG1 = ((REG1 <<  2) | REG1) & glm::uint64(0x3333333333333333);
+	REG1 = ((REG1 <<  1) | REG1) & glm::uint64(0x5555555555555555);
+
+	REG2 = y;
+	REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
+	REG2 = ((REG2 <<  8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
+	REG2 = ((REG2 <<  4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+	REG2 = ((REG2 <<  2) | REG2) & glm::uint64(0x3333333333333333);
+	REG2 = ((REG2 <<  1) | REG2) & glm::uint64(0x5555555555555555);
+
+	return REG1 | (REG2 << 1);
+}
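+
+// For reference, the cascade of shift-and-mask steps above spreads a 32-bit value so
+// that every source bit ends up in an even position. Tracing REG1 for x = 0xFFFFFFFF:
+//   after the << 16 step: 0x0000FFFF0000FFFF
+//   after the <<  8 step: 0x00FF00FF00FF00FF
+//   after the <<  4 step: 0x0F0F0F0F0F0F0F0F
+//   after the <<  2 step: 0x3333333333333333
+//   after the <<  1 step: 0x5555555555555555
+// y is expanded the same way and shifted left by one so its bits fill the odd positions.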
+
+inline glm::uint64 interleaveBitfieldInterleave(glm::uint32 x, glm::uint32 y)
+{
+	glm::uint64 REG1;
+	glm::uint64 REG2;
+
+	REG1 = x;
+	REG2 = y;
+
+	REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
+	REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
+
+	REG1 = ((REG1 <<  8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
+	REG2 = ((REG2 <<  8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
+
+	REG1 = ((REG1 <<  4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+	REG2 = ((REG2 <<  4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+
+	REG1 = ((REG1 <<  2) | REG1) & glm::uint64(0x3333333333333333);
+	REG2 = ((REG2 <<  2) | REG2) & glm::uint64(0x3333333333333333);
+
+	REG1 = ((REG1 <<  1) | REG1) & glm::uint64(0x5555555555555555);
+	REG2 = ((REG2 <<  1) | REG2) & glm::uint64(0x5555555555555555);
+
+	return REG1 | (REG2 << 1);
+}
+
+inline glm::uint64 loopBitfieldInterleave(glm::uint32 x, glm::uint32 y)
+{
+	static glm::uint64 const Mask[5] = 
+	{
+		0x5555555555555555,
+		0x3333333333333333,
+		0x0F0F0F0F0F0F0F0F,
+		0x00FF00FF00FF00FF,
+		0x0000FFFF0000FFFF
+	};
+
+	glm::uint64 REG1 = x;
+	glm::uint64 REG2 = y;
+	for(int i = 4; i >= 0; --i)
+	{
+		REG1 = ((REG1 << (1 << i)) | REG1) & Mask[i];
+		REG2 = ((REG2 << (1 << i)) | REG2) & Mask[i];
+	}
+
+	return REG1 | (REG2 << 1);
+}
+
+/*
+const int N = 1024;
+
+int32_t b1[N]; // 2 x arrays of input bit sets
+int32_t b2[N];
+int32_t b3[N]; // 1 x array of output bit sets
+
+for (int i = 0; i < N; i += 4)
+{
+    __m128i v1 = _mm_loadu_si128((__m128i const*)&b1[i]); // load input bit sets
+    __m128i v2 = _mm_loadu_si128((__m128i const*)&b2[i]);
+    __m128i v3 = _mm_and_si128(v1, v2);                    // do the bitwise AND
+    _mm_storeu_si128((__m128i*)&b3[i], v3);                // store the result
+}
+If you just want to AND an array in-place with a fixed mask then it would simplify to this:
+
+const int N = 1024;
+
+int32_t b1[N]; // input/output array of bit sets
+
+const __m128i v2 = _mm_set1_epi32(0x12345678); // mask
+
+for (int i = 0; i < N; i += 4)
+{
+    __m128i v1 = _mm_loadu_si128((__m128i const*)&b1[i]); // load input bit sets
+    __m128i v3 = _mm_and_si128(v1, v2);                    // do the bitwise AND
+    _mm_storeu_si128((__m128i*)&b1[i], v3);                // store the result
+}
+Note: for better performance make sure your input/output arrays are 16 byte aligned and then use _mm_load_si128/_mm_store_si128 rather than their unaligned counterparts as above.
+*/
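+
+// A minimal aligned sketch of the note above (maskArrayAligned is only an illustration
+// and is not used by the tests below): AND an array in place with a fixed mask using
+// _mm_load_si128/_mm_store_si128, assuming Data is 16-byte aligned and Count is a
+// multiple of 4.
+inline void maskArrayAligned(glm::int32* Data, int Count, glm::int32 Mask)
+{
+	__m128i const MaskReg = _mm_set1_epi32(Mask);
+	for(int i = 0; i < Count; i += 4)
+	{
+		__m128i Block = _mm_load_si128(reinterpret_cast<__m128i const*>(Data + i));
+		_mm_store_si128(reinterpret_cast<__m128i*>(Data + i), _mm_and_si128(Block, MaskReg));
+	}
+}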
+
+inline glm::uint64 sseBitfieldInterleave(glm::uint32 x, glm::uint32 y)
+{
+	GLM_ALIGN(16) glm::uint32 const Array[4] = {x, 0, y, 0};
+
+	__m128i const Mask4 = _mm_set1_epi32(0x0000FFFF);
+	__m128i const Mask3 = _mm_set1_epi32(0x00FF00FF);
+	__m128i const Mask2 = _mm_set1_epi32(0x0F0F0F0F);
+	__m128i const Mask1 = _mm_set1_epi32(0x33333333);
+	__m128i const Mask0 = _mm_set1_epi32(0x55555555);
+
+	__m128i Reg1;
+	__m128i Reg2;
+
+	// REG1 = x;
+	// REG2 = y;
+	Reg1 = _mm_load_si128((__m128i*)Array);
+
+	//REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
+	//REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
+	Reg2 = _mm_slli_si128(Reg1, 2);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask4);
+
+	//REG1 = ((REG1 <<  8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
+	//REG2 = ((REG2 <<  8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
+	Reg2 = _mm_slli_si128(Reg1, 1);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask3);
+
+	//REG1 = ((REG1 <<  4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+	//REG2 = ((REG2 <<  4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+	Reg2 = _mm_slli_epi32(Reg1, 4);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask2);
+
+	//REG1 = ((REG1 <<  2) | REG1) & glm::uint64(0x3333333333333333);
+	//REG2 = ((REG2 <<  2) | REG2) & glm::uint64(0x3333333333333333);
+	Reg2 = _mm_slli_epi32(Reg1, 2);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask1);
+
+	//REG1 = ((REG1 <<  1) | REG1) & glm::uint64(0x5555555555555555);
+	//REG2 = ((REG2 <<  1) | REG2) & glm::uint64(0x5555555555555555);
+	Reg2 = _mm_slli_epi32(Reg1, 1);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask0);
+
+	//return REG1 | (REG2 << 1);
+	Reg2 = _mm_slli_epi32(Reg1, 1);
+	Reg2 = _mm_srli_si128(Reg2, 8);
+	Reg1 = _mm_or_si128(Reg1, Reg2);
+	
+	GLM_ALIGN(16) glm::uint64 Result[2];
+	_mm_store_si128((__m128i*)Result, Reg1);
+
+	return Result[0];
+}
+
+inline glm::uint64 sseUnalignedBitfieldInterleave(glm::uint32 x, glm::uint32 y)
+{
+	glm::uint32 const Array[4] = {x, 0, y, 0};
+
+	__m128i const Mask4 = _mm_set1_epi32(0x0000FFFF);
+	__m128i const Mask3 = _mm_set1_epi32(0x00FF00FF);
+	__m128i const Mask2 = _mm_set1_epi32(0x0F0F0F0F);
+	__m128i const Mask1 = _mm_set1_epi32(0x33333333);
+	__m128i const Mask0 = _mm_set1_epi32(0x55555555);
+
+	__m128i Reg1;
+	__m128i Reg2;
+
+	// REG1 = x;
+	// REG2 = y;
+	Reg1 = _mm_loadu_si128((__m128i*)Array);
+
+	//REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
+	//REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
+	Reg2 = _mm_slli_si128(Reg1, 2);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask4);
+
+	//REG1 = ((REG1 <<  8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
+	//REG2 = ((REG2 <<  8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
+	Reg2 = _mm_slli_si128(Reg1, 1);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask3);
+
+	//REG1 = ((REG1 <<  4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+	//REG2 = ((REG2 <<  4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+	Reg2 = _mm_slli_epi32(Reg1, 4);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask2);
+
+	//REG1 = ((REG1 <<  2) | REG1) & glm::uint64(0x3333333333333333);
+	//REG2 = ((REG2 <<  2) | REG2) & glm::uint64(0x3333333333333333);
+	Reg2 = _mm_slli_epi32(Reg1, 2);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask1);
+
+	//REG1 = ((REG1 <<  1) | REG1) & glm::uint64(0x5555555555555555);
+	//REG2 = ((REG2 <<  1) | REG2) & glm::uint64(0x5555555555555555);
+	Reg2 = _mm_slli_epi32(Reg1, 1);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask0);
+
+	//return REG1 | (REG2 << 1);
+	Reg2 = _mm_slli_epi32(Reg1, 1);
+	Reg2 = _mm_srli_si128(Reg2, 8);
+	Reg1 = _mm_or_si128(Reg1, Reg2);
+	
+	glm::uint64 Result[2];
+	_mm_storeu_si128((__m128i*)Result, Reg1);
+
+	return Result[0];
+}
+
+inline __m128i _mm_bit_interleave_si128(__m128i x, __m128i y)
+{
+	__m128i const Mask4 = _mm_set1_epi32(0x0000FFFF);
+	__m128i const Mask3 = _mm_set1_epi32(0x00FF00FF);
+	__m128i const Mask2 = _mm_set1_epi32(0x0F0F0F0F);
+	__m128i const Mask1 = _mm_set1_epi32(0x33333333);
+	__m128i const Mask0 = _mm_set1_epi32(0x55555555);
+
+	__m128i Reg1;
+	__m128i Reg2;
+
+	// REG1 = x;
+	// REG2 = y;
+	Reg1 = _mm_unpacklo_epi64(x, y);
+
+	//REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
+	//REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
+	Reg2 = _mm_slli_si128(Reg1, 2);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask4);
+
+	//REG1 = ((REG1 <<  8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
+	//REG2 = ((REG2 <<  8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
+	Reg2 = _mm_slli_si128(Reg1, 1);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask3);
+
+	//REG1 = ((REG1 <<  4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+	//REG2 = ((REG2 <<  4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+	Reg2 = _mm_slli_epi32(Reg1, 4);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask2);
+
+	//REG1 = ((REG1 <<  2) | REG1) & glm::uint64(0x3333333333333333);
+	//REG2 = ((REG2 <<  2) | REG2) & glm::uint64(0x3333333333333333);
+	Reg2 = _mm_slli_epi32(Reg1, 2);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask1);
+
+	//REG1 = ((REG1 <<  1) | REG1) & glm::uint64(0x5555555555555555);
+	//REG2 = ((REG2 <<  1) | REG2) & glm::uint64(0x5555555555555555);
+	Reg2 = _mm_slli_epi32(Reg1, 1);
+	Reg1 = _mm_or_si128(Reg2, Reg1);
+	Reg1 = _mm_and_si128(Reg1, Mask0);
+
+	//return REG1 | (REG2 << 1);
+	Reg2 = _mm_slli_epi32(Reg1, 1);
+	Reg2 = _mm_srli_si128(Reg2, 8);
+	Reg1 = _mm_or_si128(Reg1, Reg2);
+	
+	return Reg1;
+}
+
+namespace bitfieldInterleave
+{
+	int test()
+	{
+		glm::uint32 x_max = 1 << 13;
+		glm::uint32 y_max = 1 << 12;
+
+		// ALU
+		std::vector<glm::u64vec2> Data(x_max * y_max);
+		std::vector<glm::u64vec2> ParamX(x_max);
+		std::vector<glm::u64vec2> ParamY(y_max);
+		for(glm::uint32 x = 0; x < x_max; ++x)
+			ParamX[x] = glm::u64vec2(x);
+		for(glm::uint32 y = 0; y < y_max; ++y)
+			ParamY[y] = glm::u64vec2(y);
+
+		{
+			for(glm::uint32 y = 0; y < (1 << 10); ++y)
+			for(glm::uint32 x = 0; x < (1 << 10); ++x)
+			{
+				glm::uint64 A = glm::bitfieldInterleave(x, y);
+				glm::uint64 B = fastBitfieldInterleave(x, y);
+				glm::uint64 C = loopBitfieldInterleave(x, y);
+				glm::uint64 D = interleaveBitfieldInterleave(x, y);
+				glm::uint64 E = sseBitfieldInterleave(x, y);
+				glm::uint64 F = sseUnalignedBitfieldInterleave(x, y);
+				assert(A == B);
+				assert(A == C);
+				assert(A == D);
+				assert(A == E);
+				assert(A == F);
+			}
+		}
+
+		{
+			std::clock_t LastTime = std::clock();
+
+			for(glm::uint32 y = 0; y < y_max; ++y)
+			for(glm::uint32 x = 0; x < x_max; ++x)
+			{
+				glm::uint64 Result = glm::bitfieldInterleave(glm::uint32(ParamX[x].x), glm::uint32(ParamY[y].x));
+				Data[x + y * x_max].x = Result;
+			}
+
+			std::clock_t Time = std::clock() - LastTime;
+
+			std::cout << "glm::bitfieldInterleave Time " << Time << " clocks" << std::endl;
+		}
+
+		{
+			std::clock_t LastTime = std::clock();
+
+			for(glm::uint32 y = 0; y < y_max; ++y)
+			for(glm::uint32 x = 0; x < x_max; ++x)
+			{
+				glm::uint64 Result = fastBitfieldInterleave(glm::uint32(ParamX[x].x), glm::uint32(ParamY[y].x));
+				Data[x + y * x_max].x = Result;
+			}
+
+			std::clock_t Time = std::clock() - LastTime;
+
+			std::cout << "fastBitfieldInterleave Time " << Time << " clocks" << std::endl;
+		}
+
+		{
+			std::clock_t LastTime = std::clock();
+
+			for(glm::uint32 y = 0; y < y_max; ++y)
+			for(glm::uint32 x = 0; x < x_max; ++x)
+			{
+				glm::uint64 Result = loopBitfieldInterleave(glm::uint32(ParamX[x].x), glm::uint32(ParamY[y].x));
+				Data[x + y * x_max].x = Result;
+			}
+
+			std::clock_t Time = std::clock() - LastTime;
+
+			std::cout << "loopBitfieldInterleave Time " << Time << " clocks" << std::endl;
+		}
+
+		{
+			std::clock_t LastTime = std::clock();
+
+			for(glm::uint32 y = 0; y < y_max; ++y)
+			for(glm::uint32 x = 0; x < x_max; ++x)
+			{
+				glm::uint64 Result = interleaveBitfieldInterleave(glm::uint32(ParamX[x].x), glm::uint32(ParamY[y].x));
+				Data[x + y * x_max].x = Result;
+			}
+
+			std::clock_t Time = std::clock() - LastTime;
+
+			std::cout << "interleaveBitfieldInterleave Time " << Time << " clocks" << std::endl;
+		}
+
+		{
+			std::clock_t LastTime = std::clock();
+
+			for(glm::uint32 y = 0; y < y_max; ++y)
+			for(glm::uint32 x = 0; x < x_max; ++x)
+			{
+				glm::uint64 Result = sseBitfieldInterleave(glm::uint32(ParamX[x].x), glm::uint32(ParamY[y].x));
+				Data[x + y * x_max].x = Result;
+			}
+
+			std::clock_t Time = std::clock() - LastTime;
+
+			std::cout << "sseBitfieldInterleave Time " << Time << " clocks" << std::endl;
+		}
+
+		{
+			std::clock_t LastTime = std::clock();
+
+			for(glm::uint32 y = 0; y < y_max; ++y)
+			for(glm::uint32 x = 0; x < x_max; ++x)
+			{
+				glm::uint64 Result = sseUnalignedBitfieldInterleave(glm::uint32(ParamX[x].x), glm::uint32(ParamY[y].x));
+				Data[x + y * x_max].x = Result;
+			}
+
+			std::clock_t Time = std::clock() - LastTime;
+
+			std::cout << "sseUnalignedBitfieldInterleave Time " << Time << " clocks" << std::endl;
+		}
+
+		{
+			// SIMD
+			glm::int32 simd_x_max = 1 << 13;
+			glm::int32 simd_y_max = 1 << 12;
+
+			std::vector<__m128i> SimdData(x_max * y_max);
+			std::vector<__m128i> SimdParamX(x_max);
+			std::vector<__m128i> SimdParamY(y_max);
+			for(int x = 0; x < simd_x_max; ++x)
+				SimdParamX[x] = _mm_set1_epi32(x);
+			for(int y = 0; y < simd_y_max; ++y)
+				SimdParamY[y] = _mm_set1_epi32(y);
+
+			std::clock_t LastTime = std::clock();
+
+			for(glm::int32 y = 0; y < simd_y_max; ++y)
+			for(glm::int32 x = 0; x < simd_x_max; ++x)
+			{
+				__m128i Result = _mm_bit_interleave_si128(SimdParamX[x], SimdParamY[y]);
+				SimdData[x + y * x_max] = Result;
+			}
+
+			std::clock_t Time = std::clock() - LastTime;
+
+			std::cout << "_mm_bit_interleave_si128 Time " << Time << " clocks" << std::endl;
+		}
+
+
+		return 0;
+	}
+}
+
 int main()
 {
+	//__m64 REG3 = _mm_set1_pi32(static_cast<int>(0x80000000));
+	//__m64 REG1 = _mm_set1_pi32(0xFFFFFFFF);
+	//__m64 REG2 = _mm_set1_pi32(0x55555555);
+	//__m128i REG = _mm_set_epi64(REG1, REG2);
+
+
 	int Error = 0;
+	Error += ::bitfieldInterleave::test();
 	Error += ::extractField::test();
 	Error += ::bitRevert::test();
+
+	//while(true); // enable to keep the console window open; left disabled so the test can return
+
 	return Error;
 }