// gtc_bitfield.cpp

#include <glm/gtc/bitfield.hpp>
#include <glm/gtc/type_precision.hpp>
#include <glm/vector_relational.hpp>
#include <glm/integer.hpp>
#include <cassert>
#include <ctime>
#include <cstdio>
#include <vector>
namespace mask
{
	template<typename genType>
	struct type
	{
		genType Value;
		genType Return;
	};

	// Builds an n-bit mask by shifting all-ones left and inverting.
	// Note: undefined behavior when Bits equals the bit width of int.
	inline int mask_zero(int Bits)
	{
		return ~((~0) << Bits);
	}

	// Builds an n-bit mask as (1 << n) - 1, with an explicit branch for the full-width case.
	inline int mask_mix(int Bits)
	{
		return Bits >= static_cast<int>(sizeof(int) * 8) ? 0xffffffff : (static_cast<int>(1) << Bits) - static_cast<int>(1);
	}

	// Builds an n-bit mask using two shifts, because a single shift by 32 on a 32-bit int is undefined.
	inline int mask_half(int Bits)
	{
		int const Half = Bits >> 1;
		int const Fill = ~0;
		int const ShiftHalf = Fill << Half;
		int const Rest = Bits - Half;
		int const Reversed = ShiftHalf << Rest;
		return ~Reversed;
	}

	// Builds an n-bit mask by setting one bit per loop iteration.
	inline int mask_loop(int Bits)
	{
		int Mask = 0;
		for(int Bit = 0; Bit < Bits; ++Bit)
			Mask |= (static_cast<int>(1) << Bit);
		return Mask;
	}
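
	// All four helpers compute the same thing: a mask with the lowest Bits bits set.
	// Worked example, assuming a 32-bit int:
	//   mask_mix(3)  == (1 << 3) - 1      == 0x7
	//   mask_zero(3) == ~(~0 << 3)        == 0x7
	//   mask_half(3) == ~((~0 << 1) << 2) == 0x7
	//   mask_loop(3) == 1 | 2 | 4         == 0x7
	// They differ in cost and in edge cases: mask_zero and mask_half shift a signed
	// all-ones value, and mask_zero shifts by the full width when Bits == 32, which is
	// undefined behavior; mask_mix branches to avoid that case.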

	int perf()
	{
		int const Count = 100000000;

		std::clock_t Timestamp1 = std::clock();

		{
			std::vector<int> Mask;
			Mask.resize(Count);
			for(int i = 0; i < Count; ++i)
				Mask[i] = mask_mix(i % 32);
		}

		std::clock_t Timestamp2 = std::clock();

		{
			std::vector<int> Mask;
			Mask.resize(Count);
			for(int i = 0; i < Count; ++i)
				Mask[i] = mask_loop(i % 32);
		}

		std::clock_t Timestamp3 = std::clock();

		{
			std::vector<int> Mask;
			Mask.resize(Count);
			for(int i = 0; i < Count; ++i)
				Mask[i] = glm::mask(i % 32);
		}

		std::clock_t Timestamp4 = std::clock();

		{
			std::vector<int> Mask;
			Mask.resize(Count);
			for(int i = 0; i < Count; ++i)
				Mask[i] = mask_zero(i % 32);
		}

		std::clock_t Timestamp5 = std::clock();

		{
			std::vector<int> Mask;
			Mask.resize(Count);
			for(int i = 0; i < Count; ++i)
				Mask[i] = mask_half(i % 32);
		}

		std::clock_t Timestamp6 = std::clock();

		std::clock_t TimeMix = Timestamp2 - Timestamp1;
		std::clock_t TimeLoop = Timestamp3 - Timestamp2;
		std::clock_t TimeDefault = Timestamp4 - Timestamp3;
		std::clock_t TimeZero = Timestamp5 - Timestamp4;
		std::clock_t TimeHalf = Timestamp6 - Timestamp5;

		std::printf("mask[mix]: %u\n", static_cast<unsigned int>(TimeMix));
		std::printf("mask[loop]: %u\n", static_cast<unsigned int>(TimeLoop));
		std::printf("mask[default]: %u\n", static_cast<unsigned int>(TimeDefault));
		std::printf("mask[zero]: %u\n", static_cast<unsigned int>(TimeZero));
		std::printf("mask[half]: %u\n", static_cast<unsigned int>(TimeHalf));

		// Fail if the GLM implementation is slower than the naive loop.
		return TimeDefault < TimeLoop ? 0 : 1;
	}

	int test_uint()
	{
		type<glm::uint> const Data[] =
		{
			{ 0, 0x00000000},
			{ 1, 0x00000001},
			{ 2, 0x00000003},
			{ 3, 0x00000007},
			{31, 0x7fffffff},
			{32, 0xffffffff}
		};

		int Error = 0;

		/* mask_zero is sadly not correct code: it shifts by the full bit width when the
		   input is 32, which is undefined behavior, so it is excluded from the test.
		for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<glm::uint>); ++i)
		{
			int Result = mask_zero(Data[i].Value);
			Error += Data[i].Return == Result ? 0 : 1;
		}
		*/

		for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<glm::uint>); ++i)
		{
			int Result = mask_mix(Data[i].Value);
			Error += Data[i].Return == Result ? 0 : 1;
		}

		for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<glm::uint>); ++i)
		{
			int Result = mask_half(Data[i].Value);
			Error += Data[i].Return == Result ? 0 : 1;
		}

		for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<glm::uint>); ++i)
		{
			int Result = mask_loop(Data[i].Value);
			Error += Data[i].Return == Result ? 0 : 1;
		}

		for(std::size_t i = 0; i < sizeof(Data) / sizeof(type<glm::uint>); ++i)
		{
			int Result = glm::mask(Data[i].Value);
			Error += Data[i].Return == Result ? 0 : 1;
		}

		return Error;
	}

	int test_uvec4()
	{
		type<glm::ivec4> const Data[] =
		{
			{glm::ivec4( 0), glm::ivec4(0x00000000)},
			{glm::ivec4( 1), glm::ivec4(0x00000001)},
			{glm::ivec4( 2), glm::ivec4(0x00000003)},
			{glm::ivec4( 3), glm::ivec4(0x00000007)},
			{glm::ivec4(31), glm::ivec4(0x7fffffff)},
			{glm::ivec4(32), glm::ivec4(0xffffffff)}
		};

		int Error(0);

		for(std::size_t i = 0, n = sizeof(Data) / sizeof(type<glm::ivec4>); i < n; ++i)
		{
			glm::ivec4 Result = glm::mask(Data[i].Value);
			Error += glm::all(glm::equal(Data[i].Return, Result)) ? 0 : 1;
		}

		return Error;
	}

	int test()
	{
		int Error(0);

		Error += test_uint();
		Error += test_uvec4();

		return Error;
	}
}//namespace mask

namespace bitfieldInterleave3
{
	template<typename PARAM, typename RET>
	inline RET refBitfieldInterleave(PARAM x, PARAM y, PARAM z)
	{
		RET Result = 0;
		for(RET i = 0; i < sizeof(PARAM) * 8; ++i)
		{
			Result |= ((RET(x) & (RET(1U) << i)) << ((i << 1) + 0));
			Result |= ((RET(y) & (RET(1U) << i)) << ((i << 1) + 1));
			Result |= ((RET(z) & (RET(1U) << i)) << ((i << 1) + 2));
		}
		return Result;
	}
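
	// Behavior sketch of the reference above: bit i of x is kept in place by the
	// (1 << i) mask and then shifted by a further 2*i + channel, so it lands at bit
	// 3*i of the result, bit i of y at 3*i + 1 and bit i of z at 3*i + 2. For example,
	// with x = 0b11 and y = z = 0, the set bits land at positions 0 and 3, giving
	// 0b001001 = 9.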

	int test()
	{
		int Error(0);

		glm::uint16 x_max = 1 << 11;
		glm::uint16 y_max = 1 << 11;
		glm::uint16 z_max = 1 << 11;

		for(glm::uint16 z = 0; z < z_max; z += 27)
		for(glm::uint16 y = 0; y < y_max; y += 27)
		for(glm::uint16 x = 0; x < x_max; x += 27)
		{
			glm::uint64 ResultA = refBitfieldInterleave<glm::uint16, glm::uint64>(x, y, z);
			glm::uint64 ResultB = glm::bitfieldInterleave(x, y, z);
			Error += ResultA == ResultB ? 0 : 1;
		}

		return Error;
	}
}//namespace bitfieldInterleave3

namespace bitfieldInterleave4
{
	template<typename PARAM, typename RET>
	inline RET loopBitfieldInterleave(PARAM x, PARAM y, PARAM z, PARAM w)
	{
		RET const v[4] = {x, y, z, w};
		RET Result = 0;
		for(RET i = 0; i < sizeof(PARAM) * 8; i++)
		{
			Result |= ((((v[0] >> i) & 1U)) << ((i << 2) + 0));
			Result |= ((((v[1] >> i) & 1U)) << ((i << 2) + 1));
			Result |= ((((v[2] >> i) & 1U)) << ((i << 2) + 2));
			Result |= ((((v[3] >> i) & 1U)) << ((i << 2) + 3));
		}
		return Result;
	}
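
	// Behavior sketch: the four-channel reference extracts bit i of each input with
	// (v[c] >> i) & 1 and writes it to bit 4*i + c of the result, so x, y, z and w
	// occupy positions 0, 1, 2 and 3 of every group of four bits. For example,
	// x = y = z = w = 1 yields 0b1111 = 0xF.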

	int test()
	{
		int Error(0);

		glm::uint16 x_max = 1 << 11;
		glm::uint16 y_max = 1 << 11;
		glm::uint16 z_max = 1 << 11;
		glm::uint16 w_max = 1 << 11;

		for(glm::uint16 w = 0; w < w_max; w += 27)
		for(glm::uint16 z = 0; z < z_max; z += 27)
		for(glm::uint16 y = 0; y < y_max; y += 27)
		for(glm::uint16 x = 0; x < x_max; x += 27)
		{
			glm::uint64 ResultA = loopBitfieldInterleave<glm::uint16, glm::uint64>(x, y, z, w);
			glm::uint64 ResultB = glm::bitfieldInterleave(x, y, z, w);
			Error += ResultA == ResultB ? 0 : 1;
		}

		return Error;
	}
}//namespace bitfieldInterleave4

namespace bitfieldInterleave
{
	inline glm::uint64 fastBitfieldInterleave(glm::uint32 x, glm::uint32 y)
	{
		glm::uint64 REG1;
		glm::uint64 REG2;

		REG1 = x;
		REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
		REG1 = ((REG1 <<  8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
		REG1 = ((REG1 <<  4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
		REG1 = ((REG1 <<  2) | REG1) & glm::uint64(0x3333333333333333);
		REG1 = ((REG1 <<  1) | REG1) & glm::uint64(0x5555555555555555);

		REG2 = y;
		REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
		REG2 = ((REG2 <<  8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
		REG2 = ((REG2 <<  4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
		REG2 = ((REG2 <<  2) | REG2) & glm::uint64(0x3333333333333333);
		REG2 = ((REG2 <<  1) | REG2) & glm::uint64(0x5555555555555555);

		return REG1 | (REG2 << 1);
	}
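
	// How the "magic mask" spreading above works: each pass doubles the number of
	// groups and halves their size, leaving zero gaps between them.
	//   after << 16: two 16-bit groups  (mask 0x0000FFFF0000FFFF)
	//   after <<  8: four  8-bit groups (mask 0x00FF00FF00FF00FF)
	//   after <<  4: eight 4-bit groups (mask 0x0F0F0F0F0F0F0F0F)
	//   after <<  2: 2-bit groups       (mask 0x3333333333333333)
	//   after <<  1: single bits with one zero between each (mask 0x5555555555555555)
	// x ends up on the even bit positions and y, shifted left by one, on the odd ones.
	// For example, fastBitfieldInterleave(0xFF, 0) == 0x5555 and
	// fastBitfieldInterleave(0, 0xFF) == 0xAAAA.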

	inline glm::uint64 interleaveBitfieldInterleave(glm::uint32 x, glm::uint32 y)
	{
		glm::uint64 REG1;
		glm::uint64 REG2;

		REG1 = x;
		REG2 = y;

		REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
		REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);

		REG1 = ((REG1 <<  8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
		REG2 = ((REG2 <<  8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);

		REG1 = ((REG1 <<  4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
		REG2 = ((REG2 <<  4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);

		REG1 = ((REG1 <<  2) | REG1) & glm::uint64(0x3333333333333333);
		REG2 = ((REG2 <<  2) | REG2) & glm::uint64(0x3333333333333333);

		REG1 = ((REG1 <<  1) | REG1) & glm::uint64(0x5555555555555555);
		REG2 = ((REG2 <<  1) | REG2) & glm::uint64(0x5555555555555555);

		return REG1 | (REG2 << 1);
	}
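
	// Same arithmetic as fastBitfieldInterleave; only the statement order differs.
	// The x and y updates are alternated so the two independent dependency chains can
	// overlap on an out-of-order core, which is what the perf() comparison below is
	// meant to measure.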

	/*
	inline glm::uint64 loopBitfieldInterleave(glm::uint32 x, glm::uint32 y)
	{
		static glm::uint64 const Mask[5] =
		{
			0x5555555555555555,
			0x3333333333333333,
			0x0F0F0F0F0F0F0F0F,
			0x00FF00FF00FF00FF,
			0x0000FFFF0000FFFF
		};

		glm::uint64 REG1 = x;
		glm::uint64 REG2 = y;
		for(int i = 4; i >= 0; --i)
		{
			REG1 = ((REG1 << (1 << i)) | REG1) & Mask[i];
			REG2 = ((REG2 << (1 << i)) | REG2) & Mask[i];
		}
		return REG1 | (REG2 << 1);
	}
	*/

#if GLM_ARCH & GLM_ARCH_SSE2_BIT
	inline glm::uint64 sseBitfieldInterleave(glm::uint32 x, glm::uint32 y)
	{
		__m128i const Array = _mm_set_epi32(0, y, 0, x);

		__m128i const Mask4 = _mm_set1_epi32(0x0000FFFF);
		__m128i const Mask3 = _mm_set1_epi32(0x00FF00FF);
		__m128i const Mask2 = _mm_set1_epi32(0x0F0F0F0F);
		__m128i const Mask1 = _mm_set1_epi32(0x33333333);
		__m128i const Mask0 = _mm_set1_epi32(0x55555555);

		__m128i Reg1;
		__m128i Reg2;

		// REG1 = x;
		// REG2 = y;
		Reg1 = _mm_load_si128(&Array);

		//REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
		//REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
		Reg2 = _mm_slli_si128(Reg1, 2);
		Reg1 = _mm_or_si128(Reg2, Reg1);
		Reg1 = _mm_and_si128(Reg1, Mask4);

		//REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
		//REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
		Reg2 = _mm_slli_si128(Reg1, 1);
		Reg1 = _mm_or_si128(Reg2, Reg1);
		Reg1 = _mm_and_si128(Reg1, Mask3);

		//REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
		//REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
		Reg2 = _mm_slli_epi32(Reg1, 4);
		Reg1 = _mm_or_si128(Reg2, Reg1);
		Reg1 = _mm_and_si128(Reg1, Mask2);

		//REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333);
		//REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333);
		Reg2 = _mm_slli_epi32(Reg1, 2);
		Reg1 = _mm_or_si128(Reg2, Reg1);
		Reg1 = _mm_and_si128(Reg1, Mask1);

		//REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555);
		//REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555);
		Reg2 = _mm_slli_epi32(Reg1, 1);
		Reg1 = _mm_or_si128(Reg2, Reg1);
		Reg1 = _mm_and_si128(Reg1, Mask0);

		//return REG1 | (REG2 << 1);
		Reg2 = _mm_slli_epi32(Reg1, 1);
		Reg2 = _mm_srli_si128(Reg2, 8);
		Reg1 = _mm_or_si128(Reg1, Reg2);

		__m128i Result;
		_mm_store_si128(&Result, Reg1);
		return *reinterpret_cast<glm::uint64*>(&Result);
	}
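
	// Notes on the SSE2 variant above: _mm_set_epi32(0, y, 0, x) zero-extends x into
	// the low 64-bit lane and y into the high lane, so one register carries both
	// spreading chains. The 16-bit and 8-bit steps use _mm_slli_si128, which shifts the
	// whole register by bytes (2 and 1 byte respectively); once the groups are smaller
	// than a byte, the remaining steps switch to _mm_slli_epi32, which shifts each
	// 32-bit element by bits. The last three instructions move y's spread bits to the
	// odd positions, bring the high lane down by 8 bytes and OR it into x's lane, so
	// the interleaved value can be read back from the low 64 bits.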

	inline glm::uint64 sseUnalignedBitfieldInterleave(glm::uint32 x, glm::uint32 y)
	{
		__m128i const Array = _mm_set_epi32(0, y, 0, x);

		__m128i const Mask4 = _mm_set1_epi32(0x0000FFFF);
		__m128i const Mask3 = _mm_set1_epi32(0x00FF00FF);
		__m128i const Mask2 = _mm_set1_epi32(0x0F0F0F0F);
		__m128i const Mask1 = _mm_set1_epi32(0x33333333);
		__m128i const Mask0 = _mm_set1_epi32(0x55555555);

		__m128i Reg1;
		__m128i Reg2;

		// REG1 = x;
		// REG2 = y;
		Reg1 = _mm_loadu_si128(&Array);

		//REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
		//REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
		Reg2 = _mm_slli_si128(Reg1, 2);
		Reg1 = _mm_or_si128(Reg2, Reg1);
		Reg1 = _mm_and_si128(Reg1, Mask4);

		//REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
		//REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
		Reg2 = _mm_slli_si128(Reg1, 1);
		Reg1 = _mm_or_si128(Reg2, Reg1);
		Reg1 = _mm_and_si128(Reg1, Mask3);

		//REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
		//REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
		Reg2 = _mm_slli_epi32(Reg1, 4);
		Reg1 = _mm_or_si128(Reg2, Reg1);
		Reg1 = _mm_and_si128(Reg1, Mask2);

		//REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333);
		//REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333);
		Reg2 = _mm_slli_epi32(Reg1, 2);
		Reg1 = _mm_or_si128(Reg2, Reg1);
		Reg1 = _mm_and_si128(Reg1, Mask1);

		//REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555);
		//REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555);
		Reg2 = _mm_slli_epi32(Reg1, 1);
		Reg1 = _mm_or_si128(Reg2, Reg1);
		Reg1 = _mm_and_si128(Reg1, Mask0);

		//return REG1 | (REG2 << 1);
		Reg2 = _mm_slli_epi32(Reg1, 1);
		Reg2 = _mm_srli_si128(Reg2, 8);
		Reg1 = _mm_or_si128(Reg1, Reg2);

		__m128i Result;
		_mm_store_si128(&Result, Reg1);
		return *reinterpret_cast<glm::uint64*>(&Result);
	}
#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT

	int test()
	{
		/*
		{
			for(glm::uint32 y = 0; y < (1 << 10); ++y)
			for(glm::uint32 x = 0; x < (1 << 10); ++x)
			{
				glm::uint64 A = glm::bitfieldInterleave(x, y);
				glm::uint64 B = fastBitfieldInterleave(x, y);
				//glm::uint64 C = loopBitfieldInterleave(x, y);
				glm::uint64 D = interleaveBitfieldInterleave(x, y);

				assert(A == B);
				//assert(A == C);
				assert(A == D);

#			if GLM_ARCH & GLM_ARCH_SSE2_BIT
				glm::uint64 E = sseBitfieldInterleave(x, y);
				glm::uint64 F = sseUnalignedBitfieldInterleave(x, y);
				assert(A == E);
				assert(A == F);

				__m128i G = glm_i128_interleave(_mm_set_epi32(0, y, 0, x));
				glm::uint64 Result[2];
				_mm_storeu_si128((__m128i*)Result, G);
				assert(A == Result[0]);
#			endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
			}
		}
		*/

		{
			for(glm::uint8 y = 0; y < 127; ++y)
			for(glm::uint8 x = 0; x < 127; ++x)
			{
				glm::uint64 A(glm::bitfieldInterleave(glm::uint8(x), glm::uint8(y)));
				glm::uint64 B(glm::bitfieldInterleave(glm::uint16(x), glm::uint16(y)));
				glm::uint64 C(glm::bitfieldInterleave(glm::uint32(x), glm::uint32(y)));

				assert(A == B);
				assert(A == C);

				glm::int64 D(glm::bitfieldInterleave(glm::int8(x), glm::int8(y)));
				glm::int64 E(glm::bitfieldInterleave(glm::int16(x), glm::int16(y)));
				glm::int64 F(glm::bitfieldInterleave(glm::int32(x), glm::int32(y)));

				assert(D == E);
				assert(D == F);
			}
		}

		return 0;
	}

	int perf()
	{
		glm::uint32 x_max = 1 << 11;
		glm::uint32 y_max = 1 << 10;

		// ALU
		std::vector<glm::uint64> Data(x_max * y_max);
		std::vector<glm::u32vec2> Param(x_max * y_max);
		for(glm::uint32 i = 0; i < Param.size(); ++i)
			Param[i] = glm::u32vec2(i % x_max, i / y_max);

		{
			std::clock_t LastTime = std::clock();
			for(std::size_t i = 0; i < Data.size(); ++i)
				Data[i] = glm::bitfieldInterleave(Param[i].x, Param[i].y);
			std::clock_t Time = std::clock() - LastTime;
			std::printf("glm::bitfieldInterleave Time %u clocks\n", static_cast<unsigned int>(Time));
		}

		{
			std::clock_t LastTime = std::clock();
			for(std::size_t i = 0; i < Data.size(); ++i)
				Data[i] = fastBitfieldInterleave(Param[i].x, Param[i].y);
			std::clock_t Time = std::clock() - LastTime;
			std::printf("fastBitfieldInterleave Time %u clocks\n", static_cast<unsigned int>(Time));
		}

		/*
		{
			std::clock_t LastTime = std::clock();
			for(std::size_t i = 0; i < Data.size(); ++i)
				Data[i] = loopBitfieldInterleave(Param[i].x, Param[i].y);
			std::clock_t Time = std::clock() - LastTime;
			std::printf("loopBitfieldInterleave Time %u clocks\n", static_cast<unsigned int>(Time));
		}
		*/

		{
			std::clock_t LastTime = std::clock();
			for(std::size_t i = 0; i < Data.size(); ++i)
				Data[i] = interleaveBitfieldInterleave(Param[i].x, Param[i].y);
			std::clock_t Time = std::clock() - LastTime;
			std::printf("interleaveBitfieldInterleave Time %u clocks\n", static_cast<unsigned int>(Time));
		}

#	if GLM_ARCH & GLM_ARCH_SSE2_BIT
		{
			std::clock_t LastTime = std::clock();
			for(std::size_t i = 0; i < Data.size(); ++i)
				Data[i] = sseBitfieldInterleave(Param[i].x, Param[i].y);
			std::clock_t Time = std::clock() - LastTime;
			std::printf("sseBitfieldInterleave Time %u clocks\n", static_cast<unsigned int>(Time));
		}

		{
			std::clock_t LastTime = std::clock();
			for(std::size_t i = 0; i < Data.size(); ++i)
				Data[i] = sseUnalignedBitfieldInterleave(Param[i].x, Param[i].y);
			std::clock_t Time = std::clock() - LastTime;
			std::printf("sseUnalignedBitfieldInterleave Time %u clocks\n", static_cast<unsigned int>(Time));
		}
#	endif//GLM_ARCH & GLM_ARCH_SSE2_BIT

		{
			std::clock_t LastTime = std::clock();
			for(std::size_t i = 0; i < Data.size(); ++i)
				Data[i] = glm::bitfieldInterleave(Param[i].x, Param[i].y, Param[i].x);
			std::clock_t Time = std::clock() - LastTime;
			std::printf("glm::detail::bitfieldInterleave Time %u clocks\n", static_cast<unsigned int>(Time));
		}

#	if(GLM_ARCH & GLM_ARCH_SSE2_BIT && !(GLM_COMPILER & GLM_COMPILER_GCC))
		{
			// SIMD
			std::vector<__m128i> SimdData;
			SimdData.resize(x_max * y_max);
			std::vector<__m128i> SimdParam;
			SimdParam.resize(x_max * y_max);
			for(std::size_t i = 0; i < SimdParam.size(); ++i)
				SimdParam[i] = _mm_set_epi32(static_cast<int>(i % x_max), 0, static_cast<int>(i / y_max), 0);

			std::clock_t LastTime = std::clock();
			for(std::size_t i = 0; i < SimdData.size(); ++i)
				SimdData[i] = glm_i128_interleave(SimdParam[i]);
			std::clock_t Time = std::clock() - LastTime;
			std::printf("_mm_bit_interleave_si128 Time %u clocks\n", static_cast<unsigned int>(Time));
		}
#	endif//GLM_ARCH & GLM_ARCH_SSE2_BIT

		return 0;
	}
}//namespace bitfieldInterleave

namespace bitfieldInterleave5
{
	GLM_FUNC_QUALIFIER glm::uint16 bitfieldInterleave_u8vec2(glm::uint8 x, glm::uint8 y)
	{
		glm::uint32 Result = (glm::uint32(y) << 16) | glm::uint32(x);
		Result = ((Result << 4) | Result) & 0x0F0F0F0F;
		Result = ((Result << 2) | Result) & 0x33333333;
		Result = ((Result << 1) | Result) & 0x55555555;
		return static_cast<glm::uint16>((Result & 0x0000FFFF) | (Result >> 15));
	}
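
	// Sketch of the trick above: x sits in the low 16 bits and y in the high 16 bits of
	// one 32-bit word, so a single shift-or-mask cascade spreads both bytes at once.
	// After the last mask each byte occupies the even bits of its own half: x at bits
	// 0..14, y at bits 16..30. The final (Result >> 15) drops y's bits onto the odd
	// positions 1..15, which ORed with x's half gives the 16-bit interleave. For example,
	// bitfieldInterleave_u8vec2(0xFF, 0x00) == 0x5555 and
	// bitfieldInterleave_u8vec2(0x00, 0xFF) == 0xAAAA.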

	GLM_FUNC_QUALIFIER glm::u8vec2 bitfieldDeinterleave_u8vec2(glm::uint16 InterleavedBitfield)
	{
		glm::uint32 Result(InterleavedBitfield);
		Result = ((Result << 15) | Result) & 0x55555555;
		Result = ((Result >> 1) | Result) & 0x33333333;
		Result = ((Result >> 2) | Result) & 0x0F0F0F0F;
		Result = ((Result >> 4) | Result) & 0x00FF00FF;
		return glm::u8vec2(Result & 0x0000FFFF, Result >> 16);
	}

	GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave_u8vec4(glm::uint8 x, glm::uint8 y, glm::uint8 z, glm::uint8 w)
	{
		glm::uint64 Result = (glm::uint64(w) << 48) | (glm::uint64(z) << 32) | (glm::uint64(y) << 16) | glm::uint64(x);
		Result = ((Result << 12) | Result) & 0x000F000F000F000Full;
		Result = ((Result << 6) | Result) & 0x0303030303030303ull;
		Result = ((Result << 3) | Result) & 0x1111111111111111ull;

		const glm::uint32 a = static_cast<glm::uint32>((Result & 0x000000000000FFFF) >> ( 0 - 0));
		const glm::uint32 b = static_cast<glm::uint32>((Result & 0x00000000FFFF0000) >> (16 - 3));
		const glm::uint32 c = static_cast<glm::uint32>((Result & 0x0000FFFF00000000) >> (32 - 6));
		const glm::uint32 d = static_cast<glm::uint32>((Result & 0xFFFF000000000000) >> (48 - 12));
		return a | b | c | d;
	}

	GLM_FUNC_QUALIFIER glm::u8vec4 bitfieldDeinterleave_u8vec4(glm::uint32 InterleavedBitfield)
	{
		glm::uint64 Result(InterleavedBitfield);
		Result = ((Result << 15) | Result) & 0x9249249249249249ull;
		Result = ((Result >> 1) | Result) & 0x30C30C30C30C30C3ull;
		Result = ((Result >> 2) | Result) & 0xF00F00F00F00F00Full;
		Result = ((Result >> 4) | Result) & 0x00FF0000FF0000FFull;
		return glm::u8vec4(
			(Result >> 0) & 0x000000000000FFFFull,
			(Result >> 16) & 0x00000000FFFF0000ull,
			(Result >> 32) & 0x0000FFFF00000000ull,
			(Result >> 48) & 0xFFFF000000000000ull);
	}

	GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave_u16vec2(glm::uint16 x, glm::uint16 y)
	{
		glm::uint64 Result = (glm::uint64(y) << 32) | glm::uint64(x);
		// The masks must stay 64-bit wide: y's bits live above bit 31 at this point,
		// so truncating the constants to 32 bits would discard them.
		Result = ((Result << 8) | Result) & static_cast<glm::uint64>(0x00FF00FF00FF00FFull);
		Result = ((Result << 4) | Result) & static_cast<glm::uint64>(0x0F0F0F0F0F0F0F0Full);
		Result = ((Result << 2) | Result) & static_cast<glm::uint64>(0x3333333333333333ull);
		Result = ((Result << 1) | Result) & static_cast<glm::uint64>(0x5555555555555555ull);
		return static_cast<glm::uint32>((Result & 0x00000000FFFFFFFFull) | (Result >> 31));
	}
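
	// Same folding trick as bitfieldInterleave_u8vec2, one level up: x occupies the low
	// 32 bits and y the high 32 bits of a 64-bit word, the cascade spreads each half
	// onto its even bit positions (x at bits 0..30, y at bits 32..62), and
	// (Result >> 31) maps y's bits onto the odd positions 1..31 before the result is
	// narrowed to 32 bits.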

	GLM_FUNC_QUALIFIER glm::u16vec2 bitfieldDeinterleave_u16vec2(glm::uint32 InterleavedBitfield)
	{
		glm::uint64 Result(InterleavedBitfield);
		Result = ((Result << 31) | Result) & 0x5555555555555555ull;
		Result = ((Result >> 1) | Result) & 0x3333333333333333ull;
		Result = ((Result >> 2) | Result) & 0x0F0F0F0F0F0F0F0Full;
		Result = ((Result >> 4) | Result) & 0x00FF00FF00FF00FFull;
		Result = ((Result >> 8) | Result) & 0x0000FFFF0000FFFFull;
		return glm::u16vec2(Result & 0x00000000FFFFFFFFull, Result >> 32);
	}
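
	// Deinterleaving reverses the spreading: the initial (Result << 31) copies the odd
	// (y) bits of the 32-bit input onto the even positions of the high 32 bits, the
	// 0x5555... mask keeps one source bit per pair in each half, and the following
	// shift-or-mask passes compact each half back into a contiguous 16-bit value, read
	// out as the low word (x) and the word at bit 32 (y).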

	int test()
	{
		int Error = 0;

		for(glm::size_t j = 0; j < 256; ++j)
		for(glm::size_t i = 0; i < 256; ++i)
		{
			glm::uint16 A = bitfieldInterleave_u8vec2(glm::uint8(i), glm::uint8(j));
			glm::uint16 B = glm::bitfieldInterleave(glm::uint8(i), glm::uint8(j));
			Error += A == B ? 0 : 1;

			glm::u8vec2 C = bitfieldDeinterleave_u8vec2(A);
			Error += C.x == glm::uint8(i) ? 0 : 1;
			Error += C.y == glm::uint8(j) ? 0 : 1;
		}

		for(glm::size_t j = 0; j < 256; ++j)
		for(glm::size_t i = 0; i < 256; ++i)
		{
			glm::uint32 A = bitfieldInterleave_u8vec4(glm::uint8(i), glm::uint8(j), glm::uint8(i), glm::uint8(j));
			glm::uint32 B = glm::bitfieldInterleave(glm::uint8(i), glm::uint8(j), glm::uint8(i), glm::uint8(j));
			Error += A == B ? 0 : 1;

			/*
			glm::u8vec4 C = bitfieldDeinterleave_u8vec4(A);
			Error += C.x == glm::uint8(i) ? 0 : 1;
			Error += C.y == glm::uint8(j) ? 0 : 1;
			Error += C.z == glm::uint8(i) ? 0 : 1;
			Error += C.w == glm::uint8(j) ? 0 : 1;
			*/
		}

		for(glm::size_t j = 0; j < 256; ++j)
		for(glm::size_t i = 0; i < 256; ++i)
		{
			glm::uint32 A = bitfieldInterleave_u16vec2(glm::uint16(i), glm::uint16(j));
			glm::uint32 B = glm::bitfieldInterleave(glm::uint16(i), glm::uint16(j));
			Error += A == B ? 0 : 1;
		}

		return Error;
	}

	int perf_old_u8vec2(std::vector<glm::uint16>& Result)
	{
		int Error = 0;
		const std::clock_t BeginTime = std::clock();

		for(glm::size_t k = 0; k < 10000; ++k)
		for(glm::size_t j = 0; j < 256; ++j)
		for(glm::size_t i = 0; i < 256; ++i)
			Error += Result[j * 256 + i] == glm::bitfieldInterleave(glm::uint8(i), glm::uint8(j)) ? 0 : 1;

		const std::clock_t EndTime = std::clock();
		std::printf("glm::bitfieldInterleave<u8vec2> Time %u clocks\n", static_cast<unsigned int>(EndTime - BeginTime));
		return Error;
	}

	int perf_new_u8vec2(std::vector<glm::uint16>& Result)
	{
		int Error = 0;
		const std::clock_t BeginTime = std::clock();

		for(glm::size_t k = 0; k < 10000; ++k)
		for(glm::size_t j = 0; j < 256; ++j)
		for(glm::size_t i = 0; i < 256; ++i)
			Error += Result[j * 256 + i] == bitfieldInterleave_u8vec2(glm::uint8(i), glm::uint8(j)) ? 0 : 1;

		const std::clock_t EndTime = std::clock();
		std::printf("bitfieldInterleave_u8vec2 Time %u clocks\n", static_cast<unsigned int>(EndTime - BeginTime));
		return Error;
	}

	int perf_old_u8vec4(std::vector<glm::uint32>& Result)
	{
		int Error = 0;
		const std::clock_t BeginTime = std::clock();

		for(glm::size_t k = 0; k < 10000; ++k)
		for(glm::size_t j = 0; j < 256; ++j)
		for(glm::size_t i = 0; i < 256; ++i)
			Error += Result[j * 256 + i] == glm::bitfieldInterleave(glm::uint8(i), glm::uint8(j), glm::uint8(i), glm::uint8(j)) ? 0 : 1;

		const std::clock_t EndTime = std::clock();
		std::printf("glm::bitfieldInterleave<u8vec4> Time %u clocks\n", static_cast<unsigned int>(EndTime - BeginTime));
		return Error;
	}

	int perf_new_u8vec4(std::vector<glm::uint32>& Result)
	{
		int Error = 0;
		const std::clock_t BeginTime = std::clock();

		for(glm::size_t k = 0; k < 10000; ++k)
		for(glm::size_t j = 0; j < 256; ++j)
		for(glm::size_t i = 0; i < 256; ++i)
			Error += Result[j * 256 + i] == bitfieldInterleave_u8vec4(glm::uint8(i), glm::uint8(j), glm::uint8(i), glm::uint8(j)) ? 0 : 1;

		const std::clock_t EndTime = std::clock();
		std::printf("bitfieldInterleave_u8vec4 Time %u clocks\n", static_cast<unsigned int>(EndTime - BeginTime));
		return Error;
	}

	int perf_old_u16vec2(std::vector<glm::uint32>& Result)
	{
		int Error = 0;
		const std::clock_t BeginTime = std::clock();

		for(glm::size_t k = 0; k < 10000; ++k)
		for(glm::size_t j = 0; j < 256; ++j)
		for(glm::size_t i = 0; i < 256; ++i)
			Error += Result[j * 256 + i] == glm::bitfieldInterleave(glm::uint16(i), glm::uint16(j)) ? 0 : 1;

		const std::clock_t EndTime = std::clock();
		std::printf("glm::bitfieldInterleave<u16vec2> Time %u clocks\n", static_cast<unsigned int>(EndTime - BeginTime));
		return Error;
	}

	int perf_new_u16vec2(std::vector<glm::uint32>& Result)
	{
		int Error = 0;
		const std::clock_t BeginTime = std::clock();

		for(glm::size_t k = 0; k < 10000; ++k)
		for(glm::size_t j = 0; j < 256; ++j)
		for(glm::size_t i = 0; i < 256; ++i)
			Error += Result[j * 256 + i] == bitfieldInterleave_u16vec2(glm::uint16(i), glm::uint16(j)) ? 0 : 1;

		const std::clock_t EndTime = std::clock();
		std::printf("bitfieldInterleave_u16vec2 Time %u clocks\n", static_cast<unsigned int>(EndTime - BeginTime));
		return Error;
	}

	int perf()
	{
		int Error = 0;

		std::printf("bitfieldInterleave perf: init\r");

		std::vector<glm::uint16> Result_u8vec2(256 * 256, 0);
		for(glm::size_t j = 0; j < 256; ++j)
		for(glm::size_t i = 0; i < 256; ++i)
			Result_u8vec2[j * 256 + i] = glm::bitfieldInterleave(glm::uint8(i), glm::uint8(j));

		Error += perf_old_u8vec2(Result_u8vec2);
		Error += perf_new_u8vec2(Result_u8vec2);

		std::vector<glm::uint32> Result_u8vec4(256 * 256, 0);
		for(glm::size_t j = 0; j < 256; ++j)
		for(glm::size_t i = 0; i < 256; ++i)
			Result_u8vec4[j * 256 + i] = glm::bitfieldInterleave(glm::uint8(i), glm::uint8(j), glm::uint8(i), glm::uint8(j));

		Error += perf_old_u8vec4(Result_u8vec4);
		Error += perf_new_u8vec4(Result_u8vec4);

		std::vector<glm::uint32> Result_u16vec2(256 * 256, 0);
		for(glm::size_t j = 0; j < 256; ++j)
		for(glm::size_t i = 0; i < 256; ++i)
			Result_u16vec2[j * 256 + i] = glm::bitfieldInterleave(glm::uint16(i), glm::uint16(j));

		Error += perf_old_u16vec2(Result_u16vec2);
		Error += perf_new_u16vec2(Result_u16vec2);

		std::printf("bitfieldInterleave perf: %d Errors\n", Error);

		return Error;
	}
}//namespace bitfieldInterleave5

int main()
{
	int Error = 0;

	/* Tests for faster and reverse (deinterleave) bitfieldInterleave candidates, currently disabled:
	Error += ::bitfieldInterleave5::test();
	Error += ::bitfieldInterleave5::perf();
	*/

	Error += ::mask::test();
	Error += ::bitfieldInterleave3::test();
	Error += ::bitfieldInterleave4::test();
	Error += ::bitfieldInterleave::test();

#	ifdef NDEBUG
	Error += ::mask::perf();
	Error += ::bitfieldInterleave::perf();
#	endif//NDEBUG

	return Error;
}