tsMeshIntrinsics.sse.cpp

//-----------------------------------------------------------------------------
// Copyright (c) 2012 GarageGames, LLC
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//-----------------------------------------------------------------------------
#include "ts/tsMesh.h"

#if defined(TORQUE_CPU_X86)

#include "ts/tsMeshIntrinsics.h"
#include <xmmintrin.h>
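
//------------------------------------------------------------------------------
// Zero the x/y/z components of the position and normal of 'count' vertices in
// place, stepping through the output buffer in 'outStride'-byte increments.
// Multiplying by the { 0, 0, 0, 1 } mask clears x/y/z and preserves w, leaving
// the buffer ready for a skinning pass (such as the routine further below) to
// accumulate weighted contributions into it.
//------------------------------------------------------------------------------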
void zero_vert_normal_bulk_SSE(const dsize_t count, U8 * __restrict const outPtr, const dsize_t outStride)
{
   // A char * alias of the output pointer, advanced by outStride bytes each iteration
   register char *outData = reinterpret_cast<char *>(outPtr);

   register __m128 vPos;
   register __m128 vNrm;
   register __m128 vMask;

   const __m128 _point3f_zero_mask = { 0.0f, 0.0f, 0.0f, 1.0f };

   vMask = _mm_load_ps((const F32*)&_point3f_zero_mask); // note: the multiplies below use the constant directly

   // pre-populate cache
   for(S32 i = 0; i < 8; i++)
      _mm_prefetch(reinterpret_cast<const char *>(outData + outStride * i), _MM_HINT_T0);

   for(S32 i = 0; i < count; i++)
   {
      TSMesh::__TSMeshVertexBase *curElem = reinterpret_cast<TSMesh::__TSMeshVertexBase *>(outData);

      // prefetch 8 items ahead (should really detect cache size or something)
      _mm_prefetch(reinterpret_cast<const char *>(outData + outStride * 8), _MM_HINT_T0);

      // load
      vPos = _mm_load_ps(curElem->_vert);
      vNrm = _mm_load_ps(curElem->_normal);

      // mask: clear x/y/z, keep w
      vPos = _mm_mul_ps(vPos, _point3f_zero_mask);
      vNrm = _mm_mul_ps(vNrm, _point3f_zero_mask);

      // store
      _mm_store_ps(curElem->_vert, vPos);
      _mm_store_ps(curElem->_normal, vNrm);

      // update output pointer
      outData += outStride;
   }
}
//------------------------------------------------------------------------------
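// Transform each entry of a batched vertex-weight list by a single bone
// matrix and accumulate the result into the output vertex buffer. In scalar
// terms, the loop below computes, per entry:
//
//    out.vert   += weight * (M * in.vert + T)    // position: rotate + translate
//    out.normal += weight * (M * in.normal)      // normal: rotate only
//
// where M is the upper 3x3 of 'mat', T is its translation column, and the
// blend weight is read from the w component of in.vert. The { 1, 1, 1, 0 }
// mask on the weight forces the w lane of both accumulated results to zero.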
void m_matF_x_BatchedVertWeightList_SSE(const MatrixF &mat,
                                        const dsize_t count,
                                        const TSSkinMesh::BatchData::BatchedVertWeight * __restrict batch,
                                        U8 * const __restrict outPtr,
                                        const dsize_t outStride)
{
   const char * __restrict iPtr = reinterpret_cast<const char *>(batch);
   const dsize_t inStride = sizeof(TSSkinMesh::BatchData::BatchedVertWeight);

   // SSE intrinsic version
   // Based on: http://www.cortstratton.org/articles/HugiCode.html

   // Load matrix, transposed, into registers
   MatrixF transMat;
   mat.transposeTo(transMat);

   register __m128 sseMat[4];
   sseMat[0] = _mm_loadu_ps(&transMat[0]);
   sseMat[1] = _mm_loadu_ps(&transMat[4]);
   sseMat[2] = _mm_loadu_ps(&transMat[8]);
   sseMat[3] = _mm_loadu_ps(&transMat[12]);

   // mask
   const __m128 _w_mask = { 1.0f, 1.0f, 1.0f, 0.0f };

   // temp registers
   register __m128 tempPos;
   register __m128 tempNrm;
   register __m128 scratch0;
   register __m128 scratch1;
   register __m128 inPos;
   register __m128 inNrm;

   // pre-populate cache
   const TSSkinMesh::BatchData::BatchedVertWeight &firstElem = batch[0];
   for(S32 i = 0; i < 8; i++)
   {
      _mm_prefetch(reinterpret_cast<const char *>(iPtr + inStride * i), _MM_HINT_T0);
      _mm_prefetch(reinterpret_cast<const char *>(outPtr + outStride * (i + firstElem.vidx)), _MM_HINT_T0);
   }

   for(register S32 i = 0; i < count; i++)
   {
      const TSSkinMesh::BatchData::BatchedVertWeight &inElem = batch[i];
      TSMesh::__TSMeshVertexBase *outElem = reinterpret_cast<TSMesh::__TSMeshVertexBase *>(outPtr + inElem.vidx * outStride);

      // process x (hiding the prefetches in the delays)
      inPos = _mm_load_ps(inElem.vert);
      inNrm = _mm_load_ps(inElem.normal);

      // prefetch input
      #define INPUT_PREFETCH_LOOKAHEAD 64
      const char *prefetchInput = reinterpret_cast<const char *>(batch) + inStride * (i + INPUT_PREFETCH_LOOKAHEAD);
      _mm_prefetch(prefetchInput, _MM_HINT_T0);

      // propagate the .x elements across the vectors
      tempPos = _mm_shuffle_ps(inPos, inPos, _MM_SHUFFLE(0, 0, 0, 0));
      tempNrm = _mm_shuffle_ps(inNrm, inNrm, _MM_SHUFFLE(0, 0, 0, 0));

      // prefetch output with half the lookahead distance of the input
      #define OUTPUT_PREFETCH_LOOKAHEAD (INPUT_PREFETCH_LOOKAHEAD >> 1)
      const char *outPrefetch = reinterpret_cast<const char*>(outPtr) + outStride * (inElem.vidx + OUTPUT_PREFETCH_LOOKAHEAD);
      _mm_prefetch(outPrefetch, _MM_HINT_T0);

      // mul by column 0
      tempPos = _mm_mul_ps(tempPos, sseMat[0]);
      tempNrm = _mm_mul_ps(tempNrm, sseMat[0]);

      // process y
      scratch0 = _mm_shuffle_ps(inPos, inPos, _MM_SHUFFLE(1, 1, 1, 1));
      scratch1 = _mm_shuffle_ps(inNrm, inNrm, _MM_SHUFFLE(1, 1, 1, 1));
      scratch0 = _mm_mul_ps(scratch0, sseMat[1]);
      scratch1 = _mm_mul_ps(scratch1, sseMat[1]);
      tempPos = _mm_add_ps(tempPos, scratch0);
      tempNrm = _mm_add_ps(tempNrm, scratch1);

      // process z
      scratch0 = _mm_shuffle_ps(inPos, inPos, _MM_SHUFFLE(2, 2, 2, 2));
      scratch1 = _mm_shuffle_ps(inNrm, inNrm, _MM_SHUFFLE(2, 2, 2, 2));
      scratch0 = _mm_mul_ps(scratch0, sseMat[2]);
      scratch1 = _mm_mul_ps(scratch1, sseMat[2]);
      tempPos = _mm_add_ps(tempPos, scratch0);

      inNrm = _mm_load_ps(outElem->_normal); //< load normal for accumulation
      scratch0 = _mm_shuffle_ps(inPos, inPos, _MM_SHUFFLE(3, 3, 3, 3)); //< broadcast the bone weight (vert.w) across scratch0
      tempNrm = _mm_add_ps(tempNrm, scratch1);
      scratch0 = _mm_mul_ps(scratch0, _w_mask); //< mask off the w lane of the weight

      // Translate the position by adding the 4th column of the matrix to it
      tempPos = _mm_add_ps(tempPos, sseMat[3]);

      // now multiply by the blend weight, which also masks out the w component of both vectors
      tempPos = _mm_mul_ps(tempPos, scratch0);
      tempNrm = _mm_mul_ps(tempNrm, scratch0);

      inPos = _mm_load_ps(outElem->_vert); //< load position for accumulation

      // accumulate with previous values
      tempNrm = _mm_add_ps(tempNrm, inNrm);
      tempPos = _mm_add_ps(tempPos, inPos);

      _mm_store_ps(outElem->_vert, tempPos);   //< output position
      _mm_store_ps(outElem->_normal, tempNrm); //< output normal
   }
}
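
//------------------------------------------------------------------------------
// Usage sketch (illustrative only, not code from this file): the expected call
// pattern is to clear the destination verts once, then accumulate one call per
// bone that influences the mesh:
//
//    zero_vert_normal_bulk_SSE(vertCount, outVerts, vertStride);
//    for (each bone b with a batch list)
//       m_matF_x_BatchedVertWeightList_SSE(boneTransform[b], batchCount[b],
//                                          batchList[b], outVerts, vertStride);
//------------------------------------------------------------------------------
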
#endif // TORQUE_CPU_X86