// mMathSSE.cc
//-----------------------------------------------------------------------------
// Copyright (c) 2013 GarageGames, LLC
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//-----------------------------------------------------------------------------
  22. #include "math/mMathFn.h"
  23. #include "math/mPlane.h"
  24. #include "math/mMatrix.h"
  25. #if defined(TORQUE_SUPPORTS_VC_INLINE_X86_ASM)
  26. #define ADD_SSE_FN
  27. // inlined version here.
  28. void SSE_MatrixF_x_MatrixF(const F32 *matA, const F32 *matB, F32 *result)
  29. {
  30. __asm
  31. {
  32. mov edx, matA
  33. mov ecx, matB
  34. mov eax, result
  35. movss xmm0, [edx]
  36. movups xmm1, [ecx]
  37. shufps xmm0, xmm0, 0
  38. movss xmm2, [edx + 4]
  39. mulps xmm0, xmm1
  40. shufps xmm2, xmm2, 0
  41. movups xmm3, [ecx + 10h]
  42. movss xmm7, [edx + 8]
  43. mulps xmm2, xmm3
  44. shufps xmm7, xmm7, 0
  45. addps xmm0, xmm2
  46. movups xmm4, [ecx + 20h]
  47. movss xmm2, [edx + 0Ch]
  48. mulps xmm7, xmm4
  49. shufps xmm2, xmm2, 0
  50. addps xmm0, xmm7
  51. movups xmm5, [ecx + 30h]
  52. movss xmm6, [edx + 10h]
  53. mulps xmm2, xmm5
  54. movss xmm7, [edx + 14h]
  55. shufps xmm6, xmm6, 0
  56. addps xmm0, xmm2
  57. shufps xmm7, xmm7, 0
  58. movlps[eax], xmm0
  59. movhps[eax + 8], xmm0
  60. mulps xmm7, xmm3
  61. movss xmm0, [edx + 18h]
  62. mulps xmm6, xmm1
  63. shufps xmm0, xmm0, 0
  64. addps xmm6, xmm7
  65. mulps xmm0, xmm4
  66. movss xmm2, [edx + 24h]
  67. addps xmm6, xmm0
  68. movss xmm0, [edx + 1Ch]
  69. movss xmm7, [edx + 20h]
  70. shufps xmm0, xmm0, 0
  71. shufps xmm7, xmm7, 0
  72. mulps xmm0, xmm5
  73. mulps xmm7, xmm1
  74. addps xmm6, xmm0
  75. shufps xmm2, xmm2, 0
  76. movlps[eax + 10h], xmm6
  77. movhps[eax + 18h], xmm6
  78. mulps xmm2, xmm3
  79. movss xmm6, [edx + 28h]
  80. addps xmm7, xmm2
  81. shufps xmm6, xmm6, 0
  82. movss xmm2, [edx + 2Ch]
  83. mulps xmm6, xmm4
  84. shufps xmm2, xmm2, 0
  85. addps xmm7, xmm6
  86. mulps xmm2, xmm5
  87. movss xmm0, [edx + 34h]
  88. addps xmm7, xmm2
  89. shufps xmm0, xmm0, 0
  90. movlps[eax + 20h], xmm7
  91. movss xmm2, [edx + 30h]
  92. movhps[eax + 28h], xmm7
  93. mulps xmm0, xmm3
  94. shufps xmm2, xmm2, 0
  95. movss xmm6, [edx + 38h]
  96. mulps xmm2, xmm1
  97. shufps xmm6, xmm6, 0
  98. addps xmm2, xmm0
  99. mulps xmm6, xmm4
  100. movss xmm7, [edx + 3Ch]
  101. shufps xmm7, xmm7, 0
  102. addps xmm2, xmm6
  103. mulps xmm7, xmm5
  104. addps xmm2, xmm7
  105. movups[eax + 30h], xmm2
  106. }
  107. }
  108. void SSE_MatrixF_x_MatrixF_Aligned(const F32 *matA, const F32 *matB, F32 *result)
  109. {
  110. __asm
  111. {
  112. mov edx, matA
  113. mov ecx, matB
  114. mov eax, result
  115. movss xmm0, [edx]
  116. movaps xmm1, [ecx]
  117. shufps xmm0, xmm0, 0
  118. movss xmm2, [edx + 4]
  119. mulps xmm0, xmm1
  120. shufps xmm2, xmm2, 0
  121. movaps xmm3, [ecx + 10h]
  122. movss xmm7, [edx + 8]
  123. mulps xmm2, xmm3
  124. shufps xmm7, xmm7, 0
  125. addps xmm0, xmm2
  126. movaps xmm4, [ecx + 20h]
  127. movss xmm2, [edx + 0Ch]
  128. mulps xmm7, xmm4
  129. shufps xmm2, xmm2, 0
  130. addps xmm0, xmm7
  131. movaps xmm5, [ecx + 30h]
  132. movss xmm6, [edx + 10h]
  133. mulps xmm2, xmm5
  134. movss xmm7, [edx + 14h]
  135. shufps xmm6, xmm6, 0
  136. addps xmm0, xmm2
  137. shufps xmm7, xmm7, 0
  138. movlps[eax], xmm0
  139. movhps[eax + 8], xmm0
  140. mulps xmm7, xmm3
  141. movss xmm0, [edx + 18h]
  142. mulps xmm6, xmm1
  143. shufps xmm0, xmm0, 0
  144. addps xmm6, xmm7
  145. mulps xmm0, xmm4
  146. movss xmm2, [edx + 24h]
  147. addps xmm6, xmm0
  148. movss xmm0, [edx + 1Ch]
  149. movss xmm7, [edx + 20h]
  150. shufps xmm0, xmm0, 0
  151. shufps xmm7, xmm7, 0
  152. mulps xmm0, xmm5
  153. mulps xmm7, xmm1
  154. addps xmm6, xmm0
  155. shufps xmm2, xmm2, 0
  156. movlps[eax + 10h], xmm6
  157. movhps[eax + 18h], xmm6
  158. mulps xmm2, xmm3
  159. movss xmm6, [edx + 28h]
  160. addps xmm7, xmm2
  161. shufps xmm6, xmm6, 0
  162. movss xmm2, [edx + 2Ch]
  163. mulps xmm6, xmm4
  164. shufps xmm2, xmm2, 0
  165. addps xmm7, xmm6
  166. mulps xmm2, xmm5
  167. movss xmm0, [edx + 34h]
  168. addps xmm7, xmm2
  169. shufps xmm0, xmm0, 0
  170. movlps[eax + 20h], xmm7
  171. movss xmm2, [edx + 30h]
  172. movhps[eax + 28h], xmm7
  173. mulps xmm0, xmm3
  174. shufps xmm2, xmm2, 0
  175. movss xmm6, [edx + 38h]
  176. mulps xmm2, xmm1
  177. shufps xmm6, xmm6, 0
  178. addps xmm2, xmm0
  179. mulps xmm6, xmm4
  180. movss xmm7, [edx + 3Ch]
  181. shufps xmm7, xmm7, 0
  182. addps xmm2, xmm6
  183. mulps xmm7, xmm5
  184. addps xmm2, xmm7
  185. movaps[eax + 30h], xmm2
  186. }
  187. }
// If we set our flag, we always try to build the inlined asm,
// EXCEPT if we're in an old version of CodeWarrior that can't handle SSE code.
// TODO: the NASM implementation of SSE_MatrixF_x_MatrixF_Aligned is missing,
// so we temporarily disable this until it is fixed (needed for the Linux dedicated build).
  192. #elif defined(TORQUE_SUPPORTS_NASM)
  193. #define ADD_SSE_FN
  194. extern "C"
  195. {
  196. void SSE_MatrixF_x_MatrixF(const F32 *matA, const F32 *matB, F32 *result);
  197. void SSE_MatrixF_x_MatrixF_Aligned(const F32 *matA, const F32 *matB, F32 *result);
  198. }
  199. #elif defined( TORQUE_COMPILER_GCC ) && (defined( TORQUE_CPU_X86 ) || defined( TORQUE_CPU_X64 ) && !defined(TORQUE_OS_IOS))
  200. #define ADD_SSE_FN
  201. void SSE_MatrixF_x_MatrixF(const F32 *matA, const F32 *matB, F32 *result)
  202. {
  203. asm
  204. (
  205. "movss (%%edx),%%xmm0\n"
  206. "movups (%%ecx),%%xmm1\n"
  207. "shufps $0,%%xmm0,%%xmm0\n"
  208. "movss 4(%%edx),%%xmm2\n"
  209. "mulps %%xmm1,%%xmm0\n"
  210. "shufps $0,%%xmm2,%%xmm2\n"
  211. "movups 0x10(%%ecx),%%xmm3\n"
  212. "movss 8(%%edx),%%xmm7\n"
  213. "mulps %%xmm3,%%xmm2\n"
  214. "shufps $0,%%xmm7,%%xmm7\n"
  215. "addps %%xmm2,%%xmm0\n"
  216. "movups 0x20(%%ecx),%%xmm4\n"
  217. "movss 0x0c(%%edx),%%xmm2\n"
  218. "mulps %%xmm4,%%xmm7\n"
  219. "shufps $0,%%xmm2,%%xmm2\n"
  220. "addps %%xmm7,%%xmm0\n"
  221. "movups 0x30(%%ecx),%%xmm5\n"
  222. "movss 0x10(%%edx),%%xmm6\n"
  223. "mulps %%xmm5,%%xmm2\n"
  224. "movss 0x14(%%edx),%%xmm7\n"
  225. "shufps $0,%%xmm6,%%xmm6\n"
  226. "addps %%xmm2,%%xmm0\n"
  227. "shufps $0,%%xmm7,%%xmm7\n"
  228. "movlps %%xmm0,(%%eax)\n"
  229. "movhps %%xmm0,8(%%eax)\n"
  230. "mulps %%xmm3,%%xmm7\n"
  231. "movss 0x18(%%edx),%%xmm0\n"
  232. "mulps %%xmm1,%%xmm6\n"
  233. "shufps $0,%%xmm0,%%xmm0\n"
  234. "addps %%xmm7,%%xmm6\n"
  235. "mulps %%xmm4,%%xmm0\n"
  236. "movss 0x24(%%edx),%%xmm2\n"
  237. "addps %%xmm0,%%xmm6\n"
  238. "movss 0x1c(%%edx),%%xmm0\n"
  239. "movss 0x20(%%edx),%%xmm7\n"
  240. "shufps $0,%%xmm0,%%xmm0\n"
  241. "shufps $0,%%xmm7,%%xmm7\n"
  242. "mulps %%xmm5,%%xmm0\n"
  243. "mulps %%xmm1,%%xmm7\n"
  244. "addps %%xmm0,%%xmm6\n"
  245. "shufps $0,%%xmm2,%%xmm2\n"
  246. "movlps %%xmm6,0x10(%%eax)\n"
  247. "movhps %%xmm6,0x18(%%eax)\n"
  248. "mulps %%xmm3,%%xmm2\n"
  249. "movss 0x28(%%edx),%%xmm6\n"
  250. "addps %%xmm2,%%xmm7\n"
  251. "shufps $0,%%xmm6,%%xmm6\n"
  252. "movss 0x2c(%%edx),%%xmm2\n"
  253. "mulps %%xmm4,%%xmm6\n"
  254. "shufps $0,%%xmm2,%%xmm2\n"
  255. "addps %%xmm6,%%xmm7\n"
  256. "mulps %%xmm5,%%xmm2\n"
  257. "movss 0x34(%%edx),%%xmm0\n"
  258. "addps %%xmm2,%%xmm7\n"
  259. "shufps $0,%%xmm0,%%xmm0\n"
  260. "movlps %%xmm7,0x20(%%eax)\n"
  261. "movss 0x30(%%edx),%%xmm2\n"
  262. "movhps %%xmm7,0x28(%%eax)\n"
  263. "mulps %%xmm3,%%xmm0\n"
  264. "shufps $0,%%xmm2,%%xmm2\n"
  265. "movss 0x38(%%edx),%%xmm6\n"
  266. "mulps %%xmm1,%%xmm2\n"
  267. "shufps $0,%%xmm6,%%xmm6\n"
  268. "addps %%xmm0,%%xmm2\n"
  269. "mulps %%xmm4,%%xmm6\n"
  270. "movss 0x3c(%%edx),%%xmm7\n"
  271. "shufps $0,%%xmm7,%%xmm7\n"
  272. "addps %%xmm6,%%xmm2\n"
  273. "mulps %%xmm5,%%xmm7\n"
  274. "addps %%xmm7,%%xmm2\n"
  275. "movups %%xmm2,0x30(%%eax)\n"
  276. :
  277. : "d" (matA),
  278. "c" (matB),
  279. "a" (result)
  280. );
  281. }
  282. void SSE_MatrixF_x_MatrixF_Aligned(const F32 *matA, const F32 *matB, F32 *result)
  283. {
  284. asm
  285. (
  286. "movss (%%edx),%%xmm0\n"
  287. "movaps (%%ecx),%%xmm1\n"
  288. "shufps $0,%%xmm0,%%xmm0\n"
  289. "movss 4(%%edx),%%xmm2\n"
  290. "mulps %%xmm1,%%xmm0\n"
  291. "shufps $0,%%xmm2,%%xmm2\n"
  292. "movaps 0x10(%%ecx),%%xmm3\n"
  293. "movss 8(%%edx),%%xmm7\n"
  294. "mulps %%xmm3,%%xmm2\n"
  295. "shufps $0,%%xmm7,%%xmm7\n"
  296. "addps %%xmm2,%%xmm0\n"
  297. "movaps 0x20(%%ecx),%%xmm4\n"
  298. "movss 0x0c(%%edx),%%xmm2\n"
  299. "mulps %%xmm4,%%xmm7\n"
  300. "shufps $0,%%xmm2,%%xmm2\n"
  301. "addps %%xmm7,%%xmm0\n"
  302. "movaps 0x30(%%ecx),%%xmm5\n"
  303. "movss 0x10(%%edx),%%xmm6\n"
  304. "mulps %%xmm5,%%xmm2\n"
  305. "movss 0x14(%%edx),%%xmm7\n"
  306. "shufps $0,%%xmm6,%%xmm6\n"
  307. "addps %%xmm2,%%xmm0\n"
  308. "shufps $0,%%xmm7,%%xmm7\n"
  309. "movlps %%xmm0,(%%eax)\n"
  310. "movhps %%xmm0,8(%%eax)\n"
  311. "mulps %%xmm3,%%xmm7\n"
  312. "movss 0x18(%%edx),%%xmm0\n"
  313. "mulps %%xmm1,%%xmm6\n"
  314. "shufps $0,%%xmm0,%%xmm0\n"
  315. "addps %%xmm7,%%xmm6\n"
  316. "mulps %%xmm4,%%xmm0\n"
  317. "movss 0x24(%%edx),%%xmm2\n"
  318. "addps %%xmm0,%%xmm6\n"
  319. "movss 0x1c(%%edx),%%xmm0\n"
  320. "movss 0x20(%%edx),%%xmm7\n"
  321. "shufps $0,%%xmm0,%%xmm0\n"
  322. "shufps $0,%%xmm7,%%xmm7\n"
  323. "mulps %%xmm5,%%xmm0\n"
  324. "mulps %%xmm1,%%xmm7\n"
  325. "addps %%xmm0,%%xmm6\n"
  326. "shufps $0,%%xmm2,%%xmm2\n"
  327. "movlps %%xmm6,0x10(%%eax)\n"
  328. "movhps %%xmm6,0x18(%%eax)\n"
  329. "mulps %%xmm3,%%xmm2\n"
  330. "movss 0x28(%%edx),%%xmm6\n"
  331. "addps %%xmm2,%%xmm7\n"
  332. "shufps $0,%%xmm6,%%xmm6\n"
  333. "movss 0x2c(%%edx),%%xmm2\n"
  334. "mulps %%xmm4,%%xmm6\n"
  335. "shufps $0,%%xmm2,%%xmm2\n"
  336. "addps %%xmm6,%%xmm7\n"
  337. "mulps %%xmm5,%%xmm2\n"
  338. "movss 0x34(%%edx),%%xmm0\n"
  339. "addps %%xmm2,%%xmm7\n"
  340. "shufps $0,%%xmm0,%%xmm0\n"
  341. "movlps %%xmm7,0x20(%%eax)\n"
  342. "movss 0x30(%%edx),%%xmm2\n"
  343. "movhps %%xmm7,0x28(%%eax)\n"
  344. "mulps %%xmm3,%%xmm0\n"
  345. "shufps $0,%%xmm2,%%xmm2\n"
  346. "movss 0x38(%%edx),%%xmm6\n"
  347. "mulps %%xmm1,%%xmm2\n"
  348. "shufps $0,%%xmm6,%%xmm6\n"
  349. "addps %%xmm0,%%xmm2\n"
  350. "mulps %%xmm4,%%xmm6\n"
  351. "movss 0x3c(%%edx),%%xmm7\n"
  352. "shufps $0,%%xmm7,%%xmm7\n"
  353. "addps %%xmm6,%%xmm2\n"
  354. "mulps %%xmm5,%%xmm7\n"
  355. "addps %%xmm7,%%xmm2\n"
  356. "movaps %%xmm2,0x30(%%eax)\n"
  357. :
  358. : "d" (matA),
  359. "c" (matB),
  360. "a" (result)
  361. );
  362. }
  363. #endif
  364. void mInstall_Library_SSE()
  365. {
  366. #if defined(ADD_SSE_FN)
  367. m_matF_x_matF = SSE_MatrixF_x_MatrixF;
  368. m_matF_x_matF_aligned = SSE_MatrixF_x_MatrixF_Aligned;
  369. // m_matF_x_point3F = Athlon_MatrixF_x_Point3F;
  370. // m_matF_x_vectorF = Athlon_MatrixF_x_VectorF;
  371. #endif
  372. }