Mat44.inl

// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Math/Vec3.h>
#include <Jolt/Math/Vec4.h>
#include <Jolt/Math/Quat.h>

JPH_NAMESPACE_BEGIN

#define JPH_EL(r, c) mCol[c].mF32[r]
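
// Note: Mat44 is stored column-major (mCol[c] is column c), so JPH_EL(r, c) addresses the element at row r, column c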

Mat44::Mat44(Vec4Arg inC1, Vec4Arg inC2, Vec4Arg inC3, Vec4Arg inC4) :
    mCol { inC1, inC2, inC3, inC4 }
{
}

Mat44::Mat44(Vec4Arg inC1, Vec4Arg inC2, Vec4Arg inC3, Vec3Arg inC4) :
    mCol { inC1, inC2, inC3, Vec4(inC4, 1.0f) }
{
}

Mat44::Mat44(Type inC1, Type inC2, Type inC3, Type inC4) :
    mCol { inC1, inC2, inC3, inC4 }
{
}

Mat44 Mat44::sZero()
{
    return Mat44(Vec4::sZero(), Vec4::sZero(), Vec4::sZero(), Vec4::sZero());
}

Mat44 Mat44::sIdentity()
{
    return Mat44(Vec4(1, 0, 0, 0), Vec4(0, 1, 0, 0), Vec4(0, 0, 1, 0), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sNaN()
{
    return Mat44(Vec4::sNaN(), Vec4::sNaN(), Vec4::sNaN(), Vec4::sNaN());
}

Mat44 Mat44::sLoadFloat4x4(const Float4 *inV)
{
    Mat44 result;
    for (int c = 0; c < 4; ++c)
        result.mCol[c] = Vec4::sLoadFloat4(inV + c);
    return result;
}

Mat44 Mat44::sLoadFloat4x4Aligned(const Float4 *inV)
{
    Mat44 result;
    for (int c = 0; c < 4; ++c)
        result.mCol[c] = Vec4::sLoadFloat4Aligned(inV + c);
    return result;
}

Mat44 Mat44::sRotationX(float inX)
{
    Vec4 sv, cv;
    Vec4::sReplicate(inX).SinCos(sv, cv);
    float s = sv.GetX(), c = cv.GetX();
    return Mat44(Vec4(1, 0, 0, 0), Vec4(0, c, s, 0), Vec4(0, -s, c, 0), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sRotationY(float inY)
{
    Vec4 sv, cv;
    Vec4::sReplicate(inY).SinCos(sv, cv);
    float s = sv.GetX(), c = cv.GetX();
    return Mat44(Vec4(c, 0, -s, 0), Vec4(0, 1, 0, 0), Vec4(s, 0, c, 0), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sRotationZ(float inZ)
{
    Vec4 sv, cv;
    Vec4::sReplicate(inZ).SinCos(sv, cv);
    float s = sv.GetX(), c = cv.GetX();
    return Mat44(Vec4(c, s, 0, 0), Vec4(-s, c, 0, 0), Vec4(0, 0, 1, 0), Vec4(0, 0, 0, 1));
}
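
// For a unit quaternion q = (x, y, z, w), both paths below build the standard quaternion-derived
// rotation matrix, whose upper left 3x3 block (written in row order) is:
//
//    | 1 - 2y^2 - 2z^2   2xy - 2zw         2xz + 2yw       |
//    | 2xy + 2zw         1 - 2x^2 - 2z^2   2yz - 2xw       |
//    | 2xz - 2yw         2yz + 2xw         1 - 2x^2 - 2y^2 |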
Mat44 Mat44::sRotation(QuatArg inQuat)
{
    JPH_ASSERT(inQuat.IsNormalized());
    // See: https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation section 'Quaternion-derived rotation matrix'
#ifdef JPH_USE_SSE4_1
    __m128 xyzw = inQuat.mValue.mValue;
    __m128 two_xyzw = _mm_add_ps(xyzw, xyzw);
    __m128 yzxw = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(3, 0, 2, 1));
    __m128 two_yzxw = _mm_add_ps(yzxw, yzxw);
    __m128 zxyw = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(3, 1, 0, 2));
    __m128 two_zxyw = _mm_add_ps(zxyw, zxyw);
    __m128 wwww = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(3, 3, 3, 3));
    __m128 diagonal = _mm_sub_ps(_mm_sub_ps(_mm_set1_ps(1.0f), _mm_mul_ps(two_yzxw, yzxw)), _mm_mul_ps(two_zxyw, zxyw)); // (1 - 2 y^2 - 2 z^2, 1 - 2 x^2 - 2 z^2, 1 - 2 x^2 - 2 y^2, 1 - 4 w^2)
    __m128 plus = _mm_add_ps(_mm_mul_ps(two_xyzw, zxyw), _mm_mul_ps(two_yzxw, wwww)); // 2 * (xz + yw, xy + zw, yz + xw, ww)
    __m128 minus = _mm_sub_ps(_mm_mul_ps(two_yzxw, xyzw), _mm_mul_ps(two_zxyw, wwww)); // 2 * (xy - zw, yz - xw, xz - yw, 0)
    // Workaround for compiler changing _mm_sub_ps(_mm_mul_ps(...), ...) into a fused multiply sub instruction, resulting in w not being 0
    // There doesn't appear to be a reliable way to turn this off in Clang
    minus = _mm_insert_ps(minus, minus, 0b1000);
    __m128 col0 = _mm_blend_ps(_mm_blend_ps(plus, diagonal, 0b0001), minus, 0b1100); // (1 - 2 y^2 - 2 z^2, 2 xy + 2 zw, 2 xz - 2 yw, 0)
    __m128 col1 = _mm_blend_ps(_mm_blend_ps(diagonal, minus, 0b1001), plus, 0b0100); // (2 xy - 2 zw, 1 - 2 x^2 - 2 z^2, 2 yz + 2 xw, 0)
    __m128 col2 = _mm_blend_ps(_mm_blend_ps(minus, plus, 0b0001), diagonal, 0b0100); // (2 xz + 2 yw, 2 yz - 2 xw, 1 - 2 x^2 - 2 y^2, 0)
    __m128 col3 = _mm_set_ps(1, 0, 0, 0);
    return Mat44(col0, col1, col2, col3);
#else
    float x = inQuat.GetX();
    float y = inQuat.GetY();
    float z = inQuat.GetZ();
    float w = inQuat.GetW();
    float tx = x + x; // Note: Using x + x instead of 2.0f * x to force this function to return the same value as the SSE4.1 version across platforms.
    float ty = y + y;
    float tz = z + z;
    float xx = tx * x;
    float yy = ty * y;
    float zz = tz * z;
    float xy = tx * y;
    float xz = tx * z;
    float xw = tx * w;
    float yz = ty * z;
    float yw = ty * w;
    float zw = tz * w;
    return Mat44(Vec4((1.0f - yy) - zz, xy + zw, xz - yw, 0.0f), // Note: Added extra brackets to force this function to return the same value as the SSE4.1 version across platforms.
        Vec4(xy - zw, (1.0f - zz) - xx, yz + xw, 0.0f),
        Vec4(xz + yw, yz - xw, (1.0f - xx) - yy, 0.0f),
        Vec4(0.0f, 0.0f, 0.0f, 1.0f));
#endif
}

Mat44 Mat44::sRotation(Vec3Arg inAxis, float inAngle)
{
    return sRotation(Quat::sRotation(inAxis, inAngle));
}

Mat44 Mat44::sTranslation(Vec3Arg inV)
{
    return Mat44(Vec4(1, 0, 0, 0), Vec4(0, 1, 0, 0), Vec4(0, 0, 1, 0), Vec4(inV, 1));
}

Mat44 Mat44::sRotationTranslation(QuatArg inR, Vec3Arg inT)
{
    Mat44 m = sRotation(inR);
    m.SetTranslation(inT);
    return m;
}

Mat44 Mat44::sInverseRotationTranslation(QuatArg inR, Vec3Arg inT)
{
    Mat44 m = sRotation(inR.Conjugated());
    m.SetTranslation(-m.Multiply3x3(inT));
    return m;
}

Mat44 Mat44::sScale(float inScale)
{
    return Mat44(Vec4(inScale, 0, 0, 0), Vec4(0, inScale, 0, 0), Vec4(0, 0, inScale, 0), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sScale(Vec3Arg inV)
{
    return Mat44(Vec4(inV.GetX(), 0, 0, 0), Vec4(0, inV.GetY(), 0, 0), Vec4(0, 0, inV.GetZ(), 0), Vec4(0, 0, 0, 1));
}
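
// The outer product inV1 * inV2^T: element (r, c) of the 3x3 block is inV1[r] * inV2[c], the bottom right element is 1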
Mat44 Mat44::sOuterProduct(Vec3Arg inV1, Vec3Arg inV2)
{
    Vec4 v1(inV1, 0);
    return Mat44(v1 * inV2.SplatX(), v1 * inV2.SplatY(), v1 * inV2.SplatZ(), Vec4(0, 0, 0, 1));
}
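
// Returns the skew-symmetric matrix M for which M * inW == inV.Cross(inW) for any Vec3 inW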
Mat44 Mat44::sCrossProduct(Vec3Arg inV)
{
#ifdef JPH_USE_SSE4_1
    // Zero out the W component
    __m128 zero = _mm_setzero_ps();
    __m128 v = _mm_blend_ps(inV.mValue, zero, 0b1000);
    // Negate
    __m128 min_v = _mm_sub_ps(zero, v);
    return Mat44(
        _mm_shuffle_ps(v, min_v, _MM_SHUFFLE(3, 1, 2, 3)), // [0, z, -y, 0]
        _mm_shuffle_ps(min_v, v, _MM_SHUFFLE(3, 0, 3, 2)), // [-z, 0, x, 0]
        _mm_blend_ps(_mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 1)), _mm_shuffle_ps(min_v, min_v, _MM_SHUFFLE(3, 3, 0, 3)), 0b0010), // [y, -x, 0, 0]
        Vec4(0, 0, 0, 1));
#else
    float x = inV.GetX();
    float y = inV.GetY();
    float z = inV.GetZ();
    return Mat44(
        Vec4(0, z, -y, 0),
        Vec4(-z, 0, x, 0),
        Vec4(y, -x, 0, 0),
        Vec4(0, 0, 0, 1));
#endif
}
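
// Builds a view matrix that looks from inPos towards inTarget (camera forward along -Z in view space);
// NormalizedOr supplies a fallback axis when the direction or the right vector degenerates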
Mat44 Mat44::sLookAt(Vec3Arg inPos, Vec3Arg inTarget, Vec3Arg inUp)
{
    Vec3 direction = (inTarget - inPos).NormalizedOr(-Vec3::sAxisZ());
    Vec3 right = direction.Cross(inUp).NormalizedOr(Vec3::sAxisX());
    Vec3 up = right.Cross(direction);
    return Mat44(Vec4(right, 0), Vec4(up, 0), Vec4(-direction, 0), Vec4(inPos, 1)).InversedRotationTranslation();
}

bool Mat44::operator == (Mat44Arg inM2) const
{
    return UVec4::sAnd(
        UVec4::sAnd(Vec4::sEquals(mCol[0], inM2.mCol[0]), Vec4::sEquals(mCol[1], inM2.mCol[1])),
        UVec4::sAnd(Vec4::sEquals(mCol[2], inM2.mCol[2]), Vec4::sEquals(mCol[3], inM2.mCol[3]))
    ).TestAllTrue();
}

bool Mat44::IsClose(Mat44Arg inM2, float inMaxDistSq) const
{
    for (int i = 0; i < 4; ++i)
        if (!mCol[i].IsClose(inM2.mCol[i], inMaxDistSq))
            return false;
    return true;
}

Mat44 Mat44::operator * (Mat44Arg inM) const
{
    Mat44 result;
#if defined(JPH_USE_SSE)
    for (int i = 0; i < 4; ++i)
    {
        __m128 c = inM.mCol[i].mValue;
        __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)));
        t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1))));
        t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2))));
        t = _mm_add_ps(t, _mm_mul_ps(mCol[3].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 3, 3, 3))));
        result.mCol[i].mValue = t;
    }
#elif defined(JPH_USE_NEON)
    for (int i = 0; i < 4; ++i)
    {
        Type c = inM.mCol[i].mValue;
        Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(c, 0));
        t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(c, 1));
        t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(c, 2));
        t = vmlaq_f32(t, mCol[3].mValue, vdupq_laneq_f32(c, 3));
        result.mCol[i].mValue = t;
    }
#else
    for (int i = 0; i < 4; ++i)
        result.mCol[i] = mCol[0] * inM.mCol[i].mF32[0] + mCol[1] * inM.mCol[i].mF32[1] + mCol[2] * inM.mCol[i].mF32[2] + mCol[3] * inM.mCol[i].mF32[3];
#endif
    return result;
}

Vec3 Mat44::operator * (Vec3Arg inV) const
{
#if defined(JPH_USE_SSE)
    __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0)));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1))));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
    t = _mm_add_ps(t, mCol[3].mValue);
    return Vec3::sFixW(t);
#elif defined(JPH_USE_NEON)
    Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(inV.mValue, 0));
    t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(inV.mValue, 1));
    t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(inV.mValue, 2));
    t = vaddq_f32(t, mCol[3].mValue); // Don't combine this with the first mul into a fused multiply add; it causes precision issues
    return Vec3::sFixW(t);
#else
    return Vec3(
        mCol[0].mF32[0] * inV.mF32[0] + mCol[1].mF32[0] * inV.mF32[1] + mCol[2].mF32[0] * inV.mF32[2] + mCol[3].mF32[0],
        mCol[0].mF32[1] * inV.mF32[0] + mCol[1].mF32[1] * inV.mF32[1] + mCol[2].mF32[1] * inV.mF32[2] + mCol[3].mF32[1],
        mCol[0].mF32[2] * inV.mF32[0] + mCol[1].mF32[2] * inV.mF32[1] + mCol[2].mF32[2] * inV.mF32[2] + mCol[3].mF32[2]);
#endif
}

Vec4 Mat44::operator * (Vec4Arg inV) const
{
#if defined(JPH_USE_SSE)
    __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0)));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1))));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[3].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(3, 3, 3, 3))));
    return t;
#elif defined(JPH_USE_NEON)
    Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(inV.mValue, 0));
    t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(inV.mValue, 1));
    t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(inV.mValue, 2));
    t = vmlaq_f32(t, mCol[3].mValue, vdupq_laneq_f32(inV.mValue, 3));
    return t;
#else
    return Vec4(
        mCol[0].mF32[0] * inV.mF32[0] + mCol[1].mF32[0] * inV.mF32[1] + mCol[2].mF32[0] * inV.mF32[2] + mCol[3].mF32[0] * inV.mF32[3],
        mCol[0].mF32[1] * inV.mF32[0] + mCol[1].mF32[1] * inV.mF32[1] + mCol[2].mF32[1] * inV.mF32[2] + mCol[3].mF32[1] * inV.mF32[3],
        mCol[0].mF32[2] * inV.mF32[0] + mCol[1].mF32[2] * inV.mF32[1] + mCol[2].mF32[2] * inV.mF32[2] + mCol[3].mF32[2] * inV.mF32[3],
        mCol[0].mF32[3] * inV.mF32[0] + mCol[1].mF32[3] * inV.mF32[1] + mCol[2].mF32[3] * inV.mF32[2] + mCol[3].mF32[3] * inV.mF32[3]);
#endif
}

Vec3 Mat44::Multiply3x3(Vec3Arg inV) const
{
#if defined(JPH_USE_SSE)
    __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0)));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1))));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
    return Vec3::sFixW(t);
#elif defined(JPH_USE_NEON)
    Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(inV.mValue, 0));
    t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(inV.mValue, 1));
    t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(inV.mValue, 2));
    return Vec3::sFixW(t);
#else
    return Vec3(
        mCol[0].mF32[0] * inV.mF32[0] + mCol[1].mF32[0] * inV.mF32[1] + mCol[2].mF32[0] * inV.mF32[2],
        mCol[0].mF32[1] * inV.mF32[0] + mCol[1].mF32[1] * inV.mF32[1] + mCol[2].mF32[1] * inV.mF32[2],
        mCol[0].mF32[2] * inV.mF32[0] + mCol[1].mF32[2] * inV.mF32[1] + mCol[2].mF32[2] * inV.mF32[2]);
#endif
}

Vec3 Mat44::Multiply3x3Transposed(Vec3Arg inV) const
{
#if defined(JPH_USE_SSE4_1)
    __m128 x = _mm_dp_ps(mCol[0].mValue, inV.mValue, 0x7f);
    __m128 y = _mm_dp_ps(mCol[1].mValue, inV.mValue, 0x7f);
    __m128 xy = _mm_blend_ps(x, y, 0b0010);
    __m128 z = _mm_dp_ps(mCol[2].mValue, inV.mValue, 0x7f);
    __m128 xyzz = _mm_blend_ps(xy, z, 0b1100);
    return xyzz;
#else
    return Transposed3x3().Multiply3x3(inV);
#endif
}

Mat44 Mat44::Multiply3x3(Mat44Arg inM) const
{
    JPH_ASSERT(mCol[0][3] == 0.0f);
    JPH_ASSERT(mCol[1][3] == 0.0f);
    JPH_ASSERT(mCol[2][3] == 0.0f);
    Mat44 result;
#if defined(JPH_USE_SSE)
    for (int i = 0; i < 3; ++i)
    {
        __m128 c = inM.mCol[i].mValue;
        __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)));
        t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1))));
        t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2))));
        result.mCol[i].mValue = t;
    }
#elif defined(JPH_USE_NEON)
    for (int i = 0; i < 3; ++i)
    {
        Type c = inM.mCol[i].mValue;
        Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(c, 0));
        t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(c, 1));
        t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(c, 2));
        result.mCol[i].mValue = t;
    }
#else
    for (int i = 0; i < 3; ++i)
        result.mCol[i] = mCol[0] * inM.mCol[i].mF32[0] + mCol[1] * inM.mCol[i].mF32[1] + mCol[2] * inM.mCol[i].mF32[2];
#endif
    result.mCol[3] = Vec4(0, 0, 0, 1);
    return result;
}
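
// Computes Transposed3x3() * inM, i.e. the 3x3 block of this matrix is transposed before multiplying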
Mat44 Mat44::Multiply3x3LeftTransposed(Mat44Arg inM) const
{
    // Transpose left hand side
    Mat44 trans = Transposed3x3();
    // Do 3x3 matrix multiply
    Mat44 result;
    result.mCol[0] = trans.mCol[0] * inM.mCol[0].SplatX() + trans.mCol[1] * inM.mCol[0].SplatY() + trans.mCol[2] * inM.mCol[0].SplatZ();
    result.mCol[1] = trans.mCol[0] * inM.mCol[1].SplatX() + trans.mCol[1] * inM.mCol[1].SplatY() + trans.mCol[2] * inM.mCol[1].SplatZ();
    result.mCol[2] = trans.mCol[0] * inM.mCol[2].SplatX() + trans.mCol[1] * inM.mCol[2].SplatY() + trans.mCol[2] * inM.mCol[2].SplatZ();
    result.mCol[3] = Vec4(0, 0, 0, 1);
    return result;
}
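
// Computes this * inM.Transposed3x3(): column j of the result is mCol[0] * inM(j, 0) + mCol[1] * inM(j, 1) + mCol[2] * inM(j, 2)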
Mat44 Mat44::Multiply3x3RightTransposed(Mat44Arg inM) const
{
    JPH_ASSERT(mCol[0][3] == 0.0f);
    JPH_ASSERT(mCol[1][3] == 0.0f);
    JPH_ASSERT(mCol[2][3] == 0.0f);
    Mat44 result;
    result.mCol[0] = mCol[0] * inM.mCol[0].SplatX() + mCol[1] * inM.mCol[1].SplatX() + mCol[2] * inM.mCol[2].SplatX();
    result.mCol[1] = mCol[0] * inM.mCol[0].SplatY() + mCol[1] * inM.mCol[1].SplatY() + mCol[2] * inM.mCol[2].SplatY();
    result.mCol[2] = mCol[0] * inM.mCol[0].SplatZ() + mCol[1] * inM.mCol[1].SplatZ() + mCol[2] * inM.mCol[2].SplatZ();
    result.mCol[3] = Vec4(0, 0, 0, 1);
    return result;
}

Mat44 Mat44::operator * (float inV) const
{
    Vec4 multiplier = Vec4::sReplicate(inV);
    Mat44 result;
    for (int c = 0; c < 4; ++c)
        result.mCol[c] = mCol[c] * multiplier;
    return result;
}

Mat44 &Mat44::operator *= (float inV)
{
    for (int c = 0; c < 4; ++c)
        mCol[c] *= inV;
    return *this;
}

Mat44 Mat44::operator + (Mat44Arg inM) const
{
    Mat44 result;
    for (int i = 0; i < 4; ++i)
        result.mCol[i] = mCol[i] + inM.mCol[i];
    return result;
}

Mat44 Mat44::operator - () const
{
    Mat44 result;
    for (int i = 0; i < 4; ++i)
        result.mCol[i] = -mCol[i];
    return result;
}

Mat44 Mat44::operator - (Mat44Arg inM) const
{
    Mat44 result;
    for (int i = 0; i < 4; ++i)
        result.mCol[i] = mCol[i] - inM.mCol[i];
    return result;
}

Mat44 &Mat44::operator += (Mat44Arg inM)
{
    for (int c = 0; c < 4; ++c)
        mCol[c] += inM.mCol[c];
    return *this;
}

void Mat44::StoreFloat4x4(Float4 *outV) const
{
    for (int c = 0; c < 4; ++c)
        mCol[c].StoreFloat4(outV + c);
}

Mat44 Mat44::Transposed() const
{
#if defined(JPH_USE_SSE)
    __m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 tmp3 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2));
    __m128 tmp2 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 tmp4 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(3, 2, 3, 2));
    Mat44 result;
    result.mCol[0].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(2, 0, 2, 0));
    result.mCol[1].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(3, 1, 3, 1));
    result.mCol[2].mValue = _mm_shuffle_ps(tmp3, tmp4, _MM_SHUFFLE(2, 0, 2, 0));
    result.mCol[3].mValue = _mm_shuffle_ps(tmp3, tmp4, _MM_SHUFFLE(3, 1, 3, 1));
    return result;
#elif defined(JPH_USE_NEON)
    float32x4x2_t tmp1 = vzipq_f32(mCol[0].mValue, mCol[2].mValue);
    float32x4x2_t tmp2 = vzipq_f32(mCol[1].mValue, mCol[3].mValue);
    float32x4x2_t tmp3 = vzipq_f32(tmp1.val[0], tmp2.val[0]);
    float32x4x2_t tmp4 = vzipq_f32(tmp1.val[1], tmp2.val[1]);
    Mat44 result;
    result.mCol[0].mValue = tmp3.val[0];
    result.mCol[1].mValue = tmp3.val[1];
    result.mCol[2].mValue = tmp4.val[0];
    result.mCol[3].mValue = tmp4.val[1];
    return result;
#else
    Mat44 result;
    for (int c = 0; c < 4; ++c)
        for (int r = 0; r < 4; ++r)
            result.mCol[r].mF32[c] = mCol[c].mF32[r];
    return result;
#endif
}

Mat44 Mat44::Transposed3x3() const
{
#if defined(JPH_USE_SSE)
    __m128 zero = _mm_setzero_ps();
    __m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 tmp3 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2));
    __m128 tmp2 = _mm_shuffle_ps(mCol[2].mValue, zero, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 tmp4 = _mm_shuffle_ps(mCol[2].mValue, zero, _MM_SHUFFLE(3, 2, 3, 2));
    Mat44 result;
    result.mCol[0].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(2, 0, 2, 0));
    result.mCol[1].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(3, 1, 3, 1));
    result.mCol[2].mValue = _mm_shuffle_ps(tmp3, tmp4, _MM_SHUFFLE(2, 0, 2, 0));
#elif defined(JPH_USE_NEON)
    float32x4x2_t tmp1 = vzipq_f32(mCol[0].mValue, mCol[2].mValue);
    float32x4x2_t tmp2 = vzipq_f32(mCol[1].mValue, vdupq_n_f32(0));
    float32x4x2_t tmp3 = vzipq_f32(tmp1.val[0], tmp2.val[0]);
    float32x4x2_t tmp4 = vzipq_f32(tmp1.val[1], tmp2.val[1]);
    Mat44 result;
    result.mCol[0].mValue = tmp3.val[0];
    result.mCol[1].mValue = tmp3.val[1];
    result.mCol[2].mValue = tmp4.val[0];
#else
    Mat44 result;
    for (int c = 0; c < 3; ++c)
    {
        for (int r = 0; r < 3; ++r)
            result.mCol[c].mF32[r] = mCol[r].mF32[c];
        result.mCol[c].mF32[3] = 0;
    }
#endif
    result.mCol[3] = Vec4(0, 0, 0, 1);
    return result;
}
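
// Full 4x4 inverse via Cramer's rule: cofactors divided by the determinant. No pivoting is done,
// so a singular matrix yields inf/NaN components instead of failing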
Mat44 Mat44::Inversed() const
{
#if defined(JPH_USE_SSE)
    // Algorithm from: http://download.intel.com/design/PentiumIII/sml/24504301.pdf
    // Streaming SIMD Extensions - Inverse of 4x4 Matrix
    // Adapted to load data using _mm_shuffle_ps instead of loading from memory
    // Replaced _mm_rcp_ps with _mm_div_ps for better accuracy
    __m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 row1 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 row0 = _mm_shuffle_ps(tmp1, row1, _MM_SHUFFLE(2, 0, 2, 0));
    row1 = _mm_shuffle_ps(row1, tmp1, _MM_SHUFFLE(3, 1, 3, 1));
    tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2));
    __m128 row3 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(3, 2, 3, 2));
    __m128 row2 = _mm_shuffle_ps(tmp1, row3, _MM_SHUFFLE(2, 0, 2, 0));
    row3 = _mm_shuffle_ps(row3, tmp1, _MM_SHUFFLE(3, 1, 3, 1));
    tmp1 = _mm_mul_ps(row2, row3);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    __m128 minor0 = _mm_mul_ps(row1, tmp1);
    __m128 minor1 = _mm_mul_ps(row0, tmp1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0);
    minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1);
    minor1 = _mm_shuffle_ps(minor1, minor1, _MM_SHUFFLE(1, 0, 3, 2));
    tmp1 = _mm_mul_ps(row1, row2);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0);
    __m128 minor3 = _mm_mul_ps(row0, tmp1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1));
    minor3 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor3);
    minor3 = _mm_shuffle_ps(minor3, minor3, _MM_SHUFFLE(1, 0, 3, 2));
    tmp1 = _mm_mul_ps(_mm_shuffle_ps(row1, row1, _MM_SHUFFLE(1, 0, 3, 2)), row3);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    row2 = _mm_shuffle_ps(row2, row2, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0);
    __m128 minor2 = _mm_mul_ps(row0, tmp1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1));
    minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2);
    minor2 = _mm_shuffle_ps(minor2, minor2, _MM_SHUFFLE(1, 0, 3, 2));
    tmp1 = _mm_mul_ps(row0, row1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2);
    minor3 = _mm_sub_ps(_mm_mul_ps(row2, tmp1), minor3);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2);
    minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row2, tmp1));
    tmp1 = _mm_mul_ps(row0, row3);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1));
    minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1);
    minor2 = _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1));
    tmp1 = _mm_mul_ps(row0, row2);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1);
    minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row1, tmp1));
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1));
    minor3 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor3);
    __m128 det = _mm_mul_ps(row0, minor0);
    det = _mm_add_ps(_mm_shuffle_ps(det, det, _MM_SHUFFLE(2, 3, 0, 1)), det); // Original code did (x + z) + (y + w), changed to (x + y) + (z + w) to match the ARM code below and make the result cross platform deterministic
    det = _mm_add_ss(_mm_shuffle_ps(det, det, _MM_SHUFFLE(1, 0, 3, 2)), det);
    det = _mm_div_ss(_mm_set_ss(1.0f), det);
    det = _mm_shuffle_ps(det, det, _MM_SHUFFLE(0, 0, 0, 0));
    Mat44 result;
    result.mCol[0].mValue = _mm_mul_ps(det, minor0);
    result.mCol[1].mValue = _mm_mul_ps(det, minor1);
    result.mCol[2].mValue = _mm_mul_ps(det, minor2);
    result.mCol[3].mValue = _mm_mul_ps(det, minor3);
    return result;
#elif defined(JPH_USE_NEON)
    // Adapted from the SSE version; there are surprisingly few articles on the internet about efficient ways of calculating an inverse on ARM
    Type tmp1 = JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 0, 1, 4, 5);
    Type row1 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, mCol[3].mValue, 0, 1, 4, 5);
    Type row0 = JPH_NEON_SHUFFLE_F32x4(tmp1, row1, 0, 2, 4, 6);
    row1 = JPH_NEON_SHUFFLE_F32x4(row1, tmp1, 1, 3, 5, 7);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 2, 3, 6, 7);
    Type row3 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, mCol[3].mValue, 2, 3, 6, 7);
    Type row2 = JPH_NEON_SHUFFLE_F32x4(tmp1, row3, 0, 2, 4, 6);
    row3 = JPH_NEON_SHUFFLE_F32x4(row3, tmp1, 1, 3, 5, 7);
    tmp1 = vmulq_f32(row2, row3);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    Type minor0 = vmulq_f32(row1, tmp1);
    Type minor1 = vmulq_f32(row0, tmp1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor0 = vsubq_f32(vmulq_f32(row1, tmp1), minor0);
    minor1 = vsubq_f32(vmulq_f32(row0, tmp1), minor1);
    minor1 = JPH_NEON_SHUFFLE_F32x4(minor1, minor1, 2, 3, 0, 1);
    tmp1 = vmulq_f32(row1, row2);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor0 = vaddq_f32(vmulq_f32(row3, tmp1), minor0);
    Type minor3 = vmulq_f32(row0, tmp1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor0 = vsubq_f32(minor0, vmulq_f32(row3, tmp1));
    minor3 = vsubq_f32(vmulq_f32(row0, tmp1), minor3);
    minor3 = JPH_NEON_SHUFFLE_F32x4(minor3, minor3, 2, 3, 0, 1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(row1, row1, 2, 3, 0, 1);
    tmp1 = vmulq_f32(tmp1, row3);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    row2 = JPH_NEON_SHUFFLE_F32x4(row2, row2, 2, 3, 0, 1);
    minor0 = vaddq_f32(vmulq_f32(row2, tmp1), minor0);
    Type minor2 = vmulq_f32(row0, tmp1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor0 = vsubq_f32(minor0, vmulq_f32(row2, tmp1));
    minor2 = vsubq_f32(vmulq_f32(row0, tmp1), minor2);
    minor2 = JPH_NEON_SHUFFLE_F32x4(minor2, minor2, 2, 3, 0, 1);
    tmp1 = vmulq_f32(row0, row1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor2 = vaddq_f32(vmulq_f32(row3, tmp1), minor2);
    minor3 = vsubq_f32(vmulq_f32(row2, tmp1), minor3);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor2 = vsubq_f32(vmulq_f32(row3, tmp1), minor2);
    minor3 = vsubq_f32(minor3, vmulq_f32(row2, tmp1));
    tmp1 = vmulq_f32(row0, row3);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor1 = vsubq_f32(minor1, vmulq_f32(row2, tmp1));
    minor2 = vaddq_f32(vmulq_f32(row1, tmp1), minor2);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor1 = vaddq_f32(vmulq_f32(row2, tmp1), minor1);
    minor2 = vsubq_f32(minor2, vmulq_f32(row1, tmp1));
    tmp1 = vmulq_f32(row0, row2);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor1 = vaddq_f32(vmulq_f32(row3, tmp1), minor1);
    minor3 = vsubq_f32(minor3, vmulq_f32(row1, tmp1));
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor1 = vsubq_f32(minor1, vmulq_f32(row3, tmp1));
    minor3 = vaddq_f32(vmulq_f32(row1, tmp1), minor3);
    Type det = vmulq_f32(row0, minor0);
    det = vdupq_n_f32(vaddvq_f32(det));
    det = vdivq_f32(vdupq_n_f32(1.0f), det);
    Mat44 result;
    result.mCol[0].mValue = vmulq_f32(det, minor0);
    result.mCol[1].mValue = vmulq_f32(det, minor1);
    result.mCol[2].mValue = vmulq_f32(det, minor2);
    result.mCol[3].mValue = vmulq_f32(det, minor3);
    return result;
#else
    float m00 = JPH_EL(0, 0), m10 = JPH_EL(1, 0), m20 = JPH_EL(2, 0), m30 = JPH_EL(3, 0);
    float m01 = JPH_EL(0, 1), m11 = JPH_EL(1, 1), m21 = JPH_EL(2, 1), m31 = JPH_EL(3, 1);
    float m02 = JPH_EL(0, 2), m12 = JPH_EL(1, 2), m22 = JPH_EL(2, 2), m32 = JPH_EL(3, 2);
    float m03 = JPH_EL(0, 3), m13 = JPH_EL(1, 3), m23 = JPH_EL(2, 3), m33 = JPH_EL(3, 3);
    float m10211120 = m10 * m21 - m11 * m20;
    float m10221220 = m10 * m22 - m12 * m20;
    float m10231320 = m10 * m23 - m13 * m20;
    float m10311130 = m10 * m31 - m11 * m30;
    float m10321230 = m10 * m32 - m12 * m30;
    float m10331330 = m10 * m33 - m13 * m30;
    float m11221221 = m11 * m22 - m12 * m21;
    float m11231321 = m11 * m23 - m13 * m21;
    float m11321231 = m11 * m32 - m12 * m31;
    float m11331331 = m11 * m33 - m13 * m31;
    float m12231322 = m12 * m23 - m13 * m22;
    float m12331332 = m12 * m33 - m13 * m32;
    float m20312130 = m20 * m31 - m21 * m30;
    float m20322230 = m20 * m32 - m22 * m30;
    float m20332330 = m20 * m33 - m23 * m30;
    float m21322231 = m21 * m32 - m22 * m31;
    float m21332331 = m21 * m33 - m23 * m31;
    float m22332332 = m22 * m33 - m23 * m32;
    Vec4 col0(m11 * m22332332 - m12 * m21332331 + m13 * m21322231, -m10 * m22332332 + m12 * m20332330 - m13 * m20322230, m10 * m21332331 - m11 * m20332330 + m13 * m20312130, -m10 * m21322231 + m11 * m20322230 - m12 * m20312130);
    Vec4 col1(-m01 * m22332332 + m02 * m21332331 - m03 * m21322231, m00 * m22332332 - m02 * m20332330 + m03 * m20322230, -m00 * m21332331 + m01 * m20332330 - m03 * m20312130, m00 * m21322231 - m01 * m20322230 + m02 * m20312130);
    Vec4 col2(m01 * m12331332 - m02 * m11331331 + m03 * m11321231, -m00 * m12331332 + m02 * m10331330 - m03 * m10321230, m00 * m11331331 - m01 * m10331330 + m03 * m10311130, -m00 * m11321231 + m01 * m10321230 - m02 * m10311130);
    Vec4 col3(-m01 * m12231322 + m02 * m11231321 - m03 * m11221221, m00 * m12231322 - m02 * m10231320 + m03 * m10221220, -m00 * m11231321 + m01 * m10231320 - m03 * m10211120, m00 * m11221221 - m01 * m10221220 + m02 * m10211120);
    float det = m00 * col0.mF32[0] + m01 * col0.mF32[1] + m02 * col0.mF32[2] + m03 * col0.mF32[3];
    return Mat44(col0 / det, col1 / det, col2 / det, col3 / det);
#endif
}

Mat44 Mat44::InversedRotationTranslation() const
{
    Mat44 m = Transposed3x3();
    m.SetTranslation(-m.Multiply3x3(GetTranslation()));
    return m;
}

float Mat44::GetDeterminant3x3() const
{
    return GetAxisX().Dot(GetAxisY().Cross(GetAxisZ()));
}
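
// The adjugate satisfies M * M.Adjointed3x3() == M.GetDeterminant3x3() * Mat44::sIdentity() on the
// 3x3 block, which allows postponing (or avoiding) the division by the determinant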
Mat44 Mat44::Adjointed3x3() const
{
    // Adapted from Inversed() to remove 4th column and the division by the determinant
    // Note: This can be optimized.
    JPH_ASSERT(mCol[0][3] == 0.0f);
    JPH_ASSERT(mCol[1][3] == 0.0f);
    JPH_ASSERT(mCol[2][3] == 0.0f);
#if defined(JPH_USE_SSE)
    __m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 row1 = _mm_shuffle_ps(mCol[2].mValue, _mm_setzero_ps(), _MM_SHUFFLE(1, 0, 1, 0));
    __m128 row0 = _mm_shuffle_ps(tmp1, row1, _MM_SHUFFLE(2, 0, 2, 0));
    row1 = _mm_shuffle_ps(row1, tmp1, _MM_SHUFFLE(3, 1, 3, 1));
    tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2));
    __m128 row3 = _mm_shuffle_ps(mCol[2].mValue, _mm_set_ps(1, 0, 0, 0), _MM_SHUFFLE(3, 2, 3, 2));
    __m128 row2 = _mm_shuffle_ps(tmp1, row3, _MM_SHUFFLE(2, 0, 2, 0));
    row3 = _mm_shuffle_ps(row3, tmp1, _MM_SHUFFLE(3, 1, 3, 1));
    tmp1 = _mm_mul_ps(row2, row3);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    __m128 minor0 = _mm_mul_ps(row1, tmp1);
    __m128 minor1 = _mm_mul_ps(row0, tmp1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0);
    minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1);
    minor1 = _mm_shuffle_ps(minor1, minor1, _MM_SHUFFLE(1, 0, 3, 2));
    tmp1 = _mm_mul_ps(row1, row2);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1));
    tmp1 = _mm_mul_ps(_mm_shuffle_ps(row1, row1, _MM_SHUFFLE(1, 0, 3, 2)), row3);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    row2 = _mm_shuffle_ps(row2, row2, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0);
    __m128 minor2 = _mm_mul_ps(row0, tmp1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1));
    minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2);
    minor2 = _mm_shuffle_ps(minor2, minor2, _MM_SHUFFLE(1, 0, 3, 2));
    tmp1 = _mm_mul_ps(row0, row1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2);
    tmp1 = _mm_mul_ps(row0, row3);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1));
    minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1);
    minor2 = _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1));
    tmp1 = _mm_mul_ps(row0, row2);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1));
    Mat44 result;
    result.mCol[0].mValue = minor0;
    result.mCol[1].mValue = minor1;
    result.mCol[2].mValue = minor2;
    result.mCol[3] = Vec4(0, 0, 0, 1);
    return result;
#elif defined(JPH_USE_NEON)
    Type v0001 = vsetq_lane_f32(1, vdupq_n_f32(0), 3);
    Type tmp1 = JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 0, 1, 4, 5);
    Type row1 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, v0001, 0, 1, 4, 5);
    Type row0 = JPH_NEON_SHUFFLE_F32x4(tmp1, row1, 0, 2, 4, 6);
    row1 = JPH_NEON_SHUFFLE_F32x4(row1, tmp1, 1, 3, 5, 7);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 2, 3, 6, 7);
    Type row3 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, v0001, 2, 3, 6, 7);
    Type row2 = JPH_NEON_SHUFFLE_F32x4(tmp1, row3, 0, 2, 4, 6);
    row3 = JPH_NEON_SHUFFLE_F32x4(row3, tmp1, 1, 3, 5, 7);
    tmp1 = vmulq_f32(row2, row3);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    Type minor0 = vmulq_f32(row1, tmp1);
    Type minor1 = vmulq_f32(row0, tmp1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor0 = vsubq_f32(vmulq_f32(row1, tmp1), minor0);
    minor1 = vsubq_f32(vmulq_f32(row0, tmp1), minor1);
    minor1 = JPH_NEON_SHUFFLE_F32x4(minor1, minor1, 2, 3, 0, 1);
    tmp1 = vmulq_f32(row1, row2);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor0 = vaddq_f32(vmulq_f32(row3, tmp1), minor0);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor0 = vsubq_f32(minor0, vmulq_f32(row3, tmp1));
    tmp1 = JPH_NEON_SHUFFLE_F32x4(row1, row1, 2, 3, 0, 1);
    tmp1 = vmulq_f32(tmp1, row3);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    row2 = JPH_NEON_SHUFFLE_F32x4(row2, row2, 2, 3, 0, 1);
    minor0 = vaddq_f32(vmulq_f32(row2, tmp1), minor0);
    Type minor2 = vmulq_f32(row0, tmp1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor0 = vsubq_f32(minor0, vmulq_f32(row2, tmp1));
    minor2 = vsubq_f32(vmulq_f32(row0, tmp1), minor2);
    minor2 = JPH_NEON_SHUFFLE_F32x4(minor2, minor2, 2, 3, 0, 1);
    tmp1 = vmulq_f32(row0, row1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor2 = vaddq_f32(vmulq_f32(row3, tmp1), minor2);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor2 = vsubq_f32(vmulq_f32(row3, tmp1), minor2);
    tmp1 = vmulq_f32(row0, row3);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor1 = vsubq_f32(minor1, vmulq_f32(row2, tmp1));
    minor2 = vaddq_f32(vmulq_f32(row1, tmp1), minor2);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor1 = vaddq_f32(vmulq_f32(row2, tmp1), minor1);
    minor2 = vsubq_f32(minor2, vmulq_f32(row1, tmp1));
    tmp1 = vmulq_f32(row0, row2);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor1 = vaddq_f32(vmulq_f32(row3, tmp1), minor1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor1 = vsubq_f32(minor1, vmulq_f32(row3, tmp1));
    Mat44 result;
    result.mCol[0].mValue = minor0;
    result.mCol[1].mValue = minor1;
    result.mCol[2].mValue = minor2;
    result.mCol[3].mValue = v0001;
    return result;
#else
    return Mat44(
        Vec4(JPH_EL(1, 1) * JPH_EL(2, 2) - JPH_EL(1, 2) * JPH_EL(2, 1),
            JPH_EL(1, 2) * JPH_EL(2, 0) - JPH_EL(1, 0) * JPH_EL(2, 2),
            JPH_EL(1, 0) * JPH_EL(2, 1) - JPH_EL(1, 1) * JPH_EL(2, 0),
            0),
        Vec4(JPH_EL(0, 2) * JPH_EL(2, 1) - JPH_EL(0, 1) * JPH_EL(2, 2),
            JPH_EL(0, 0) * JPH_EL(2, 2) - JPH_EL(0, 2) * JPH_EL(2, 0),
            JPH_EL(0, 1) * JPH_EL(2, 0) - JPH_EL(0, 0) * JPH_EL(2, 1),
            0),
        Vec4(JPH_EL(0, 1) * JPH_EL(1, 2) - JPH_EL(0, 2) * JPH_EL(1, 1),
            JPH_EL(0, 2) * JPH_EL(1, 0) - JPH_EL(0, 0) * JPH_EL(1, 2),
            JPH_EL(0, 0) * JPH_EL(1, 1) - JPH_EL(0, 1) * JPH_EL(1, 0),
            0),
        Vec4(0, 0, 0, 1));
#endif
}

Mat44 Mat44::Inversed3x3() const
{
    // Adapted from Inversed() to remove 4th column
    // Note: This can be optimized.
    JPH_ASSERT(mCol[0][3] == 0.0f);
    JPH_ASSERT(mCol[1][3] == 0.0f);
    JPH_ASSERT(mCol[2][3] == 0.0f);
#if defined(JPH_USE_SSE)
    __m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 row1 = _mm_shuffle_ps(mCol[2].mValue, _mm_setzero_ps(), _MM_SHUFFLE(1, 0, 1, 0));
    __m128 row0 = _mm_shuffle_ps(tmp1, row1, _MM_SHUFFLE(2, 0, 2, 0));
    row1 = _mm_shuffle_ps(row1, tmp1, _MM_SHUFFLE(3, 1, 3, 1));
    tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2));
    __m128 row3 = _mm_shuffle_ps(mCol[2].mValue, _mm_set_ps(1, 0, 0, 0), _MM_SHUFFLE(3, 2, 3, 2));
    __m128 row2 = _mm_shuffle_ps(tmp1, row3, _MM_SHUFFLE(2, 0, 2, 0));
    row3 = _mm_shuffle_ps(row3, tmp1, _MM_SHUFFLE(3, 1, 3, 1));
    tmp1 = _mm_mul_ps(row2, row3);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    __m128 minor0 = _mm_mul_ps(row1, tmp1);
    __m128 minor1 = _mm_mul_ps(row0, tmp1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0);
    minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1);
    minor1 = _mm_shuffle_ps(minor1, minor1, _MM_SHUFFLE(1, 0, 3, 2));
    tmp1 = _mm_mul_ps(row1, row2);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1));
    tmp1 = _mm_mul_ps(_mm_shuffle_ps(row1, row1, _MM_SHUFFLE(1, 0, 3, 2)), row3);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    row2 = _mm_shuffle_ps(row2, row2, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0);
    __m128 minor2 = _mm_mul_ps(row0, tmp1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1));
    minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2);
    minor2 = _mm_shuffle_ps(minor2, minor2, _MM_SHUFFLE(1, 0, 3, 2));
    tmp1 = _mm_mul_ps(row0, row1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2);
    tmp1 = _mm_mul_ps(row0, row3);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1));
    minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1);
    minor2 = _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1));
    tmp1 = _mm_mul_ps(row0, row2);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1));
    __m128 det = _mm_mul_ps(row0, minor0);
    det = _mm_add_ps(_mm_shuffle_ps(det, det, _MM_SHUFFLE(2, 3, 0, 1)), det); // Original code did (x + z) + (y + w), changed to (x + y) + (z + w) to match the ARM code below and make the result cross platform deterministic
    det = _mm_add_ss(_mm_shuffle_ps(det, det, _MM_SHUFFLE(1, 0, 3, 2)), det);
    det = _mm_div_ss(_mm_set_ss(1.0f), det);
    det = _mm_shuffle_ps(det, det, _MM_SHUFFLE(0, 0, 0, 0));
    Mat44 result;
    result.mCol[0].mValue = _mm_mul_ps(det, minor0);
    result.mCol[1].mValue = _mm_mul_ps(det, minor1);
    result.mCol[2].mValue = _mm_mul_ps(det, minor2);
    result.mCol[3] = Vec4(0, 0, 0, 1);
    return result;
#elif defined(JPH_USE_NEON)
    Type v0001 = vsetq_lane_f32(1, vdupq_n_f32(0), 3);
    Type tmp1 = JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 0, 1, 4, 5);
    Type row1 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, v0001, 0, 1, 4, 5);
    Type row0 = JPH_NEON_SHUFFLE_F32x4(tmp1, row1, 0, 2, 4, 6);
    row1 = JPH_NEON_SHUFFLE_F32x4(row1, tmp1, 1, 3, 5, 7);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 2, 3, 6, 7);
    Type row3 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, v0001, 2, 3, 6, 7);
    Type row2 = JPH_NEON_SHUFFLE_F32x4(tmp1, row3, 0, 2, 4, 6);
    row3 = JPH_NEON_SHUFFLE_F32x4(row3, tmp1, 1, 3, 5, 7);
    tmp1 = vmulq_f32(row2, row3);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    Type minor0 = vmulq_f32(row1, tmp1);
    Type minor1 = vmulq_f32(row0, tmp1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor0 = vsubq_f32(vmulq_f32(row1, tmp1), minor0);
    minor1 = vsubq_f32(vmulq_f32(row0, tmp1), minor1);
    minor1 = JPH_NEON_SHUFFLE_F32x4(minor1, minor1, 2, 3, 0, 1);
    tmp1 = vmulq_f32(row1, row2);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor0 = vaddq_f32(vmulq_f32(row3, tmp1), minor0);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor0 = vsubq_f32(minor0, vmulq_f32(row3, tmp1));
    tmp1 = JPH_NEON_SHUFFLE_F32x4(row1, row1, 2, 3, 0, 1);
    tmp1 = vmulq_f32(tmp1, row3);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    row2 = JPH_NEON_SHUFFLE_F32x4(row2, row2, 2, 3, 0, 1);
    minor0 = vaddq_f32(vmulq_f32(row2, tmp1), minor0);
    Type minor2 = vmulq_f32(row0, tmp1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor0 = vsubq_f32(minor0, vmulq_f32(row2, tmp1));
    minor2 = vsubq_f32(vmulq_f32(row0, tmp1), minor2);
    minor2 = JPH_NEON_SHUFFLE_F32x4(minor2, minor2, 2, 3, 0, 1);
    tmp1 = vmulq_f32(row0, row1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor2 = vaddq_f32(vmulq_f32(row3, tmp1), minor2);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor2 = vsubq_f32(vmulq_f32(row3, tmp1), minor2);
    tmp1 = vmulq_f32(row0, row3);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor1 = vsubq_f32(minor1, vmulq_f32(row2, tmp1));
    minor2 = vaddq_f32(vmulq_f32(row1, tmp1), minor2);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor1 = vaddq_f32(vmulq_f32(row2, tmp1), minor1);
    minor2 = vsubq_f32(minor2, vmulq_f32(row1, tmp1));
    tmp1 = vmulq_f32(row0, row2);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor1 = vaddq_f32(vmulq_f32(row3, tmp1), minor1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor1 = vsubq_f32(minor1, vmulq_f32(row3, tmp1));
    Type det = vmulq_f32(row0, minor0);
    det = vdupq_n_f32(vaddvq_f32(det));
    det = vdivq_f32(vdupq_n_f32(1.0f), det);
    Mat44 result;
    result.mCol[0].mValue = vmulq_f32(det, minor0);
    result.mCol[1].mValue = vmulq_f32(det, minor1);
    result.mCol[2].mValue = vmulq_f32(det, minor2);
    result.mCol[3].mValue = v0001;
    return result;
#else
    float det = GetDeterminant3x3();
    return Mat44(
        Vec4((JPH_EL(1, 1) * JPH_EL(2, 2) - JPH_EL(1, 2) * JPH_EL(2, 1)) / det,
            (JPH_EL(1, 2) * JPH_EL(2, 0) - JPH_EL(1, 0) * JPH_EL(2, 2)) / det,
            (JPH_EL(1, 0) * JPH_EL(2, 1) - JPH_EL(1, 1) * JPH_EL(2, 0)) / det,
            0),
        Vec4((JPH_EL(0, 2) * JPH_EL(2, 1) - JPH_EL(0, 1) * JPH_EL(2, 2)) / det,
            (JPH_EL(0, 0) * JPH_EL(2, 2) - JPH_EL(0, 2) * JPH_EL(2, 0)) / det,
            (JPH_EL(0, 1) * JPH_EL(2, 0) - JPH_EL(0, 0) * JPH_EL(2, 1)) / det,
            0),
        Vec4((JPH_EL(0, 1) * JPH_EL(1, 2) - JPH_EL(0, 2) * JPH_EL(1, 1)) / det,
            (JPH_EL(0, 2) * JPH_EL(1, 0) - JPH_EL(0, 0) * JPH_EL(1, 2)) / det,
            (JPH_EL(0, 0) * JPH_EL(1, 1) - JPH_EL(0, 1) * JPH_EL(1, 0)) / det,
            0),
        Vec4(0, 0, 0, 1));
#endif
}
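
// Standard stable matrix-to-quaternion conversion (a variant of Shepperd's method): pick the largest
// of the trace and the three diagonal elements so that the sqrt argument stays well away from zero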
Quat Mat44::GetQuaternion() const
{
    JPH_ASSERT(mCol[3] == Vec4(0, 0, 0, 1));
    float tr = mCol[0].mF32[0] + mCol[1].mF32[1] + mCol[2].mF32[2];
    if (tr >= 0.0f)
    {
        float s = sqrt(tr + 1.0f);
        float is = 0.5f / s;
        return Quat(
            (mCol[1].mF32[2] - mCol[2].mF32[1]) * is,
            (mCol[2].mF32[0] - mCol[0].mF32[2]) * is,
            (mCol[0].mF32[1] - mCol[1].mF32[0]) * is,
            0.5f * s);
    }
    else
    {
        int i = 0;
        if (mCol[1].mF32[1] > mCol[0].mF32[0]) i = 1;
        if (mCol[2].mF32[2] > mCol[i].mF32[i]) i = 2;
        if (i == 0)
        {
            float s = sqrt(mCol[0].mF32[0] - (mCol[1].mF32[1] + mCol[2].mF32[2]) + 1);
            float is = 0.5f / s;
            return Quat(
                0.5f * s,
                (mCol[1].mF32[0] + mCol[0].mF32[1]) * is,
                (mCol[0].mF32[2] + mCol[2].mF32[0]) * is,
                (mCol[1].mF32[2] - mCol[2].mF32[1]) * is);
        }
        else if (i == 1)
        {
            float s = sqrt(mCol[1].mF32[1] - (mCol[2].mF32[2] + mCol[0].mF32[0]) + 1);
            float is = 0.5f / s;
            return Quat(
                (mCol[1].mF32[0] + mCol[0].mF32[1]) * is,
                0.5f * s,
                (mCol[2].mF32[1] + mCol[1].mF32[2]) * is,
                (mCol[2].mF32[0] - mCol[0].mF32[2]) * is);
        }
        else
        {
            JPH_ASSERT(i == 2);
            float s = sqrt(mCol[2].mF32[2] - (mCol[0].mF32[0] + mCol[1].mF32[1]) + 1);
            float is = 0.5f / s;
            return Quat(
                (mCol[0].mF32[2] + mCol[2].mF32[0]) * is,
                (mCol[2].mF32[1] + mCol[1].mF32[2]) * is,
                0.5f * s,
                (mCol[0].mF32[1] - mCol[1].mF32[0]) * is);
        }
    }
}
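
// sQuatLeftMultiply / sQuatRightMultiply build 4x4 matrices ML and MR so that, treating a
// quaternion p as a Vec4, ML(q) * p equals q * p and MR(q) * p equals p * q (quaternion products)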
Mat44 Mat44::sQuatLeftMultiply(QuatArg inQ)
{
    return Mat44(
        Vec4(1, 1, -1, -1) * inQ.mValue.Swizzle<SWIZZLE_W, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_X>(),
        Vec4(-1, 1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>(),
        Vec4(1, -1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>(),
        inQ.mValue);
}

Mat44 Mat44::sQuatRightMultiply(QuatArg inQ)
{
    return Mat44(
        Vec4(1, -1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_W, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_X>(),
        Vec4(1, 1, -1, -1) * inQ.mValue.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>(),
        Vec4(-1, 1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>(),
        inQ.mValue);
}

Mat44 Mat44::GetRotation() const
{
    JPH_ASSERT(mCol[0][3] == 0.0f);
    JPH_ASSERT(mCol[1][3] == 0.0f);
    JPH_ASSERT(mCol[2][3] == 0.0f);
    return Mat44(mCol[0], mCol[1], mCol[2], Vec4(0, 0, 0, 1));
}

Mat44 Mat44::GetRotationSafe() const
{
#if defined(JPH_USE_AVX512)
    return Mat44(_mm_maskz_mov_ps(0b0111, mCol[0].mValue),
        _mm_maskz_mov_ps(0b0111, mCol[1].mValue),
        _mm_maskz_mov_ps(0b0111, mCol[2].mValue),
        Vec4(0, 0, 0, 1));
#elif defined(JPH_USE_SSE4_1)
    __m128 zero = _mm_setzero_ps();
    return Mat44(_mm_blend_ps(mCol[0].mValue, zero, 8),
        _mm_blend_ps(mCol[1].mValue, zero, 8),
        _mm_blend_ps(mCol[2].mValue, zero, 8),
        Vec4(0, 0, 0, 1));
#elif defined(JPH_USE_NEON)
    return Mat44(vsetq_lane_f32(0, mCol[0].mValue, 3),
        vsetq_lane_f32(0, mCol[1].mValue, 3),
        vsetq_lane_f32(0, mCol[2].mValue, 3),
        Vec4(0, 0, 0, 1));
#else
    return Mat44(Vec4(mCol[0].mF32[0], mCol[0].mF32[1], mCol[0].mF32[2], 0),
        Vec4(mCol[1].mF32[0], mCol[1].mF32[1], mCol[1].mF32[2], 0),
        Vec4(mCol[2].mF32[0], mCol[2].mF32[1], mCol[2].mF32[2], 0),
        Vec4(0, 0, 0, 1));
#endif
}

void Mat44::SetRotation(Mat44Arg inRotation)
{
    mCol[0] = inRotation.mCol[0];
    mCol[1] = inRotation.mCol[1];
    mCol[2] = inRotation.mCol[2];
}

Mat44 Mat44::PreTranslated(Vec3Arg inTranslation) const
{
    return Mat44(mCol[0], mCol[1], mCol[2], Vec4(GetTranslation() + Multiply3x3(inTranslation), 1));
}

Mat44 Mat44::PostTranslated(Vec3Arg inTranslation) const
{
    return Mat44(mCol[0], mCol[1], mCol[2], Vec4(GetTranslation() + inTranslation, 1));
}
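
// PreScaled returns this * Mat44::sScale(inScale) (the scale is applied before this transform),
// PostScaled returns Mat44::sScale(inScale) * this (the scale is applied after it)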
Mat44 Mat44::PreScaled(Vec3Arg inScale) const
{
    return Mat44(inScale.GetX() * mCol[0], inScale.GetY() * mCol[1], inScale.GetZ() * mCol[2], mCol[3]);
}

Mat44 Mat44::PostScaled(Vec3Arg inScale) const
{
    Vec4 scale(inScale, 1);
    return Mat44(scale * mCol[0], scale * mCol[1], scale * mCol[2], scale * mCol[3]);
}
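
// Decomposes this matrix into a rotation & translation part and a scale part so that (for an
// input without shear) this == return_value * Mat44::sScale(outScale); the Gram-Schmidt steps
// below strip any shear from the axes before the scale is measured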
Mat44 Mat44::Decompose(Vec3 &outScale) const
{
    // Start the modified Gram-Schmidt algorithm
    // X axis will just be normalized
    Vec3 x = GetAxisX();

    // Make Y axis perpendicular to X
    Vec3 y = GetAxisY();
    float x_dot_x = x.LengthSq();
    y -= (x.Dot(y) / x_dot_x) * x;

    // Make Z axis perpendicular to X
    Vec3 z = GetAxisZ();
    z -= (x.Dot(z) / x_dot_x) * x;

    // Make Z axis perpendicular to Y
    float y_dot_y = y.LengthSq();
    z -= (y.Dot(z) / y_dot_y) * y;

    // Determine the scale
    float z_dot_z = z.LengthSq();
    outScale = Vec3(x_dot_x, y_dot_y, z_dot_z).Sqrt();

    // If the resulting x, y and z vectors don't form a right handed matrix, flip the z axis.
    if (x.Cross(y).Dot(z) < 0.0f)
        outScale.SetZ(-outScale.GetZ());

    // Determine the rotation and translation
    return Mat44(Vec4(x / outScale.GetX(), 0), Vec4(y / outScale.GetY(), 0), Vec4(z / outScale.GetZ(), 0), GetColumn4(3));
}

#undef JPH_EL

JPH_NAMESPACE_END