Mat44.inl

// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Math/Vec3.h>
#include <Jolt/Math/Vec4.h>
#include <Jolt/Math/Quat.h>

JPH_NAMESPACE_BEGIN

// Helper macro that fetches the element at row r, column c (the matrix is stored column major)
#define JPH_EL(r, c) mCol[c].mF32[r]

Mat44::Mat44(Vec4Arg inC1, Vec4Arg inC2, Vec4Arg inC3, Vec4Arg inC4) :
    mCol { inC1, inC2, inC3, inC4 }
{
}

Mat44::Mat44(Vec4Arg inC1, Vec4Arg inC2, Vec4Arg inC3, Vec3Arg inC4) :
    mCol { inC1, inC2, inC3, Vec4(inC4, 1.0f) }
{
}

Mat44::Mat44(Type inC1, Type inC2, Type inC3, Type inC4) :
    mCol { inC1, inC2, inC3, inC4 }
{
}

Mat44 Mat44::sZero()
{
    return Mat44(Vec4::sZero(), Vec4::sZero(), Vec4::sZero(), Vec4::sZero());
}

Mat44 Mat44::sIdentity()
{
    return Mat44(Vec4(1, 0, 0, 0), Vec4(0, 1, 0, 0), Vec4(0, 0, 1, 0), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sNaN()
{
    return Mat44(Vec4::sNaN(), Vec4::sNaN(), Vec4::sNaN(), Vec4::sNaN());
}

Mat44 Mat44::sLoadFloat4x4(const Float4 *inV)
{
    Mat44 result;
    for (int c = 0; c < 4; ++c)
        result.mCol[c] = Vec4::sLoadFloat4(inV + c);
    return result;
}

Mat44 Mat44::sLoadFloat4x4Aligned(const Float4 *inV)
{
    Mat44 result;
    for (int c = 0; c < 4; ++c)
        result.mCol[c] = Vec4::sLoadFloat4Aligned(inV + c);
    return result;
}

Mat44 Mat44::sRotationX(float inX)
{
    Vec4 sv, cv;
    Vec4::sReplicate(inX).SinCos(sv, cv);
    float s = sv.GetX(), c = cv.GetX();
    return Mat44(Vec4(1, 0, 0, 0), Vec4(0, c, s, 0), Vec4(0, -s, c, 0), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sRotationY(float inY)
{
    Vec4 sv, cv;
    Vec4::sReplicate(inY).SinCos(sv, cv);
    float s = sv.GetX(), c = cv.GetX();
    return Mat44(Vec4(c, 0, -s, 0), Vec4(0, 1, 0, 0), Vec4(s, 0, c, 0), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sRotationZ(float inZ)
{
    Vec4 sv, cv;
    Vec4::sReplicate(inZ).SinCos(sv, cv);
    float s = sv.GetX(), c = cv.GetX();
    return Mat44(Vec4(c, s, 0, 0), Vec4(-s, c, 0, 0), Vec4(0, 0, 1, 0), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sRotation(QuatArg inQuat)
{
    JPH_ASSERT(inQuat.IsNormalized());

    // See: https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation section 'Quaternion-derived rotation matrix'
#ifdef JPH_USE_SSE4_1
    __m128 xyzw = inQuat.mValue.mValue;
    __m128 two_xyzw = _mm_add_ps(xyzw, xyzw);
    __m128 yzxw = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(3, 0, 2, 1));
    __m128 two_yzxw = _mm_add_ps(yzxw, yzxw);
    __m128 zxyw = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(3, 1, 0, 2));
    __m128 two_zxyw = _mm_add_ps(zxyw, zxyw);
    __m128 wwww = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(3, 3, 3, 3));
    __m128 diagonal = _mm_sub_ps(_mm_sub_ps(_mm_set1_ps(1.0f), _mm_mul_ps(two_yzxw, yzxw)), _mm_mul_ps(two_zxyw, zxyw)); // (1 - 2 y^2 - 2 z^2, 1 - 2 x^2 - 2 z^2, 1 - 2 x^2 - 2 y^2, 1 - 4 w^2)
    __m128 plus = _mm_add_ps(_mm_mul_ps(two_xyzw, zxyw), _mm_mul_ps(two_yzxw, wwww)); // 2 * (xz + yw, xy + zw, yz + xw, ww)
    __m128 minus = _mm_sub_ps(_mm_mul_ps(two_yzxw, xyzw), _mm_mul_ps(two_zxyw, wwww)); // 2 * (xy - zw, yz - xw, xz - yw, 0)

    // Workaround for compiler changing _mm_sub_ps(_mm_mul_ps(...), ...) into a fused multiply sub instruction, resulting in w not being 0
    // There doesn't appear to be a reliable way to turn this off in Clang
    minus = _mm_insert_ps(minus, minus, 0b1000);

    __m128 col0 = _mm_blend_ps(_mm_blend_ps(plus, diagonal, 0b0001), minus, 0b1100); // (1 - 2 y^2 - 2 z^2, 2 xy + 2 zw, 2 xz - 2 yw, 0)
    __m128 col1 = _mm_blend_ps(_mm_blend_ps(diagonal, minus, 0b1001), plus, 0b0100); // (2 xy - 2 zw, 1 - 2 x^2 - 2 z^2, 2 yz + 2 xw, 0)
    __m128 col2 = _mm_blend_ps(_mm_blend_ps(minus, plus, 0b0001), diagonal, 0b0100); // (2 xz + 2 yw, 2 yz - 2 xw, 1 - 2 x^2 - 2 y^2, 0)
    __m128 col3 = _mm_set_ps(1, 0, 0, 0);

    return Mat44(col0, col1, col2, col3);
#else
    float x = inQuat.GetX();
    float y = inQuat.GetY();
    float z = inQuat.GetZ();
    float w = inQuat.GetW();

    float tx = x + x; // Note: Using x + x instead of 2.0f * x to force this function to return the same value as the SSE4.1 version across platforms.
    float ty = y + y;
    float tz = z + z;

    float xx = tx * x;
    float yy = ty * y;
    float zz = tz * z;
    float xy = tx * y;
    float xz = tx * z;
    float xw = tx * w;
    float yz = ty * z;
    float yw = ty * w;
    float zw = tz * w;

    return Mat44(Vec4((1.0f - yy) - zz, xy + zw, xz - yw, 0.0f), // Note: Added extra brackets to force this function to return the same value as the SSE4.1 version across platforms.
                 Vec4(xy - zw, (1.0f - zz) - xx, yz + xw, 0.0f),
                 Vec4(xz + yw, yz - xw, (1.0f - xx) - yy, 0.0f),
                 Vec4(0.0f, 0.0f, 0.0f, 1.0f));
#endif
}
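
// Illustrative sketch (not part of the original source): a quaternion that rotates 90 degrees
// about Z is q = (x, y, z, w) = (0, 0, sqrt(0.5), sqrt(0.5)). Per the column formulas above,
// the first column becomes (1 - 2 z^2, 2 zw, 2 xz - 2 yw, 0) = (0, 1, 0, 0), i.e. the X axis
// maps onto the Y axis:
//
//     Quat q = Quat::sRotation(Vec3::sAxisZ(), 0.5f * JPH_PI);
//     Mat44 m = Mat44::sRotation(q);
//     JPH_ASSERT(m.GetAxisX().IsClose(Vec3::sAxisY()));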

Mat44 Mat44::sRotation(Vec3Arg inAxis, float inAngle)
{
    return sRotation(Quat::sRotation(inAxis, inAngle));
}

Mat44 Mat44::sTranslation(Vec3Arg inV)
{
    return Mat44(Vec4(1, 0, 0, 0), Vec4(0, 1, 0, 0), Vec4(0, 0, 1, 0), Vec4(inV, 1));
}

Mat44 Mat44::sRotationTranslation(QuatArg inR, Vec3Arg inT)
{
    Mat44 m = sRotation(inR);
    m.SetTranslation(inT);
    return m;
}

Mat44 Mat44::sInverseRotationTranslation(QuatArg inR, Vec3Arg inT)
{
    Mat44 m = sRotation(inR.Conjugated());
    m.SetTranslation(-m.Multiply3x3(inT));
    return m;
}
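
// Illustrative sketch (not part of the original source): sInverseRotationTranslation builds the
// inverse of sRotationTranslation directly, so composing the two should give the identity up to
// floating point rounding:
//
//     Quat r = Quat::sRotation(Vec3::sAxisY(), 0.3f);
//     Vec3 t(1, 2, 3);
//     Mat44 m = Mat44::sRotationTranslation(r, t) * Mat44::sInverseRotationTranslation(r, t);
//     JPH_ASSERT(m.IsClose(Mat44::sIdentity()));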

Mat44 Mat44::sScale(float inScale)
{
    return Mat44(Vec4(inScale, 0, 0, 0), Vec4(0, inScale, 0, 0), Vec4(0, 0, inScale, 0), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sScale(Vec3Arg inV)
{
    return Mat44(Vec4(inV.GetX(), 0, 0, 0), Vec4(0, inV.GetY(), 0, 0), Vec4(0, 0, inV.GetZ(), 0), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sOuterProduct(Vec3Arg inV1, Vec3Arg inV2)
{
    Vec4 v1(inV1, 0);
    return Mat44(v1 * inV2.SplatX(), v1 * inV2.SplatY(), v1 * inV2.SplatZ(), Vec4(0, 0, 0, 1));
}

Mat44 Mat44::sCrossProduct(Vec3Arg inV)
{
#ifdef JPH_USE_SSE4_1
    // Zero out the W component
    __m128 zero = _mm_setzero_ps();
    __m128 v = _mm_blend_ps(inV.mValue, zero, 0b1000);

    // Negate
    __m128 min_v = _mm_sub_ps(zero, v);

    return Mat44(
        _mm_shuffle_ps(v, min_v, _MM_SHUFFLE(3, 1, 2, 3)), // [0, z, -y, 0]
        _mm_shuffle_ps(min_v, v, _MM_SHUFFLE(3, 0, 3, 2)), // [-z, 0, x, 0]
        _mm_blend_ps(_mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 1)), _mm_shuffle_ps(min_v, min_v, _MM_SHUFFLE(3, 3, 0, 3)), 0b0010), // [y, -x, 0, 0]
        Vec4(0, 0, 0, 1));
#else
    float x = inV.GetX();
    float y = inV.GetY();
    float z = inV.GetZ();

    return Mat44(
        Vec4(0, z, -y, 0),
        Vec4(-z, 0, x, 0),
        Vec4(y, -x, 0, 0),
        Vec4(0, 0, 0, 1));
#endif
}
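
// Illustrative sketch (not part of the original source): sCrossProduct returns the skew-symmetric
// matrix [v]x, so multiplying by it equals taking a cross product:
//
//     Vec3 v(1, 2, 3), w(4, 5, 6);
//     JPH_ASSERT(Mat44::sCrossProduct(v).Multiply3x3(w).IsClose(v.Cross(w)));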

Mat44 Mat44::sLookAt(Vec3Arg inPos, Vec3Arg inTarget, Vec3Arg inUp)
{
    Vec3 direction = (inTarget - inPos).NormalizedOr(-Vec3::sAxisZ());
    Vec3 right = direction.Cross(inUp).NormalizedOr(Vec3::sAxisX());
    Vec3 up = right.Cross(direction);

    return Mat44(Vec4(right, 0), Vec4(up, 0), Vec4(-direction, 0), Vec4(inPos, 1)).InversedRotationTranslation();
}
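
// Illustrative sketch (not part of the original source): sLookAt returns a world-to-view
// transform (the camera basis is built and then inverted), so the target ends up on the
// negative Z axis in view space:
//
//     Mat44 view = Mat44::sLookAt(Vec3(0, 0, 10), Vec3::sZero(), Vec3::sAxisY());
//     Vec3 p = view * Vec3::sZero(); // target in view space: (0, 0, -10)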

bool Mat44::operator == (Mat44Arg inM2) const
{
    return UVec4::sAnd(
        UVec4::sAnd(Vec4::sEquals(mCol[0], inM2.mCol[0]), Vec4::sEquals(mCol[1], inM2.mCol[1])),
        UVec4::sAnd(Vec4::sEquals(mCol[2], inM2.mCol[2]), Vec4::sEquals(mCol[3], inM2.mCol[3]))
    ).TestAllTrue();
}

bool Mat44::IsClose(Mat44Arg inM2, float inMaxDistSq) const
{
    for (int i = 0; i < 4; ++i)
        if (!mCol[i].IsClose(inM2.mCol[i], inMaxDistSq))
            return false;
    return true;
}

Mat44 Mat44::operator * (Mat44Arg inM) const
{
    Mat44 result;
#if defined(JPH_USE_SSE)
    for (int i = 0; i < 4; ++i)
    {
        __m128 c = inM.mCol[i].mValue;
        __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)));
        t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1))));
        t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2))));
        t = _mm_add_ps(t, _mm_mul_ps(mCol[3].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 3, 3, 3))));
        result.mCol[i].mValue = t;
    }
#elif defined(JPH_USE_NEON)
    for (int i = 0; i < 4; ++i)
    {
        Type c = inM.mCol[i].mValue;
        Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(c, 0));
        t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(c, 1));
        t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(c, 2));
        t = vmlaq_f32(t, mCol[3].mValue, vdupq_laneq_f32(c, 3));
        result.mCol[i].mValue = t;
    }
#else
    for (int i = 0; i < 4; ++i)
        result.mCol[i] = mCol[0] * inM.mCol[i].mF32[0] + mCol[1] * inM.mCol[i].mF32[1] + mCol[2] * inM.mCol[i].mF32[2] + mCol[3] * inM.mCol[i].mF32[3];
#endif
    return result;
}
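
// Illustrative sketch (not part of the original source): the matrix is stored column major and
// multiplies in the usual mathematical order, so transforming by A * B applies B first, then A:
//
//     Mat44 a = Mat44::sRotationX(0.1f), b = Mat44::sTranslation(Vec3(1, 2, 3));
//     Vec3 v(4, 5, 6);
//     JPH_ASSERT(((a * b) * v).IsClose(a * (b * v)));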

Vec3 Mat44::operator * (Vec3Arg inV) const
{
#if defined(JPH_USE_SSE)
    __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0)));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1))));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
    t = _mm_add_ps(t, mCol[3].mValue);
    return Vec3::sFixW(t);
#elif defined(JPH_USE_NEON)
    Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(inV.mValue, 0));
    t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(inV.mValue, 1));
    t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(inV.mValue, 2));
    t = vaddq_f32(t, mCol[3].mValue); // Don't combine this with the first mul into a fused multiply add, causes precision issues
    return Vec3::sFixW(t);
#else
    return Vec3(
        mCol[0].mF32[0] * inV.mF32[0] + mCol[1].mF32[0] * inV.mF32[1] + mCol[2].mF32[0] * inV.mF32[2] + mCol[3].mF32[0],
        mCol[0].mF32[1] * inV.mF32[0] + mCol[1].mF32[1] * inV.mF32[1] + mCol[2].mF32[1] * inV.mF32[2] + mCol[3].mF32[1],
        mCol[0].mF32[2] * inV.mF32[0] + mCol[1].mF32[2] * inV.mF32[1] + mCol[2].mF32[2] * inV.mF32[2] + mCol[3].mF32[2]);
#endif
}

Vec4 Mat44::operator * (Vec4Arg inV) const
{
#if defined(JPH_USE_SSE)
    __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0)));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1))));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[3].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(3, 3, 3, 3))));
    return t;
#elif defined(JPH_USE_NEON)
    Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(inV.mValue, 0));
    t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(inV.mValue, 1));
    t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(inV.mValue, 2));
    t = vmlaq_f32(t, mCol[3].mValue, vdupq_laneq_f32(inV.mValue, 3));
    return t;
#else
    return Vec4(
        mCol[0].mF32[0] * inV.mF32[0] + mCol[1].mF32[0] * inV.mF32[1] + mCol[2].mF32[0] * inV.mF32[2] + mCol[3].mF32[0] * inV.mF32[3],
        mCol[0].mF32[1] * inV.mF32[0] + mCol[1].mF32[1] * inV.mF32[1] + mCol[2].mF32[1] * inV.mF32[2] + mCol[3].mF32[1] * inV.mF32[3],
        mCol[0].mF32[2] * inV.mF32[0] + mCol[1].mF32[2] * inV.mF32[1] + mCol[2].mF32[2] * inV.mF32[2] + mCol[3].mF32[2] * inV.mF32[3],
        mCol[0].mF32[3] * inV.mF32[0] + mCol[1].mF32[3] * inV.mF32[1] + mCol[2].mF32[3] * inV.mF32[2] + mCol[3].mF32[3] * inV.mF32[3]);
#endif
}

Vec3 Mat44::Multiply3x3(Vec3Arg inV) const
{
#if defined(JPH_USE_SSE)
    __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0)));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1))));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
    return Vec3::sFixW(t);
#elif defined(JPH_USE_NEON)
    Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(inV.mValue, 0));
    t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(inV.mValue, 1));
    t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(inV.mValue, 2));
    return Vec3::sFixW(t);
#else
    return Vec3(
        mCol[0].mF32[0] * inV.mF32[0] + mCol[1].mF32[0] * inV.mF32[1] + mCol[2].mF32[0] * inV.mF32[2],
        mCol[0].mF32[1] * inV.mF32[0] + mCol[1].mF32[1] * inV.mF32[1] + mCol[2].mF32[1] * inV.mF32[2],
        mCol[0].mF32[2] * inV.mF32[0] + mCol[1].mF32[2] * inV.mF32[1] + mCol[2].mF32[2] * inV.mF32[2]);
#endif
}

Vec3 Mat44::Multiply3x3Transposed(Vec3Arg inV) const
{
#if defined(JPH_USE_SSE4_1)
    __m128 x = _mm_dp_ps(mCol[0].mValue, inV.mValue, 0x7f);
    __m128 y = _mm_dp_ps(mCol[1].mValue, inV.mValue, 0x7f);
    __m128 xy = _mm_blend_ps(x, y, 0b0010);
    __m128 z = _mm_dp_ps(mCol[2].mValue, inV.mValue, 0x7f);
    __m128 xyzz = _mm_blend_ps(xy, z, 0b1100);
    return xyzz;
#else
    return Transposed3x3().Multiply3x3(inV);
#endif
}
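
// Illustrative sketch (not part of the original source): for a pure rotation matrix the
// transpose equals the inverse, so Multiply3x3Transposed applies the inverse rotation
// without explicitly inverting the matrix:
//
//     Mat44 r = Mat44::sRotationZ(0.7f);
//     Vec3 v(1, 2, 3);
//     JPH_ASSERT(r.Multiply3x3Transposed(r.Multiply3x3(v)).IsClose(v));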

Mat44 Mat44::Multiply3x3(Mat44Arg inM) const
{
    JPH_ASSERT(mCol[0][3] == 0.0f);
    JPH_ASSERT(mCol[1][3] == 0.0f);
    JPH_ASSERT(mCol[2][3] == 0.0f);

    Mat44 result;
#if defined(JPH_USE_SSE)
    for (int i = 0; i < 3; ++i)
    {
        __m128 c = inM.mCol[i].mValue;
        __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)));
        t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1))));
        t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2))));
        result.mCol[i].mValue = t;
    }
#elif defined(JPH_USE_NEON)
    for (int i = 0; i < 3; ++i)
    {
        Type c = inM.mCol[i].mValue;
        Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(c, 0));
        t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(c, 1));
        t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(c, 2));
        result.mCol[i].mValue = t;
    }
#else
    for (int i = 0; i < 3; ++i)
        result.mCol[i] = mCol[0] * inM.mCol[i].mF32[0] + mCol[1] * inM.mCol[i].mF32[1] + mCol[2] * inM.mCol[i].mF32[2];
#endif
    result.mCol[3] = Vec4(0, 0, 0, 1);
    return result;
}

Mat44 Mat44::Multiply3x3LeftTransposed(Mat44Arg inM) const
{
    // Transpose left hand side
    Mat44 trans = Transposed3x3();

    // Do 3x3 matrix multiply
    Mat44 result;
    result.mCol[0] = trans.mCol[0] * inM.mCol[0].SplatX() + trans.mCol[1] * inM.mCol[0].SplatY() + trans.mCol[2] * inM.mCol[0].SplatZ();
    result.mCol[1] = trans.mCol[0] * inM.mCol[1].SplatX() + trans.mCol[1] * inM.mCol[1].SplatY() + trans.mCol[2] * inM.mCol[1].SplatZ();
    result.mCol[2] = trans.mCol[0] * inM.mCol[2].SplatX() + trans.mCol[1] * inM.mCol[2].SplatY() + trans.mCol[2] * inM.mCol[2].SplatZ();
    result.mCol[3] = Vec4(0, 0, 0, 1);
    return result;
}

Mat44 Mat44::Multiply3x3RightTransposed(Mat44Arg inM) const
{
    JPH_ASSERT(mCol[0][3] == 0.0f);
    JPH_ASSERT(mCol[1][3] == 0.0f);
    JPH_ASSERT(mCol[2][3] == 0.0f);

    Mat44 result;
    result.mCol[0] = mCol[0] * inM.mCol[0].SplatX() + mCol[1] * inM.mCol[1].SplatX() + mCol[2] * inM.mCol[2].SplatX();
    result.mCol[1] = mCol[0] * inM.mCol[0].SplatY() + mCol[1] * inM.mCol[1].SplatY() + mCol[2] * inM.mCol[2].SplatY();
    result.mCol[2] = mCol[0] * inM.mCol[0].SplatZ() + mCol[1] * inM.mCol[1].SplatZ() + mCol[2] * inM.mCol[2].SplatZ();
    result.mCol[3] = Vec4(0, 0, 0, 1);
    return result;
}
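
// Illustrative sketch (not part of the original source): Multiply3x3LeftTransposed computes
// this^T * M on the upper 3x3 block (it transposes first), while Multiply3x3RightTransposed
// computes this * M^T by splatting M's rows instead of materializing the transpose:
//
//     Mat44 a = Mat44::sRotationX(0.2f), b = Mat44::sRotationY(0.4f);
//     JPH_ASSERT(a.Multiply3x3LeftTransposed(b).IsClose(a.Transposed3x3().Multiply3x3(b)));
//     JPH_ASSERT(a.Multiply3x3RightTransposed(b).IsClose(a.Multiply3x3(b.Transposed3x3())));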

Mat44 Mat44::operator * (float inV) const
{
    Vec4 multiplier = Vec4::sReplicate(inV);

    Mat44 result;
    for (int c = 0; c < 4; ++c)
        result.mCol[c] = mCol[c] * multiplier;
    return result;
}

Mat44 &Mat44::operator *= (float inV)
{
    for (int c = 0; c < 4; ++c)
        mCol[c] *= inV;
    return *this;
}

Mat44 Mat44::operator + (Mat44Arg inM) const
{
    Mat44 result;
    for (int i = 0; i < 4; ++i)
        result.mCol[i] = mCol[i] + inM.mCol[i];
    return result;
}

Mat44 Mat44::operator - () const
{
    Mat44 result;
    for (int i = 0; i < 4; ++i)
        result.mCol[i] = -mCol[i];
    return result;
}

Mat44 Mat44::operator - (Mat44Arg inM) const
{
    Mat44 result;
    for (int i = 0; i < 4; ++i)
        result.mCol[i] = mCol[i] - inM.mCol[i];
    return result;
}

Mat44 &Mat44::operator += (Mat44Arg inM)
{
    for (int c = 0; c < 4; ++c)
        mCol[c] += inM.mCol[c];
    return *this;
}

void Mat44::StoreFloat4x4(Float4 *outV) const
{
    for (int c = 0; c < 4; ++c)
        mCol[c].StoreFloat4(outV + c);
}

Mat44 Mat44::Transposed() const
{
#if defined(JPH_USE_SSE)
    __m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 tmp3 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2));
    __m128 tmp2 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 tmp4 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(3, 2, 3, 2));

    Mat44 result;
    result.mCol[0].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(2, 0, 2, 0));
    result.mCol[1].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(3, 1, 3, 1));
    result.mCol[2].mValue = _mm_shuffle_ps(tmp3, tmp4, _MM_SHUFFLE(2, 0, 2, 0));
    result.mCol[3].mValue = _mm_shuffle_ps(tmp3, tmp4, _MM_SHUFFLE(3, 1, 3, 1));
    return result;
#elif defined(JPH_USE_NEON)
    float32x4x2_t tmp1 = vzipq_f32(mCol[0].mValue, mCol[2].mValue);
    float32x4x2_t tmp2 = vzipq_f32(mCol[1].mValue, mCol[3].mValue);
    float32x4x2_t tmp3 = vzipq_f32(tmp1.val[0], tmp2.val[0]);
    float32x4x2_t tmp4 = vzipq_f32(tmp1.val[1], tmp2.val[1]);

    Mat44 result;
    result.mCol[0].mValue = tmp3.val[0];
    result.mCol[1].mValue = tmp3.val[1];
    result.mCol[2].mValue = tmp4.val[0];
    result.mCol[3].mValue = tmp4.val[1];
    return result;
#else
    Mat44 result;
    for (int c = 0; c < 4; ++c)
        for (int r = 0; r < 4; ++r)
            result.mCol[r].mF32[c] = mCol[c].mF32[r];
    return result;
#endif
}

Mat44 Mat44::Transposed3x3() const
{
#if defined(JPH_USE_SSE)
    __m128 zero = _mm_setzero_ps();
    __m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 tmp3 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2));
    __m128 tmp2 = _mm_shuffle_ps(mCol[2].mValue, zero, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 tmp4 = _mm_shuffle_ps(mCol[2].mValue, zero, _MM_SHUFFLE(3, 2, 3, 2));

    Mat44 result;
    result.mCol[0].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(2, 0, 2, 0));
    result.mCol[1].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(3, 1, 3, 1));
    result.mCol[2].mValue = _mm_shuffle_ps(tmp3, tmp4, _MM_SHUFFLE(2, 0, 2, 0));
#elif defined(JPH_USE_NEON)
    float32x4x2_t tmp1 = vzipq_f32(mCol[0].mValue, mCol[2].mValue);
    float32x4x2_t tmp2 = vzipq_f32(mCol[1].mValue, vdupq_n_f32(0));
    float32x4x2_t tmp3 = vzipq_f32(tmp1.val[0], tmp2.val[0]);
    float32x4x2_t tmp4 = vzipq_f32(tmp1.val[1], tmp2.val[1]);

    Mat44 result;
    result.mCol[0].mValue = tmp3.val[0];
    result.mCol[1].mValue = tmp3.val[1];
    result.mCol[2].mValue = tmp4.val[0];
#else
    Mat44 result;
    for (int c = 0; c < 3; ++c)
    {
        for (int r = 0; r < 3; ++r)
            result.mCol[c].mF32[r] = mCol[r].mF32[c];
        result.mCol[c].mF32[3] = 0;
    }
#endif
    result.mCol[3] = Vec4(0, 0, 0, 1);
    return result;
}

Mat44 Mat44::Inversed() const
{
#if defined(JPH_USE_SSE)
    // Algorithm from: http://download.intel.com/design/PentiumIII/sml/24504301.pdf
    // Streaming SIMD Extensions - Inverse of 4x4 Matrix
    // Adapted to load data using _mm_shuffle_ps instead of loading from memory
    // Replaced _mm_rcp_ps with _mm_div_ps for better accuracy
    __m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 row1 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(1, 0, 1, 0));
    __m128 row0 = _mm_shuffle_ps(tmp1, row1, _MM_SHUFFLE(2, 0, 2, 0));
    row1 = _mm_shuffle_ps(row1, tmp1, _MM_SHUFFLE(3, 1, 3, 1));
    tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2));
    __m128 row3 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(3, 2, 3, 2));
    __m128 row2 = _mm_shuffle_ps(tmp1, row3, _MM_SHUFFLE(2, 0, 2, 0));
    row3 = _mm_shuffle_ps(row3, tmp1, _MM_SHUFFLE(3, 1, 3, 1));

    tmp1 = _mm_mul_ps(row2, row3);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    __m128 minor0 = _mm_mul_ps(row1, tmp1);
    __m128 minor1 = _mm_mul_ps(row0, tmp1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0);
    minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1);
    minor1 = _mm_shuffle_ps(minor1, minor1, _MM_SHUFFLE(1, 0, 3, 2));

    tmp1 = _mm_mul_ps(row1, row2);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0);
    __m128 minor3 = _mm_mul_ps(row0, tmp1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1));
    minor3 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor3);
    minor3 = _mm_shuffle_ps(minor3, minor3, _MM_SHUFFLE(1, 0, 3, 2));

    tmp1 = _mm_mul_ps(_mm_shuffle_ps(row1, row1, _MM_SHUFFLE(1, 0, 3, 2)), row3);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    row2 = _mm_shuffle_ps(row2, row2, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0);
    __m128 minor2 = _mm_mul_ps(row0, tmp1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1));
    minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2);
    minor2 = _mm_shuffle_ps(minor2, minor2, _MM_SHUFFLE(1, 0, 3, 2));

    tmp1 = _mm_mul_ps(row0, row1);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2);
    minor3 = _mm_sub_ps(_mm_mul_ps(row2, tmp1), minor3);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2);
    minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row2, tmp1));

    tmp1 = _mm_mul_ps(row0, row3);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1));
    minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1);
    minor2 = _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1));

    tmp1 = _mm_mul_ps(row0, row2);
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1));
    minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1);
    minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row1, tmp1));
    tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2));
    minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1));
    minor3 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor3);

    __m128 det = _mm_mul_ps(row0, minor0);
    det = _mm_add_ps(_mm_shuffle_ps(det, det, _MM_SHUFFLE(2, 3, 0, 1)), det); // Original code did (x + z) + (y + w), changed to (x + y) + (z + w) to match the ARM code below and make the result cross platform deterministic
    det = _mm_add_ss(_mm_shuffle_ps(det, det, _MM_SHUFFLE(1, 0, 3, 2)), det);
    det = _mm_div_ss(_mm_set_ss(1.0f), det);
    det = _mm_shuffle_ps(det, det, _MM_SHUFFLE(0, 0, 0, 0));

    Mat44 result;
    result.mCol[0].mValue = _mm_mul_ps(det, minor0);
    result.mCol[1].mValue = _mm_mul_ps(det, minor1);
    result.mCol[2].mValue = _mm_mul_ps(det, minor2);
    result.mCol[3].mValue = _mm_mul_ps(det, minor3);
    return result;
#elif defined(JPH_USE_NEON)
    // Adapted from the SSE version; there are surprisingly few articles about efficient ways of calculating an inverse for ARM on the internet
    Type tmp1 = JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 0, 1, 4, 5);
    Type row1 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, mCol[3].mValue, 0, 1, 4, 5);
    Type row0 = JPH_NEON_SHUFFLE_F32x4(tmp1, row1, 0, 2, 4, 6);
    row1 = JPH_NEON_SHUFFLE_F32x4(row1, tmp1, 1, 3, 5, 7);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 2, 3, 6, 7);
    Type row3 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, mCol[3].mValue, 2, 3, 6, 7);
    Type row2 = JPH_NEON_SHUFFLE_F32x4(tmp1, row3, 0, 2, 4, 6);
    row3 = JPH_NEON_SHUFFLE_F32x4(row3, tmp1, 1, 3, 5, 7);

    tmp1 = vmulq_f32(row2, row3);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    Type minor0 = vmulq_f32(row1, tmp1);
    Type minor1 = vmulq_f32(row0, tmp1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor0 = vsubq_f32(vmulq_f32(row1, tmp1), minor0);
    minor1 = vsubq_f32(vmulq_f32(row0, tmp1), minor1);
    minor1 = JPH_NEON_SHUFFLE_F32x4(minor1, minor1, 2, 3, 0, 1);

    tmp1 = vmulq_f32(row1, row2);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor0 = vaddq_f32(vmulq_f32(row3, tmp1), minor0);
    Type minor3 = vmulq_f32(row0, tmp1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor0 = vsubq_f32(minor0, vmulq_f32(row3, tmp1));
    minor3 = vsubq_f32(vmulq_f32(row0, tmp1), minor3);
    minor3 = JPH_NEON_SHUFFLE_F32x4(minor3, minor3, 2, 3, 0, 1);

    tmp1 = JPH_NEON_SHUFFLE_F32x4(row1, row1, 2, 3, 0, 1);
    tmp1 = vmulq_f32(tmp1, row3);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    row2 = JPH_NEON_SHUFFLE_F32x4(row2, row2, 2, 3, 0, 1);
    minor0 = vaddq_f32(vmulq_f32(row2, tmp1), minor0);
    Type minor2 = vmulq_f32(row0, tmp1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor0 = vsubq_f32(minor0, vmulq_f32(row2, tmp1));
    minor2 = vsubq_f32(vmulq_f32(row0, tmp1), minor2);
    minor2 = JPH_NEON_SHUFFLE_F32x4(minor2, minor2, 2, 3, 0, 1);

    tmp1 = vmulq_f32(row0, row1);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor2 = vaddq_f32(vmulq_f32(row3, tmp1), minor2);
    minor3 = vsubq_f32(vmulq_f32(row2, tmp1), minor3);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor2 = vsubq_f32(vmulq_f32(row3, tmp1), minor2);
    minor3 = vsubq_f32(minor3, vmulq_f32(row2, tmp1));

    tmp1 = vmulq_f32(row0, row3);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor1 = vsubq_f32(minor1, vmulq_f32(row2, tmp1));
    minor2 = vaddq_f32(vmulq_f32(row1, tmp1), minor2);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor1 = vaddq_f32(vmulq_f32(row2, tmp1), minor1);
    minor2 = vsubq_f32(minor2, vmulq_f32(row1, tmp1));

    tmp1 = vmulq_f32(row0, row2);
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2);
    minor1 = vaddq_f32(vmulq_f32(row3, tmp1), minor1);
    minor3 = vsubq_f32(minor3, vmulq_f32(row1, tmp1));
    tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1);
    minor1 = vsubq_f32(minor1, vmulq_f32(row3, tmp1));
    minor3 = vaddq_f32(vmulq_f32(row1, tmp1), minor3);

    Type det = vmulq_f32(row0, minor0);
    det = vdupq_n_f32(vaddvq_f32(det));
    det = vdivq_f32(vdupq_n_f32(1.0f), det);

    Mat44 result;
    result.mCol[0].mValue = vmulq_f32(det, minor0);
    result.mCol[1].mValue = vmulq_f32(det, minor1);
    result.mCol[2].mValue = vmulq_f32(det, minor2);
    result.mCol[3].mValue = vmulq_f32(det, minor3);
    return result;
#else
    float m00 = JPH_EL(0, 0), m10 = JPH_EL(1, 0), m20 = JPH_EL(2, 0), m30 = JPH_EL(3, 0);
    float m01 = JPH_EL(0, 1), m11 = JPH_EL(1, 1), m21 = JPH_EL(2, 1), m31 = JPH_EL(3, 1);
    float m02 = JPH_EL(0, 2), m12 = JPH_EL(1, 2), m22 = JPH_EL(2, 2), m32 = JPH_EL(3, 2);
    float m03 = JPH_EL(0, 3), m13 = JPH_EL(1, 3), m23 = JPH_EL(2, 3), m33 = JPH_EL(3, 3);

    float m10211120 = m10 * m21 - m11 * m20;
    float m10221220 = m10 * m22 - m12 * m20;
    float m10231320 = m10 * m23 - m13 * m20;
    float m10311130 = m10 * m31 - m11 * m30;
    float m10321230 = m10 * m32 - m12 * m30;
    float m10331330 = m10 * m33 - m13 * m30;
    float m11221221 = m11 * m22 - m12 * m21;
    float m11231321 = m11 * m23 - m13 * m21;
    float m11321231 = m11 * m32 - m12 * m31;
    float m11331331 = m11 * m33 - m13 * m31;
    float m12231322 = m12 * m23 - m13 * m22;
    float m12331332 = m12 * m33 - m13 * m32;
    float m20312130 = m20 * m31 - m21 * m30;
    float m20322230 = m20 * m32 - m22 * m30;
    float m20332330 = m20 * m33 - m23 * m30;
    float m21322231 = m21 * m32 - m22 * m31;
    float m21332331 = m21 * m33 - m23 * m31;
    float m22332332 = m22 * m33 - m23 * m32;

    Vec4 col0(m11 * m22332332 - m12 * m21332331 + m13 * m21322231, -m10 * m22332332 + m12 * m20332330 - m13 * m20322230, m10 * m21332331 - m11 * m20332330 + m13 * m20312130, -m10 * m21322231 + m11 * m20322230 - m12 * m20312130);
    Vec4 col1(-m01 * m22332332 + m02 * m21332331 - m03 * m21322231, m00 * m22332332 - m02 * m20332330 + m03 * m20322230, -m00 * m21332331 + m01 * m20332330 - m03 * m20312130, m00 * m21322231 - m01 * m20322230 + m02 * m20312130);
    Vec4 col2(m01 * m12331332 - m02 * m11331331 + m03 * m11321231, -m00 * m12331332 + m02 * m10331330 - m03 * m10321230, m00 * m11331331 - m01 * m10331330 + m03 * m10311130, -m00 * m11321231 + m01 * m10321230 - m02 * m10311130);
    Vec4 col3(-m01 * m12231322 + m02 * m11231321 - m03 * m11221221, m00 * m12231322 - m02 * m10231320 + m03 * m10221220, -m00 * m11231321 + m01 * m10231320 - m03 * m10211120, m00 * m11221221 - m01 * m10221220 + m02 * m10211120);

    float det = m00 * col0.mF32[0] + m01 * col0.mF32[1] + m02 * col0.mF32[2] + m03 * col0.mF32[3];

    return Mat44(col0 / det, col1 / det, col2 / det, col3 / det);
#endif
}
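
// Illustrative sketch (not part of the original source): like any direct 4x4 inverse, Inversed
// divides by the determinant, so it should only be called on non-singular matrices. For an
// invertible matrix the product with its inverse is the identity (up to rounding):
//
//     Mat44 m = Mat44::sRotationTranslation(Quat::sRotation(Vec3::sAxisX(), 0.5f), Vec3(1, 2, 3));
//     JPH_ASSERT((m * m.Inversed()).IsClose(Mat44::sIdentity()));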

Mat44 Mat44::InversedRotationTranslation() const
{
    Mat44 m = Transposed3x3();
    m.SetTranslation(-m.Multiply3x3(GetTranslation()));
    return m;
}

float Mat44::GetDeterminant3x3() const
{
    return GetAxisX().Dot(GetAxisY().Cross(GetAxisZ()));
}

Mat44 Mat44::Adjointed3x3() const
{
    return Mat44(
        Vec4(JPH_EL(1, 1), JPH_EL(1, 2), JPH_EL(1, 0), 0) * Vec4(JPH_EL(2, 2), JPH_EL(2, 0), JPH_EL(2, 1), 0)
            - Vec4(JPH_EL(1, 2), JPH_EL(1, 0), JPH_EL(1, 1), 0) * Vec4(JPH_EL(2, 1), JPH_EL(2, 2), JPH_EL(2, 0), 0),
        Vec4(JPH_EL(0, 2), JPH_EL(0, 0), JPH_EL(0, 1), 0) * Vec4(JPH_EL(2, 1), JPH_EL(2, 2), JPH_EL(2, 0), 0)
            - Vec4(JPH_EL(0, 1), JPH_EL(0, 2), JPH_EL(0, 0), 0) * Vec4(JPH_EL(2, 2), JPH_EL(2, 0), JPH_EL(2, 1), 0),
        Vec4(JPH_EL(0, 1), JPH_EL(0, 2), JPH_EL(0, 0), 0) * Vec4(JPH_EL(1, 2), JPH_EL(1, 0), JPH_EL(1, 1), 0)
            - Vec4(JPH_EL(0, 2), JPH_EL(0, 0), JPH_EL(0, 1), 0) * Vec4(JPH_EL(1, 1), JPH_EL(1, 2), JPH_EL(1, 0), 0),
        Vec4(0, 0, 0, 1));
}

Mat44 Mat44::Inversed3x3() const
{
    float det = GetDeterminant3x3();

    return Mat44(
        (Vec4(JPH_EL(1, 1), JPH_EL(1, 2), JPH_EL(1, 0), 0) * Vec4(JPH_EL(2, 2), JPH_EL(2, 0), JPH_EL(2, 1), 0)
            - Vec4(JPH_EL(1, 2), JPH_EL(1, 0), JPH_EL(1, 1), 0) * Vec4(JPH_EL(2, 1), JPH_EL(2, 2), JPH_EL(2, 0), 0)) / det,
        (Vec4(JPH_EL(0, 2), JPH_EL(0, 0), JPH_EL(0, 1), 0) * Vec4(JPH_EL(2, 1), JPH_EL(2, 2), JPH_EL(2, 0), 0)
            - Vec4(JPH_EL(0, 1), JPH_EL(0, 2), JPH_EL(0, 0), 0) * Vec4(JPH_EL(2, 2), JPH_EL(2, 0), JPH_EL(2, 1), 0)) / det,
        (Vec4(JPH_EL(0, 1), JPH_EL(0, 2), JPH_EL(0, 0), 0) * Vec4(JPH_EL(1, 2), JPH_EL(1, 0), JPH_EL(1, 1), 0)
            - Vec4(JPH_EL(0, 2), JPH_EL(0, 0), JPH_EL(0, 1), 0) * Vec4(JPH_EL(1, 1), JPH_EL(1, 2), JPH_EL(1, 0), 0)) / det,
        Vec4(0, 0, 0, 1));
}
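
// Illustrative sketch (not part of the original source): the adjugate relation
// adj(M) = det(M) * M^-1 ties the two functions above together on the 3x3 block:
//
//     Mat44 m = Mat44::sRotationX(0.3f) * Mat44::sScale(2.0f);
//     Vec3 v(1, 2, 3);
//     JPH_ASSERT(m.Adjointed3x3().Multiply3x3(v).IsClose(m.GetDeterminant3x3() * m.Inversed3x3().Multiply3x3(v)));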

bool Mat44::SetInversed3x3(Mat44Arg inM)
{
    float det = inM.GetDeterminant3x3();

    // If the determinant is zero the matrix is singular and we return false
    if (det == 0.0f)
        return false;

    // Finish calculating the inverse
    *this = inM.Adjointed3x3();
    mCol[0] /= det;
    mCol[1] /= det;
    mCol[2] /= det;
    return true;
}

Quat Mat44::GetQuaternion() const
{
    JPH_ASSERT(mCol[3] == Vec4(0, 0, 0, 1));

    float tr = mCol[0].mF32[0] + mCol[1].mF32[1] + mCol[2].mF32[2];

    if (tr >= 0.0f)
    {
        float s = sqrt(tr + 1.0f);
        float is = 0.5f / s;
        return Quat(
            (mCol[1].mF32[2] - mCol[2].mF32[1]) * is,
            (mCol[2].mF32[0] - mCol[0].mF32[2]) * is,
            (mCol[0].mF32[1] - mCol[1].mF32[0]) * is,
            0.5f * s);
    }
    else
    {
        // Take the square root of the largest diagonal element for the best numerical stability
        int i = 0;
        if (mCol[1].mF32[1] > mCol[0].mF32[0]) i = 1;
        if (mCol[2].mF32[2] > mCol[i].mF32[i]) i = 2;

        if (i == 0)
        {
            float s = sqrt(mCol[0].mF32[0] - (mCol[1].mF32[1] + mCol[2].mF32[2]) + 1);
            float is = 0.5f / s;
            return Quat(
                0.5f * s,
                (mCol[1].mF32[0] + mCol[0].mF32[1]) * is,
                (mCol[0].mF32[2] + mCol[2].mF32[0]) * is,
                (mCol[1].mF32[2] - mCol[2].mF32[1]) * is);
        }
        else if (i == 1)
        {
            float s = sqrt(mCol[1].mF32[1] - (mCol[2].mF32[2] + mCol[0].mF32[0]) + 1);
            float is = 0.5f / s;
            return Quat(
                (mCol[1].mF32[0] + mCol[0].mF32[1]) * is,
                0.5f * s,
                (mCol[2].mF32[1] + mCol[1].mF32[2]) * is,
                (mCol[2].mF32[0] - mCol[0].mF32[2]) * is);
        }
        else
        {
            JPH_ASSERT(i == 2);

            float s = sqrt(mCol[2].mF32[2] - (mCol[0].mF32[0] + mCol[1].mF32[1]) + 1);
            float is = 0.5f / s;
            return Quat(
                (mCol[0].mF32[2] + mCol[2].mF32[0]) * is,
                (mCol[2].mF32[1] + mCol[1].mF32[2]) * is,
                0.5f * s,
                (mCol[0].mF32[1] - mCol[1].mF32[0]) * is);
        }
    }
}
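
// Illustrative sketch (not part of the original source): GetQuaternion and sRotation are inverse
// operations up to sign, since q and -q encode the same rotation:
//
//     Quat q = Quat::sRotation(Vec3(1, 2, 3).Normalized(), 0.9f);
//     Quat q2 = Mat44::sRotation(q).GetQuaternion();
//     JPH_ASSERT(q2.IsClose(q) || q2.IsClose(-q));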

Mat44 Mat44::sQuatLeftMultiply(QuatArg inQ)
{
    return Mat44(
        Vec4(1, 1, -1, -1) * inQ.mValue.Swizzle<SWIZZLE_W, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_X>(),
        Vec4(-1, 1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>(),
        Vec4(1, -1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>(),
        inQ.mValue);
}

Mat44 Mat44::sQuatRightMultiply(QuatArg inQ)
{
    return Mat44(
        Vec4(1, -1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_W, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_X>(),
        Vec4(1, 1, -1, -1) * inQ.mValue.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>(),
        Vec4(-1, 1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>(),
        inQ.mValue);
}
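
// Illustrative sketch (not part of the original source): these matrices turn quaternion
// multiplication into a matrix-vector product, which is useful e.g. when taking derivatives:
// sQuatLeftMultiply(p) * q.mValue gives (p * q).mValue and sQuatRightMultiply(p) * q.mValue
// gives (q * p).mValue:
//
//     Quat p = Quat::sRotation(Vec3::sAxisX(), 0.2f), q = Quat::sRotation(Vec3::sAxisY(), 0.4f);
//     JPH_ASSERT((Mat44::sQuatLeftMultiply(p) * q.mValue).IsClose((p * q).mValue));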

Mat44 Mat44::GetRotation() const
{
    JPH_ASSERT(mCol[0][3] == 0.0f);
    JPH_ASSERT(mCol[1][3] == 0.0f);
    JPH_ASSERT(mCol[2][3] == 0.0f);

    return Mat44(mCol[0], mCol[1], mCol[2], Vec4(0, 0, 0, 1));
}

Mat44 Mat44::GetRotationSafe() const
{
#if defined(JPH_USE_AVX512)
    return Mat44(_mm_maskz_mov_ps(0b0111, mCol[0].mValue),
        _mm_maskz_mov_ps(0b0111, mCol[1].mValue),
        _mm_maskz_mov_ps(0b0111, mCol[2].mValue),
        Vec4(0, 0, 0, 1));
#elif defined(JPH_USE_SSE4_1)
    __m128 zero = _mm_setzero_ps();
    return Mat44(_mm_blend_ps(mCol[0].mValue, zero, 8),
        _mm_blend_ps(mCol[1].mValue, zero, 8),
        _mm_blend_ps(mCol[2].mValue, zero, 8),
        Vec4(0, 0, 0, 1));
#elif defined(JPH_USE_NEON)
    return Mat44(vsetq_lane_f32(0, mCol[0].mValue, 3),
        vsetq_lane_f32(0, mCol[1].mValue, 3),
        vsetq_lane_f32(0, mCol[2].mValue, 3),
        Vec4(0, 0, 0, 1));
#else
    return Mat44(Vec4(mCol[0].mF32[0], mCol[0].mF32[1], mCol[0].mF32[2], 0),
        Vec4(mCol[1].mF32[0], mCol[1].mF32[1], mCol[1].mF32[2], 0),
        Vec4(mCol[2].mF32[0], mCol[2].mF32[1], mCol[2].mF32[2], 0),
        Vec4(0, 0, 0, 1));
#endif
}

void Mat44::SetRotation(Mat44Arg inRotation)
{
    mCol[0] = inRotation.mCol[0];
    mCol[1] = inRotation.mCol[1];
    mCol[2] = inRotation.mCol[2];
}

Mat44 Mat44::PreTranslated(Vec3Arg inTranslation) const
{
    return Mat44(mCol[0], mCol[1], mCol[2], Vec4(GetTranslation() + Multiply3x3(inTranslation), 1));
}

Mat44 Mat44::PostTranslated(Vec3Arg inTranslation) const
{
    return Mat44(mCol[0], mCol[1], mCol[2], Vec4(GetTranslation() + inTranslation, 1));
}

Mat44 Mat44::PreScaled(Vec3Arg inScale) const
{
    return Mat44(inScale.GetX() * mCol[0], inScale.GetY() * mCol[1], inScale.GetZ() * mCol[2], mCol[3]);
}

Mat44 Mat44::PostScaled(Vec3Arg inScale) const
{
    Vec4 scale(inScale, 1);
    return Mat44(scale * mCol[0], scale * mCol[1], scale * mCol[2], scale * mCol[3]);
}
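
// Illustrative sketch (not part of the original source): the Pre/Post helpers are shorthands for
// full matrix products; Pre applies the extra transform first (in local space), Post applies it
// last (in world space):
//
//     Mat44 m = Mat44::sRotationZ(0.5f);
//     JPH_ASSERT(m.PreScaled(Vec3(2, 2, 2)).IsClose(m * Mat44::sScale(Vec3(2, 2, 2))));
//     JPH_ASSERT(m.PostTranslated(Vec3(1, 0, 0)).IsClose(Mat44::sTranslation(Vec3(1, 0, 0)) * m));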

Mat44 Mat44::Decompose(Vec3 &outScale) const
{
    // Start the modified Gram-Schmidt algorithm
    // X axis will just be normalized
    Vec3 x = GetAxisX();

    // Make Y axis perpendicular to X
    Vec3 y = GetAxisY();
    float x_dot_x = x.LengthSq();
    y -= (x.Dot(y) / x_dot_x) * x;

    // Make Z axis perpendicular to X
    Vec3 z = GetAxisZ();
    z -= (x.Dot(z) / x_dot_x) * x;

    // Make Z axis perpendicular to Y
    float y_dot_y = y.LengthSq();
    z -= (y.Dot(z) / y_dot_y) * y;

    // Determine the scale
    float z_dot_z = z.LengthSq();
    outScale = Vec3(x_dot_x, y_dot_y, z_dot_z).Sqrt();

    // If the resulting x, y and z vectors don't form a right handed matrix, flip the z axis.
    if (x.Cross(y).Dot(z) < 0.0f)
        outScale.SetZ(-outScale.GetZ());

    // Determine the rotation and translation
    return Mat44(Vec4(x / outScale.GetX(), 0), Vec4(y / outScale.GetY(), 0), Vec4(z / outScale.GetZ(), 0), GetColumn4(3));
}
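
// Illustrative sketch (not part of the original source): Decompose splits this matrix into an
// orthonormal rotation/translation part and a scale, so for a matrix that really is
// rotation * scale the pieces reassemble the original:
//
//     Mat44 m = Mat44::sRotationY(0.6f) * Mat44::sScale(Vec3(2, 3, 4));
//     Vec3 scale;
//     Mat44 rt = m.Decompose(scale);
//     JPH_ASSERT((rt * Mat44::sScale(scale)).IsClose(m));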

#undef JPH_EL

JPH_NAMESPACE_END