Matrix3x4.h

//
// Copyright (c) 2008-2015 the Urho3D project.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#pragma once

#include "../Math/Matrix4.h"

#ifdef URHO3D_SSE
#include <emmintrin.h>
#endif

namespace Urho3D
{

/// 3x4 matrix for scene node transform calculations.
class URHO3D_API Matrix3x4
{
public:
    /// Construct an identity matrix.
    Matrix3x4()
#ifndef URHO3D_SSE
        :m00_(1.0f),
        m01_(0.0f),
        m02_(0.0f),
        m03_(0.0f),
        m10_(0.0f),
        m11_(1.0f),
        m12_(0.0f),
        m13_(0.0f),
        m20_(0.0f),
        m21_(0.0f),
        m22_(1.0f),
        m23_(0.0f)
#endif
    {
#ifdef URHO3D_SSE
        _mm_storeu_ps(&m00_, _mm_set_ps(0.f, 0.f, 0.f, 1.f));
        _mm_storeu_ps(&m10_, _mm_set_ps(0.f, 0.f, 1.f, 0.f));
        _mm_storeu_ps(&m20_, _mm_set_ps(0.f, 1.f, 0.f, 0.f));
#endif
    }

    /// Copy-construct from another matrix.
    Matrix3x4(const Matrix3x4& matrix)
#ifndef URHO3D_SSE
        :m00_(matrix.m00_),
        m01_(matrix.m01_),
        m02_(matrix.m02_),
        m03_(matrix.m03_),
        m10_(matrix.m10_),
        m11_(matrix.m11_),
        m12_(matrix.m12_),
        m13_(matrix.m13_),
        m20_(matrix.m20_),
        m21_(matrix.m21_),
        m22_(matrix.m22_),
        m23_(matrix.m23_)
#endif
    {
#ifdef URHO3D_SSE
        _mm_storeu_ps(&m00_, _mm_loadu_ps(&matrix.m00_));
        _mm_storeu_ps(&m10_, _mm_loadu_ps(&matrix.m10_));
        _mm_storeu_ps(&m20_, _mm_loadu_ps(&matrix.m20_));
#endif
    }

    /// Copy-construct from a 3x3 matrix and set the extra elements to identity.
    Matrix3x4(const Matrix3& matrix) :
        m00_(matrix.m00_),
        m01_(matrix.m01_),
        m02_(matrix.m02_),
        m03_(0.0f),
        m10_(matrix.m10_),
        m11_(matrix.m11_),
        m12_(matrix.m12_),
        m13_(0.0f),
        m20_(matrix.m20_),
        m21_(matrix.m21_),
        m22_(matrix.m22_),
        m23_(0.0f)
    {
    }

    /// Copy-construct from a 4x4 matrix which is assumed to contain no projection.
    Matrix3x4(const Matrix4& matrix)
#ifndef URHO3D_SSE
        :m00_(matrix.m00_),
        m01_(matrix.m01_),
        m02_(matrix.m02_),
        m03_(matrix.m03_),
        m10_(matrix.m10_),
        m11_(matrix.m11_),
        m12_(matrix.m12_),
        m13_(matrix.m13_),
        m20_(matrix.m20_),
        m21_(matrix.m21_),
        m22_(matrix.m22_),
        m23_(matrix.m23_)
#endif
    {
#ifdef URHO3D_SSE
        _mm_storeu_ps(&m00_, _mm_loadu_ps(&matrix.m00_));
        _mm_storeu_ps(&m10_, _mm_loadu_ps(&matrix.m10_));
        _mm_storeu_ps(&m20_, _mm_loadu_ps(&matrix.m20_));
#endif
    }

    /// Construct from values.
    Matrix3x4(float v00, float v01, float v02, float v03,
        float v10, float v11, float v12, float v13,
        float v20, float v21, float v22, float v23) :
        m00_(v00),
        m01_(v01),
        m02_(v02),
        m03_(v03),
        m10_(v10),
        m11_(v11),
        m12_(v12),
        m13_(v13),
        m20_(v20),
        m21_(v21),
        m22_(v22),
        m23_(v23)
    {
    }

    /// Construct from a float array.
    explicit Matrix3x4(const float* data)
#ifndef URHO3D_SSE
        :m00_(data[0]),
        m01_(data[1]),
        m02_(data[2]),
        m03_(data[3]),
        m10_(data[4]),
        m11_(data[5]),
        m12_(data[6]),
        m13_(data[7]),
        m20_(data[8]),
        m21_(data[9]),
        m22_(data[10]),
        m23_(data[11])
#endif
    {
#ifdef URHO3D_SSE
        _mm_storeu_ps(&m00_, _mm_loadu_ps(data));
        _mm_storeu_ps(&m10_, _mm_loadu_ps(data + 4));
        _mm_storeu_ps(&m20_, _mm_loadu_ps(data + 8));
#endif
    }

    /// Construct from translation, rotation and uniform scale.
    Matrix3x4(const Vector3& translation, const Quaternion& rotation, float scale)
    {
#ifdef URHO3D_SSE
        __m128 t = _mm_set_ps(1.f, translation.z_, translation.y_, translation.x_);
        __m128 q = _mm_loadu_ps(&rotation.w_);
        __m128 s = _mm_set_ps(1.f, scale, scale, scale);
        SetFromTRS(t, q, s);
#else
        SetRotation(rotation.RotationMatrix() * scale);
        SetTranslation(translation);
#endif
    }

    /// Construct from translation, rotation and nonuniform scale.
    Matrix3x4(const Vector3& translation, const Quaternion& rotation, const Vector3& scale)
    {
#ifdef URHO3D_SSE
        __m128 t = _mm_set_ps(1.f, translation.z_, translation.y_, translation.x_);
        __m128 q = _mm_loadu_ps(&rotation.w_);
        __m128 s = _mm_set_ps(1.f, scale.z_, scale.y_, scale.x_);
        SetFromTRS(t, q, s);
#else
        SetRotation(rotation.RotationMatrix().Scaled(scale));
        SetTranslation(translation);
#endif
    }
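
    // A usage sketch for the TRS constructors (illustrative only; assumes
    // Urho3D's degree-based Quaternion(angle, axis) constructor and the
    // constants from Vector3.h):
    //
    //     Matrix3x4 world(Vector3(1.0f, 2.0f, 3.0f),      // translation
    //                     Quaternion(90.0f, Vector3::UP),  // rotation
    //                     2.0f);                           // uniform scale
    //     Vector3 origin = world * Vector3::ZERO;          // == (1, 2, 3)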

    /// Assign from another matrix.
    Matrix3x4& operator =(const Matrix3x4& rhs)
    {
#ifdef URHO3D_SSE
        _mm_storeu_ps(&m00_, _mm_loadu_ps(&rhs.m00_));
        _mm_storeu_ps(&m10_, _mm_loadu_ps(&rhs.m10_));
        _mm_storeu_ps(&m20_, _mm_loadu_ps(&rhs.m20_));
#else
        m00_ = rhs.m00_;
        m01_ = rhs.m01_;
        m02_ = rhs.m02_;
        m03_ = rhs.m03_;
        m10_ = rhs.m10_;
        m11_ = rhs.m11_;
        m12_ = rhs.m12_;
        m13_ = rhs.m13_;
        m20_ = rhs.m20_;
        m21_ = rhs.m21_;
        m22_ = rhs.m22_;
        m23_ = rhs.m23_;
#endif
        return *this;
    }

    /// Assign from a 3x3 matrix and set the extra elements to identity.
    Matrix3x4& operator =(const Matrix3& rhs)
    {
        m00_ = rhs.m00_;
        m01_ = rhs.m01_;
        m02_ = rhs.m02_;
        m03_ = 0.0f;
        m10_ = rhs.m10_;
        m11_ = rhs.m11_;
        m12_ = rhs.m12_;
        m13_ = 0.0f;
        m20_ = rhs.m20_;
        m21_ = rhs.m21_;
        m22_ = rhs.m22_;
        m23_ = 0.0f;
        return *this;
    }

    /// Assign from a 4x4 matrix which is assumed to contain no projection.
    Matrix3x4& operator =(const Matrix4& rhs)
    {
#ifdef URHO3D_SSE
        _mm_storeu_ps(&m00_, _mm_loadu_ps(&rhs.m00_));
        _mm_storeu_ps(&m10_, _mm_loadu_ps(&rhs.m10_));
        _mm_storeu_ps(&m20_, _mm_loadu_ps(&rhs.m20_));
#else
        m00_ = rhs.m00_;
        m01_ = rhs.m01_;
        m02_ = rhs.m02_;
        m03_ = rhs.m03_;
        m10_ = rhs.m10_;
        m11_ = rhs.m11_;
        m12_ = rhs.m12_;
        m13_ = rhs.m13_;
        m20_ = rhs.m20_;
        m21_ = rhs.m21_;
        m22_ = rhs.m22_;
        m23_ = rhs.m23_;
#endif
        return *this;
    }

    /// Test for equality with another matrix without epsilon.
    bool operator ==(const Matrix3x4& rhs) const
    {
#ifdef URHO3D_SSE
        __m128 c0 = _mm_cmpeq_ps(_mm_loadu_ps(&m00_), _mm_loadu_ps(&rhs.m00_));
        __m128 c1 = _mm_cmpeq_ps(_mm_loadu_ps(&m10_), _mm_loadu_ps(&rhs.m10_));
        c0 = _mm_and_ps(c0, c1);
        __m128 c2 = _mm_cmpeq_ps(_mm_loadu_ps(&m20_), _mm_loadu_ps(&rhs.m20_));
        c0 = _mm_and_ps(c0, c2);
        __m128 hi = _mm_movehl_ps(c0, c0);
        c0 = _mm_and_ps(c0, hi);
        hi = _mm_shuffle_ps(c0, c0, _MM_SHUFFLE(1, 1, 1, 1));
        c0 = _mm_and_ps(c0, hi);
        return !_mm_ucomige_ss(c0, c0);
#else
        const float* leftData = Data();
        const float* rightData = rhs.Data();

        for (unsigned i = 0; i < 12; ++i)
        {
            if (leftData[i] != rightData[i])
                return false;
        }

        return true;
#endif
    }

    /// Test for inequality with another matrix without epsilon.
    bool operator !=(const Matrix3x4& rhs) const { return !(*this == rhs); }
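
    // Note that operator == above compares floats bit-exactly; for values
    // that come out of arithmetic, the epsilon-based Equals() declared
    // further below is usually the safer test. A sketch:
    //
    //     Matrix3x4 a, b;               // both identity
    //     bool exact = (a == b);        // true: elements are identical
    //     bool close = a.Equals(b);     // true: equal within tolerance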

    /// Multiply a Vector3 which is assumed to represent position.
    Vector3 operator *(const Vector3& rhs) const
    {
#ifdef URHO3D_SSE
        __m128 vec = _mm_set_ps(1.f, rhs.z_, rhs.y_, rhs.x_);
        __m128 r0 = _mm_mul_ps(_mm_loadu_ps(&m00_), vec);
        __m128 r1 = _mm_mul_ps(_mm_loadu_ps(&m10_), vec);
        __m128 t0 = _mm_unpacklo_ps(r0, r1);
        __m128 t1 = _mm_unpackhi_ps(r0, r1);
        t0 = _mm_add_ps(t0, t1);
        __m128 r2 = _mm_mul_ps(_mm_loadu_ps(&m20_), vec);
        __m128 r3 = _mm_setzero_ps();
        __m128 t2 = _mm_unpacklo_ps(r2, r3);
        __m128 t3 = _mm_unpackhi_ps(r2, r3);
        t2 = _mm_add_ps(t2, t3);
        vec = _mm_add_ps(_mm_movelh_ps(t0, t2), _mm_movehl_ps(t2, t0));

        return Vector3(
            _mm_cvtss_f32(vec),
            _mm_cvtss_f32(_mm_shuffle_ps(vec, vec, _MM_SHUFFLE(1, 1, 1, 1))),
            _mm_cvtss_f32(_mm_movehl_ps(vec, vec)));
#else
        return Vector3(
            (m00_ * rhs.x_ + m01_ * rhs.y_ + m02_ * rhs.z_ + m03_),
            (m10_ * rhs.x_ + m11_ * rhs.y_ + m12_ * rhs.z_ + m13_),
            (m20_ * rhs.x_ + m21_ * rhs.y_ + m22_ * rhs.z_ + m23_)
        );
#endif
    }
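
    // The Vector3 overload above transforms a position: the implicit w of 1
    // brings in the translation column. An illustrative sketch:
    //
    //     Matrix3x4 m(Vector3(5.0f, 0.0f, 0.0f), Quaternion::IDENTITY, 1.0f);
    //     Vector3 p = m * Vector3(1.0f, 0.0f, 0.0f);   // p == (6, 0, 0)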

    /// Multiply a Vector4.
    Vector3 operator *(const Vector4& rhs) const
    {
#ifdef URHO3D_SSE
        __m128 vec = _mm_loadu_ps(&rhs.x_);
        __m128 r0 = _mm_mul_ps(_mm_loadu_ps(&m00_), vec);
        __m128 r1 = _mm_mul_ps(_mm_loadu_ps(&m10_), vec);
        __m128 t0 = _mm_unpacklo_ps(r0, r1);
        __m128 t1 = _mm_unpackhi_ps(r0, r1);
        t0 = _mm_add_ps(t0, t1);
        __m128 r2 = _mm_mul_ps(_mm_loadu_ps(&m20_), vec);
        __m128 r3 = _mm_setzero_ps();
        __m128 t2 = _mm_unpacklo_ps(r2, r3);
        __m128 t3 = _mm_unpackhi_ps(r2, r3);
        t2 = _mm_add_ps(t2, t3);
        vec = _mm_add_ps(_mm_movelh_ps(t0, t2), _mm_movehl_ps(t2, t0));

        return Vector3(
            _mm_cvtss_f32(vec),
            _mm_cvtss_f32(_mm_shuffle_ps(vec, vec, _MM_SHUFFLE(1, 1, 1, 1))),
            _mm_cvtss_f32(_mm_movehl_ps(vec, vec)));
#else
        return Vector3(
            (m00_ * rhs.x_ + m01_ * rhs.y_ + m02_ * rhs.z_ + m03_ * rhs.w_),
            (m10_ * rhs.x_ + m11_ * rhs.y_ + m12_ * rhs.z_ + m13_ * rhs.w_),
            (m20_ * rhs.x_ + m21_ * rhs.y_ + m22_ * rhs.z_ + m23_ * rhs.w_)
        );
#endif
    }
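
    // With the Vector4 overload the caller chooses w; passing w == 0
    // transforms a direction, applying rotation and scale but ignoring
    // translation:
    //
    //     Vector3 dir = m * Vector4(1.0f, 0.0f, 0.0f, 0.0f);   // translation ignored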

    /// Add a matrix.
    Matrix3x4 operator +(const Matrix3x4& rhs) const
    {
#ifdef URHO3D_SSE
        Matrix3x4 ret;
        _mm_storeu_ps(&ret.m00_, _mm_add_ps(_mm_loadu_ps(&m00_), _mm_loadu_ps(&rhs.m00_)));
        _mm_storeu_ps(&ret.m10_, _mm_add_ps(_mm_loadu_ps(&m10_), _mm_loadu_ps(&rhs.m10_)));
        _mm_storeu_ps(&ret.m20_, _mm_add_ps(_mm_loadu_ps(&m20_), _mm_loadu_ps(&rhs.m20_)));
        return ret;
#else
        return Matrix3x4(
            m00_ + rhs.m00_,
            m01_ + rhs.m01_,
            m02_ + rhs.m02_,
            m03_ + rhs.m03_,
            m10_ + rhs.m10_,
            m11_ + rhs.m11_,
            m12_ + rhs.m12_,
            m13_ + rhs.m13_,
            m20_ + rhs.m20_,
            m21_ + rhs.m21_,
            m22_ + rhs.m22_,
            m23_ + rhs.m23_
        );
#endif
    }

    /// Subtract a matrix.
    Matrix3x4 operator -(const Matrix3x4& rhs) const
    {
#ifdef URHO3D_SSE
        Matrix3x4 ret;
        _mm_storeu_ps(&ret.m00_, _mm_sub_ps(_mm_loadu_ps(&m00_), _mm_loadu_ps(&rhs.m00_)));
        _mm_storeu_ps(&ret.m10_, _mm_sub_ps(_mm_loadu_ps(&m10_), _mm_loadu_ps(&rhs.m10_)));
        _mm_storeu_ps(&ret.m20_, _mm_sub_ps(_mm_loadu_ps(&m20_), _mm_loadu_ps(&rhs.m20_)));
        return ret;
#else
        return Matrix3x4(
            m00_ - rhs.m00_,
            m01_ - rhs.m01_,
            m02_ - rhs.m02_,
            m03_ - rhs.m03_,
            m10_ - rhs.m10_,
            m11_ - rhs.m11_,
            m12_ - rhs.m12_,
            m13_ - rhs.m13_,
            m20_ - rhs.m20_,
            m21_ - rhs.m21_,
            m22_ - rhs.m22_,
            m23_ - rhs.m23_
        );
#endif
    }

    /// Multiply with a scalar.
    Matrix3x4 operator *(float rhs) const
    {
#ifdef URHO3D_SSE
        Matrix3x4 ret;
        const __m128 mul = _mm_set1_ps(rhs);
        _mm_storeu_ps(&ret.m00_, _mm_mul_ps(_mm_loadu_ps(&m00_), mul));
        _mm_storeu_ps(&ret.m10_, _mm_mul_ps(_mm_loadu_ps(&m10_), mul));
        _mm_storeu_ps(&ret.m20_, _mm_mul_ps(_mm_loadu_ps(&m20_), mul));
        return ret;
#else
        return Matrix3x4(
            m00_ * rhs,
            m01_ * rhs,
            m02_ * rhs,
            m03_ * rhs,
            m10_ * rhs,
            m11_ * rhs,
            m12_ * rhs,
            m13_ * rhs,
            m20_ * rhs,
            m21_ * rhs,
            m22_ * rhs,
            m23_ * rhs
        );
#endif
    }

    /// Multiply a matrix.
    Matrix3x4 operator *(const Matrix3x4& rhs) const
    {
#ifdef URHO3D_SSE
        Matrix3x4 out;

        __m128 r0 = _mm_loadu_ps(&rhs.m00_);
        __m128 r1 = _mm_loadu_ps(&rhs.m10_);
        __m128 r2 = _mm_loadu_ps(&rhs.m20_);
        __m128 r3 = _mm_set_ps(1.f, 0.f, 0.f, 0.f);

        __m128 l = _mm_loadu_ps(&m00_);
        __m128 t0 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(0, 0, 0, 0)), r0);
        __m128 t1 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(1, 1, 1, 1)), r1);
        __m128 t2 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(2, 2, 2, 2)), r2);
        __m128 t3 = _mm_mul_ps(l, r3);
        _mm_storeu_ps(&out.m00_, _mm_add_ps(_mm_add_ps(t0, t1), _mm_add_ps(t2, t3)));

        l = _mm_loadu_ps(&m10_);
        t0 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(0, 0, 0, 0)), r0);
        t1 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(1, 1, 1, 1)), r1);
        t2 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(2, 2, 2, 2)), r2);
        t3 = _mm_mul_ps(l, r3);
        _mm_storeu_ps(&out.m10_, _mm_add_ps(_mm_add_ps(t0, t1), _mm_add_ps(t2, t3)));

        l = _mm_loadu_ps(&m20_);
        t0 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(0, 0, 0, 0)), r0);
        t1 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(1, 1, 1, 1)), r1);
        t2 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(2, 2, 2, 2)), r2);
        t3 = _mm_mul_ps(l, r3);
        _mm_storeu_ps(&out.m20_, _mm_add_ps(_mm_add_ps(t0, t1), _mm_add_ps(t2, t3)));

        return out;
#else
        return Matrix3x4(
            m00_ * rhs.m00_ + m01_ * rhs.m10_ + m02_ * rhs.m20_,
            m00_ * rhs.m01_ + m01_ * rhs.m11_ + m02_ * rhs.m21_,
            m00_ * rhs.m02_ + m01_ * rhs.m12_ + m02_ * rhs.m22_,
            m00_ * rhs.m03_ + m01_ * rhs.m13_ + m02_ * rhs.m23_ + m03_,
            m10_ * rhs.m00_ + m11_ * rhs.m10_ + m12_ * rhs.m20_,
            m10_ * rhs.m01_ + m11_ * rhs.m11_ + m12_ * rhs.m21_,
            m10_ * rhs.m02_ + m11_ * rhs.m12_ + m12_ * rhs.m22_,
            m10_ * rhs.m03_ + m11_ * rhs.m13_ + m12_ * rhs.m23_ + m13_,
            m20_ * rhs.m00_ + m21_ * rhs.m10_ + m22_ * rhs.m20_,
            m20_ * rhs.m01_ + m21_ * rhs.m11_ + m22_ * rhs.m21_,
            m20_ * rhs.m02_ + m21_ * rhs.m12_ + m22_ * rhs.m22_,
            m20_ * rhs.m03_ + m21_ * rhs.m13_ + m22_ * rhs.m23_ + m23_
        );
#endif
    }
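
    // Concatenation reads right to left: in a * b, the b transform applies
    // first. The typical scene-graph pattern is world = parentWorld * local.
    // A sketch:
    //
    //     Matrix3x4 parent(Vector3(0.0f, 1.0f, 0.0f), Quaternion::IDENTITY, 1.0f);
    //     Matrix3x4 local(Vector3(2.0f, 0.0f, 0.0f), Quaternion::IDENTITY, 1.0f);
    //     Matrix3x4 world = parent * local;   // translation (2, 1, 0)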

    /// Multiply a 4x4 matrix.
    Matrix4 operator *(const Matrix4& rhs) const
    {
#ifdef URHO3D_SSE
        Matrix4 out;

        __m128 r0 = _mm_loadu_ps(&rhs.m00_);
        __m128 r1 = _mm_loadu_ps(&rhs.m10_);
        __m128 r2 = _mm_loadu_ps(&rhs.m20_);
        __m128 r3 = _mm_loadu_ps(&rhs.m30_);

        __m128 l = _mm_loadu_ps(&m00_);
        __m128 t0 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(0, 0, 0, 0)), r0);
        __m128 t1 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(1, 1, 1, 1)), r1);
        __m128 t2 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(2, 2, 2, 2)), r2);
        __m128 t3 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(3, 3, 3, 3)), r3);
        _mm_storeu_ps(&out.m00_, _mm_add_ps(_mm_add_ps(t0, t1), _mm_add_ps(t2, t3)));

        l = _mm_loadu_ps(&m10_);
        t0 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(0, 0, 0, 0)), r0);
        t1 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(1, 1, 1, 1)), r1);
        t2 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(2, 2, 2, 2)), r2);
        t3 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(3, 3, 3, 3)), r3);
        _mm_storeu_ps(&out.m10_, _mm_add_ps(_mm_add_ps(t0, t1), _mm_add_ps(t2, t3)));

        l = _mm_loadu_ps(&m20_);
        t0 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(0, 0, 0, 0)), r0);
        t1 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(1, 1, 1, 1)), r1);
        t2 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(2, 2, 2, 2)), r2);
        t3 = _mm_mul_ps(_mm_shuffle_ps(l, l, _MM_SHUFFLE(3, 3, 3, 3)), r3);
        _mm_storeu_ps(&out.m20_, _mm_add_ps(_mm_add_ps(t0, t1), _mm_add_ps(t2, t3)));

        _mm_storeu_ps(&out.m30_, r3);

        return out;
#else
        return Matrix4(
            m00_ * rhs.m00_ + m01_ * rhs.m10_ + m02_ * rhs.m20_ + m03_ * rhs.m30_,
            m00_ * rhs.m01_ + m01_ * rhs.m11_ + m02_ * rhs.m21_ + m03_ * rhs.m31_,
            m00_ * rhs.m02_ + m01_ * rhs.m12_ + m02_ * rhs.m22_ + m03_ * rhs.m32_,
            m00_ * rhs.m03_ + m01_ * rhs.m13_ + m02_ * rhs.m23_ + m03_ * rhs.m33_,
            m10_ * rhs.m00_ + m11_ * rhs.m10_ + m12_ * rhs.m20_ + m13_ * rhs.m30_,
            m10_ * rhs.m01_ + m11_ * rhs.m11_ + m12_ * rhs.m21_ + m13_ * rhs.m31_,
            m10_ * rhs.m02_ + m11_ * rhs.m12_ + m12_ * rhs.m22_ + m13_ * rhs.m32_,
            m10_ * rhs.m03_ + m11_ * rhs.m13_ + m12_ * rhs.m23_ + m13_ * rhs.m33_,
            m20_ * rhs.m00_ + m21_ * rhs.m10_ + m22_ * rhs.m20_ + m23_ * rhs.m30_,
            m20_ * rhs.m01_ + m21_ * rhs.m11_ + m22_ * rhs.m21_ + m23_ * rhs.m31_,
            m20_ * rhs.m02_ + m21_ * rhs.m12_ + m22_ * rhs.m22_ + m23_ * rhs.m32_,
            m20_ * rhs.m03_ + m21_ * rhs.m13_ + m22_ * rhs.m23_ + m23_ * rhs.m33_,
            rhs.m30_,
            rhs.m31_,
            rhs.m32_,
            rhs.m33_
        );
#endif
    }
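
    // In the 4x4 product above this matrix behaves as a full 4x4 with an
    // implicit (0 0 0 1) bottom row, which is why the result's last row is
    // copied straight from rhs. Assuming Matrix4::IDENTITY from Matrix4.h:
    //
    //     Matrix4 full = m * Matrix4::IDENTITY;   // same as m.ToMatrix4()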

    /// Set translation elements.
    void SetTranslation(const Vector3& translation)
    {
        m03_ = translation.x_;
        m13_ = translation.y_;
        m23_ = translation.z_;
    }

    /// Set rotation elements from a 3x3 matrix.
    void SetRotation(const Matrix3& rotation)
    {
        m00_ = rotation.m00_;
        m01_ = rotation.m01_;
        m02_ = rotation.m02_;
        m10_ = rotation.m10_;
        m11_ = rotation.m11_;
        m12_ = rotation.m12_;
        m20_ = rotation.m20_;
        m21_ = rotation.m21_;
        m22_ = rotation.m22_;
    }

    /// Set scaling elements.
    void SetScale(const Vector3& scale)
    {
        m00_ = scale.x_;
        m11_ = scale.y_;
        m22_ = scale.z_;
    }

    /// Set uniform scaling elements.
    void SetScale(float scale)
    {
        m00_ = scale;
        m11_ = scale;
        m22_ = scale;
    }
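
    // The setters write elements in place. Note that SetScale() overwrites
    // the diagonal only, so it yields a correct scale only while the rotation
    // part is still axis-aligned. A typical sequence:
    //
    //     Matrix3x4 m;                                  // identity
    //     m.SetTranslation(Vector3(1.0f, 2.0f, 3.0f));
    //     m.SetScale(2.0f);                             // uniform scale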

    /// Return the combined rotation and scaling matrix.
    Matrix3 ToMatrix3() const
    {
        return Matrix3(
            m00_,
            m01_,
            m02_,
            m10_,
            m11_,
            m12_,
            m20_,
            m21_,
            m22_
        );
    }

    /// Convert to a 4x4 matrix by filling in an identity last row.
    Matrix4 ToMatrix4() const
    {
#ifdef URHO3D_SSE
        Matrix4 ret;
        _mm_storeu_ps(&ret.m00_, _mm_loadu_ps(&m00_));
        _mm_storeu_ps(&ret.m10_, _mm_loadu_ps(&m10_));
        _mm_storeu_ps(&ret.m20_, _mm_loadu_ps(&m20_));
        _mm_storeu_ps(&ret.m30_, _mm_set_ps(1.f, 0.f, 0.f, 0.f));
        return ret;
#else
        return Matrix4(
            m00_,
            m01_,
            m02_,
            m03_,
            m10_,
            m11_,
            m12_,
            m13_,
            m20_,
            m21_,
            m22_,
            m23_,
            0.0f,
            0.0f,
            0.0f,
            1.0f
        );
#endif
    }
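
    // ToMatrix4() is the usual hand-off to code that expects full 4x4
    // matrices, e.g. shader constants; the bottom row becomes (0, 0, 0, 1):
    //
    //     Matrix4 shaderMatrix = world.ToMatrix4();   // world is a Matrix3x4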

    /// Return the rotation matrix with scaling removed.
    Matrix3 RotationMatrix() const
    {
        Vector3 invScale(
            1.0f / sqrtf(m00_ * m00_ + m10_ * m10_ + m20_ * m20_),
            1.0f / sqrtf(m01_ * m01_ + m11_ * m11_ + m21_ * m21_),
            1.0f / sqrtf(m02_ * m02_ + m12_ * m12_ + m22_ * m22_)
        );

        return ToMatrix3().Scaled(invScale);
    }

    /// Return the translation part.
    Vector3 Translation() const
    {
        return Vector3(
            m03_,
            m13_,
            m23_
        );
    }

    /// Return the rotation part.
    Quaternion Rotation() const { return Quaternion(RotationMatrix()); }

    /// Return the scaling part.
    Vector3 Scale() const
    {
        return Vector3(
            sqrtf(m00_ * m00_ + m10_ * m10_ + m20_ * m20_),
            sqrtf(m01_ * m01_ + m11_ * m11_ + m21_ * m21_),
            sqrtf(m02_ * m02_ + m12_ * m12_ + m22_ * m22_)
        );
    }
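
    // The accessors above undo the translation * rotation * scale composition
    // used by the constructors; the scale is recovered from the basis column
    // lengths. An illustrative sketch:
    //
    //     Matrix3x4 m(Vector3(1.0f, 2.0f, 3.0f), Quaternion(30.0f, Vector3::UP), 1.5f);
    //     Vector3 t = m.Translation();   // (1, 2, 3)
    //     Vector3 s = m.Scale();         // approximately (1.5, 1.5, 1.5)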

    /// Test for equality with another matrix with epsilon.
    bool Equals(const Matrix3x4& rhs) const
    {
        const float* leftData = Data();
        const float* rightData = rhs.Data();

        for (unsigned i = 0; i < 12; ++i)
        {
            if (!Urho3D::Equals(leftData[i], rightData[i]))
                return false;
        }

        return true;
    }

    /// Return decomposition to translation, rotation and scale.
    void Decompose(Vector3& translation, Quaternion& rotation, Vector3& scale) const;
    /// Return inverse.
    Matrix3x4 Inverse() const;
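
    // Decompose() recovers all three components in one call, and Inverse()
    // is handy for mapping world-space data back into local space. A sketch,
    // given a Matrix3x4 world and a Vector3 worldPoint:
    //
    //     Vector3 t, s;
    //     Quaternion r;
    //     world.Decompose(t, r, s);
    //     Vector3 localPoint = world.Inverse() * worldPoint;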

    /// Return float data.
    const float* Data() const { return &m00_; }
    /// Return as string.
    String ToString() const;

    float m00_;
    float m01_;
    float m02_;
    float m03_;
    float m10_;
    float m11_;
    float m12_;
    float m13_;
    float m20_;
    float m21_;
    float m22_;
    float m23_;

    /// Zero matrix.
    static const Matrix3x4 ZERO;
    /// Identity matrix.
    static const Matrix3x4 IDENTITY;

#ifdef URHO3D_SSE
private:
    // Sets this matrix from the given translation, rotation (as quaternion (w,x,y,z)), and nonuniform scale (x,y,z) parameters.
    // Note: the w component of the scale parameter passed to this function must be 1.
    inline void SetFromTRS(__m128 t, __m128 q, __m128 s)
    {
        q = _mm_shuffle_ps(q, q, _MM_SHUFFLE(0, 3, 2, 1));
        __m128 one = _mm_set_ps(0, 0, 0, 1);
        const __m128 sseX1 = _mm_castsi128_ps(_mm_set_epi32((int)0x80000000UL, (int)0x80000000UL, 0, (int)0x80000000UL));
        __m128 q2 = _mm_add_ps(q, q);
        __m128 t2 = _mm_add_ss(_mm_xor_ps(_mm_mul_ps(_mm_shuffle_ps(q, q, _MM_SHUFFLE(3, 3, 3, 2)), _mm_shuffle_ps(q2, q2, _MM_SHUFFLE(0, 1, 2, 2))), sseX1), one);
        const __m128 sseX0 = _mm_shuffle_ps(sseX1, sseX1, _MM_SHUFFLE(0, 3, 2, 1));
        __m128 t0 = _mm_mul_ps(_mm_shuffle_ps(q, q, _MM_SHUFFLE(1, 0, 0, 1)), _mm_shuffle_ps(q2, q2, _MM_SHUFFLE(2, 2, 1, 1)));
        __m128 t1 = _mm_xor_ps(t0, sseX0);
        __m128 r0 = _mm_sub_ps(t2, t1);
        __m128 xx2 = _mm_mul_ss(q, q2);
        __m128 r1 = _mm_sub_ps(_mm_xor_ps(t2, sseX0), _mm_move_ss(t1, xx2));
        r1 = _mm_shuffle_ps(r1, r1, _MM_SHUFFLE(2, 3, 0, 1));
        __m128 r2 = _mm_shuffle_ps(_mm_movehl_ps(r0, r1), _mm_sub_ss(_mm_sub_ss(one, xx2), t0), _MM_SHUFFLE(2, 0, 3, 1));
        __m128 tmp0 = _mm_unpacklo_ps(r0, r1);
        __m128 tmp2 = _mm_unpacklo_ps(r2, t);
        __m128 tmp1 = _mm_unpackhi_ps(r0, r1);
        __m128 tmp3 = _mm_unpackhi_ps(r2, t);
        _mm_storeu_ps(&m00_, _mm_mul_ps(_mm_movelh_ps(tmp0, tmp2), s));
        _mm_storeu_ps(&m10_, _mm_mul_ps(_mm_movehl_ps(tmp2, tmp0), s));
        _mm_storeu_ps(&m20_, _mm_mul_ps(_mm_movelh_ps(tmp1, tmp3), s));
    }
#endif
};

/// Multiply a 3x4 matrix with a scalar.
inline Matrix3x4 operator *(float lhs, const Matrix3x4& rhs) { return rhs * lhs; }

}