/*
Copyright (c) 2003-2013 Gino van den Bergen / Erwin Coumans  http://bulletphysics.org

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef B3_SIMD__QUATERNION_H_
#define B3_SIMD__QUATERNION_H_

#include "b3Vector3.h"
#include "b3QuadWord.h"

#ifdef B3_USE_SSE
const __m128 B3_ATTRIBUTE_ALIGNED16(b3vOnes) = {1.0f, 1.0f, 1.0f, 1.0f};
#endif

#if defined(B3_USE_SSE) || defined(B3_USE_NEON)
const b3SimdFloat4 B3_ATTRIBUTE_ALIGNED16(b3vQInv) = {-0.0f, -0.0f, -0.0f, +0.0f};
const b3SimdFloat4 B3_ATTRIBUTE_ALIGNED16(b3vPPPM) = {+0.0f, +0.0f, +0.0f, -0.0f};
#endif
/**@brief The b3Quaternion implements a quaternion to perform linear algebra rotations in combination with b3Matrix3x3, b3Vector3 and b3Transform. */
class b3Quaternion : public b3QuadWord
{
public:
	/**@brief No initialization constructor */
	b3Quaternion() {}

#if (defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)) || defined(B3_USE_NEON)
	// Set Vector
	B3_FORCE_INLINE b3Quaternion(const b3SimdFloat4 vec)
	{
		mVec128 = vec;
	}

	// Copy constructor
	B3_FORCE_INLINE b3Quaternion(const b3Quaternion& rhs)
	{
		mVec128 = rhs.mVec128;
	}

	// Assignment operator
	B3_FORCE_INLINE b3Quaternion&
	operator=(const b3Quaternion& v)
	{
		mVec128 = v.mVec128;
		return *this;
	}
#endif

	//	template <typename b3Scalar>
	//	explicit Quaternion(const b3Scalar *v) : Tuple4<b3Scalar>(v) {}

	/**@brief Constructor from scalars */
	b3Quaternion(const b3Scalar& _x, const b3Scalar& _y, const b3Scalar& _z, const b3Scalar& _w)
		: b3QuadWord(_x, _y, _z, _w)
	{
		//b3Assert(!((_x==1.f) && (_y==0.f) && (_z==0.f) && (_w==0.f)));
	}
	/**@brief Axis-angle constructor
	 * @param _axis The axis around which to rotate
	 * @param _angle The magnitude of the rotation about the axis, in radians */
	b3Quaternion(const b3Vector3& _axis, const b3Scalar& _angle)
	{
		setRotation(_axis, _angle);
	}

	/**@brief Constructor from Euler angles
	 * @param yaw Angle around Y unless B3_EULER_DEFAULT_ZYX defined then Z
	 * @param pitch Angle around X unless B3_EULER_DEFAULT_ZYX defined then Y
	 * @param roll Angle around Z unless B3_EULER_DEFAULT_ZYX defined then X */
	b3Quaternion(const b3Scalar& yaw, const b3Scalar& pitch, const b3Scalar& roll)
	{
#ifndef B3_EULER_DEFAULT_ZYX
		setEuler(yaw, pitch, roll);
#else
		setEulerZYX(yaw, pitch, roll);
#endif
	}

	/**@brief Set the rotation using axis-angle notation
	 * @param axis The axis around which to rotate
	 * @param _angle The magnitude of the rotation, in radians */
	void setRotation(const b3Vector3& axis, const b3Scalar& _angle)
	{
		b3Scalar d = axis.length();
		b3Assert(d != b3Scalar(0.0));
		if (d < B3_EPSILON)
		{
			setValue(0, 0, 0, 1);
		}
		else
		{
			b3Scalar s = b3Sin(_angle * b3Scalar(0.5)) / d;
			setValue(axis.getX() * s, axis.getY() * s, axis.getZ() * s,
					 b3Cos(_angle * b3Scalar(0.5)));
		}
	}
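	/* Illustrative usage sketch (not part of the original header): building the same rotation
	   via the axis-angle constructor and via setRotation. The axis and angle values below are
	   assumptions chosen only for the example.

	       b3Quaternion qa(b3MakeVector3(0, 0, 1), B3_PI * b3Scalar(0.5));  // 90 degrees about Z
	       b3Quaternion qb;
	       qb.setRotation(b3MakeVector3(0, 0, 1), B3_PI * b3Scalar(0.5));   // same rotation
	*/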
	/**@brief Set the quaternion using Euler angles
	 * @param yaw Angle around Y
	 * @param pitch Angle around X
	 * @param roll Angle around Z */
	void setEuler(const b3Scalar& yaw, const b3Scalar& pitch, const b3Scalar& roll)
	{
		b3Scalar halfYaw = b3Scalar(yaw) * b3Scalar(0.5);
		b3Scalar halfPitch = b3Scalar(pitch) * b3Scalar(0.5);
		b3Scalar halfRoll = b3Scalar(roll) * b3Scalar(0.5);
		b3Scalar cosYaw = b3Cos(halfYaw);
		b3Scalar sinYaw = b3Sin(halfYaw);
		b3Scalar cosPitch = b3Cos(halfPitch);
		b3Scalar sinPitch = b3Sin(halfPitch);
		b3Scalar cosRoll = b3Cos(halfRoll);
		b3Scalar sinRoll = b3Sin(halfRoll);
		setValue(cosRoll * sinPitch * cosYaw + sinRoll * cosPitch * sinYaw,
				 cosRoll * cosPitch * sinYaw - sinRoll * sinPitch * cosYaw,
				 sinRoll * cosPitch * cosYaw - cosRoll * sinPitch * sinYaw,
				 cosRoll * cosPitch * cosYaw + sinRoll * sinPitch * sinYaw);
	}

	/**@brief Set the quaternion using Euler angles
	 * @param yawZ Angle around Z
	 * @param pitchY Angle around Y
	 * @param rollX Angle around X */
	void setEulerZYX(const b3Scalar& yawZ, const b3Scalar& pitchY, const b3Scalar& rollX)
	{
		b3Scalar halfYaw = b3Scalar(yawZ) * b3Scalar(0.5);
		b3Scalar halfPitch = b3Scalar(pitchY) * b3Scalar(0.5);
		b3Scalar halfRoll = b3Scalar(rollX) * b3Scalar(0.5);
		b3Scalar cosYaw = b3Cos(halfYaw);
		b3Scalar sinYaw = b3Sin(halfYaw);
		b3Scalar cosPitch = b3Cos(halfPitch);
		b3Scalar sinPitch = b3Sin(halfPitch);
		b3Scalar cosRoll = b3Cos(halfRoll);
		b3Scalar sinRoll = b3Sin(halfRoll);
		setValue(sinRoll * cosPitch * cosYaw - cosRoll * sinPitch * sinYaw,  //x
				 cosRoll * sinPitch * cosYaw + sinRoll * cosPitch * sinYaw,  //y
				 cosRoll * cosPitch * sinYaw - sinRoll * sinPitch * cosYaw,  //z
				 cosRoll * cosPitch * cosYaw + sinRoll * sinPitch * sinYaw); //formerly yzx
		normalize();
	}

	/**@brief Get the Euler angles from this quaternion
	 * @param yawZ Angle around Z
	 * @param pitchY Angle around Y
	 * @param rollX Angle around X */
	void getEulerZYX(b3Scalar& yawZ, b3Scalar& pitchY, b3Scalar& rollX) const
	{
		b3Scalar squ;
		b3Scalar sqx;
		b3Scalar sqy;
		b3Scalar sqz;
		b3Scalar sarg;
		sqx = m_floats[0] * m_floats[0];
		sqy = m_floats[1] * m_floats[1];
		sqz = m_floats[2] * m_floats[2];
		squ = m_floats[3] * m_floats[3];
		rollX = b3Atan2(2 * (m_floats[1] * m_floats[2] + m_floats[3] * m_floats[0]), squ - sqx - sqy + sqz);
		sarg = b3Scalar(-2.) * (m_floats[0] * m_floats[2] - m_floats[3] * m_floats[1]);
		pitchY = sarg <= b3Scalar(-1.0) ? b3Scalar(-0.5) * B3_PI : (sarg >= b3Scalar(1.0) ? b3Scalar(0.5) * B3_PI : b3Asin(sarg));
		yawZ = b3Atan2(2 * (m_floats[0] * m_floats[1] + m_floats[3] * m_floats[2]), squ + sqx - sqy - sqz);
	}
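	/* Illustrative usage sketch (not part of the original header): a ZYX Euler round trip.
	   The angle values are assumptions chosen only for the example.

	       b3Quaternion q;
	       q.setEulerZYX(b3Scalar(0.3), b3Scalar(0.2), b3Scalar(0.1));  // yawZ, pitchY, rollX
	       b3Scalar yawZ, pitchY, rollX;
	       q.getEulerZYX(yawZ, pitchY, rollX);  // recovers approximately 0.3, 0.2, 0.1
	*/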
	/**@brief Add two quaternions
	 * @param q The quaternion to add to this one */
	B3_FORCE_INLINE b3Quaternion& operator+=(const b3Quaternion& q)
	{
#if defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)
		mVec128 = _mm_add_ps(mVec128, q.mVec128);
#elif defined(B3_USE_NEON)
		mVec128 = vaddq_f32(mVec128, q.mVec128);
#else
		m_floats[0] += q.getX();
		m_floats[1] += q.getY();
		m_floats[2] += q.getZ();
		m_floats[3] += q.m_floats[3];
#endif
		return *this;
	}

	/**@brief Subtract out a quaternion
	 * @param q The quaternion to subtract from this one */
	b3Quaternion& operator-=(const b3Quaternion& q)
	{
#if defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)
		mVec128 = _mm_sub_ps(mVec128, q.mVec128);
#elif defined(B3_USE_NEON)
		mVec128 = vsubq_f32(mVec128, q.mVec128);
#else
		m_floats[0] -= q.getX();
		m_floats[1] -= q.getY();
		m_floats[2] -= q.getZ();
		m_floats[3] -= q.m_floats[3];
#endif
		return *this;
	}

	/**@brief Scale this quaternion
	 * @param s The scalar to scale by */
	b3Quaternion& operator*=(const b3Scalar& s)
	{
#if defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)
		__m128 vs = _mm_load_ss(&s);  // (S 0 0 0)
		vs = b3_pshufd_ps(vs, 0);     // (S S S S)
		mVec128 = _mm_mul_ps(mVec128, vs);
#elif defined(B3_USE_NEON)
		mVec128 = vmulq_n_f32(mVec128, s);
#else
		m_floats[0] *= s;
		m_floats[1] *= s;
		m_floats[2] *= s;
		m_floats[3] *= s;
#endif
		return *this;
	}

	/**@brief Multiply this quaternion by q on the right
	 * @param q The other quaternion
	 * Equivalent to this = this * q */
	b3Quaternion& operator*=(const b3Quaternion& q)
	{
#if defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)
		__m128 vQ2 = q.get128();

		__m128 A1 = b3_pshufd_ps(mVec128, B3_SHUFFLE(0, 1, 2, 0));
		__m128 B1 = b3_pshufd_ps(vQ2, B3_SHUFFLE(3, 3, 3, 0));

		A1 = A1 * B1;

		__m128 A2 = b3_pshufd_ps(mVec128, B3_SHUFFLE(1, 2, 0, 1));
		__m128 B2 = b3_pshufd_ps(vQ2, B3_SHUFFLE(2, 0, 1, 1));

		A2 = A2 * B2;

		B1 = b3_pshufd_ps(mVec128, B3_SHUFFLE(2, 0, 1, 2));
		B2 = b3_pshufd_ps(vQ2, B3_SHUFFLE(1, 2, 0, 2));

		B1 = B1 * B2;  // A3 *= B3

		mVec128 = b3_splat_ps(mVec128, 3);  // A0
		mVec128 = mVec128 * vQ2;            // A0 * B0

		A1 = A1 + A2;                  // AB12
		mVec128 = mVec128 - B1;        // AB03 = AB0 - AB3
		A1 = _mm_xor_ps(A1, b3vPPPM);  // change sign of the last element
		mVec128 = mVec128 + A1;        // AB03 + AB12
#elif defined(B3_USE_NEON)
		float32x4_t vQ1 = mVec128;
		float32x4_t vQ2 = q.get128();
		float32x4_t A0, A1, B1, A2, B2, A3, B3;
		float32x2_t vQ1zx, vQ2wx, vQ1yz, vQ2zx, vQ2yz, vQ2xz;

		{
			float32x2x2_t tmp;
			tmp = vtrn_f32(vget_high_f32(vQ1), vget_low_f32(vQ1));  // {z x}, {w y}
			vQ1zx = tmp.val[0];

			tmp = vtrn_f32(vget_high_f32(vQ2), vget_low_f32(vQ2));  // {z x}, {w y}
			vQ2zx = tmp.val[0];
		}
		vQ2wx = vext_f32(vget_high_f32(vQ2), vget_low_f32(vQ2), 1);
		vQ1yz = vext_f32(vget_low_f32(vQ1), vget_high_f32(vQ1), 1);
		vQ2yz = vext_f32(vget_low_f32(vQ2), vget_high_f32(vQ2), 1);
		vQ2xz = vext_f32(vQ2zx, vQ2zx, 1);

		A1 = vcombine_f32(vget_low_f32(vQ1), vQ1zx);                     // X Y z x
		B1 = vcombine_f32(vdup_lane_f32(vget_high_f32(vQ2), 1), vQ2wx);  // W W W X
		A2 = vcombine_f32(vQ1yz, vget_low_f32(vQ1));
		B2 = vcombine_f32(vQ2zx, vdup_lane_f32(vget_low_f32(vQ2), 1));
		A3 = vcombine_f32(vQ1zx, vQ1yz);  // Z X Y Z
		B3 = vcombine_f32(vQ2yz, vQ2xz);  // Y Z x z

		A1 = vmulq_f32(A1, B1);
		A2 = vmulq_f32(A2, B2);
		A3 = vmulq_f32(A3, B3);                           // A3 *= B3
		A0 = vmulq_lane_f32(vQ2, vget_high_f32(vQ1), 1);  // A0 * B0

		A1 = vaddq_f32(A1, A2);  // AB12 = AB1 + AB2
		A0 = vsubq_f32(A0, A3);  // AB03 = AB0 - AB3

		// change the sign of the last element
		A1 = (b3SimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)b3vPPPM);
		A0 = vaddq_f32(A0, A1);  // AB03 + AB12

		mVec128 = A0;
#else
		setValue(
			m_floats[3] * q.getX() + m_floats[0] * q.m_floats[3] + m_floats[1] * q.getZ() - m_floats[2] * q.getY(),
			m_floats[3] * q.getY() + m_floats[1] * q.m_floats[3] + m_floats[2] * q.getX() - m_floats[0] * q.getZ(),
			m_floats[3] * q.getZ() + m_floats[2] * q.m_floats[3] + m_floats[0] * q.getY() - m_floats[1] * q.getX(),
			m_floats[3] * q.m_floats[3] - m_floats[0] * q.getX() - m_floats[1] * q.getY() - m_floats[2] * q.getZ());
#endif
		return *this;
	}
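	/* Illustrative usage sketch (not part of the original header): composing two rotations
	   in place with operator*=. The example values are assumptions.

	       b3Quaternion q(b3MakeVector3(1, 0, 0), b3Scalar(0.5));   // rotation about X
	       b3Quaternion r(b3MakeVector3(0, 1, 0), b3Scalar(0.25));  // rotation about Y
	       q *= r;  // q now applies r first, then the original q
	*/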
	/**@brief Return the dot product between this quaternion and another
	 * @param q The other quaternion */
	b3Scalar dot(const b3Quaternion& q) const
	{
#if defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)
		__m128 vd;

		vd = _mm_mul_ps(mVec128, q.mVec128);

		__m128 t = _mm_movehl_ps(vd, vd);
		vd = _mm_add_ps(vd, t);
		t = _mm_shuffle_ps(vd, vd, 0x55);
		vd = _mm_add_ss(vd, t);

		return _mm_cvtss_f32(vd);
#elif defined(B3_USE_NEON)
		float32x4_t vd = vmulq_f32(mVec128, q.mVec128);
		float32x2_t x = vpadd_f32(vget_low_f32(vd), vget_high_f32(vd));
		x = vpadd_f32(x, x);
		return vget_lane_f32(x, 0);
#else
		return m_floats[0] * q.getX() +
			   m_floats[1] * q.getY() +
			   m_floats[2] * q.getZ() +
			   m_floats[3] * q.m_floats[3];
#endif
	}

	/**@brief Return the length squared of the quaternion */
	b3Scalar length2() const
	{
		return dot(*this);
	}

	/**@brief Return the length of the quaternion */
	b3Scalar length() const
	{
		return b3Sqrt(length2());
	}

	/**@brief Normalize the quaternion
	 * such that x^2 + y^2 + z^2 + w^2 = 1 */
	b3Quaternion& normalize()
	{
#if defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)
		__m128 vd;

		vd = _mm_mul_ps(mVec128, mVec128);

		__m128 t = _mm_movehl_ps(vd, vd);
		vd = _mm_add_ps(vd, t);
		t = _mm_shuffle_ps(vd, vd, 0x55);
		vd = _mm_add_ss(vd, t);

		vd = _mm_sqrt_ss(vd);
		vd = _mm_div_ss(b3vOnes, vd);
		vd = b3_pshufd_ps(vd, 0);  // splat
		mVec128 = _mm_mul_ps(mVec128, vd);

		return *this;
#else
		return *this /= length();
#endif
	}
	/**@brief Return a scaled version of this quaternion
	 * @param s The scale factor */
	B3_FORCE_INLINE b3Quaternion
	operator*(const b3Scalar& s) const
	{
#if defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)
		__m128 vs = _mm_load_ss(&s);  // (S 0 0 0)
		vs = b3_pshufd_ps(vs, 0x00);  // (S S S S)

		return b3Quaternion(_mm_mul_ps(mVec128, vs));
#elif defined(B3_USE_NEON)
		return b3Quaternion(vmulq_n_f32(mVec128, s));
#else
		return b3Quaternion(getX() * s, getY() * s, getZ() * s, m_floats[3] * s);
#endif
	}

	/**@brief Return an inversely scaled version of this quaternion
	 * @param s The inverse scale factor */
	b3Quaternion operator/(const b3Scalar& s) const
	{
		b3Assert(s != b3Scalar(0.0));
		return *this * (b3Scalar(1.0) / s);
	}

	/**@brief Inversely scale this quaternion
	 * @param s The scale factor */
	b3Quaternion& operator/=(const b3Scalar& s)
	{
		b3Assert(s != b3Scalar(0.0));
		return *this *= b3Scalar(1.0) / s;
	}

	/**@brief Return a normalized version of this quaternion */
	b3Quaternion normalized() const
	{
		return *this / length();
	}

	/**@brief Return the angle between this quaternion and the other
	 * @param q The other quaternion */
	b3Scalar angle(const b3Quaternion& q) const
	{
		b3Scalar s = b3Sqrt(length2() * q.length2());
		b3Assert(s != b3Scalar(0.0));
		return b3Acos(dot(q) / s);
	}

	/**@brief Return the angle of rotation represented by this quaternion */
	b3Scalar getAngle() const
	{
		b3Scalar s = b3Scalar(2.) * b3Acos(m_floats[3]);
		return s;
	}

	/**@brief Return the axis of the rotation represented by this quaternion */
	b3Vector3 getAxis() const
	{
		b3Scalar s_squared = 1.f - m_floats[3] * m_floats[3];

		if (s_squared < b3Scalar(10.) * B3_EPSILON)  // Check for divide by zero
			return b3MakeVector3(1.0, 0.0, 0.0);     // Arbitrary
		b3Scalar s = 1.f / b3Sqrt(s_squared);
		return b3MakeVector3(m_floats[0] * s, m_floats[1] * s, m_floats[2] * s);
	}
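	/* Illustrative usage sketch (not part of the original header): recovering axis and angle
	   from a unit quaternion. The input values are assumptions.

	       b3Quaternion q(b3MakeVector3(0, 1, 0), b3Scalar(1.0));  // 1 radian about Y
	       b3Vector3 axis = q.getAxis();   // approximately (0, 1, 0)
	       b3Scalar angle = q.getAngle();  // approximately 1.0
	*/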
	/**@brief Return the inverse of this quaternion */
	b3Quaternion inverse() const
	{
#if defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)
		return b3Quaternion(_mm_xor_ps(mVec128, b3vQInv));
#elif defined(B3_USE_NEON)
		return b3Quaternion((b3SimdFloat4)veorq_s32((int32x4_t)mVec128, (int32x4_t)b3vQInv));
#else
		return b3Quaternion(-m_floats[0], -m_floats[1], -m_floats[2], m_floats[3]);
#endif
	}

	/**@brief Return the sum of this quaternion and the other
	 * @param q2 The other quaternion */
	B3_FORCE_INLINE b3Quaternion
	operator+(const b3Quaternion& q2) const
	{
#if defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)
		return b3Quaternion(_mm_add_ps(mVec128, q2.mVec128));
#elif defined(B3_USE_NEON)
		return b3Quaternion(vaddq_f32(mVec128, q2.mVec128));
#else
		const b3Quaternion& q1 = *this;
		return b3Quaternion(q1.getX() + q2.getX(), q1.getY() + q2.getY(), q1.getZ() + q2.getZ(), q1.m_floats[3] + q2.m_floats[3]);
#endif
	}

	/**@brief Return the difference between this quaternion and the other
	 * @param q2 The other quaternion */
	B3_FORCE_INLINE b3Quaternion
	operator-(const b3Quaternion& q2) const
	{
#if defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)
		return b3Quaternion(_mm_sub_ps(mVec128, q2.mVec128));
#elif defined(B3_USE_NEON)
		return b3Quaternion(vsubq_f32(mVec128, q2.mVec128));
#else
		const b3Quaternion& q1 = *this;
		return b3Quaternion(q1.getX() - q2.getX(), q1.getY() - q2.getY(), q1.getZ() - q2.getZ(), q1.m_floats[3] - q2.m_floats[3]);
#endif
	}

	/**@brief Return the negative of this quaternion
	 * This simply negates each element */
	B3_FORCE_INLINE b3Quaternion operator-() const
	{
#if defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)
		return b3Quaternion(_mm_xor_ps(mVec128, b3vMzeroMask));
#elif defined(B3_USE_NEON)
		return b3Quaternion((b3SimdFloat4)veorq_s32((int32x4_t)mVec128, (int32x4_t)b3vMzeroMask));
#else
		const b3Quaternion& q2 = *this;
		return b3Quaternion(-q2.getX(), -q2.getY(), -q2.getZ(), -q2.m_floats[3]);
#endif
	}

	/**@todo document this and its use */
	B3_FORCE_INLINE b3Quaternion farthest(const b3Quaternion& qd) const
	{
		b3Quaternion diff, sum;
		diff = *this - qd;
		sum = *this + qd;
		if (diff.dot(diff) > sum.dot(sum))
			return qd;
		return (-qd);
	}

	/**@todo document this and its use */
	B3_FORCE_INLINE b3Quaternion nearest(const b3Quaternion& qd) const
	{
		b3Quaternion diff, sum;
		diff = *this - qd;
		sum = *this + qd;
		if (diff.dot(diff) < sum.dot(sum))
			return qd;
		return (-qd);
	}

	/**@brief Return the quaternion which is the result of spherical linear interpolation between this and the other quaternion
	 * @param q The other quaternion to interpolate with
	 * @param t The ratio between this and q to interpolate. If t = 0 the result is this, if t = 1 the result is q.
	 * Slerp interpolates assuming constant velocity. */
	b3Quaternion slerp(const b3Quaternion& q, const b3Scalar& t) const
	{
		b3Scalar magnitude = b3Sqrt(length2() * q.length2());
		b3Assert(magnitude > b3Scalar(0));

		b3Scalar product = dot(q) / magnitude;
		if (b3Fabs(product) < b3Scalar(1))
		{
			// Take care of the long-angle case; see http://en.wikipedia.org/wiki/Slerp
			const b3Scalar sign = (product < 0) ? b3Scalar(-1) : b3Scalar(1);

			const b3Scalar theta = b3Acos(sign * product);
			const b3Scalar s1 = b3Sin(sign * t * theta);
			const b3Scalar d = b3Scalar(1.0) / b3Sin(theta);
			const b3Scalar s0 = b3Sin((b3Scalar(1.0) - t) * theta);

			return b3Quaternion(
				(m_floats[0] * s0 + q.getX() * s1) * d,
				(m_floats[1] * s0 + q.getY() * s1) * d,
				(m_floats[2] * s0 + q.getZ() * s1) * d,
				(m_floats[3] * s0 + q.m_floats[3] * s1) * d);
		}
		else
		{
			return *this;
		}
	}
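	/* Illustrative usage sketch (not part of the original header): interpolating halfway
	   between two orientations. The inputs are assumptions.

	       b3Quaternion a = b3Quaternion::getIdentity();
	       b3Quaternion b(b3MakeVector3(0, 0, 1), B3_PI);  // 180 degrees about Z
	       b3Quaternion mid = a.slerp(b, b3Scalar(0.5));   // roughly 90 degrees about Z
	*/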
	static const b3Quaternion& getIdentity()
	{
		static const b3Quaternion identityQuat(b3Scalar(0.), b3Scalar(0.), b3Scalar(0.), b3Scalar(1.));
		return identityQuat;
	}

	B3_FORCE_INLINE const b3Scalar& getW() const { return m_floats[3]; }
};
/**@brief Return the product of two quaternions */
B3_FORCE_INLINE b3Quaternion
operator*(const b3Quaternion& q1, const b3Quaternion& q2)
{
#if defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)
	__m128 vQ1 = q1.get128();
	__m128 vQ2 = q2.get128();
	__m128 A0, A1, B1, A2, B2;

	A1 = b3_pshufd_ps(vQ1, B3_SHUFFLE(0, 1, 2, 0));  // X Y z x     //    vtrn
	B1 = b3_pshufd_ps(vQ2, B3_SHUFFLE(3, 3, 3, 0));  // W W W X     //    vdup vext

	A1 = A1 * B1;

	A2 = b3_pshufd_ps(vQ1, B3_SHUFFLE(1, 2, 0, 1));  // Y Z X Y     //    vext
	B2 = b3_pshufd_ps(vQ2, B3_SHUFFLE(2, 0, 1, 1));  // z x Y Y     //    vtrn vdup

	A2 = A2 * B2;

	B1 = b3_pshufd_ps(vQ1, B3_SHUFFLE(2, 0, 1, 2));  // z x Y Z     //    vtrn vext
	B2 = b3_pshufd_ps(vQ2, B3_SHUFFLE(1, 2, 0, 2));  // Y Z x z     //    vext vtrn

	B1 = B1 * B2;  // A3 *= B3

	A0 = b3_splat_ps(vQ1, 3);  // A0
	A0 = A0 * vQ2;             // A0 * B0

	A1 = A1 + A2;  // AB12
	A0 = A0 - B1;  // AB03 = AB0 - AB3

	A1 = _mm_xor_ps(A1, b3vPPPM);  // change sign of the last element
	A0 = A0 + A1;                  // AB03 + AB12

	return b3Quaternion(A0);
#elif defined(B3_USE_NEON)
	float32x4_t vQ1 = q1.get128();
	float32x4_t vQ2 = q2.get128();
	float32x4_t A0, A1, B1, A2, B2, A3, B3;
	float32x2_t vQ1zx, vQ2wx, vQ1yz, vQ2zx, vQ2yz, vQ2xz;

	{
		float32x2x2_t tmp;
		tmp = vtrn_f32(vget_high_f32(vQ1), vget_low_f32(vQ1));  // {z x}, {w y}
		vQ1zx = tmp.val[0];

		tmp = vtrn_f32(vget_high_f32(vQ2), vget_low_f32(vQ2));  // {z x}, {w y}
		vQ2zx = tmp.val[0];
	}
	vQ2wx = vext_f32(vget_high_f32(vQ2), vget_low_f32(vQ2), 1);
	vQ1yz = vext_f32(vget_low_f32(vQ1), vget_high_f32(vQ1), 1);
	vQ2yz = vext_f32(vget_low_f32(vQ2), vget_high_f32(vQ2), 1);
	vQ2xz = vext_f32(vQ2zx, vQ2zx, 1);

	A1 = vcombine_f32(vget_low_f32(vQ1), vQ1zx);                     // X Y z x
	B1 = vcombine_f32(vdup_lane_f32(vget_high_f32(vQ2), 1), vQ2wx);  // W W W X
	A2 = vcombine_f32(vQ1yz, vget_low_f32(vQ1));
	B2 = vcombine_f32(vQ2zx, vdup_lane_f32(vget_low_f32(vQ2), 1));
	A3 = vcombine_f32(vQ1zx, vQ1yz);  // Z X Y Z
	B3 = vcombine_f32(vQ2yz, vQ2xz);  // Y Z x z

	A1 = vmulq_f32(A1, B1);
	A2 = vmulq_f32(A2, B2);
	A3 = vmulq_f32(A3, B3);                           // A3 *= B3
	A0 = vmulq_lane_f32(vQ2, vget_high_f32(vQ1), 1);  // A0 * B0

	A1 = vaddq_f32(A1, A2);  // AB12 = AB1 + AB2
	A0 = vsubq_f32(A0, A3);  // AB03 = AB0 - AB3

	// change the sign of the last element
	A1 = (b3SimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)b3vPPPM);
	A0 = vaddq_f32(A0, A1);  // AB03 + AB12

	return b3Quaternion(A0);
#else
	return b3Quaternion(
		q1.getW() * q2.getX() + q1.getX() * q2.getW() + q1.getY() * q2.getZ() - q1.getZ() * q2.getY(),
		q1.getW() * q2.getY() + q1.getY() * q2.getW() + q1.getZ() * q2.getX() - q1.getX() * q2.getZ(),
		q1.getW() * q2.getZ() + q1.getZ() * q2.getW() + q1.getX() * q2.getY() - q1.getY() * q2.getX(),
		q1.getW() * q2.getW() - q1.getX() * q2.getX() - q1.getY() * q2.getY() - q1.getZ() * q2.getZ());
#endif
}
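/* Illustrative usage sketch (not part of the original header): composing rotations with the
   free operator*. The inputs are assumptions.

       b3Quaternion yaw(b3MakeVector3(0, 0, 1), b3Scalar(0.3));
       b3Quaternion pitch(b3MakeVector3(0, 1, 0), b3Scalar(0.2));
       b3Quaternion combined = yaw * pitch;  // applies pitch first, then yaw
*/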
/**@brief Return the product of a quaternion and a vector, treating the vector as a pure quaternion (w = 0) */
B3_FORCE_INLINE b3Quaternion
operator*(const b3Quaternion& q, const b3Vector3& w)
{
#if defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)
	__m128 vQ1 = q.get128();
	__m128 vQ2 = w.get128();
	__m128 A1, B1, A2, B2, A3, B3;

	A1 = b3_pshufd_ps(vQ1, B3_SHUFFLE(3, 3, 3, 0));
	B1 = b3_pshufd_ps(vQ2, B3_SHUFFLE(0, 1, 2, 0));

	A1 = A1 * B1;

	A2 = b3_pshufd_ps(vQ1, B3_SHUFFLE(1, 2, 0, 1));
	B2 = b3_pshufd_ps(vQ2, B3_SHUFFLE(2, 0, 1, 1));

	A2 = A2 * B2;

	A3 = b3_pshufd_ps(vQ1, B3_SHUFFLE(2, 0, 1, 2));
	B3 = b3_pshufd_ps(vQ2, B3_SHUFFLE(1, 2, 0, 2));

	A3 = A3 * B3;  // A3 *= B3

	A1 = A1 + A2;                  // AB12
	A1 = _mm_xor_ps(A1, b3vPPPM);  // change sign of the last element
	A1 = A1 - A3;                  // AB123 = AB12 - AB3

	return b3Quaternion(A1);
#elif defined(B3_USE_NEON)
	float32x4_t vQ1 = q.get128();
	float32x4_t vQ2 = w.get128();
	float32x4_t A1, B1, A2, B2, A3, B3;
	float32x2_t vQ1wx, vQ2zx, vQ1yz, vQ2yz, vQ1zx, vQ2xz;

	vQ1wx = vext_f32(vget_high_f32(vQ1), vget_low_f32(vQ1), 1);
	{
		float32x2x2_t tmp;

		tmp = vtrn_f32(vget_high_f32(vQ2), vget_low_f32(vQ2));  // {z x}, {w y}
		vQ2zx = tmp.val[0];

		tmp = vtrn_f32(vget_high_f32(vQ1), vget_low_f32(vQ1));  // {z x}, {w y}
		vQ1zx = tmp.val[0];
	}

	vQ1yz = vext_f32(vget_low_f32(vQ1), vget_high_f32(vQ1), 1);
	vQ2yz = vext_f32(vget_low_f32(vQ2), vget_high_f32(vQ2), 1);
	vQ2xz = vext_f32(vQ2zx, vQ2zx, 1);

	A1 = vcombine_f32(vdup_lane_f32(vget_high_f32(vQ1), 1), vQ1wx);  // W W W X
	B1 = vcombine_f32(vget_low_f32(vQ2), vQ2zx);                     // X Y z x
	A2 = vcombine_f32(vQ1yz, vget_low_f32(vQ1));
	B2 = vcombine_f32(vQ2zx, vdup_lane_f32(vget_low_f32(vQ2), 1));
	A3 = vcombine_f32(vQ1zx, vQ1yz);  // Z X Y Z
	B3 = vcombine_f32(vQ2yz, vQ2xz);  // Y Z x z

	A1 = vmulq_f32(A1, B1);
	A2 = vmulq_f32(A2, B2);
	A3 = vmulq_f32(A3, B3);  // A3 *= B3

	A1 = vaddq_f32(A1, A2);  // AB12 = AB1 + AB2

	// change the sign of the last element
	A1 = (b3SimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)b3vPPPM);

	A1 = vsubq_f32(A1, A3);  // AB123 = AB12 - AB3

	return b3Quaternion(A1);
#else
	return b3Quaternion(
		q.getW() * w.getX() + q.getY() * w.getZ() - q.getZ() * w.getY(),
		q.getW() * w.getY() + q.getZ() * w.getX() - q.getX() * w.getZ(),
		q.getW() * w.getZ() + q.getX() * w.getY() - q.getY() * w.getX(),
		-q.getX() * w.getX() - q.getY() * w.getY() - q.getZ() * w.getZ());
#endif
}
/**@brief Return the product of a vector and a quaternion, treating the vector as a pure quaternion (w = 0) */
B3_FORCE_INLINE b3Quaternion
operator*(const b3Vector3& w, const b3Quaternion& q)
{
#if defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)
	__m128 vQ1 = w.get128();
	__m128 vQ2 = q.get128();
	__m128 A1, B1, A2, B2, A3, B3;

	A1 = b3_pshufd_ps(vQ1, B3_SHUFFLE(0, 1, 2, 0));  // X Y z x
	B1 = b3_pshufd_ps(vQ2, B3_SHUFFLE(3, 3, 3, 0));  // W W W X

	A1 = A1 * B1;

	A2 = b3_pshufd_ps(vQ1, B3_SHUFFLE(1, 2, 0, 1));
	B2 = b3_pshufd_ps(vQ2, B3_SHUFFLE(2, 0, 1, 1));

	A2 = A2 * B2;

	A3 = b3_pshufd_ps(vQ1, B3_SHUFFLE(2, 0, 1, 2));
	B3 = b3_pshufd_ps(vQ2, B3_SHUFFLE(1, 2, 0, 2));

	A3 = A3 * B3;  // A3 *= B3

	A1 = A1 + A2;                  // AB12
	A1 = _mm_xor_ps(A1, b3vPPPM);  // change sign of the last element
	A1 = A1 - A3;                  // AB123 = AB12 - AB3

	return b3Quaternion(A1);
#elif defined(B3_USE_NEON)
	float32x4_t vQ1 = w.get128();
	float32x4_t vQ2 = q.get128();
	float32x4_t A1, B1, A2, B2, A3, B3;
	float32x2_t vQ1zx, vQ2wx, vQ1yz, vQ2zx, vQ2yz, vQ2xz;

	{
		float32x2x2_t tmp;

		tmp = vtrn_f32(vget_high_f32(vQ1), vget_low_f32(vQ1));  // {z x}, {w y}
		vQ1zx = tmp.val[0];

		tmp = vtrn_f32(vget_high_f32(vQ2), vget_low_f32(vQ2));  // {z x}, {w y}
		vQ2zx = tmp.val[0];
	}
	vQ2wx = vext_f32(vget_high_f32(vQ2), vget_low_f32(vQ2), 1);
	vQ1yz = vext_f32(vget_low_f32(vQ1), vget_high_f32(vQ1), 1);
	vQ2yz = vext_f32(vget_low_f32(vQ2), vget_high_f32(vQ2), 1);
	vQ2xz = vext_f32(vQ2zx, vQ2zx, 1);

	A1 = vcombine_f32(vget_low_f32(vQ1), vQ1zx);                     // X Y z x
	B1 = vcombine_f32(vdup_lane_f32(vget_high_f32(vQ2), 1), vQ2wx);  // W W W X
	A2 = vcombine_f32(vQ1yz, vget_low_f32(vQ1));
	B2 = vcombine_f32(vQ2zx, vdup_lane_f32(vget_low_f32(vQ2), 1));
	A3 = vcombine_f32(vQ1zx, vQ1yz);  // Z X Y Z
	B3 = vcombine_f32(vQ2yz, vQ2xz);  // Y Z x z

	A1 = vmulq_f32(A1, B1);
	A2 = vmulq_f32(A2, B2);
	A3 = vmulq_f32(A3, B3);  // A3 *= B3

	A1 = vaddq_f32(A1, A2);  // AB12 = AB1 + AB2

	// change the sign of the last element
	A1 = (b3SimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)b3vPPPM);

	A1 = vsubq_f32(A1, A3);  // AB123 = AB12 - AB3

	return b3Quaternion(A1);
#else
	return b3Quaternion(
		+w.getX() * q.getW() + w.getY() * q.getZ() - w.getZ() * q.getY(),
		+w.getY() * q.getW() + w.getZ() * q.getX() - w.getX() * q.getZ(),
		+w.getZ() * q.getW() + w.getX() * q.getY() - w.getY() * q.getX(),
		-w.getX() * q.getX() - w.getY() * q.getY() - w.getZ() * q.getZ());
#endif
}
/**@brief Calculate the dot product between two quaternions */
B3_FORCE_INLINE b3Scalar
b3Dot(const b3Quaternion& q1, const b3Quaternion& q2)
{
	return q1.dot(q2);
}

/**@brief Return the length of a quaternion */
B3_FORCE_INLINE b3Scalar
b3Length(const b3Quaternion& q)
{
	return q.length();
}

/**@brief Return the angle between two quaternions */
B3_FORCE_INLINE b3Scalar
b3Angle(const b3Quaternion& q1, const b3Quaternion& q2)
{
	return q1.angle(q2);
}

/**@brief Return the inverse of a quaternion */
B3_FORCE_INLINE b3Quaternion
b3Inverse(const b3Quaternion& q)
{
	return q.inverse();
}

/**@brief Return the result of spherical linear interpolation between two quaternions
 * @param q1 The first quaternion
 * @param q2 The second quaternion
 * @param t The ratio between q1 and q2. t = 0 returns q1, t = 1 returns q2
 * Slerp assumes constant velocity between positions. */
B3_FORCE_INLINE b3Quaternion
b3Slerp(const b3Quaternion& q1, const b3Quaternion& q2, const b3Scalar& t)
{
	return q1.slerp(q2, t);
}

B3_FORCE_INLINE b3Quaternion
b3QuatMul(const b3Quaternion& rot0, const b3Quaternion& rot1)
{
	return rot0 * rot1;
}
B3_FORCE_INLINE b3Quaternion
b3QuatNormalized(const b3Quaternion& orn)
{
	return orn.normalized();
}

B3_FORCE_INLINE b3Vector3
b3QuatRotate(const b3Quaternion& rotation, const b3Vector3& v)
{
	b3Quaternion q = rotation * v;
	q *= rotation.inverse();
#if defined(B3_USE_SSE_IN_API) && defined(B3_USE_SSE)
	return b3MakeVector3(_mm_and_ps(q.get128(), b3vFFF0fMask));
#elif defined(B3_USE_NEON)
	return b3MakeVector3((float32x4_t)vandq_s32((int32x4_t)q.get128(), b3vFFF0Mask));
#else
	return b3MakeVector3(q.getX(), q.getY(), q.getZ());
#endif
}
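/* Illustrative usage sketch (not part of the original header): rotating a point with a
   quaternion. The inputs are assumptions.

       b3Quaternion rot(b3MakeVector3(0, 0, 1), B3_PI * b3Scalar(0.5));  // 90 degrees about Z
       b3Vector3 p = b3MakeVector3(1, 0, 0);
       b3Vector3 rotated = b3QuatRotate(rot, p);  // approximately (0, 1, 0)
*/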
B3_FORCE_INLINE b3Quaternion
b3ShortestArcQuat(const b3Vector3& v0, const b3Vector3& v1)  // Game Programming Gems 2.10. make sure v0,v1 are normalized
{
	b3Vector3 c = v0.cross(v1);
	b3Scalar d = v0.dot(v1);

	if (d < -1.0 + B3_EPSILON)
	{
		b3Vector3 n, unused;
		b3PlaneSpace1(v0, n, unused);
		return b3Quaternion(n.getX(), n.getY(), n.getZ(), 0.0f);  // just pick any vector that is orthogonal to v0
	}

	b3Scalar s = b3Sqrt((1.0f + d) * 2.0f);
	b3Scalar rs = 1.0f / s;

	return b3Quaternion(c.getX() * rs, c.getY() * rs, c.getZ() * rs, s * 0.5f);
}
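/* Illustrative usage sketch (not part of the original header): the shortest rotation taking
   one unit direction onto another. The inputs are assumptions and must already be normalized.

       b3Vector3 from = b3MakeVector3(1, 0, 0);
       b3Vector3 to = b3MakeVector3(0, 1, 0);
       b3Quaternion arc = b3ShortestArcQuat(from, to);  // 90 degrees about Z
       // b3QuatRotate(arc, from) is approximately equal to "to"
*/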
B3_FORCE_INLINE b3Quaternion
b3ShortestArcQuatNormalize2(b3Vector3& v0, b3Vector3& v1)
{
	v0.normalize();
	v1.normalize();
	return b3ShortestArcQuat(v0, v1);
}

#endif  //B3_SIMD__QUATERNION_H_