  1. // Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
  2. // SPDX-FileCopyrightText: 2021 Jorrit Rouwe
  3. // SPDX-License-Identifier: MIT
  4. #include <Jolt/Math/Vec4.h>
  5. #include <Jolt/Math/UVec4.h>
  6. #include <Jolt/Core/HashCombine.h>
  7. JPH_SUPPRESS_WARNINGS_STD_BEGIN
  8. #include <random>
  9. JPH_SUPPRESS_WARNINGS_STD_END
  10. // Create a std::hash for Vec3
  11. JPH_MAKE_HASHABLE(JPH::Vec3, t.GetX(), t.GetY(), t.GetZ())
  12. JPH_NAMESPACE_BEGIN
// Asserts the class invariant that the (unused) W lane mirrors the Z lane.
// Only active when floating point exceptions are enabled, since a garbage W could otherwise trigger them.
void Vec3::CheckW() const
{
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
	// Compare the raw bit patterns instead of the float values to avoid asserts when both components are NaN
	JPH_ASSERT(reinterpret_cast<const uint32 *>(mF32)[2] == reinterpret_cast<const uint32 *>(mF32)[3]);
#endif // JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
}
// Returns inValue with the Z lane replicated into W, so arithmetic on the unused W lane
// cannot raise floating point exceptions. A no-op when FP exceptions are disabled.
JPH_INLINE Vec3::Type Vec3::sFixW(Type inValue)
{
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
#if defined(JPH_USE_SSE)
	return _mm_shuffle_ps(inValue, inValue, _MM_SHUFFLE(2, 2, 1, 0));
#elif defined(JPH_USE_NEON)
	return JPH_NEON_SHUFFLE_F32x4(inValue, inValue, 0, 1, 2, 2);
#else
	Type value;
	value.mData[0] = inValue.mData[0];
	value.mData[1] = inValue.mData[1];
	value.mData[2] = inValue.mData[2];
	value.mData[3] = inValue.mData[2]; // W := Z
	return value;
#endif
#else
	return inValue;
#endif // JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
}
// Construct from a Vec4, dropping its W by overwriting it with Z (see sFixW)
Vec3::Vec3(Vec4Arg inRHS) :
	mValue(sFixW(inRHS.mValue))
{
}
// Construct from a (possibly unaligned) Float3; W is set equal to Z to keep the invariant
Vec3::Vec3(const Float3 &inV)
{
#if defined(JPH_USE_SSE)
	// Load the three scalars individually to avoid reading past the end of inV
	Type x = _mm_load_ss(&inV.x);
	Type y = _mm_load_ss(&inV.y);
	Type z = _mm_load_ss(&inV.z);
	Type xy = _mm_unpacklo_ps(x, y);
	mValue = _mm_shuffle_ps(xy, z, _MM_SHUFFLE(0, 0, 1, 0)); // Assure Z and W are the same
#elif defined(JPH_USE_NEON)
	float32x2_t xy = vld1_f32(&inV.x);
	float32x2_t zz = vdup_n_f32(inV.z); // Assure Z and W are the same
	mValue = vcombine_f32(xy, zz);
#else
	mF32[0] = inV[0];
	mF32[1] = inV[1];
	mF32[2] = inV[2];
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
	mF32[3] = inV[2]; // W := Z
#endif
#endif
}
// Construct from three components; W is set equal to Z to keep the invariant
Vec3::Vec3(float inX, float inY, float inZ)
{
#if defined(JPH_USE_SSE)
	mValue = _mm_set_ps(inZ, inZ, inY, inX); // _mm_set_ps takes arguments high to low lane
#elif defined(JPH_USE_NEON)
	// Pack X and Y bit patterns into one 64-bit value, duplicate Z into the upper half
	uint32x2_t xy = vcreate_u32(static_cast<uint64>(BitCast<uint32>(inX)) | (static_cast<uint64>(BitCast<uint32>(inY)) << 32));
	uint32x2_t zz = vreinterpret_u32_f32(vdup_n_f32(inZ));
	mValue = vreinterpretq_f32_u32(vcombine_u32(xy, zz));
#else
	mF32[0] = inX;
	mF32[1] = inY;
	mF32[2] = inZ;
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
	mF32[3] = inZ; // W := Z
#endif
#endif
}
// Returns a vector with components picked by template index (0 = X .. 3 = W); W of the result copies Z
template<uint32 SwizzleX, uint32 SwizzleY, uint32 SwizzleZ>
Vec3 Vec3::Swizzle() const
{
	static_assert(SwizzleX <= 3, "SwizzleX template parameter out of range");
	static_assert(SwizzleY <= 3, "SwizzleY template parameter out of range");
	static_assert(SwizzleZ <= 3, "SwizzleZ template parameter out of range");
#if defined(JPH_USE_SSE)
	return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(SwizzleZ, SwizzleZ, SwizzleY, SwizzleX)); // Assure Z and W are the same
#elif defined(JPH_USE_NEON)
	return JPH_NEON_SHUFFLE_F32x4(mValue, mValue, SwizzleX, SwizzleY, SwizzleZ, SwizzleZ);
#else
	return Vec3(mF32[SwizzleX], mF32[SwizzleY], mF32[SwizzleZ]);
#endif
}
// Returns the zero vector (0, 0, 0)
Vec3 Vec3::sZero()
{
#if defined(JPH_USE_SSE)
	return _mm_setzero_ps();
#elif defined(JPH_USE_NEON)
	return vdupq_n_f32(0);
#else
	return Vec3(0, 0, 0);
#endif
}
// Returns a vector with all components (including W) set to inV
Vec3 Vec3::sReplicate(float inV)
{
#if defined(JPH_USE_SSE)
	return _mm_set1_ps(inV);
#elif defined(JPH_USE_NEON)
	return vdupq_n_f32(inV);
#else
	return Vec3(inV, inV, inV);
#endif
}
// Returns a vector with all components set to quiet NaN (useful to mark uninitialized data)
Vec3 Vec3::sNaN()
{
	return sReplicate(numeric_limits<float>::quiet_NaN());
}
// Loads a Float3 with a 16-byte read; "unsafe" because it reads 4 bytes past the end of inV
// (the caller must guarantee that memory is readable). W is then fixed up to equal Z.
Vec3 Vec3::sLoadFloat3Unsafe(const Float3 &inV)
{
#if defined(JPH_USE_SSE)
	Type v = _mm_loadu_ps(&inV.x);
#elif defined(JPH_USE_NEON)
	Type v = vld1q_f32(&inV.x);
#else
	Type v = { inV.x, inV.y, inV.z };
#endif
	return sFixW(v);
}
// Component-wise minimum of two vectors
Vec3 Vec3::sMin(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
	return _mm_min_ps(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
	return vminq_f32(inV1.mValue, inV2.mValue);
#else
	return Vec3(min(inV1.mF32[0], inV2.mF32[0]),
				min(inV1.mF32[1], inV2.mF32[1]),
				min(inV1.mF32[2], inV2.mF32[2]));
#endif
}
// Component-wise maximum of two vectors
Vec3 Vec3::sMax(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
	return _mm_max_ps(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
	return vmaxq_f32(inV1.mValue, inV2.mValue);
#else
	return Vec3(max(inV1.mF32[0], inV2.mF32[0]),
				max(inV1.mF32[1], inV2.mF32[1]),
				max(inV1.mF32[2], inV2.mF32[2]));
#endif
}
// Component-wise clamp of inV to [inMin, inMax] (assumes inMin <= inMax per component)
Vec3 Vec3::sClamp(Vec3Arg inV, Vec3Arg inMin, Vec3Arg inMax)
{
	return sMax(sMin(inV, inMax), inMin);
}
// Component-wise equality test; each lane of the result is all-ones when equal, zero otherwise
UVec4 Vec3::sEquals(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
	return _mm_castps_si128(_mm_cmpeq_ps(inV1.mValue, inV2.mValue));
#elif defined(JPH_USE_NEON)
	return vceqq_f32(inV1.mValue, inV2.mValue);
#else
	uint32 z = inV1.mF32[2] == inV2.mF32[2]? 0xffffffffu : 0;
	// W lane duplicates Z to match the SIMD paths (where W mirrors Z)
	return UVec4(inV1.mF32[0] == inV2.mF32[0]? 0xffffffffu : 0,
				 inV1.mF32[1] == inV2.mF32[1]? 0xffffffffu : 0,
				 z,
				 z);
#endif
}
// Component-wise less-than test; each lane of the result is all-ones when true, zero otherwise
UVec4 Vec3::sLess(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
	return _mm_castps_si128(_mm_cmplt_ps(inV1.mValue, inV2.mValue));
#elif defined(JPH_USE_NEON)
	return vcltq_f32(inV1.mValue, inV2.mValue);
#else
	uint32 z = inV1.mF32[2] < inV2.mF32[2]? 0xffffffffu : 0;
	// W lane duplicates Z to match the SIMD paths
	return UVec4(inV1.mF32[0] < inV2.mF32[0]? 0xffffffffu : 0,
				 inV1.mF32[1] < inV2.mF32[1]? 0xffffffffu : 0,
				 z,
				 z);
#endif
}
// Component-wise less-or-equal test; each lane of the result is all-ones when true, zero otherwise
UVec4 Vec3::sLessOrEqual(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
	return _mm_castps_si128(_mm_cmple_ps(inV1.mValue, inV2.mValue));
#elif defined(JPH_USE_NEON)
	return vcleq_f32(inV1.mValue, inV2.mValue);
#else
	uint32 z = inV1.mF32[2] <= inV2.mF32[2]? 0xffffffffu : 0;
	// W lane duplicates Z to match the SIMD paths
	return UVec4(inV1.mF32[0] <= inV2.mF32[0]? 0xffffffffu : 0,
				 inV1.mF32[1] <= inV2.mF32[1]? 0xffffffffu : 0,
				 z,
				 z);
#endif
}
// Component-wise greater-than test; each lane of the result is all-ones when true, zero otherwise
UVec4 Vec3::sGreater(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
	return _mm_castps_si128(_mm_cmpgt_ps(inV1.mValue, inV2.mValue));
#elif defined(JPH_USE_NEON)
	return vcgtq_f32(inV1.mValue, inV2.mValue);
#else
	uint32 z = inV1.mF32[2] > inV2.mF32[2]? 0xffffffffu : 0;
	// W lane duplicates Z to match the SIMD paths
	return UVec4(inV1.mF32[0] > inV2.mF32[0]? 0xffffffffu : 0,
				 inV1.mF32[1] > inV2.mF32[1]? 0xffffffffu : 0,
				 z,
				 z);
#endif
}
// Component-wise greater-or-equal test; each lane of the result is all-ones when true, zero otherwise
UVec4 Vec3::sGreaterOrEqual(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
	return _mm_castps_si128(_mm_cmpge_ps(inV1.mValue, inV2.mValue));
#elif defined(JPH_USE_NEON)
	return vcgeq_f32(inV1.mValue, inV2.mValue);
#else
	uint32 z = inV1.mF32[2] >= inV2.mF32[2]? 0xffffffffu : 0;
	// W lane duplicates Z to match the SIMD paths
	return UVec4(inV1.mF32[0] >= inV2.mF32[0]? 0xffffffffu : 0,
				 inV1.mF32[1] >= inV2.mF32[1]? 0xffffffffu : 0,
				 z,
				 z);
#endif
}
// Returns inMul1 * inMul2 + inAdd per component. Uses a true FMA instruction only when
// JPH_USE_FMADD is defined; otherwise a separate multiply and add (results may differ in the last bit).
Vec3 Vec3::sFusedMultiplyAdd(Vec3Arg inMul1, Vec3Arg inMul2, Vec3Arg inAdd)
{
#if defined(JPH_USE_SSE)
#ifdef JPH_USE_FMADD
	return _mm_fmadd_ps(inMul1.mValue, inMul2.mValue, inAdd.mValue);
#else
	return _mm_add_ps(_mm_mul_ps(inMul1.mValue, inMul2.mValue), inAdd.mValue);
#endif
#elif defined(JPH_USE_NEON)
	return vmlaq_f32(inAdd.mValue, inMul1.mValue, inMul2.mValue);
#else
	return Vec3(inMul1.mF32[0] * inMul2.mF32[0] + inAdd.mF32[0],
				inMul1.mF32[1] * inMul2.mF32[1] + inAdd.mF32[1],
				inMul1.mF32[2] * inMul2.mF32[2] + inAdd.mF32[2]);
#endif
}
// Per-component select: returns inSet where the high bit of the corresponding inControl lane is set,
// inNotSet otherwise. Only the sign bit of each control lane is significant.
Vec3 Vec3::sSelect(Vec3Arg inNotSet, Vec3Arg inSet, UVec4Arg inControl)
{
#if defined(JPH_USE_SSE4_1) && !defined(JPH_PLATFORM_WASM) // _mm_blendv_ps has problems on FireFox
	Type v = _mm_blendv_ps(inNotSet.mValue, inSet.mValue, _mm_castsi128_ps(inControl.mValue));
	return sFixW(v);
#elif defined(JPH_USE_SSE)
	// Broadcast the sign bit across each lane, then merge with and/andnot/or
	__m128 is_set = _mm_castsi128_ps(_mm_srai_epi32(inControl.mValue, 31));
	Type v = _mm_or_ps(_mm_and_ps(is_set, inSet.mValue), _mm_andnot_ps(is_set, inNotSet.mValue));
	return sFixW(v);
#elif defined(JPH_USE_NEON)
	Type v = vbslq_f32(vreinterpretq_u32_s32(vshrq_n_s32(vreinterpretq_s32_u32(inControl.mValue), 31)), inSet.mValue, inNotSet.mValue);
	return sFixW(v);
#else
	Vec3 result;
	for (int i = 0; i < 3; i++)
		result.mF32[i] = (inControl.mU32[i] & 0x80000000u) ? inSet.mF32[i] : inNotSet.mF32[i];
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
	result.mF32[3] = result.mF32[2]; // Keep W in sync with Z
#endif // JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
	return result;
#endif
}
// Bitwise OR of the two vectors' float bit patterns
Vec3 Vec3::sOr(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
	return _mm_or_ps(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
	return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(inV1.mValue), vreinterpretq_u32_f32(inV2.mValue)));
#else
	return Vec3(UVec4::sOr(inV1.ReinterpretAsInt(), inV2.ReinterpretAsInt()).ReinterpretAsFloat());
#endif
}
// Bitwise XOR of the two vectors' float bit patterns
Vec3 Vec3::sXor(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
	return _mm_xor_ps(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
	return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(inV1.mValue), vreinterpretq_u32_f32(inV2.mValue)));
#else
	return Vec3(UVec4::sXor(inV1.ReinterpretAsInt(), inV2.ReinterpretAsInt()).ReinterpretAsFloat());
#endif
}
// Bitwise AND of the two vectors' float bit patterns
Vec3 Vec3::sAnd(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
	return _mm_and_ps(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
	return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(inV1.mValue), vreinterpretq_u32_f32(inV2.mValue)));
#else
	return Vec3(UVec4::sAnd(inV1.ReinterpretAsInt(), inV2.ReinterpretAsInt()).ReinterpretAsFloat());
#endif
}
// Returns the unit vector given by spherical coordinates:
// (sin(theta) * cos(phi), sin(theta) * sin(phi), cos(theta))
Vec3 Vec3::sUnitSpherical(float inTheta, float inPhi)
{
	// Compute both sines and cosines with a single vectorized SinCos call
	Vec4 s, c;
	Vec4(inTheta, inPhi, 0, 0).SinCos(s, c);
	return Vec3(s.GetX() * c.GetY(), s.GetX() * s.GetY(), c.GetX());
}
  302. template <class Random>
  303. Vec3 Vec3::sRandom(Random &inRandom)
  304. {
  305. std::uniform_real_distribution<float> zero_to_one(0.0f, 1.0f);
  306. float theta = JPH_PI * zero_to_one(inRandom);
  307. float phi = 2.0f * JPH_PI * zero_to_one(inRandom);
  308. return sUnitSpherical(theta, phi);
  309. }
// Exact equality of the X, Y and Z components (W is ignored)
bool Vec3::operator == (Vec3Arg inV2) const
{
	return sEquals(*this, inV2).TestAllXYZTrue();
}
// True when the squared distance between the two vectors is at most inMaxDistSq
bool Vec3::IsClose(Vec3Arg inV2, float inMaxDistSq) const
{
	return (inV2 - *this).LengthSq() <= inMaxDistSq;
}
// True when the squared length of this vector is at most inMaxDistSq
bool Vec3::IsNearZero(float inMaxDistSq) const
{
	return LengthSq() <= inMaxDistSq;
}
// Component-wise multiplication
Vec3 Vec3::operator * (Vec3Arg inV2) const
{
#if defined(JPH_USE_SSE)
	return _mm_mul_ps(mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
	return vmulq_f32(mValue, inV2.mValue);
#else
	return Vec3(mF32[0] * inV2.mF32[0], mF32[1] * inV2.mF32[1], mF32[2] * inV2.mF32[2]);
#endif
}
// Multiply all components by a scalar
Vec3 Vec3::operator * (float inV2) const
{
#if defined(JPH_USE_SSE)
	return _mm_mul_ps(mValue, _mm_set1_ps(inV2));
#elif defined(JPH_USE_NEON)
	return vmulq_n_f32(mValue, inV2);
#else
	return Vec3(mF32[0] * inV2, mF32[1] * inV2, mF32[2] * inV2);
#endif
}
// Free function: scalar * vector (same result as vector * scalar)
Vec3 operator * (float inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
	return _mm_mul_ps(_mm_set1_ps(inV1), inV2.mValue);
#elif defined(JPH_USE_NEON)
	return vmulq_n_f32(inV2.mValue, inV1);
#else
	return Vec3(inV1 * inV2.mF32[0], inV1 * inV2.mF32[1], inV1 * inV2.mF32[2]);
#endif
}
// Divide all components by a scalar (no divide-by-zero check)
Vec3 Vec3::operator / (float inV2) const
{
#if defined(JPH_USE_SSE)
	return _mm_div_ps(mValue, _mm_set1_ps(inV2));
#elif defined(JPH_USE_NEON)
	return vdivq_f32(mValue, vdupq_n_f32(inV2));
#else
	return Vec3(mF32[0] / inV2, mF32[1] / inV2, mF32[2] / inV2);
#endif
}
// In-place multiply all components by a scalar
Vec3 &Vec3::operator *= (float inV2)
{
#if defined(JPH_USE_SSE)
	mValue = _mm_mul_ps(mValue, _mm_set1_ps(inV2));
#elif defined(JPH_USE_NEON)
	mValue = vmulq_n_f32(mValue, inV2);
#else
	for (int i = 0; i < 3; ++i)
		mF32[i] *= inV2;
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
	mF32[3] = mF32[2]; // Keep W in sync with Z
#endif
#endif
	return *this;
}
// In-place component-wise multiplication
Vec3 &Vec3::operator *= (Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
	mValue = _mm_mul_ps(mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
	mValue = vmulq_f32(mValue, inV2.mValue);
#else
	for (int i = 0; i < 3; ++i)
		mF32[i] *= inV2.mF32[i];
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
	mF32[3] = mF32[2]; // Keep W in sync with Z
#endif
#endif
	return *this;
}
// In-place divide all components by a scalar (no divide-by-zero check)
Vec3 &Vec3::operator /= (float inV2)
{
#if defined(JPH_USE_SSE)
	mValue = _mm_div_ps(mValue, _mm_set1_ps(inV2));
#elif defined(JPH_USE_NEON)
	mValue = vdivq_f32(mValue, vdupq_n_f32(inV2));
#else
	for (int i = 0; i < 3; ++i)
		mF32[i] /= inV2;
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
	mF32[3] = mF32[2]; // Keep W in sync with Z
#endif
#endif
	return *this;
}
// Component-wise addition
Vec3 Vec3::operator + (Vec3Arg inV2) const
{
#if defined(JPH_USE_SSE)
	return _mm_add_ps(mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
	return vaddq_f32(mValue, inV2.mValue);
#else
	return Vec3(mF32[0] + inV2.mF32[0], mF32[1] + inV2.mF32[1], mF32[2] + inV2.mF32[2]);
#endif
}
// In-place component-wise addition
Vec3 &Vec3::operator += (Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
	mValue = _mm_add_ps(mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
	mValue = vaddq_f32(mValue, inV2.mValue);
#else
	for (int i = 0; i < 3; ++i)
		mF32[i] += inV2.mF32[i];
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
	mF32[3] = mF32[2]; // Keep W in sync with Z
#endif
#endif
	return *this;
}
// Negate all components. Under JPH_CROSS_PLATFORM_DETERMINISTIC negation is done as 0 - x
// instead of a sign flip, so that -0.0 / NaN handling is bit-identical across platforms.
Vec3 Vec3::operator - () const
{
#if defined(JPH_USE_SSE)
	return _mm_sub_ps(_mm_setzero_ps(), mValue);
#elif defined(JPH_USE_NEON)
#ifdef JPH_CROSS_PLATFORM_DETERMINISTIC
	return vsubq_f32(vdupq_n_f32(0), mValue);
#else
	return vnegq_f32(mValue);
#endif
#else
#ifdef JPH_CROSS_PLATFORM_DETERMINISTIC
	return Vec3(0.0f - mF32[0], 0.0f - mF32[1], 0.0f - mF32[2]);
#else
	return Vec3(-mF32[0], -mF32[1], -mF32[2]);
#endif
#endif
}
// Component-wise subtraction
Vec3 Vec3::operator - (Vec3Arg inV2) const
{
#if defined(JPH_USE_SSE)
	return _mm_sub_ps(mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
	return vsubq_f32(mValue, inV2.mValue);
#else
	return Vec3(mF32[0] - inV2.mF32[0], mF32[1] - inV2.mF32[1], mF32[2] - inV2.mF32[2]);
#endif
}
// In-place component-wise subtraction
Vec3 &Vec3::operator -= (Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
	mValue = _mm_sub_ps(mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
	mValue = vsubq_f32(mValue, inV2.mValue);
#else
	for (int i = 0; i < 3; ++i)
		mF32[i] -= inV2.mF32[i];
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
	mF32[3] = mF32[2]; // Keep W in sync with Z
#endif
#endif
	return *this;
}
// Component-wise division; the divisor's W must equal its Z so the unused W lane cannot divide by zero
Vec3 Vec3::operator / (Vec3Arg inV2) const
{
	inV2.CheckW(); // Check W equals Z to avoid div by zero
#if defined(JPH_USE_SSE)
	return _mm_div_ps(mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
	return vdivq_f32(mValue, inV2.mValue);
#else
	return Vec3(mF32[0] / inV2.mF32[0], mF32[1] / inV2.mF32[1], mF32[2] / inV2.mF32[2]);
#endif
}
// Returns a Vec4 with all four components set to X
Vec4 Vec3::SplatX() const
{
#if defined(JPH_USE_SSE)
	return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(0, 0, 0, 0));
#elif defined(JPH_USE_NEON)
	return vdupq_laneq_f32(mValue, 0);
#else
	return Vec4(mF32[0], mF32[0], mF32[0], mF32[0]);
#endif
}
// Returns a Vec4 with all four components set to Y
Vec4 Vec3::SplatY() const
{
#if defined(JPH_USE_SSE)
	return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(1, 1, 1, 1));
#elif defined(JPH_USE_NEON)
	return vdupq_laneq_f32(mValue, 1);
#else
	return Vec4(mF32[1], mF32[1], mF32[1], mF32[1]);
#endif
}
// Returns a Vec4 with all four components set to Z
Vec4 Vec3::SplatZ() const
{
#if defined(JPH_USE_SSE)
	return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(2, 2, 2, 2));
#elif defined(JPH_USE_NEON)
	return vdupq_laneq_f32(mValue, 2);
#else
	return Vec4(mF32[2], mF32[2], mF32[2], mF32[2]);
#endif
}
  516. int Vec3::GetLowestComponentIndex() const
  517. {
  518. return GetX() < GetY() ? (GetZ() < GetX() ? 2 : 0) : (GetZ() < GetY() ? 2 : 1);
  519. }
  520. int Vec3::GetHighestComponentIndex() const
  521. {
  522. return GetX() > GetY() ? (GetZ() > GetX() ? 2 : 0) : (GetZ() > GetY() ? 2 : 1);
  523. }
// Component-wise absolute value
Vec3 Vec3::Abs() const
{
#if defined(JPH_USE_AVX512)
	return _mm_range_ps(mValue, mValue, 0b1000); // Range op 0b1000 selects |value|
#elif defined(JPH_USE_SSE)
	return _mm_max_ps(_mm_sub_ps(_mm_setzero_ps(), mValue), mValue); // max(-x, x)
#elif defined(JPH_USE_NEON)
	return vabsq_f32(mValue);
#else
	return Vec3(abs(mF32[0]), abs(mF32[1]), abs(mF32[2]));
#endif
}
// Component-wise reciprocal 1 / x (full-precision divide, not a reciprocal estimate)
Vec3 Vec3::Reciprocal() const
{
	return sReplicate(1.0f) / mValue;
}
// Cross product: (y1*z2 - z1*y2, z1*x2 - x1*z2, x1*y2 - y1*x2)
Vec3 Vec3::Cross(Vec3Arg inV2) const
{
#if defined(JPH_USE_SSE)
	// Uses the identity cross(a, b) = shuffle(a * shuffle(b) - shuffle(a) * b)
	// so that only three shuffles are needed instead of four
	Type t1 = _mm_shuffle_ps(inV2.mValue, inV2.mValue, _MM_SHUFFLE(0, 0, 2, 1)); // Assure Z and W are the same
	t1 = _mm_mul_ps(t1, mValue);
	Type t2 = _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(0, 0, 2, 1)); // Assure Z and W are the same
	t2 = _mm_mul_ps(t2, inV2.mValue);
	Type t3 = _mm_sub_ps(t1, t2);
	return _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 2, 1)); // Assure Z and W are the same
#elif defined(JPH_USE_NEON)
	Type t1 = JPH_NEON_SHUFFLE_F32x4(inV2.mValue, inV2.mValue, 1, 2, 0, 0); // Assure Z and W are the same
	t1 = vmulq_f32(t1, mValue);
	Type t2 = JPH_NEON_SHUFFLE_F32x4(mValue, mValue, 1, 2, 0, 0); // Assure Z and W are the same
	t2 = vmulq_f32(t2, inV2.mValue);
	Type t3 = vsubq_f32(t1, t2);
	return JPH_NEON_SHUFFLE_F32x4(t3, t3, 1, 2, 0, 0); // Assure Z and W are the same
#else
	return Vec3(mF32[1] * inV2.mF32[2] - mF32[2] * inV2.mF32[1],
				mF32[2] * inV2.mF32[0] - mF32[0] * inV2.mF32[2],
				mF32[0] * inV2.mF32[1] - mF32[1] * inV2.mF32[0]);
#endif
}
// Dot product of the XYZ components, replicated into all components of a Vec3
Vec3 Vec3::DotV(Vec3Arg inV2) const
{
#if defined(JPH_USE_SSE4_1)
	return _mm_dp_ps(mValue, inV2.mValue, 0x7f); // Mask 0x7f: multiply lanes XYZ, broadcast sum to all lanes
#elif defined(JPH_USE_NEON)
	float32x4_t mul = vmulq_f32(mValue, inV2.mValue);
	mul = vsetq_lane_f32(0, mul, 3); // Exclude the W lane from the sum
	return vdupq_n_f32(vaddvq_f32(mul));
#else
	float dot = 0.0f;
	for (int i = 0; i < 3; i++)
		dot += mF32[i] * inV2.mF32[i];
	return Vec3::sReplicate(dot);
#endif
}
// Dot product of the XYZ components, replicated into all four components of a Vec4
Vec4 Vec3::DotV4(Vec3Arg inV2) const
{
#if defined(JPH_USE_SSE4_1)
	return _mm_dp_ps(mValue, inV2.mValue, 0x7f); // Mask 0x7f: multiply lanes XYZ, broadcast sum to all lanes
#elif defined(JPH_USE_NEON)
	float32x4_t mul = vmulq_f32(mValue, inV2.mValue);
	mul = vsetq_lane_f32(0, mul, 3); // Exclude the W lane from the sum
	return vdupq_n_f32(vaddvq_f32(mul));
#else
	float dot = 0.0f;
	for (int i = 0; i < 3; i++)
		dot += mF32[i] * inV2.mF32[i];
	return Vec4::sReplicate(dot);
#endif
}
// Dot product of the XYZ components, returned as a scalar
float Vec3::Dot(Vec3Arg inV2) const
{
#if defined(JPH_USE_SSE4_1)
	return _mm_cvtss_f32(_mm_dp_ps(mValue, inV2.mValue, 0x7f));
#elif defined(JPH_USE_NEON)
	float32x4_t mul = vmulq_f32(mValue, inV2.mValue);
	mul = vsetq_lane_f32(0, mul, 3); // Exclude the W lane from the sum
	return vaddvq_f32(mul);
#else
	float dot = 0.0f;
	for (int i = 0; i < 3; i++)
		dot += mF32[i] * inV2.mF32[i];
	return dot;
#endif
}
// Squared length of the vector (dot product with itself)
float Vec3::LengthSq() const
{
#if defined(JPH_USE_SSE4_1)
	return _mm_cvtss_f32(_mm_dp_ps(mValue, mValue, 0x7f));
#elif defined(JPH_USE_NEON)
	float32x4_t mul = vmulq_f32(mValue, mValue);
	mul = vsetq_lane_f32(0, mul, 3); // Exclude the W lane from the sum
	return vaddvq_f32(mul);
#else
	float len_sq = 0.0f;
	for (int i = 0; i < 3; i++)
		len_sq += mF32[i] * mF32[i];
	return len_sq;
#endif
}
// Length (Euclidean norm) of the vector
float Vec3::Length() const
{
#if defined(JPH_USE_SSE4_1)
	return _mm_cvtss_f32(_mm_sqrt_ss(_mm_dp_ps(mValue, mValue, 0x7f)));
#elif defined(JPH_USE_NEON)
	float32x4_t mul = vmulq_f32(mValue, mValue);
	mul = vsetq_lane_f32(0, mul, 3); // Exclude the W lane from the sum
	float32x2_t sum = vdup_n_f32(vaddvq_f32(mul));
	return vget_lane_f32(vsqrt_f32(sum), 0);
#else
	return sqrt(LengthSq());
#endif
}
// Component-wise square root
Vec3 Vec3::Sqrt() const
{
#if defined(JPH_USE_SSE)
	return _mm_sqrt_ps(mValue);
#elif defined(JPH_USE_NEON)
	return vsqrtq_f32(mValue);
#else
	return Vec3(sqrt(mF32[0]), sqrt(mF32[1]), sqrt(mF32[2]));
#endif
}
// Returns this vector scaled to unit length (undefined for the zero vector; see NormalizedOr)
Vec3 Vec3::Normalized() const
{
#if defined(JPH_USE_SSE4_1)
	return _mm_div_ps(mValue, _mm_sqrt_ps(_mm_dp_ps(mValue, mValue, 0x7f)));
#elif defined(JPH_USE_NEON)
	float32x4_t mul = vmulq_f32(mValue, mValue);
	mul = vsetq_lane_f32(0, mul, 3); // Exclude the W lane from the sum
	float32x4_t sum = vdupq_n_f32(vaddvq_f32(mul));
	return vdivq_f32(mValue, vsqrtq_f32(sum));
#else
	return *this / Length();
#endif
}
// Returns this vector scaled to unit length, or inZeroValue when the length is exactly zero
Vec3 Vec3::NormalizedOr(Vec3Arg inZeroValue) const
{
#if defined(JPH_USE_SSE4_1) && !defined(JPH_PLATFORM_WASM) // _mm_blendv_ps has problems on FireFox
	Type len_sq = _mm_dp_ps(mValue, mValue, 0x7f);
	Type is_zero = _mm_cmpeq_ps(len_sq, _mm_setzero_ps());
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
	// Branch before dividing: the div-by-zero would raise an FP exception even in the discarded lanes
	if (_mm_movemask_ps(is_zero) == 0xf)
		return inZeroValue;
	else
		return _mm_div_ps(mValue, _mm_sqrt_ps(len_sq));
#else
	// Branchless: compute the division unconditionally and select the fallback on zero length
	return _mm_blendv_ps(_mm_div_ps(mValue, _mm_sqrt_ps(len_sq)), inZeroValue.mValue, is_zero);
#endif // JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
#elif defined(JPH_USE_NEON)
	float32x4_t mul = vmulq_f32(mValue, mValue);
	mul = vsetq_lane_f32(0, mul, 3); // Exclude the W lane from the sum
	float32x4_t sum = vdupq_n_f32(vaddvq_f32(mul));
	float32x4_t len = vsqrtq_f32(sum);
	uint32x4_t is_zero = vceqq_f32(len, vdupq_n_f32(0));
	return vbslq_f32(is_zero, inZeroValue.mValue, vdivq_f32(mValue, len));
#else
	float len_sq = LengthSq();
	if (len_sq == 0.0f)
		return inZeroValue;
	else
		return *this / sqrt(len_sq);
#endif
}
// True when the squared length is within inTolerance of 1
bool Vec3::IsNormalized(float inTolerance) const
{
	return abs(LengthSq() - 1.0f) <= inTolerance;
}
// True when any of the X, Y or Z components is NaN (the W lane is ignored)
bool Vec3::IsNaN() const
{
#if defined(JPH_USE_AVX512)
	return (_mm_fpclass_ps_mask(mValue, 0b10000001) & 0x7) != 0; // Classify quiet and signaling NaN, mask to XYZ
#elif defined(JPH_USE_SSE)
	return (_mm_movemask_ps(_mm_cmpunord_ps(mValue, mValue)) & 0x7) != 0; // Unordered compare flags NaN lanes, mask to XYZ
#elif defined(JPH_USE_NEON)
	uint32x4_t mask = JPH_NEON_UINT32x4(1, 1, 1, 0); // Ignore the W lane
	uint32x4_t is_equal = vceqq_f32(mValue, mValue); // If a number is not equal to itself it's a NaN
	return vaddvq_u32(vandq_u32(is_equal, mask)) != 3;
#else
	return isnan(mF32[0]) || isnan(mF32[1]) || isnan(mF32[2]);
#endif
}
// Writes the X, Y and Z components to outV without touching memory past outV->z
void Vec3::StoreFloat3(Float3 *outV) const
{
#if defined(JPH_USE_SSE)
	// Store one lane at a time, rotating the next component into lane 0 between stores
	_mm_store_ss(&outV->x, mValue);
	Vec3 t = Swizzle<SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_UNUSED>();
	_mm_store_ss(&outV->y, t.mValue);
	t = t.Swizzle<SWIZZLE_Y, SWIZZLE_UNUSED, SWIZZLE_UNUSED>();
	_mm_store_ss(&outV->z, t.mValue);
#elif defined(JPH_USE_NEON)
	float32x2_t xy = vget_low_f32(mValue);
	vst1_f32(&outV->x, xy);
	vst1q_lane_f32(&outV->z, mValue, 2);
#else
	outV->x = mF32[0];
	outV->y = mF32[1];
	outV->z = mF32[2];
#endif
}
// Converts each component to an integer by truncation (all four lanes, including W)
UVec4 Vec3::ToInt() const
{
#if defined(JPH_USE_SSE)
	return _mm_cvttps_epi32(mValue);
#elif defined(JPH_USE_NEON)
	return vcvtq_u32_f32(mValue);
#else
	return UVec4(uint32(mF32[0]), uint32(mF32[1]), uint32(mF32[2]), uint32(mF32[3]));
#endif
}
// Reinterprets the raw float bits as integers (no numeric conversion)
UVec4 Vec3::ReinterpretAsInt() const
{
#if defined(JPH_USE_SSE)
	return UVec4(_mm_castps_si128(mValue));
#elif defined(JPH_USE_NEON)
	return vreinterpretq_u32_f32(mValue);
#else
	return *reinterpret_cast<const UVec4 *>(this);
#endif
}
// Returns the smallest of the X, Y and Z components
float Vec3::ReduceMin() const
{
	// Tournament reduction: min(X, Y) in lane 0 first, then min with Z
	Vec3 v = sMin(mValue, Swizzle<SWIZZLE_Y, SWIZZLE_UNUSED, SWIZZLE_Z>());
	v = sMin(v, v.Swizzle<SWIZZLE_Z, SWIZZLE_UNUSED, SWIZZLE_UNUSED>());
	return v.GetX();
}
// Returns the largest of the X, Y and Z components
float Vec3::ReduceMax() const
{
	// Tournament reduction: max(X, Y) in lane 0 first, then max with Z
	Vec3 v = sMax(mValue, Swizzle<SWIZZLE_Y, SWIZZLE_UNUSED, SWIZZLE_Z>());
	v = sMax(v, v.Swizzle<SWIZZLE_Z, SWIZZLE_UNUSED, SWIZZLE_UNUSED>());
	return v.GetX();
}
// Returns a unit vector perpendicular to this vector (undefined for the zero vector).
// Picks the cross-product with the axis least aligned with this vector for numerical stability.
Vec3 Vec3::GetNormalizedPerpendicular() const
{
	if (abs(mF32[0]) > abs(mF32[1]))
	{
		// X dominates Y: use (z, 0, -x), which is this vector crossed with the Y axis
		float len = sqrt(mF32[0] * mF32[0] + mF32[2] * mF32[2]);
		return Vec3(mF32[2], 0.0f, -mF32[0]) / len;
	}
	else
	{
		// Y dominates (or ties) X: use (0, z, -y), which is this vector crossed with the X axis
		float len = sqrt(mF32[1] * mF32[1] + mF32[2] * mF32[2]);
		return Vec3(0.0f, mF32[2], -mF32[1]) / len;
	}
}
// Returns per component -1.0 when the sign bit is set and 1.0 otherwise
// (note: -0.0 yields -1.0; NaN follows its sign bit)
Vec3 Vec3::GetSign() const
{
#if defined(JPH_USE_AVX512)
	return _mm_fixupimm_ps(mValue, mValue, _mm_set1_epi32(0xA9A90A00), 0); // Fixup table maps each float class to +/-1
#elif defined(JPH_USE_SSE)
	// Take the sign bit from the value and OR it into the bits of 1.0
	Type minus_one = _mm_set1_ps(-1.0f);
	Type one = _mm_set1_ps(1.0f);
	return _mm_or_ps(_mm_and_ps(mValue, minus_one), one);
#elif defined(JPH_USE_NEON)
	Type minus_one = vdupq_n_f32(-1.0f);
	Type one = vdupq_n_f32(1.0f);
	return vreinterpretq_f32_u32(vorrq_u32(vandq_u32(vreinterpretq_u32_f32(mValue), vreinterpretq_u32_f32(minus_one)), vreinterpretq_u32_f32(one)));
#else
	return Vec3(std::signbit(mF32[0])? -1.0f : 1.0f,
				std::signbit(mF32[1])? -1.0f : 1.0f,
				std::signbit(mF32[2])? -1.0f : 1.0f);
#endif
}
  785. JPH_NAMESPACE_END