// Copyright 2009-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

#include "../math/math.h"

namespace embree
{
  /* 4-wide SSE integer type */
  template<>
  struct vint<4>
  {
    ALIGNED_STRUCT_(16);

    typedef vboolf4 Bool;
    typedef vint4   Int;
    typedef vfloat4 Float;

    enum { size = 4 };              // number of SIMD elements
    union { __m128i v; int i[4]; }; // data
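
    // The union exposes the same 128 bits both as the SIMD register (v) and
    // as four scalar lanes (i[4]), which is what operator[] indexes below.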

    ////////////////////////////////////////////////////////////////////////////////
    /// Constructors, Assignment & Cast Operators
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vint() {}
    __forceinline vint(const vint4& a) { v = a.v; }
    __forceinline vint4& operator =(const vint4& a) { v = a.v; return *this; }

    __forceinline vint(__m128i a) : v(a) {}
    __forceinline operator const __m128i&() const { return v; }
    __forceinline operator       __m128i&()       { return v; }

    __forceinline vint(int a) : v(_mm_set1_epi32(a)) {}
    __forceinline vint(int a, int b, int c, int d) : v(_mm_set_epi32(d, c, b, a)) {}

    __forceinline explicit vint(__m128 a) : v(_mm_cvtps_epi32(a)) {}
#if defined(__AVX512VL__)
    __forceinline explicit vint(const vboolf4& a) : v(_mm_movm_epi32(a)) {}
#else
    __forceinline explicit vint(const vboolf4& a) : v(_mm_castps_si128((__m128)a)) {}
#endif

    __forceinline vint(long long a, long long b) : v(_mm_set_epi64x(b, a)) {}

    ////////////////////////////////////////////////////////////////////////////////
    /// Constants
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vint(ZeroTy)        : v(_mm_setzero_si128()) {}
    __forceinline vint(OneTy)         : v(_mm_set_epi32(1, 1, 1, 1)) {}
    __forceinline vint(PosInfTy)      : v(_mm_set_epi32(pos_inf, pos_inf, pos_inf, pos_inf)) {}
    __forceinline vint(NegInfTy)      : v(_mm_set_epi32(neg_inf, neg_inf, neg_inf, neg_inf)) {}
    __forceinline vint(StepTy)        : v(_mm_set_epi32(3, 2, 1, 0)) {}
    __forceinline vint(ReverseStepTy) : v(_mm_set_epi32(0, 1, 2, 3)) {}
    __forceinline vint(TrueTy)        { v = _mm_cmpeq_epi32(v, v); }
    __forceinline vint(UndefinedTy)   : v(_mm_castps_si128(_mm_undefined_ps())) {}
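
    // Note: vint(TrueTy) compares the register against itself; x == x holds
    // lane-wise whatever the contents, so this yields all bits set without
    // loading a constant. In these integer constructors pos_inf/neg_inf
    // convert to the integer sentinels INT_MAX/INT_MIN.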

    ////////////////////////////////////////////////////////////////////////////////
    /// Loads and Stores
    ////////////////////////////////////////////////////////////////////////////////

    static __forceinline vint4 load (const void* a) { return _mm_load_si128((__m128i*)a); }
    static __forceinline vint4 loadu(const void* a) { return _mm_loadu_si128((__m128i*)a); }

    static __forceinline void store (void* ptr, const vint4& v) { _mm_store_si128((__m128i*)ptr, v); }
    static __forceinline void storeu(void* ptr, const vint4& v) { _mm_storeu_si128((__m128i*)ptr, v); }

#if defined(__AVX512VL__)
    static __forceinline vint4 compact(const vboolf4& mask, vint4& v) {
      return _mm_mask_compress_epi32(v, mask, v);
    }
    static __forceinline vint4 compact(const vboolf4& mask, vint4& a, const vint4& b) {
      return _mm_mask_compress_epi32(a, mask, b);
    }

    static __forceinline vint4 load (const vboolf4& mask, const void* ptr) { return _mm_mask_load_epi32 (_mm_setzero_si128(), mask, ptr); }
    static __forceinline vint4 loadu(const vboolf4& mask, const void* ptr) { return _mm_mask_loadu_epi32(_mm_setzero_si128(), mask, ptr); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vint4& v) { _mm_mask_store_epi32 (ptr, mask, v); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vint4& v) { _mm_mask_storeu_epi32(ptr, mask, v); }
#elif defined(__AVX__)
    static __forceinline vint4 load (const vbool4& mask, const void* a) { return _mm_castps_si128(_mm_maskload_ps((float*)a, mask)); }
    static __forceinline vint4 loadu(const vbool4& mask, const void* a) { return _mm_castps_si128(_mm_maskload_ps((float*)a, mask)); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vint4& i) { _mm_maskstore_ps((float*)ptr, (__m128i)mask, _mm_castsi128_ps(i)); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vint4& i) { _mm_maskstore_ps((float*)ptr, (__m128i)mask, _mm_castsi128_ps(i)); }
#else
    static __forceinline vint4 load (const vbool4& mask, const void* a) { return _mm_and_si128(_mm_load_si128 ((__m128i*)a), mask); }
    static __forceinline vint4 loadu(const vbool4& mask, const void* a) { return _mm_and_si128(_mm_loadu_si128((__m128i*)a), mask); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vint4& i) { store (ptr, select(mask, i, load (ptr))); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vint4& i) { storeu(ptr, select(mask, i, loadu(ptr))); }
#endif
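
    // In the pre-AVX fallback the masked store is emulated as a
    // read-modify-write (load, select, store), so unlike a real maskstore it
    // touches all 16 bytes even for inactive lanes.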

#if defined(__aarch64__)
    static __forceinline vint4 load(const uint8_t* ptr) {
      return _mm_load4epu8_epi32(((__m128i*)ptr));
    }
    static __forceinline vint4 loadu(const uint8_t* ptr) {
      return _mm_load4epu8_epi32(((__m128i*)ptr));
    }
#elif defined(__SSE4_1__)
    static __forceinline vint4 load(const uint8_t* ptr) {
      return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr));
    }
    static __forceinline vint4 loadu(const uint8_t* ptr) {
      return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr));
    }
#else
    static __forceinline vint4 load(const uint8_t* ptr) {
      return vint4(ptr[0], ptr[1], ptr[2], ptr[3]);
    }
    static __forceinline vint4 loadu(const uint8_t* ptr) {
      return vint4(ptr[0], ptr[1], ptr[2], ptr[3]);
    }
#endif

    static __forceinline vint4 load(const unsigned short* ptr) {
#if defined(__aarch64__)
      return __m128i(vmovl_u16(vld1_u16(ptr)));
#elif defined(__SSE4_1__)
      return _mm_cvtepu16_epi32(_mm_loadu_si128((__m128i*)ptr));
#else
      return vint4(ptr[0], ptr[1], ptr[2], ptr[3]);
#endif
    }

    static __forceinline void store(uint8_t* ptr, const vint4& v) {
#if defined(__aarch64__)
      int32x4_t  x = v;
      uint16x4_t y = vqmovn_u32(uint32x4_t(x));
      uint8x8_t  z = vqmovn_u16(vcombine_u16(y, y));
      vst1_lane_u32((uint32_t*)ptr, uint32x2_t(z), 0);
#elif defined(__SSE4_1__)
      __m128i x = v;
      x = _mm_packus_epi32(x, x);
      x = _mm_packus_epi16(x, x);
      *(int*)ptr = _mm_cvtsi128_si32(x);
#else
      for (size_t i = 0; i < 4; i++)
        ptr[i] = (uint8_t)v[i];
#endif
    }
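
    // The SSE4.1 path narrows with saturating packs (epi32 -> epu16 -> epu8),
    // so lane values outside [0,255] are clamped rather than truncated; the
    // NEON path (vqmovn) saturates the same way. Only the scalar fallback
    // truncates via the plain cast.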

    static __forceinline void store(unsigned short* ptr, const vint4& v) {
#if defined(__aarch64__)
      uint32x4_t x = uint32x4_t(v.v);
      uint16x4_t y = vqmovn_u32(x);
      vst1_u16(ptr, y);
#else
      for (size_t i = 0; i < 4; i++)
        ptr[i] = (unsigned short)v[i];
#endif
    }

    static __forceinline vint4 load_nt(void* ptr) {
#if defined(__aarch64__) || defined(__SSE4_1__)
      return _mm_stream_load_si128((__m128i*)ptr);
#else
      return _mm_load_si128((__m128i*)ptr);
#endif
    }

    static __forceinline void store_nt(void* ptr, const vint4& v) {
#if !defined(__aarch64__) && defined(__SSE4_1__)
      _mm_stream_ps((float*)ptr, _mm_castsi128_ps(v));
#else
      _mm_store_si128((__m128i*)ptr, v);
#endif
    }
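
    // The _nt variants use non-temporal hints where available: streaming
    // loads/stores bypass the cache hierarchy, which pays off for data that
    // is written once and not re-read soon. Like the regular aligned forms,
    // ptr must be 16-byte aligned.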

    template<int scale = 4>
    static __forceinline vint4 gather(const int* ptr, const vint4& index) {
#if defined(__AVX2__) && !defined(__aarch64__)
      return _mm_i32gather_epi32(ptr, index, scale);
#else
      return vint4(
        *(int*)(((int8_t*)ptr) + scale*index[0]),
        *(int*)(((int8_t*)ptr) + scale*index[1]),
        *(int*)(((int8_t*)ptr) + scale*index[2]),
        *(int*)(((int8_t*)ptr) + scale*index[3]));
#endif
    }

    template<int scale = 4>
    static __forceinline vint4 gather(const vboolf4& mask, const int* ptr, const vint4& index) {
      vint4 r = zero;
#if defined(__AVX512VL__)
      return _mm_mmask_i32gather_epi32(r, mask, index, ptr, scale);
#elif defined(__AVX2__) && !defined(__aarch64__)
      return _mm_mask_i32gather_epi32(r, ptr, index, mask, scale);
#else
      if (likely(mask[0])) r[0] = *(int*)(((int8_t*)ptr) + scale*index[0]);
      if (likely(mask[1])) r[1] = *(int*)(((int8_t*)ptr) + scale*index[1]);
      if (likely(mask[2])) r[2] = *(int*)(((int8_t*)ptr) + scale*index[2]);
      if (likely(mask[3])) r[3] = *(int*)(((int8_t*)ptr) + scale*index[3]);
      return r;
#endif
    }

    template<int scale = 4>
    static __forceinline void scatter(void* ptr, const vint4& index, const vint4& v)
    {
#if defined(__AVX512VL__)
      _mm_i32scatter_epi32((int*)ptr, index, v, scale);
#else
      *(int*)(((int8_t*)ptr) + scale*index[0]) = v[0];
      *(int*)(((int8_t*)ptr) + scale*index[1]) = v[1];
      *(int*)(((int8_t*)ptr) + scale*index[2]) = v[2];
      *(int*)(((int8_t*)ptr) + scale*index[3]) = v[3];
#endif
    }

    template<int scale = 4>
    static __forceinline void scatter(const vboolf4& mask, void* ptr, const vint4& index, const vint4& v)
    {
#if defined(__AVX512VL__)
      _mm_mask_i32scatter_epi32((int*)ptr, mask, index, v, scale);
#else
      if (likely(mask[0])) *(int*)(((int8_t*)ptr) + scale*index[0]) = v[0];
      if (likely(mask[1])) *(int*)(((int8_t*)ptr) + scale*index[1]) = v[1];
      if (likely(mask[2])) *(int*)(((int8_t*)ptr) + scale*index[2]) = v[2];
      if (likely(mask[3])) *(int*)(((int8_t*)ptr) + scale*index[3]) = v[3];
#endif
    }
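
    // scale is a byte multiplier, matching the hardware gather/scatter
    // semantics: lane k accesses *(int*)((char*)ptr + scale*index[k]).
    // E.g. gather<4>(base, vint4(0,2,4,6)) loads base[0], base[2], base[4],
    // base[6].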

#if defined(__x86_64__) || defined(__aarch64__)
    static __forceinline vint4 broadcast64(long long a) { return _mm_set1_epi64x(a); }
#endif

    ////////////////////////////////////////////////////////////////////////////////
    /// Array Access
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline const int& operator [](size_t index) const { assert(index < 4); return i[index]; }
    __forceinline       int& operator [](size_t index)       { assert(index < 4); return i[index]; }

    friend __forceinline vint4 select(const vboolf4& m, const vint4& t, const vint4& f) {
#if defined(__AVX512VL__)
      return _mm_mask_blend_epi32(m, (__m128i)f, (__m128i)t);
#elif defined(__aarch64__)
      return _mm_castps_si128(_mm_blendv_ps((__m128)f.v, (__m128)t.v, (__m128)m.v));
#elif defined(__SSE4_1__)
      return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m));
#else
      return _mm_or_si128(_mm_and_si128(m, t), _mm_andnot_si128(m, f));
#endif
    }
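
    // select(): per-lane blend — lane k of the result is t[k] where the mask
    // is set and f[k] otherwise. The SSE4.1 blendv path keys off the top bit
    // of each 32-bit lane, which comparison results always set for active
    // lanes (they produce all-ones masks).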
  };

  ////////////////////////////////////////////////////////////////////////////////
  /// Unary Operators
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__AVX512VL__)
  __forceinline vboolf4 asBool(const vint4& a) { return _mm_movepi32_mask(a); }
#else
  __forceinline vboolf4 asBool(const vint4& a) { return _mm_castsi128_ps(a); }
#endif

  __forceinline vint4 operator +(const vint4& a) { return a; }
  __forceinline vint4 operator -(const vint4& a) { return _mm_sub_epi32(_mm_setzero_si128(), a); }

#if defined(__aarch64__)
  __forceinline vint4 abs(const vint4& a) { return vabsq_s32(a.v); }
#elif defined(__SSSE3__)
  __forceinline vint4 abs(const vint4& a) { return _mm_abs_epi32(a); }
#endif

  ////////////////////////////////////////////////////////////////////////////////
  /// Binary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vint4 operator +(const vint4& a, const vint4& b) { return _mm_add_epi32(a, b); }
  __forceinline vint4 operator +(const vint4& a, int          b) { return a + vint4(b); }
  __forceinline vint4 operator +(int          a, const vint4& b) { return vint4(a) + b; }

  __forceinline vint4 operator -(const vint4& a, const vint4& b) { return _mm_sub_epi32(a, b); }
  __forceinline vint4 operator -(const vint4& a, int          b) { return a - vint4(b); }
  __forceinline vint4 operator -(int          a, const vint4& b) { return vint4(a) - b; }

#if defined(__aarch64__) || defined(__SSE4_1__)
  __forceinline vint4 operator *(const vint4& a, const vint4& b) { return _mm_mullo_epi32(a, b); }
#else
  __forceinline vint4 operator *(const vint4& a, const vint4& b) { return vint4(a[0]*b[0], a[1]*b[1], a[2]*b[2], a[3]*b[3]); }
#endif
  __forceinline vint4 operator *(const vint4& a, int          b) { return a * vint4(b); }
  __forceinline vint4 operator *(int          a, const vint4& b) { return vint4(a) * b; }

  __forceinline vint4 operator &(const vint4& a, const vint4& b) { return _mm_and_si128(a, b); }
  __forceinline vint4 operator &(const vint4& a, int          b) { return a & vint4(b); }
  __forceinline vint4 operator &(int          a, const vint4& b) { return vint4(a) & b; }

  __forceinline vint4 operator |(const vint4& a, const vint4& b) { return _mm_or_si128(a, b); }
  __forceinline vint4 operator |(const vint4& a, int          b) { return a | vint4(b); }
  __forceinline vint4 operator |(int          a, const vint4& b) { return vint4(a) | b; }

  __forceinline vint4 operator ^(const vint4& a, const vint4& b) { return _mm_xor_si128(a, b); }
  __forceinline vint4 operator ^(const vint4& a, int          b) { return a ^ vint4(b); }
  __forceinline vint4 operator ^(int          a, const vint4& b) { return vint4(a) ^ b; }

  __forceinline vint4 operator <<(const vint4& a, const int n) { return _mm_slli_epi32(a, n); }
  __forceinline vint4 operator >>(const vint4& a, const int n) { return _mm_srai_epi32(a, n); }

  __forceinline vint4 sll(const vint4& a, int b) { return _mm_slli_epi32(a, b); }
  __forceinline vint4 sra(const vint4& a, int b) { return _mm_srai_epi32(a, b); }
  __forceinline vint4 srl(const vint4& a, int b) { return _mm_srli_epi32(a, b); }
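
  // operator >> and sra() shift arithmetically (the sign bit is replicated);
  // srl() is the logical right shift that fills with zeros instead.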

  ////////////////////////////////////////////////////////////////////////////////
  /// Assignment Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vint4& operator +=(vint4& a, const vint4& b) { return a = a + b; }
  __forceinline vint4& operator +=(vint4& a, int          b) { return a = a + b; }

  __forceinline vint4& operator -=(vint4& a, const vint4& b) { return a = a - b; }
  __forceinline vint4& operator -=(vint4& a, int          b) { return a = a - b; }

#if defined(__aarch64__) || defined(__SSE4_1__)
  __forceinline vint4& operator *=(vint4& a, const vint4& b) { return a = a * b; }
  __forceinline vint4& operator *=(vint4& a, int          b) { return a = a * b; }
#endif

  __forceinline vint4& operator &=(vint4& a, const vint4& b) { return a = a & b; }
  __forceinline vint4& operator &=(vint4& a, int          b) { return a = a & b; }

  __forceinline vint4& operator |=(vint4& a, const vint4& b) { return a = a | b; }
  __forceinline vint4& operator |=(vint4& a, int          b) { return a = a | b; }

  __forceinline vint4& operator <<=(vint4& a, int b) { return a = a << b; }
  __forceinline vint4& operator >>=(vint4& a, int b) { return a = a >> b; }

  ////////////////////////////////////////////////////////////////////////////////
  /// Comparison Operators + Select
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__AVX512VL__)
  __forceinline vboolf4 operator ==(const vint4& a, const vint4& b) { return _mm_cmp_epi32_mask(a, b, _MM_CMPINT_EQ); }
  __forceinline vboolf4 operator !=(const vint4& a, const vint4& b) { return _mm_cmp_epi32_mask(a, b, _MM_CMPINT_NE); }
  __forceinline vboolf4 operator < (const vint4& a, const vint4& b) { return _mm_cmp_epi32_mask(a, b, _MM_CMPINT_LT); }
  __forceinline vboolf4 operator >=(const vint4& a, const vint4& b) { return _mm_cmp_epi32_mask(a, b, _MM_CMPINT_GE); }
  __forceinline vboolf4 operator > (const vint4& a, const vint4& b) { return _mm_cmp_epi32_mask(a, b, _MM_CMPINT_GT); }
  __forceinline vboolf4 operator <=(const vint4& a, const vint4& b) { return _mm_cmp_epi32_mask(a, b, _MM_CMPINT_LE); }
#else
  __forceinline vboolf4 operator ==(const vint4& a, const vint4& b) { return _mm_castsi128_ps(_mm_cmpeq_epi32(a, b)); }
  __forceinline vboolf4 operator !=(const vint4& a, const vint4& b) { return !(a == b); }
  __forceinline vboolf4 operator < (const vint4& a, const vint4& b) { return _mm_castsi128_ps(_mm_cmplt_epi32(a, b)); }
  __forceinline vboolf4 operator >=(const vint4& a, const vint4& b) { return !(a <  b); }
  __forceinline vboolf4 operator > (const vint4& a, const vint4& b) { return _mm_castsi128_ps(_mm_cmpgt_epi32(a, b)); }
  __forceinline vboolf4 operator <=(const vint4& a, const vint4& b) { return !(a >  b); }
#endif

  __forceinline vboolf4 operator ==(const vint4& a, int          b) { return a == vint4(b); }
  __forceinline vboolf4 operator ==(int          a, const vint4& b) { return vint4(a) == b; }

  __forceinline vboolf4 operator !=(const vint4& a, int          b) { return a != vint4(b); }
  __forceinline vboolf4 operator !=(int          a, const vint4& b) { return vint4(a) != b; }

  __forceinline vboolf4 operator < (const vint4& a, int          b) { return a <  vint4(b); }
  __forceinline vboolf4 operator < (int          a, const vint4& b) { return vint4(a) <  b; }

  __forceinline vboolf4 operator >=(const vint4& a, int          b) { return a >= vint4(b); }
  __forceinline vboolf4 operator >=(int          a, const vint4& b) { return vint4(a) >= b; }

  __forceinline vboolf4 operator > (const vint4& a, int          b) { return a >  vint4(b); }
  __forceinline vboolf4 operator > (int          a, const vint4& b) { return vint4(a) >  b; }

  __forceinline vboolf4 operator <=(const vint4& a, int          b) { return a <= vint4(b); }
  __forceinline vboolf4 operator <=(int          a, const vint4& b) { return vint4(a) <= b; }

  __forceinline vboolf4 eq(const vint4& a, const vint4& b) { return a == b; }
  __forceinline vboolf4 ne(const vint4& a, const vint4& b) { return a != b; }
  __forceinline vboolf4 lt(const vint4& a, const vint4& b) { return a <  b; }
  __forceinline vboolf4 ge(const vint4& a, const vint4& b) { return a >= b; }
  __forceinline vboolf4 gt(const vint4& a, const vint4& b) { return a >  b; }
  __forceinline vboolf4 le(const vint4& a, const vint4& b) { return a <= b; }

#if defined(__AVX512VL__)
  __forceinline vboolf4 eq(const vboolf4& mask, const vint4& a, const vint4& b) { return _mm_mask_cmp_epi32_mask(mask, a, b, _MM_CMPINT_EQ); }
  __forceinline vboolf4 ne(const vboolf4& mask, const vint4& a, const vint4& b) { return _mm_mask_cmp_epi32_mask(mask, a, b, _MM_CMPINT_NE); }
  __forceinline vboolf4 lt(const vboolf4& mask, const vint4& a, const vint4& b) { return _mm_mask_cmp_epi32_mask(mask, a, b, _MM_CMPINT_LT); }
  __forceinline vboolf4 ge(const vboolf4& mask, const vint4& a, const vint4& b) { return _mm_mask_cmp_epi32_mask(mask, a, b, _MM_CMPINT_GE); }
  __forceinline vboolf4 gt(const vboolf4& mask, const vint4& a, const vint4& b) { return _mm_mask_cmp_epi32_mask(mask, a, b, _MM_CMPINT_GT); }
  __forceinline vboolf4 le(const vboolf4& mask, const vint4& a, const vint4& b) { return _mm_mask_cmp_epi32_mask(mask, a, b, _MM_CMPINT_LE); }
#else
  __forceinline vboolf4 eq(const vboolf4& mask, const vint4& a, const vint4& b) { return mask & (a == b); }
  __forceinline vboolf4 ne(const vboolf4& mask, const vint4& a, const vint4& b) { return mask & (a != b); }
  __forceinline vboolf4 lt(const vboolf4& mask, const vint4& a, const vint4& b) { return mask & (a <  b); }
  __forceinline vboolf4 ge(const vboolf4& mask, const vint4& a, const vint4& b) { return mask & (a >= b); }
  __forceinline vboolf4 gt(const vboolf4& mask, const vint4& a, const vint4& b) { return mask & (a >  b); }
  __forceinline vboolf4 le(const vboolf4& mask, const vint4& a, const vint4& b) { return mask & (a <= b); }
#endif

  template<int mask>
  __forceinline vint4 select(const vint4& t, const vint4& f) {
#if defined(__SSE4_1__)
    return _mm_castps_si128(_mm_blend_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), mask));
#else
    return select(vboolf4(mask), t, f);
#endif
  }
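
  // Compile-time variant of select: bit k of the immediate mask picks t (1)
  // or f (0) for lane k, e.g. select<0x5>(t,f) = { t[0], f[1], t[2], f[3] }.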

#if defined(__aarch64__) || defined(__SSE4_1__)
  __forceinline vint4 min(const vint4& a, const vint4& b) { return _mm_min_epi32(a, b); }
  __forceinline vint4 max(const vint4& a, const vint4& b) { return _mm_max_epi32(a, b); }

  __forceinline vint4 umin(const vint4& a, const vint4& b) { return _mm_min_epu32(a, b); }
  __forceinline vint4 umax(const vint4& a, const vint4& b) { return _mm_max_epu32(a, b); }
#else
  __forceinline vint4 min(const vint4& a, const vint4& b) { return select(a < b, a, b); }
  __forceinline vint4 max(const vint4& a, const vint4& b) { return select(a < b, b, a); }
#endif

  __forceinline vint4 min(const vint4& a, int          b) { return min(a, vint4(b)); }
  __forceinline vint4 min(int          a, const vint4& b) { return min(vint4(a), b); }
  __forceinline vint4 max(const vint4& a, int          b) { return max(a, vint4(b)); }
  __forceinline vint4 max(int          a, const vint4& b) { return max(vint4(a), b); }

  ////////////////////////////////////////////////////////////////////////////////
  // Movement/Shifting/Shuffling Functions
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vint4 unpacklo(const vint4& a, const vint4& b) { return _mm_castps_si128(_mm_unpacklo_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); }
  __forceinline vint4 unpackhi(const vint4& a, const vint4& b) { return _mm_castps_si128(_mm_unpackhi_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); }

#if defined(__aarch64__)
  template<int i0, int i1, int i2, int i3>
  __forceinline vint4 shuffle(const vint4& v) {
    return vreinterpretq_s32_u8(vqtbl1q_u8((uint8x16_t)v.v, _MN_SHUFFLE(i0, i1, i2, i3)));
  }
  template<int i0, int i1, int i2, int i3>
  __forceinline vint4 shuffle(const vint4& a, const vint4& b) {
    return vreinterpretq_s32_u8(vqtbl2q_u8((uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3)));
  }
#else
  template<int i0, int i1, int i2, int i3>
  __forceinline vint4 shuffle(const vint4& v) {
    return _mm_shuffle_epi32(v, _MM_SHUFFLE(i3, i2, i1, i0));
  }
  template<int i0, int i1, int i2, int i3>
  __forceinline vint4 shuffle(const vint4& a, const vint4& b) {
    return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), _MM_SHUFFLE(i3, i2, i1, i0)));
  }
#endif

#if defined(__SSE3__)
  template<> __forceinline vint4 shuffle<0, 0, 2, 2>(const vint4& v) { return _mm_castps_si128(_mm_moveldup_ps(_mm_castsi128_ps(v))); }
  template<> __forceinline vint4 shuffle<1, 1, 3, 3>(const vint4& v) { return _mm_castps_si128(_mm_movehdup_ps(_mm_castsi128_ps(v))); }
  template<> __forceinline vint4 shuffle<0, 1, 0, 1>(const vint4& v) { return _mm_castpd_si128(_mm_movedup_pd (_mm_castsi128_pd(v))); }
#endif

  template<int i>
  __forceinline vint4 shuffle(const vint4& v) {
    return shuffle<i, i, i, i>(v);
  }
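
  // shuffle<i0,i1,i2,i3>(v) returns { v[i0], v[i1], v[i2], v[i3] }; the
  // single-index form broadcasts one lane, e.g. shuffle<2>(v) == vint4(v[2]).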

#if defined(__aarch64__)
  template<int src> __forceinline int   extract(const vint4& b);
  template<int dst> __forceinline vint4 insert (const vint4& a, const int b);
#elif defined(__SSE4_1__)
  template<int src> __forceinline int   extract(const vint4& b) { return _mm_extract_epi32(b, src); }
  template<int dst> __forceinline vint4 insert (const vint4& a, const int b) { return _mm_insert_epi32(a, b, dst); }
#else
  template<int src> __forceinline int   extract(const vint4& b) { return b[src&3]; }
  template<int dst> __forceinline vint4 insert (const vint4& a, int b) { vint4 c = a; c[dst&3] = b; return c; }
#endif

#if defined(__aarch64__)
  template<> __forceinline int extract<0>(const vint4& b) { return b.v[0]; }
  template<> __forceinline int extract<1>(const vint4& b) { return b.v[1]; }
  template<> __forceinline int extract<2>(const vint4& b) { return b.v[2]; }
  template<> __forceinline int extract<3>(const vint4& b) { return b.v[3]; }

  template<> __forceinline vint4 insert<0>(const vint4& a, int b) { vint4 c = a; c[0] = b; return c; }
  template<> __forceinline vint4 insert<1>(const vint4& a, int b) { vint4 c = a; c[1] = b; return c; }
  template<> __forceinline vint4 insert<2>(const vint4& a, int b) { vint4 c = a; c[2] = b; return c; }
  template<> __forceinline vint4 insert<3>(const vint4& a, int b) { vint4 c = a; c[3] = b; return c; }

  __forceinline int toScalar(const vint4& v) { return v[0]; }

  __forceinline size_t toSizeT(const vint4& v) {
    uint64x2_t x = uint64x2_t(v.v);
    return x[0];
  }
#else
  template<> __forceinline int extract<0>(const vint4& b) { return _mm_cvtsi128_si32(b); }

  __forceinline int toScalar(const vint4& v) { return _mm_cvtsi128_si32(v); }

  __forceinline size_t toSizeT(const vint4& v) {
#if defined(__WIN32__) && !defined(__X86_64__) // win32 workaround
    return toScalar(v);
#elif defined(__ARM_NEON)
    // FIXME(LTE): Do we need a swap (i.e. use lane 1)?
    return vgetq_lane_u64(*(reinterpret_cast<const uint64x2_t*>(&v)), 0);
#else
    return _mm_cvtsi128_si64(v);
#endif
  }
#endif

#if defined(__AVX512VL__)
  __forceinline vint4 permute(const vint4& a, const vint4& index) {
    return _mm_castps_si128(_mm_permutevar_ps(_mm_castsi128_ps(a), index));
  }

  template<int i>
  __forceinline vint4 align_shift_right(const vint4& a, const vint4& b) {
    return _mm_alignr_epi32(a, b, i);
  }
#endif

  ////////////////////////////////////////////////////////////////////////////////
  /// Reductions
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__aarch64__) || defined(__SSE4_1__)
#if defined(__aarch64__)
  __forceinline vint4 vreduce_min(const vint4& v) { int h = vminvq_s32(v); return vdupq_n_s32(h); }
  __forceinline vint4 vreduce_max(const vint4& v) { int h = vmaxvq_s32(v); return vdupq_n_s32(h); }
  __forceinline vint4 vreduce_add(const vint4& v) { int h = vaddvq_s32(v); return vdupq_n_s32(h); }

  __forceinline int reduce_min(const vint4& v) { return vminvq_s32(v); }
  __forceinline int reduce_max(const vint4& v) { return vmaxvq_s32(v); }
  __forceinline int reduce_add(const vint4& v) { return vaddvq_s32(v); }
#else
  __forceinline vint4 vreduce_min(const vint4& v) { vint4 h = min(shuffle<1,0,3,2>(v), v); return min(shuffle<2,3,0,1>(h), h); }
  __forceinline vint4 vreduce_max(const vint4& v) { vint4 h = max(shuffle<1,0,3,2>(v), v); return max(shuffle<2,3,0,1>(h), h); }
  __forceinline vint4 vreduce_add(const vint4& v) { vint4 h = shuffle<1,0,3,2>(v) + v; return shuffle<2,3,0,1>(h) + h; }

  __forceinline int reduce_min(const vint4& v) { return toScalar(vreduce_min(v)); }
  __forceinline int reduce_max(const vint4& v) { return toScalar(vreduce_max(v)); }
  __forceinline int reduce_add(const vint4& v) { return toScalar(vreduce_add(v)); }
#endif

  __forceinline size_t select_min(const vint4& v) { return bsf(movemask(v == vreduce_min(v))); }
  __forceinline size_t select_max(const vint4& v) { return bsf(movemask(v == vreduce_max(v))); }

  __forceinline size_t select_min(const vboolf4& valid, const vint4& v) { const vint4 a = select(valid, v, vint4(pos_inf)); return bsf(movemask(valid & (a == vreduce_min(a)))); }
  __forceinline size_t select_max(const vboolf4& valid, const vint4& v) { const vint4 a = select(valid, v, vint4(neg_inf)); return bsf(movemask(valid & (a == vreduce_max(a)))); }
#else
  __forceinline int reduce_min(const vint4& v) { return min(v[0], v[1], v[2], v[3]); }
  __forceinline int reduce_max(const vint4& v) { return max(v[0], v[1], v[2], v[3]); }
  __forceinline int reduce_add(const vint4& v) { return v[0] + v[1] + v[2] + v[3]; }
#endif
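
  // The SSE vreduce_* helpers reduce in log2(4) = 2 shuffle+op steps: first
  // combine lanes pairwise (shuffle<1,0,3,2>), then combine the two pairs
  // (shuffle<2,3,0,1>), leaving the result broadcast to all lanes; e.g.
  // vreduce_add(vint4(1,2,3,4)) yields vint4(10) and reduce_add returns 10.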

  ////////////////////////////////////////////////////////////////////////////////
  /// Sorting networks
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__aarch64__) || defined(__SSE4_1__)
  __forceinline vint4 usort_ascending(const vint4& v)
  {
    const vint4 a0 = v;
    const vint4 b0 = shuffle<1,0,3,2>(a0);
    const vint4 c0 = umin(a0, b0);
    const vint4 d0 = umax(a0, b0);
    const vint4 a1 = select<0x5 /* 0b0101 */>(c0, d0);
    const vint4 b1 = shuffle<2,3,0,1>(a1);
    const vint4 c1 = umin(a1, b1);
    const vint4 d1 = umax(a1, b1);
    const vint4 a2 = select<0x3 /* 0b0011 */>(c1, d1);
    const vint4 b2 = shuffle<0,2,1,3>(a2);
    const vint4 c2 = umin(a2, b2);
    const vint4 d2 = umax(a2, b2);
    const vint4 a3 = select<0x2 /* 0b0010 */>(c2, d2);
    return a3;
  }

  __forceinline vint4 usort_descending(const vint4& v)
  {
    const vint4 a0 = v;
    const vint4 b0 = shuffle<1,0,3,2>(a0);
    const vint4 c0 = umax(a0, b0);
    const vint4 d0 = umin(a0, b0);
    const vint4 a1 = select<0x5 /* 0b0101 */>(c0, d0);
    const vint4 b1 = shuffle<2,3,0,1>(a1);
    const vint4 c1 = umax(a1, b1);
    const vint4 d1 = umin(a1, b1);
    const vint4 a2 = select<0x3 /* 0b0011 */>(c1, d1);
    const vint4 b2 = shuffle<0,2,1,3>(a2);
    const vint4 c2 = umax(a2, b2);
    const vint4 d2 = umin(a2, b2);
    const vint4 a3 = select<0x2 /* 0b0010 */>(c2, d2);
    return a3;
  }
#else
  __forceinline vint4 usort_ascending(const vint4& v)
  {
    const vint4 a0 = v - vint4(0x80000000);
    const vint4 b0 = shuffle<1,0,3,2>(a0);
    const vint4 c0 = min(a0, b0);
    const vint4 d0 = max(a0, b0);
    const vint4 a1 = select<0x5 /* 0b0101 */>(c0, d0);
    const vint4 b1 = shuffle<2,3,0,1>(a1);
    const vint4 c1 = min(a1, b1);
    const vint4 d1 = max(a1, b1);
    const vint4 a2 = select<0x3 /* 0b0011 */>(c1, d1);
    const vint4 b2 = shuffle<0,2,1,3>(a2);
    const vint4 c2 = min(a2, b2);
    const vint4 d2 = max(a2, b2);
    const vint4 a3 = select<0x2 /* 0b0010 */>(c2, d2);
    return a3 + vint4(0x80000000);
  }

  __forceinline vint4 usort_descending(const vint4& v)
  {
    const vint4 a0 = v - vint4(0x80000000);
    const vint4 b0 = shuffle<1,0,3,2>(a0);
    const vint4 c0 = max(a0, b0);
    const vint4 d0 = min(a0, b0);
    const vint4 a1 = select<0x5 /* 0b0101 */>(c0, d0);
    const vint4 b1 = shuffle<2,3,0,1>(a1);
    const vint4 c1 = max(a1, b1);
    const vint4 d1 = min(a1, b1);
    const vint4 a2 = select<0x3 /* 0b0011 */>(c1, d1);
    const vint4 b2 = shuffle<0,2,1,3>(a2);
    const vint4 c2 = max(a2, b2);
    const vint4 d2 = min(a2, b2);
    const vint4 a3 = select<0x2 /* 0b0010 */>(c2, d2);
    return a3 + vint4(0x80000000);
  }
#endif
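
  // Both variants implement a 4-element sorting network (three rounds of
  // shuffle, compare-exchange, blend) on unsigned keys. Pre-SSE4.1 there is
  // no unsigned min/max, so the fallback biases the values by subtracting
  // 0x80000000 (which flips the sign bit), letting signed min/max produce
  // the unsigned ordering, and undoes the bias at the end.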

  ////////////////////////////////////////////////////////////////////////////////
  /// Output Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline embree_ostream operator <<(embree_ostream cout, const vint4& a) {
    return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ">";
  }
}