// vuint16_avx512.h

// Copyright 2009-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

namespace embree
{
  /* 16-wide AVX-512 unsigned integer type */
  template<>
  struct vuint<16>
  {
    ALIGNED_STRUCT_(64);

    typedef vboolf16 Bool;
    typedef vuint16  UInt;
    typedef vfloat16 Float;

    enum { size = 16 }; // number of SIMD elements

    union { // data
      __m512i v;
      unsigned int i[16];
    };

    ////////////////////////////////////////////////////////////////////////////////
    /// Constructors, Assignment & Cast Operators
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vuint() {}
    __forceinline vuint(const vuint16& t) { v = t.v; }
    __forceinline vuint16& operator =(const vuint16& f) { v = f.v; return *this; }

    __forceinline vuint(const __m512i& t) { v = t; }
    __forceinline operator __m512i() const { return v; }
    __forceinline operator __m256i() const { return _mm512_castsi512_si256(v); }

    __forceinline vuint(unsigned int i) {
      v = _mm512_set1_epi32(i);
    }

    __forceinline vuint(const vuint4& i) {
      v = _mm512_broadcast_i32x4(i);
    }

    __forceinline vuint(const vuint8& i) {
      v = _mm512_castps_si512(_mm512_castpd_ps(_mm512_broadcast_f64x4(_mm256_castsi256_pd(i))));
    }

    __forceinline vuint(unsigned int a, unsigned int b, unsigned int c, unsigned int d) {
      v = _mm512_set4_epi32(d,c,b,a);
    }

    __forceinline vuint(unsigned int a0 , unsigned int a1 , unsigned int a2 , unsigned int a3,
                        unsigned int a4 , unsigned int a5 , unsigned int a6 , unsigned int a7,
                        unsigned int a8 , unsigned int a9 , unsigned int a10, unsigned int a11,
                        unsigned int a12, unsigned int a13, unsigned int a14, unsigned int a15)
    {
      v = _mm512_set_epi32(a15,a14,a13,a12,a11,a10,a9,a8,a7,a6,a5,a4,a3,a2,a1,a0);
    }

    __forceinline explicit vuint(const __m512& f) {
      v = _mm512_cvtps_epu32(f);
    }

    ////////////////////////////////////////////////////////////////////////////////
    /// Constants
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vuint(ZeroTy)        : v(_mm512_setzero_epi32()) {}
    __forceinline vuint(OneTy)         : v(_mm512_set1_epi32(1)) {}
    __forceinline vuint(StepTy)        : v(_mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0)) {}
    __forceinline vuint(ReverseStepTy) : v(_mm512_setr_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0)) {}

    ////////////////////////////////////////////////////////////////////////////////
    /// Loads and Stores
    ////////////////////////////////////////////////////////////////////////////////

    static __forceinline void store_nt(void* __restrict__ ptr, const vuint16& a) {
      _mm512_stream_si512((__m512i*)ptr,a);
    }

    static __forceinline vuint16 loadu(const void* addr)
    {
      return _mm512_loadu_si512(addr);
    }

    static __forceinline vuint16 loadu(const uint8_t* ptr) { return _mm512_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr)); }
    static __forceinline vuint16 loadu(const unsigned short* ptr) { return _mm512_cvtepu16_epi32(_mm256_loadu_si256((__m256i*)ptr)); }

    static __forceinline vuint16 load(const vuint16* addr) {
      return _mm512_load_si512(addr);
    }

    static __forceinline vuint16 load(const unsigned int* addr) {
      return _mm512_load_si512(addr);
    }

    static __forceinline vuint16 load(unsigned short* ptr) { return _mm512_cvtepu16_epi32(*(__m256i*)ptr); }

    static __forceinline void store(void* ptr, const vuint16& v) {
      _mm512_store_si512(ptr,v);
    }

    static __forceinline void storeu(void* ptr, const vuint16& v) {
      _mm512_storeu_si512(ptr,v);
    }

    static __forceinline void storeu(const vboolf16& mask, void* ptr, const vuint16& f) {
      _mm512_mask_storeu_epi32(ptr,mask,f);
    }

    static __forceinline void store(const vboolf16& mask, void* addr, const vuint16& v2) {
      _mm512_mask_store_epi32(addr,mask,v2);
    }

    /* pass by value to avoid compiler generating inefficient code */
    static __forceinline void storeu_compact(const vboolf16 mask, void* addr, const vuint16 reg) {
      _mm512_mask_compressstoreu_epi32(addr,mask,reg);
    }

    static __forceinline void storeu_compact_single(const vboolf16 mask, void* addr, vuint16 reg) {
      //_mm512_mask_compressstoreu_epi32(addr,mask,reg);
      *(float*)addr = _mm512_cvtss_f32(_mm512_mask_compress_ps(_mm512_castsi512_ps(reg),mask,_mm512_castsi512_ps(reg)));
    }

    static __forceinline vuint16 compact64bit(const vboolf16& mask, vuint16& v) {
      return _mm512_mask_compress_epi64(v,mask,v);
    }

    static __forceinline vuint16 compact(const vboolf16& mask, vuint16& v) {
      return _mm512_mask_compress_epi32(v,mask,v);
    }

    static __forceinline vuint16 compact(const vboolf16& mask, const vuint16& a, vuint16& b) {
      return _mm512_mask_compress_epi32(a,mask,b);
    }

    static __forceinline vuint16 expand(const vboolf16& mask, const vuint16& a, vuint16& b) {
      return _mm512_mask_expand_epi32(b,mask,a);
    }
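
    // Usage sketch for stream compaction (illustrative only; the mask value is
    // a made-up example). compact() moves the lanes selected by the mask to the
    // front of the register; storeu_compact() writes only those lanes
    // contiguously to memory:
    //
    //   vuint16  ids(step);                      // <0,1,...,15>
    //   vboolf16 keep = ids < vuint16(4u);       // keep lanes 0..3
    //   unsigned out[16];
    //   vuint16::storeu_compact(keep, out, ids); // out[0..3] = 0,1,2,3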

    template<int scale = 4>
    static __forceinline vuint16 gather(const unsigned int* ptr, const vint16& index) {
      return _mm512_i32gather_epi32(index,ptr,scale);
    }

    template<int scale = 4>
    static __forceinline vuint16 gather(const vboolf16& mask, const unsigned int* ptr, const vint16& index) {
      return _mm512_mask_i32gather_epi32(_mm512_undefined_epi32(),mask,index,ptr,scale);
    }

    template<int scale = 4>
    static __forceinline vuint16 gather(const vboolf16& mask, vuint16& dest, const unsigned int* ptr, const vint16& index) {
      return _mm512_mask_i32gather_epi32(dest,mask,index,ptr,scale);
    }

    template<int scale = 4>
    static __forceinline void scatter(unsigned int* ptr, const vint16& index, const vuint16& v) {
      _mm512_i32scatter_epi32((int*)ptr,index,v,scale);
    }

    template<int scale = 4>
    static __forceinline void scatter(const vboolf16& mask, unsigned int* ptr, const vint16& index, const vuint16& v) {
      _mm512_mask_i32scatter_epi32((int*)ptr,mask,index,v,scale);
    }

    static __forceinline vuint16 broadcast64bit(size_t v) {
      return _mm512_set1_epi64(v);
    }

    static __forceinline size_t extract64bit(const vuint16& v)
    {
      return _mm_cvtsi128_si64(_mm512_castsi512_si128(v));
    }
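
    // Usage sketch (illustrative only; `table` is a hypothetical lookup array).
    // The scale template parameter is the byte stride per index unit, so the
    // default of 4 treats each index as an element offset into an unsigned array:
    //
    //   unsigned table[256] = { /* ... */ };
    //   vint16  idx(step);                             // element indices 0..15
    //   vuint16 vals = vuint16::gather<4>(table, idx); // vals[i] = table[idx[i]]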

    ////////////////////////////////////////////////////////////////////////////////
    /// Array Access
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline       unsigned int& operator [](size_t index)       { assert(index < 16); return i[index]; }
    __forceinline const unsigned int& operator [](size_t index) const { assert(index < 16); return i[index]; }

    __forceinline unsigned int uint(size_t index) const { assert(index < 16); return ((unsigned int*)i)[index]; }

    /* reinterprets the 16x32-bit payload as eight 64-bit values */
    __forceinline size_t& uint64_t(size_t index) const { assert(index < 8); return ((size_t*)i)[index]; }
  };

  ////////////////////////////////////////////////////////////////////////////////
  /// Unary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vboolf16 asBool(const vuint16& a) { return _mm512_movepi32_mask(a); }

  __forceinline vuint16 operator +(const vuint16& a) { return a; }
  __forceinline vuint16 operator -(const vuint16& a) { return _mm512_sub_epi32(_mm512_setzero_epi32(), a); }

  ////////////////////////////////////////////////////////////////////////////////
  /// Binary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vuint16 operator +(const vuint16& a, const vuint16& b) { return _mm512_add_epi32(a, b); }
  __forceinline vuint16 operator +(const vuint16& a, unsigned int   b) { return a + vuint16(b); }
  __forceinline vuint16 operator +(unsigned int   a, const vuint16& b) { return vuint16(a) + b; }

  __forceinline vuint16 operator -(const vuint16& a, const vuint16& b) { return _mm512_sub_epi32(a, b); }
  __forceinline vuint16 operator -(const vuint16& a, unsigned int   b) { return a - vuint16(b); }
  __forceinline vuint16 operator -(unsigned int   a, const vuint16& b) { return vuint16(a) - b; }

  // note: _mm512_mul_epu32 only multiplies the even 32-bit lanes into 64-bit
  // products; _mm512_mullo_epi32 gives the lane-wise 32-bit multiply intended here
  __forceinline vuint16 operator *(const vuint16& a, const vuint16& b) { return _mm512_mullo_epi32(a, b); }
  __forceinline vuint16 operator *(const vuint16& a, unsigned int   b) { return a * vuint16(b); }
  __forceinline vuint16 operator *(unsigned int   a, const vuint16& b) { return vuint16(a) * b; }

  __forceinline vuint16 operator &(const vuint16& a, const vuint16& b) { return _mm512_and_epi32(a, b); }
  __forceinline vuint16 operator &(const vuint16& a, unsigned int   b) { return a & vuint16(b); }
  __forceinline vuint16 operator &(unsigned int   a, const vuint16& b) { return vuint16(a) & b; }

  __forceinline vuint16 operator |(const vuint16& a, const vuint16& b) { return _mm512_or_epi32(a, b); }
  __forceinline vuint16 operator |(const vuint16& a, unsigned int   b) { return a | vuint16(b); }
  __forceinline vuint16 operator |(unsigned int   a, const vuint16& b) { return vuint16(a) | b; }

  __forceinline vuint16 operator ^(const vuint16& a, const vuint16& b) { return _mm512_xor_epi32(a, b); }
  __forceinline vuint16 operator ^(const vuint16& a, unsigned int   b) { return a ^ vuint16(b); }
  __forceinline vuint16 operator ^(unsigned int   a, const vuint16& b) { return vuint16(a) ^ b; }

  __forceinline vuint16 operator <<(const vuint16& a, unsigned int n) { return _mm512_slli_epi32(a, n); }
  __forceinline vuint16 operator >>(const vuint16& a, unsigned int n) { return _mm512_srli_epi32(a, n); }

  __forceinline vuint16 operator <<(const vuint16& a, const vuint16& n) { return _mm512_sllv_epi32(a, n); }
  __forceinline vuint16 operator >>(const vuint16& a, const vuint16& n) { return _mm512_srlv_epi32(a, n); }

  __forceinline vuint16 sll(const vuint16& a, unsigned int b) { return _mm512_slli_epi32(a, b); }
  __forceinline vuint16 sra(const vuint16& a, unsigned int b) { return _mm512_srai_epi32(a, b); }
  __forceinline vuint16 srl(const vuint16& a, unsigned int b) { return _mm512_srli_epi32(a, b); }

  __forceinline vuint16 min(const vuint16& a, const vuint16& b) { return _mm512_min_epu32(a, b); }
  __forceinline vuint16 min(const vuint16& a, unsigned int   b) { return min(a,vuint16(b)); }
  __forceinline vuint16 min(unsigned int   a, const vuint16& b) { return min(vuint16(a),b); }

  __forceinline vuint16 max(const vuint16& a, const vuint16& b) { return _mm512_max_epu32(a, b); }
  __forceinline vuint16 max(const vuint16& a, unsigned int   b) { return max(a,vuint16(b)); }
  __forceinline vuint16 max(unsigned int   a, const vuint16& b) { return max(vuint16(a),b); }

  __forceinline vuint16 mask_add(const vboolf16& mask, vuint16& c, const vuint16& a, const vuint16& b) { return _mm512_mask_add_epi32(c,mask,a,b); }
  __forceinline vuint16 mask_sub(const vboolf16& mask, vuint16& c, const vuint16& a, const vuint16& b) { return _mm512_mask_sub_epi32(c,mask,a,b); }

  __forceinline vuint16 mask_and(const vboolf16& m, vuint16& c, const vuint16& a, const vuint16& b) { return _mm512_mask_and_epi32(c,m,a,b); }
  __forceinline vuint16 mask_or (const vboolf16& m, vuint16& c, const vuint16& a, const vuint16& b) { return _mm512_mask_or_epi32(c,m,a,b); }
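
  // Usage sketch for the masked operations (illustrative only): lanes where the
  // mask is clear pass through the first vector argument unchanged, mirroring
  // AVX-512 merge-masking:
  //
  //   vuint16  a(step), b(one);
  //   vboolf16 m = a < vuint16(8u);
  //   vuint16  r = mask_add(m, a, a, b); // r[i] = m[i] ? a[i]+1 : a[i]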

  ////////////////////////////////////////////////////////////////////////////////
  /// Assignment Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vuint16& operator +=(vuint16& a, const vuint16& b) { return a = a + b; }
  __forceinline vuint16& operator +=(vuint16& a, unsigned int   b) { return a = a + b; }

  __forceinline vuint16& operator -=(vuint16& a, const vuint16& b) { return a = a - b; }
  __forceinline vuint16& operator -=(vuint16& a, unsigned int   b) { return a = a - b; }

  __forceinline vuint16& operator *=(vuint16& a, const vuint16& b) { return a = a * b; }
  __forceinline vuint16& operator *=(vuint16& a, unsigned int   b) { return a = a * b; }

  __forceinline vuint16& operator &=(vuint16& a, const vuint16& b) { return a = a & b; }
  __forceinline vuint16& operator &=(vuint16& a, unsigned int   b) { return a = a & b; }

  __forceinline vuint16& operator |=(vuint16& a, const vuint16& b) { return a = a | b; }
  __forceinline vuint16& operator |=(vuint16& a, unsigned int   b) { return a = a | b; }

  __forceinline vuint16& operator <<=(vuint16& a, unsigned int b) { return a = a << b; }
  __forceinline vuint16& operator >>=(vuint16& a, unsigned int b) { return a = a >> b; }

  ////////////////////////////////////////////////////////////////////////////////
  /// Comparison Operators + Select
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vboolf16 operator ==(const vuint16& a, const vuint16& b) { return _mm512_cmp_epu32_mask(a,b,_MM_CMPINT_EQ); }
  __forceinline vboolf16 operator ==(const vuint16& a, unsigned int   b) { return a == vuint16(b); }
  __forceinline vboolf16 operator ==(unsigned int   a, const vuint16& b) { return vuint16(a) == b; }

  __forceinline vboolf16 operator !=(const vuint16& a, const vuint16& b) { return _mm512_cmp_epu32_mask(a,b,_MM_CMPINT_NE); }
  __forceinline vboolf16 operator !=(const vuint16& a, unsigned int   b) { return a != vuint16(b); }
  __forceinline vboolf16 operator !=(unsigned int   a, const vuint16& b) { return vuint16(a) != b; }

  __forceinline vboolf16 operator < (const vuint16& a, const vuint16& b) { return _mm512_cmp_epu32_mask(a,b,_MM_CMPINT_LT); }
  __forceinline vboolf16 operator < (const vuint16& a, unsigned int   b) { return a <  vuint16(b); }
  __forceinline vboolf16 operator < (unsigned int   a, const vuint16& b) { return vuint16(a) <  b; }

  __forceinline vboolf16 operator >=(const vuint16& a, const vuint16& b) { return _mm512_cmp_epu32_mask(a,b,_MM_CMPINT_GE); }
  __forceinline vboolf16 operator >=(const vuint16& a, unsigned int   b) { return a >= vuint16(b); }
  __forceinline vboolf16 operator >=(unsigned int   a, const vuint16& b) { return vuint16(a) >= b; }

  __forceinline vboolf16 operator > (const vuint16& a, const vuint16& b) { return _mm512_cmp_epu32_mask(a,b,_MM_CMPINT_GT); }
  __forceinline vboolf16 operator > (const vuint16& a, unsigned int   b) { return a >  vuint16(b); }
  __forceinline vboolf16 operator > (unsigned int   a, const vuint16& b) { return vuint16(a) >  b; }

  __forceinline vboolf16 operator <=(const vuint16& a, const vuint16& b) { return _mm512_cmp_epu32_mask(a,b,_MM_CMPINT_LE); }
  __forceinline vboolf16 operator <=(const vuint16& a, unsigned int   b) { return a <= vuint16(b); }
  __forceinline vboolf16 operator <=(unsigned int   a, const vuint16& b) { return vuint16(a) <= b; }

  __forceinline vboolf16 eq(const vuint16& a, const vuint16& b) { return _mm512_cmp_epu32_mask(a,b,_MM_CMPINT_EQ); }
  __forceinline vboolf16 ne(const vuint16& a, const vuint16& b) { return _mm512_cmp_epu32_mask(a,b,_MM_CMPINT_NE); }
  __forceinline vboolf16 lt(const vuint16& a, const vuint16& b) { return _mm512_cmp_epu32_mask(a,b,_MM_CMPINT_LT); }
  __forceinline vboolf16 ge(const vuint16& a, const vuint16& b) { return _mm512_cmp_epu32_mask(a,b,_MM_CMPINT_GE); }
  __forceinline vboolf16 gt(const vuint16& a, const vuint16& b) { return _mm512_cmp_epu32_mask(a,b,_MM_CMPINT_GT); }
  __forceinline vboolf16 le(const vuint16& a, const vuint16& b) { return _mm512_cmp_epu32_mask(a,b,_MM_CMPINT_LE); }

  __forceinline vboolf16 eq(const vboolf16 mask, const vuint16& a, const vuint16& b) { return _mm512_mask_cmp_epu32_mask(mask,a,b,_MM_CMPINT_EQ); }
  __forceinline vboolf16 ne(const vboolf16 mask, const vuint16& a, const vuint16& b) { return _mm512_mask_cmp_epu32_mask(mask,a,b,_MM_CMPINT_NE); }
  __forceinline vboolf16 lt(const vboolf16 mask, const vuint16& a, const vuint16& b) { return _mm512_mask_cmp_epu32_mask(mask,a,b,_MM_CMPINT_LT); }
  __forceinline vboolf16 ge(const vboolf16 mask, const vuint16& a, const vuint16& b) { return _mm512_mask_cmp_epu32_mask(mask,a,b,_MM_CMPINT_GE); }
  __forceinline vboolf16 gt(const vboolf16 mask, const vuint16& a, const vuint16& b) { return _mm512_mask_cmp_epu32_mask(mask,a,b,_MM_CMPINT_GT); }
  __forceinline vboolf16 le(const vboolf16 mask, const vuint16& a, const vuint16& b) { return _mm512_mask_cmp_epu32_mask(mask,a,b,_MM_CMPINT_LE); }

  __forceinline vuint16 select(const vboolf16& m, const vuint16& t, const vuint16& f) {
    return _mm512_mask_or_epi32(f,m,t,t);
  }

  __forceinline void xchg(const vboolf16& m, vuint16& a, vuint16& b) {
    const vuint16 c = a; a = select(m,b,a); b = select(m,c,b);
  }

  __forceinline vboolf16 test(const vboolf16& m, const vuint16& a, const vuint16& b) {
    return _mm512_mask_test_epi32_mask(m,a,b);
  }

  __forceinline vboolf16 test(const vuint16& a, const vuint16& b) {
    return _mm512_test_epi32_mask(a,b);
  }
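
  // Usage sketch (illustrative only): comparisons produce a vboolf16 lane mask
  // rather than a vector of 0/1 values, which feeds directly into select():
  //
  //   vuint16  a(step), b(reverse_step);
  //   vboolf16 m  = a < b;               // true for lanes 0..7
  //   vuint16  lo = select(m, a, b);     // per-lane minimum of a and b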

  ////////////////////////////////////////////////////////////////////////////////
  // Movement/Shifting/Shuffling Functions
  ////////////////////////////////////////////////////////////////////////////////

  template<int i>
  __forceinline vuint16 shuffle(const vuint16& v) {
    return _mm512_castps_si512(_mm512_permute_ps(_mm512_castsi512_ps(v), _MM_SHUFFLE(i, i, i, i)));
  }

  template<int i0, int i1, int i2, int i3>
  __forceinline vuint16 shuffle(const vuint16& v) {
    return _mm512_castps_si512(_mm512_permute_ps(_mm512_castsi512_ps(v), _MM_SHUFFLE(i3, i2, i1, i0)));
  }

  template<int i>
  __forceinline vuint16 shuffle4(const vuint16& v) {
    return _mm512_castps_si512(_mm512_shuffle_f32x4(_mm512_castsi512_ps(v), _mm512_castsi512_ps(v), _MM_SHUFFLE(i, i, i, i)));
  }

  template<int i0, int i1, int i2, int i3>
  __forceinline vuint16 shuffle4(const vuint16& v) {
    return _mm512_castps_si512(_mm512_shuffle_f32x4(_mm512_castsi512_ps(v), _mm512_castsi512_ps(v), _MM_SHUFFLE(i3, i2, i1, i0)));
  }

  template<int i>
  __forceinline vuint16 align_shift_right(const vuint16& a, const vuint16& b) {
    return _mm512_alignr_epi32(a, b, i);
  }

  __forceinline unsigned int toScalar(const vuint16& v) {
    return _mm_cvtsi128_si32(_mm512_castsi512_si128(v));
  }
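
  // Usage sketch (illustrative only): shuffle<> permutes elements within each
  // 128-bit lane, while shuffle4<> permutes whole 128-bit lanes; combining the
  // two reaches any power-of-two exchange distance, which is what the
  // reductions below rely on:
  //
  //   vuint16 v(step);                  // <0,1,2,...,15>
  //   vuint16 s = shuffle<1,0,3,2>(v);  // <1,0,3,2, 5,4,7,6, ...>
  //   vuint16 q = shuffle4<1,0,3,2>(v); // <4..7, 0..3, 12..15, 8..11>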

  ////////////////////////////////////////////////////////////////////////////////
  /// Reductions
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vuint16 vreduce_min2(vuint16 x) {                      return min(x, shuffle<1,0,3,2>(x)); }
  __forceinline vuint16 vreduce_min4(vuint16 x) { x = vreduce_min2(x); return min(x, shuffle<2,3,0,1>(x)); }
  __forceinline vuint16 vreduce_min8(vuint16 x) { x = vreduce_min4(x); return min(x, shuffle4<1,0,3,2>(x)); }
  __forceinline vuint16 vreduce_min (vuint16 x) { x = vreduce_min8(x); return min(x, shuffle4<2,3,0,1>(x)); }

  __forceinline vuint16 vreduce_max2(vuint16 x) {                      return max(x, shuffle<1,0,3,2>(x)); }
  __forceinline vuint16 vreduce_max4(vuint16 x) { x = vreduce_max2(x); return max(x, shuffle<2,3,0,1>(x)); }
  __forceinline vuint16 vreduce_max8(vuint16 x) { x = vreduce_max4(x); return max(x, shuffle4<1,0,3,2>(x)); }
  __forceinline vuint16 vreduce_max (vuint16 x) { x = vreduce_max8(x); return max(x, shuffle4<2,3,0,1>(x)); }

  __forceinline vuint16 vreduce_and2(vuint16 x) {                      return x & shuffle<1,0,3,2>(x); }
  __forceinline vuint16 vreduce_and4(vuint16 x) { x = vreduce_and2(x); return x & shuffle<2,3,0,1>(x); }
  __forceinline vuint16 vreduce_and8(vuint16 x) { x = vreduce_and4(x); return x & shuffle4<1,0,3,2>(x); }
  __forceinline vuint16 vreduce_and (vuint16 x) { x = vreduce_and8(x); return x & shuffle4<2,3,0,1>(x); }

  __forceinline vuint16 vreduce_or2(vuint16 x) {                     return x | shuffle<1,0,3,2>(x); }
  __forceinline vuint16 vreduce_or4(vuint16 x) { x = vreduce_or2(x); return x | shuffle<2,3,0,1>(x); }
  __forceinline vuint16 vreduce_or8(vuint16 x) { x = vreduce_or4(x); return x | shuffle4<1,0,3,2>(x); }
  __forceinline vuint16 vreduce_or (vuint16 x) { x = vreduce_or8(x); return x | shuffle4<2,3,0,1>(x); }

  __forceinline vuint16 vreduce_add2(vuint16 x) {                      return x + shuffle<1,0,3,2>(x); }
  __forceinline vuint16 vreduce_add4(vuint16 x) { x = vreduce_add2(x); return x + shuffle<2,3,0,1>(x); }
  __forceinline vuint16 vreduce_add8(vuint16 x) { x = vreduce_add4(x); return x + shuffle4<1,0,3,2>(x); }
  __forceinline vuint16 vreduce_add (vuint16 x) { x = vreduce_add8(x); return x + shuffle4<2,3,0,1>(x); }

  __forceinline unsigned int reduce_min(const vuint16& v) { return toScalar(vreduce_min(v)); }
  __forceinline unsigned int reduce_max(const vuint16& v) { return toScalar(vreduce_max(v)); }
  __forceinline unsigned int reduce_and(const vuint16& v) { return toScalar(vreduce_and(v)); }
  __forceinline unsigned int reduce_or (const vuint16& v) { return toScalar(vreduce_or (v)); }
  __forceinline unsigned int reduce_add(const vuint16& v) { return toScalar(vreduce_add(v)); }
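
  // Illustrative note (not part of the original header): each vreduce_* stage
  // halves the number of distinct values with one shuffle plus one combining
  // op, so a full 16-lane reduction costs log2(16) = 4 steps and every lane
  // ends up holding the result, e.g.:
  //
  //   vuint16 v(step);            // <0,1,...,15>
  //   unsigned s = reduce_add(v); // 0+1+...+15 = 120
  //   unsigned m = reduce_max(v); // 15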

  ////////////////////////////////////////////////////////////////////////////////
  /// Permutations and Prefix Sums
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vuint16 permute(vuint16 v, vuint16 index) {
    return _mm512_permutexvar_epi32(index,v);
  }

  __forceinline vuint16 reverse(const vuint16& a) {
    return permute(a,vuint16(reverse_step));
  }

  __forceinline vuint16 prefix_sum(const vuint16& a)
  {
    const vuint16 z(zero);
    vuint16 v = a;
    v = v + align_shift_right<16-1>(v,z);
    v = v + align_shift_right<16-2>(v,z);
    v = v + align_shift_right<16-4>(v,z);
    v = v + align_shift_right<16-8>(v,z);
    return v;
  }

  __forceinline vuint16 reverse_prefix_sum(const vuint16& a)
  {
    const vuint16 z(zero);
    vuint16 v = a;
    v = v + align_shift_right<1>(z,v);
    v = v + align_shift_right<2>(z,v);
    v = v + align_shift_right<4>(z,v);
    v = v + align_shift_right<8>(z,v);
    return v;
  }
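
  // Illustrative note (not part of the original header): prefix_sum() is a
  // Hillis-Steele inclusive scan; each step adds the vector shifted up by 1,
  // 2, 4, then 8 lanes (zeros shifted in), so lane i ends up with
  // a[0]+...+a[i]. reverse_prefix_sum() shifts the other way:
  //
  //   vuint16 v(one);            // <1,1,...,1>
  //   vuint16 p = prefix_sum(v); // <1,2,3,...,16>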

  ////////////////////////////////////////////////////////////////////////////////
  /// Output Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline embree_ostream operator <<(embree_ostream cout, const vuint16& v)
  {
    cout << "<" << v[0];
    for (int i=1; i<16; i++) cout << ", " << v[i];
    cout << ">";
    return cout;
  }
}