// vfloat16_avx512.h
  1. // Copyright 2009-2021 Intel Corporation
  2. // SPDX-License-Identifier: Apache-2.0
  3. #pragma once
  4. #define vboolf vboolf_impl
  5. #define vboold vboold_impl
  6. #define vint vint_impl
  7. #define vuint vuint_impl
  8. #define vllong vllong_impl
  9. #define vfloat vfloat_impl
  10. #define vdouble vdouble_impl
  11. namespace embree
  12. {
/* 16-wide AVX-512 float type */
template<>
struct vfloat<16>
{
  ALIGNED_STRUCT_(64);           // 64-byte alignment, the natural alignment of __m512
  typedef vboolf16 Bool;
  typedef vint16   Int;
  typedef vfloat16 Float;

  enum { size = 16 }; // number of SIMD elements

  // Raw storage: the same 512 bits viewed as a vector register,
  // 16 floats, or 16 ints (for bit-level access).
  union { // data
    __m512 v;
    float f[16];
    int i[16];
  };

  ////////////////////////////////////////////////////////////////////////////////
  /// Constructors, Assignment & Cast Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vfloat() {}
  __forceinline vfloat(const vfloat16& t) { v = t; }
  __forceinline vfloat16& operator =(const vfloat16& f) { v = f.v; return *this; }

  // Implicit conversions to/from the native register type; the narrower
  // casts return the low 256/128 bits without moving data.
  __forceinline vfloat(const __m512& t) { v = t; }
  __forceinline operator __m512() const { return v; }
  __forceinline operator __m256() const { return _mm512_castps512_ps256(v); }
  __forceinline operator __m128() const { return _mm512_castps512_ps128(v); }

  // Broadcast a scalar to all 16 lanes.
  __forceinline vfloat(float f) {
    v = _mm512_set1_ps(f);
  }
  // Repeat the 4-tuple (a,b,c,d) in each of the four 128-bit lanes.
  __forceinline vfloat(float a, float b, float c, float d) {
    v = _mm512_set4_ps(a, b, c, d);
  }

  // Broadcast a 4-wide vector into all four 128-bit lanes.
  __forceinline vfloat(const vfloat4& i) {
    v = _mm512_broadcast_f32x4(i);
  }

  // Concatenate four 4-wide vectors: a = lanes 0..3, b = 4..7, c = 8..11, d = 12..15.
  __forceinline vfloat(const vfloat4& a, const vfloat4& b, const vfloat4& c, const vfloat4& d) {
    v = _mm512_castps128_ps512(a);
    v = _mm512_insertf32x4(v, b, 1);
    v = _mm512_insertf32x4(v, c, 2);
    v = _mm512_insertf32x4(v, d, 3);
  }

  // Broadcast a everywhere, then overwrite the mask-selected lanes with
  // the broadcast of b.
  __forceinline vfloat(const vboolf16& mask, const vfloat4& a, const vfloat4& b) {
    v = _mm512_broadcast_f32x4(a);
    v = _mm512_mask_broadcast_f32x4(v,mask,b);
  }

  // Broadcast an 8-wide vector into both 256-bit halves (done via a
  // 64-bit-lane broadcast, which only needs AVX512F).
  __forceinline vfloat(const vfloat8& i) {
    v = _mm512_castpd_ps(_mm512_broadcast_f64x4(_mm256_castps_pd(i)));
  }

  // Concatenate two 8-wide vectors: a = lanes 0..7, b = 8..15.
  __forceinline vfloat(const vfloat8& a, const vfloat8& b) {
    v = _mm512_castps256_ps512(a);
#if defined(__AVX512DQ__)
    v = _mm512_insertf32x8(v, b, 1);
#else
    // AVX512F fallback: insert as a 4x64-bit lane through double casts.
    v = _mm512_castpd_ps(_mm512_insertf64x4(_mm512_castps_pd(v), _mm256_castps_pd(b), 1));
#endif
  }

  /* WARNING: due to f64x4 the mask is considered as an 8bit mask */
  /*__forceinline vfloat(const vboolf16& mask, const vfloat8& a, const vfloat8& b) {
    __m512d aa = _mm512_broadcast_f64x4(_mm256_castps_pd(a));
    aa = _mm512_mask_broadcast_f64x4(aa,mask,_mm256_castps_pd(b));
    v = _mm512_castpd_ps(aa);
  }*/

  // Numeric conversions (explicit: they change the lane values, not just bits).
  __forceinline explicit vfloat(const vint16& a) {
    v = _mm512_cvtepi32_ps(a);
  }
  __forceinline explicit vfloat(const vuint16& a) {
    v = _mm512_cvtepu32_ps(a);
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Constants
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vfloat(ZeroTy)      : v(_mm512_setzero_ps()) {}
  __forceinline vfloat(OneTy)       : v(_mm512_set1_ps(1.0f)) {}
  __forceinline vfloat(PosInfTy)    : v(_mm512_set1_ps(pos_inf)) {}
  __forceinline vfloat(NegInfTy)    : v(_mm512_set1_ps(neg_inf)) {}
  // step = (0,1,2,...,15); note _mm512_set_ps takes arguments high-lane first.
  __forceinline vfloat(StepTy)      : v(_mm512_set_ps(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0)) {}
  __forceinline vfloat(NaNTy)       : v(_mm512_set1_ps(nan)) {}
  __forceinline vfloat(UndefinedTy) : v(_mm512_undefined_ps()) {}

  ////////////////////////////////////////////////////////////////////////////////
  /// Loads and Stores
  ////////////////////////////////////////////////////////////////////////////////

  // load/store require 64-byte alignment; the 'u' variants do not.
  // Masked loads zero the inactive lanes; masked stores leave memory untouched there.
  static __forceinline vfloat16 load (const void* ptr) { return _mm512_load_ps((float*)ptr); }
  static __forceinline vfloat16 loadu(const void* ptr) { return _mm512_loadu_ps((float*)ptr); }

  static __forceinline vfloat16 load (const vboolf16& mask, const void* ptr) { return _mm512_mask_load_ps (_mm512_setzero_ps(),mask,(float*)ptr); }
  static __forceinline vfloat16 loadu(const vboolf16& mask, const void* ptr) { return _mm512_mask_loadu_ps(_mm512_setzero_ps(),mask,(float*)ptr); }

  static __forceinline void store (void* ptr, const vfloat16& v) { _mm512_store_ps ((float*)ptr,v); }
  static __forceinline void storeu(void* ptr, const vfloat16& v) { _mm512_storeu_ps((float*)ptr,v); }

  static __forceinline void store (const vboolf16& mask, void* ptr, const vfloat16& v) { _mm512_mask_store_ps ((float*)ptr,mask,v); }
  static __forceinline void storeu(const vboolf16& mask, void* ptr, const vfloat16& v) { _mm512_mask_storeu_ps((float*)ptr,mask,v); }

  // Non-temporal (streaming) store: bypasses the cache; ptr must be 64-byte aligned.
  static __forceinline void store_nt(void* __restrict__ ptr, const vfloat16& a) {
    _mm512_stream_ps((float*)ptr,a);
  }

  static __forceinline vfloat16 broadcast(const float* f) {
    return _mm512_set1_ps(*f);
  }

  // Gather/scatter: lane l accesses ptr + index[l]*scale bytes.
  // The masked gather leaves inactive lanes zero (r starts as zero).
  template<int scale = 4>
  static __forceinline vfloat16 gather(const float* ptr, const vint16& index) {
    return _mm512_i32gather_ps(index, ptr, scale);
  }

  template<int scale = 4>
  static __forceinline vfloat16 gather(const vboolf16& mask, const float* ptr, const vint16& index) {
    vfloat16 r = zero;
    return _mm512_mask_i32gather_ps(r, mask, index, ptr, scale);
  }

  template<int scale = 4>
  static __forceinline void scatter(float* ptr, const vint16& index, const vfloat16& v) {
    _mm512_i32scatter_ps(ptr, index, v, scale);
  }

  template<int scale = 4>
  static __forceinline void scatter(const vboolf16& mask, float* ptr, const vint16& index, const vfloat16& v) {
    _mm512_mask_i32scatter_ps(ptr, mask, index, v, scale);
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Array Access
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline       float& operator [](size_t index)       { assert(index < 16); return f[index]; }
  __forceinline const float& operator [](size_t index) const { assert(index < 16); return f[index]; }
};
////////////////////////////////////////////////////////////////////////////////
/// Unary Operators
////////////////////////////////////////////////////////////////////////////////

// asFloat/asInt/asUInt reinterpret the 512 raw bits (no value conversion);
// toInt/toFloat perform a numeric conversion via the vint16/vfloat16 constructors.
__forceinline vfloat16 asFloat(const vint16&   a) { return _mm512_castsi512_ps(a); }
__forceinline vint16   asInt  (const vfloat16& a) { return _mm512_castps_si512(a); }
__forceinline vuint16  asUInt (const vfloat16& a) { return _mm512_castps_si512(a); }

__forceinline vint16   toInt  (const vfloat16& a) { return vint16(a); }
__forceinline vfloat16 toFloat(const vint16&   a) { return vfloat16(a); }
  137. __forceinline vfloat16 operator +(const vfloat16& a) { return a; }
  138. __forceinline vfloat16 operator -(const vfloat16& a) { return _mm512_mul_ps(a,vfloat16(-1)); }
  139. __forceinline vfloat16 abs (const vfloat16& a) { return _mm512_castsi512_ps(_mm512_and_epi32(_mm512_castps_si512(a),_mm512_set1_epi32(0x7FFFFFFF))); }
  140. __forceinline vfloat16 signmsk(const vfloat16& a) { return _mm512_castsi512_ps(_mm512_and_epi32(_mm512_castps_si512(a),_mm512_set1_epi32(0x80000000))); }
  141. __forceinline vfloat16 rcp(const vfloat16& a) {
  142. const vfloat16 r = _mm512_rcp14_ps(a);
  143. return _mm512_mul_ps(r, _mm512_fnmadd_ps(r, a, vfloat16(2.0f)));
  144. }
__forceinline vfloat16 sqr (const vfloat16& a) { return _mm512_mul_ps(a,a); }
__forceinline vfloat16 sqrt(const vfloat16& a) { return _mm512_sqrt_ps(a); }

/* Reciprocal square root: 14-bit hardware estimate refined with one
 * Newton-Raphson step, r' = 1.5*r - 0.5*a*r^3. */
__forceinline vfloat16 rsqrt(const vfloat16& a)
{
  const vfloat16 r = _mm512_rsqrt14_ps(a);
  return _mm512_fmadd_ps(_mm512_set1_ps(1.5f), r,
                         _mm512_mul_ps(_mm512_mul_ps(_mm512_mul_ps(a, _mm512_set1_ps(-0.5f)), r), _mm512_mul_ps(r, r)));
}
////////////////////////////////////////////////////////////////////////////////
/// Binary Operators
////////////////////////////////////////////////////////////////////////////////

// Lane-wise arithmetic; the float overloads broadcast the scalar first.
__forceinline vfloat16 operator +(const vfloat16& a, const vfloat16& b) { return _mm512_add_ps(a, b); }
__forceinline vfloat16 operator +(const vfloat16& a, float           b) { return a + vfloat16(b); }
__forceinline vfloat16 operator +(float           a, const vfloat16& b) { return vfloat16(a) + b; }

__forceinline vfloat16 operator -(const vfloat16& a, const vfloat16& b) { return _mm512_sub_ps(a, b); }
__forceinline vfloat16 operator -(const vfloat16& a, float           b) { return a - vfloat16(b); }
__forceinline vfloat16 operator -(float           a, const vfloat16& b) { return vfloat16(a) - b; }

__forceinline vfloat16 operator *(const vfloat16& a, const vfloat16& b) { return _mm512_mul_ps(a, b); }
__forceinline vfloat16 operator *(const vfloat16& a, float           b) { return a * vfloat16(b); }
__forceinline vfloat16 operator *(float           a, const vfloat16& b) { return vfloat16(a) * b; }

__forceinline vfloat16 operator /(const vfloat16& a, const vfloat16& b) { return _mm512_div_ps(a,b); }
__forceinline vfloat16 operator /(const vfloat16& a, float           b) { return a/vfloat16(b); }
__forceinline vfloat16 operator /(float           a, const vfloat16& b) { return vfloat16(a)/b; }

// Bitwise operations on the raw float bit patterns.
__forceinline vfloat16 operator &(const vfloat16& a, const vfloat16& b) { return _mm512_and_ps(a,b); }
__forceinline vfloat16 operator |(const vfloat16& a, const vfloat16& b) { return _mm512_or_ps(a,b); }
__forceinline vfloat16 operator ^(const vfloat16& a, const vfloat16& b) {
  return _mm512_castsi512_ps(_mm512_xor_epi32(_mm512_castps_si512(a),_mm512_castps_si512(b)));
}

// Lane-wise min/max using the FP min/max instructions.
__forceinline vfloat16 min(const vfloat16& a, const vfloat16& b) { return _mm512_min_ps(a,b); }
__forceinline vfloat16 min(const vfloat16& a, float           b) { return _mm512_min_ps(a,vfloat16(b)); }
__forceinline vfloat16 min(const float&    a, const vfloat16& b) { return _mm512_min_ps(vfloat16(a),b); }

__forceinline vfloat16 max(const vfloat16& a, const vfloat16& b) { return _mm512_max_ps(a,b); }
__forceinline vfloat16 max(const vfloat16& a, float           b) { return _mm512_max_ps(a,vfloat16(b)); }
__forceinline vfloat16 max(const float&    a, const vfloat16& b) { return _mm512_max_ps(vfloat16(a),b); }

/* mini/maxi: min/max computed with signed-integer compares on the raw float
 * bit patterns. NOTE(review): for IEEE-754 floats the signed-integer ordering
 * matches the float ordering except when both operands are negative — this
 * presumably is only used on non-negative data; confirm at call sites. */
__forceinline vfloat16 mini(const vfloat16& a, const vfloat16& b) {
  const vint16 ai = _mm512_castps_si512(a);
  const vint16 bi = _mm512_castps_si512(b);
  const vint16 ci = _mm512_min_epi32(ai,bi);
  return _mm512_castsi512_ps(ci);
}

__forceinline vfloat16 maxi(const vfloat16& a, const vfloat16& b) {
  const vint16 ai = _mm512_castps_si512(a);
  const vint16 bi = _mm512_castps_si512(b);
  const vint16 ci = _mm512_max_epi32(ai,bi);
  return _mm512_castsi512_ps(ci);
}
////////////////////////////////////////////////////////////////////////////////
/// Ternary Operators
////////////////////////////////////////////////////////////////////////////////

// Fused multiply-add family (single rounding):
//   madd  =  a*b + c,  msub  =  a*b - c,
//   nmadd = -a*b + c,  nmsub = -a*b - c.
__forceinline vfloat16 madd (const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fmadd_ps(a,b,c); }
__forceinline vfloat16 msub (const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fmsub_ps(a,b,c); }
__forceinline vfloat16 nmadd(const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fnmadd_ps(a,b,c); }
__forceinline vfloat16 nmsub(const vfloat16& a, const vfloat16& b, const vfloat16& c) { return _mm512_fnmsub_ps(a,b,c); }
////////////////////////////////////////////////////////////////////////////////
/// Assignment Operators
////////////////////////////////////////////////////////////////////////////////

// Compound assignment, expressed in terms of the binary operators above.
__forceinline vfloat16& operator +=(vfloat16& a, const vfloat16& b) { return a = a + b; }
__forceinline vfloat16& operator +=(vfloat16& a, float           b) { return a = a + b; }

__forceinline vfloat16& operator -=(vfloat16& a, const vfloat16& b) { return a = a - b; }
__forceinline vfloat16& operator -=(vfloat16& a, float           b) { return a = a - b; }

__forceinline vfloat16& operator *=(vfloat16& a, const vfloat16& b) { return a = a * b; }
__forceinline vfloat16& operator *=(vfloat16& a, float           b) { return a = a * b; }

__forceinline vfloat16& operator /=(vfloat16& a, const vfloat16& b) { return a = a / b; }
__forceinline vfloat16& operator /=(vfloat16& a, float           b) { return a = a / b; }
////////////////////////////////////////////////////////////////////////////////
/// Comparison Operators + Select
////////////////////////////////////////////////////////////////////////////////

// All comparisons produce a 16-bit k-mask (vboolf16), one bit per lane.
__forceinline vboolf16 operator ==(const vfloat16& a, const vfloat16& b) { return _mm512_cmp_ps_mask(a,b,_MM_CMPINT_EQ); }
__forceinline vboolf16 operator ==(const vfloat16& a, float           b) { return a == vfloat16(b); }
__forceinline vboolf16 operator ==(float           a, const vfloat16& b) { return vfloat16(a) == b; }

__forceinline vboolf16 operator !=(const vfloat16& a, const vfloat16& b) { return _mm512_cmp_ps_mask(a,b,_MM_CMPINT_NE); }
__forceinline vboolf16 operator !=(const vfloat16& a, float           b) { return a != vfloat16(b); }
__forceinline vboolf16 operator !=(float           a, const vfloat16& b) { return vfloat16(a) != b; }

__forceinline vboolf16 operator < (const vfloat16& a, const vfloat16& b) { return _mm512_cmp_ps_mask(a,b,_MM_CMPINT_LT); }
__forceinline vboolf16 operator < (const vfloat16& a, float           b) { return a <  vfloat16(b); }
__forceinline vboolf16 operator < (float           a, const vfloat16& b) { return vfloat16(a) <  b; }

__forceinline vboolf16 operator >=(const vfloat16& a, const vfloat16& b) { return _mm512_cmp_ps_mask(a,b,_MM_CMPINT_GE); }
__forceinline vboolf16 operator >=(const vfloat16& a, float           b) { return a >= vfloat16(b); }
__forceinline vboolf16 operator >=(float           a, const vfloat16& b) { return vfloat16(a) >= b; }

__forceinline vboolf16 operator > (const vfloat16& a, const vfloat16& b) { return _mm512_cmp_ps_mask(a,b,_MM_CMPINT_GT); }
__forceinline vboolf16 operator > (const vfloat16& a, float           b) { return a >  vfloat16(b); }
__forceinline vboolf16 operator > (float           a, const vfloat16& b) { return vfloat16(a) >  b; }

__forceinline vboolf16 operator <=(const vfloat16& a, const vfloat16& b) { return _mm512_cmp_ps_mask(a,b,_MM_CMPINT_LE); }
__forceinline vboolf16 operator <=(const vfloat16& a, float           b) { return a <= vfloat16(b); }
__forceinline vboolf16 operator <=(float           a, const vfloat16& b) { return vfloat16(a) <= b; }

// Named comparison helpers (same semantics as the operators above).
__forceinline vboolf16 eq(const vfloat16& a, const vfloat16& b) { return _mm512_cmp_ps_mask(a,b,_MM_CMPINT_EQ); }
__forceinline vboolf16 ne(const vfloat16& a, const vfloat16& b) { return _mm512_cmp_ps_mask(a,b,_MM_CMPINT_NE); }
__forceinline vboolf16 lt(const vfloat16& a, const vfloat16& b) { return _mm512_cmp_ps_mask(a,b,_MM_CMPINT_LT); }
__forceinline vboolf16 ge(const vfloat16& a, const vfloat16& b) { return _mm512_cmp_ps_mask(a,b,_MM_CMPINT_GE); }
__forceinline vboolf16 gt(const vfloat16& a, const vfloat16& b) { return _mm512_cmp_ps_mask(a,b,_MM_CMPINT_GT); }
__forceinline vboolf16 le(const vfloat16& a, const vfloat16& b) { return _mm512_cmp_ps_mask(a,b,_MM_CMPINT_LE); }

// Masked comparisons: the result bit is set only where mask is set AND the
// comparison holds.
__forceinline vboolf16 eq(const vboolf16& mask, const vfloat16& a, const vfloat16& b) { return _mm512_mask_cmp_ps_mask(mask,a,b,_MM_CMPINT_EQ); }
__forceinline vboolf16 ne(const vboolf16& mask, const vfloat16& a, const vfloat16& b) { return _mm512_mask_cmp_ps_mask(mask,a,b,_MM_CMPINT_NE); }
__forceinline vboolf16 lt(const vboolf16& mask, const vfloat16& a, const vfloat16& b) { return _mm512_mask_cmp_ps_mask(mask,a,b,_MM_CMPINT_LT); }
__forceinline vboolf16 ge(const vboolf16& mask, const vfloat16& a, const vfloat16& b) { return _mm512_mask_cmp_ps_mask(mask,a,b,_MM_CMPINT_GE); }
__forceinline vboolf16 gt(const vboolf16& mask, const vfloat16& a, const vfloat16& b) { return _mm512_mask_cmp_ps_mask(mask,a,b,_MM_CMPINT_GT); }
__forceinline vboolf16 le(const vboolf16& mask, const vfloat16& a, const vfloat16& b) { return _mm512_mask_cmp_ps_mask(mask,a,b,_MM_CMPINT_LE); }

// Per-lane select: returns t where s is set, f elsewhere.
__forceinline vfloat16 select(const vboolf16& s, const vfloat16& t, const vfloat16& f) {
  return _mm512_mask_blend_ps(s, f, t);
}

// Linear interpolation a + t*(b-a), computed with a fused multiply-add.
__forceinline vfloat16 lerp(const vfloat16& a, const vfloat16& b, const vfloat16& t) {
  return madd(t,b-a,a);
}
////////////////////////////////////////////////////////////////////////////////
/// Rounding Functions
////////////////////////////////////////////////////////////////////////////////

__forceinline vfloat16 floor(const vfloat16& a) {
  return _mm512_floor_ps(a);
}
__forceinline vfloat16 ceil (const vfloat16& a) {
  return _mm512_ceil_ps(a);
}
// Round to nearest integer (ties to even), exceptions suppressed.
__forceinline vfloat16 round (const vfloat16& a) {
  return _mm512_roundscale_ps(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
// floor() and convert to int32 in one instruction (round toward -inf).
__forceinline vint16 floori (const vfloat16& a) {
  return _mm512_cvt_roundps_epi32(a, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
}
////////////////////////////////////////////////////////////////////////////////
/// Movement/Shifting/Shuffling Functions
////////////////////////////////////////////////////////////////////////////////

// Interleave lanes pair-wise within each 128-bit sub-lane.
__forceinline vfloat16 unpacklo(const vfloat16& a, const vfloat16& b) { return _mm512_unpacklo_ps(a, b); }
__forceinline vfloat16 unpackhi(const vfloat16& a, const vfloat16& b) { return _mm512_unpackhi_ps(a, b); }

// shuffle: permute the four 32-bit elements *within each* 128-bit lane.
// The single-index form broadcasts element i of each lane across that lane.
template<int i>
__forceinline vfloat16 shuffle(const vfloat16& v) {
  return _mm512_permute_ps(v, _MM_SHUFFLE(i, i, i, i));
}

template<int i0, int i1, int i2, int i3>
__forceinline vfloat16 shuffle(const vfloat16& v) {
  return _mm512_permute_ps(v, _MM_SHUFFLE(i3, i2, i1, i0));
}

// shuffle4: permute whole 128-bit lanes.
// The single-index form broadcasts 128-bit lane i to all four positions.
template<int i>
__forceinline vfloat16 shuffle4(const vfloat16& v) {
  return _mm512_shuffle_f32x4(v, v ,_MM_SHUFFLE(i, i, i, i));
}

template<int i0, int i1, int i2, int i3>
__forceinline vfloat16 shuffle4(const vfloat16& v) {
  return _mm512_shuffle_f32x4(v, v, _MM_SHUFFLE(i3, i2, i1, i0));
}

// Interleave 256-bit halves at 128-bit granularity via a masked 64-bit-lane
// permute; imm 0x4e swaps the two 128-bit halves within each 256-bit half,
// masks 0xcc/0x33 select which destination 64-bit lanes get replaced.
__forceinline vfloat16 interleave4_even(const vfloat16& a, const vfloat16& b) {
  return _mm512_castsi512_ps(_mm512_mask_permutex_epi64(_mm512_castps_si512(a), mm512_int2mask(0xcc), _mm512_castps_si512(b), (_MM_PERM_ENUM)0x4e));
}

__forceinline vfloat16 interleave4_odd(const vfloat16& a, const vfloat16& b) {
  return _mm512_castsi512_ps(_mm512_mask_permutex_epi64(_mm512_castps_si512(b), mm512_int2mask(0x33), _mm512_castps_si512(a), (_MM_PERM_ENUM)0x4e));
}
// Arbitrary cross-lane permutation: result lane l = v[index[l]].
__forceinline vfloat16 permute(vfloat16 v, __m512i index) {
  return _mm512_castsi512_ps(_mm512_permutexvar_epi32(index, _mm512_castps_si512(v)));
}

// Reverse the order of all 16 lanes.
__forceinline vfloat16 reverse(const vfloat16& v) {
  return permute(v,_mm512_setr_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0));
}
  296. template<int i>
  297. __forceinline vfloat16 align_shift_right(const vfloat16& a, const vfloat16& b) {
  298. return _mm512_castsi512_ps(_mm512_alignr_epi32(_mm512_castps_si512(a),_mm512_castps_si512(b),i));
  299. };
  300. template<int i>
  301. __forceinline vfloat16 mask_align_shift_right(const vboolf16& mask, vfloat16& c, const vfloat16& a, const vfloat16& b) {
  302. return _mm512_castsi512_ps(_mm512_mask_alignr_epi32(_mm512_castps_si512(c),mask,_mm512_castps_si512(a),_mm512_castps_si512(b),i));
  303. };
// Shift all lanes one position up (toward higher indices); lane 0 becomes 0.
__forceinline vfloat16 shift_left_1(const vfloat16& a) {
  vfloat16 z = zero;
  return mask_align_shift_right<15>(0xfffe,z,a,a);
}

// Shift all lanes one position down (toward lower indices); lane 15 becomes 0.
__forceinline vfloat16 shift_right_1(const vfloat16& x) {
  return align_shift_right<1>(zero,x);
}

// Extract lane 0 as a scalar float.
__forceinline float toScalar(const vfloat16& v) { return mm512_cvtss_f32(v); }

// Replace 128-bit lane i with b.
template<int i> __forceinline vfloat16 insert4(const vfloat16& a, const vfloat4& b) { return _mm512_insertf32x4(a, b, i); }
// Extract the i-th N-wide sub-vector (N = 4 or 8). The <*,0> specializations
// are free register casts; the others issue an extract instruction.
// NOTE(review): _mm512_extractf32x8_ps requires AVX512DQ (unlike the guarded
// insertf32x8 path in the constructor) — presumably fine for the targeted
// SKX-class ISA; confirm against the build's minimum target.
template<int N, int i>
vfloat<N> extractN(const vfloat16& v);

template<> __forceinline vfloat4 extractN<4,0>(const vfloat16& v) { return _mm512_castps512_ps128(v); }
template<> __forceinline vfloat4 extractN<4,1>(const vfloat16& v) { return _mm512_extractf32x4_ps(v, 1); }
template<> __forceinline vfloat4 extractN<4,2>(const vfloat16& v) { return _mm512_extractf32x4_ps(v, 2); }
template<> __forceinline vfloat4 extractN<4,3>(const vfloat16& v) { return _mm512_extractf32x4_ps(v, 3); }

template<> __forceinline vfloat8 extractN<8,0>(const vfloat16& v) { return _mm512_castps512_ps256(v); }
template<> __forceinline vfloat8 extractN<8,1>(const vfloat16& v) { return _mm512_extractf32x8_ps(v, 1); }

// Convenience forms with the width baked into the name.
template<int i> __forceinline vfloat4 extract4   (const vfloat16& v) { return _mm512_extractf32x4_ps(v, i); }
template<>      __forceinline vfloat4 extract4<0>(const vfloat16& v) { return _mm512_castps512_ps128(v);    }

template<int i> __forceinline vfloat8 extract8   (const vfloat16& v) { return _mm512_extractf32x8_ps(v, i); }
template<>      __forceinline vfloat8 extract8<0>(const vfloat16& v) { return _mm512_castps512_ps256(v);    }
////////////////////////////////////////////////////////////////////////////////
/// Transpose
////////////////////////////////////////////////////////////////////////////////

// Transposes four vfloat16 registers viewed as four independent 4x4 float
// matrices (one per 128-bit lane), using the classic unpacklo/unpackhi ladder.
__forceinline void transpose(const vfloat16& r0, const vfloat16& r1, const vfloat16& r2, const vfloat16& r3,
                             vfloat16& c0, vfloat16& c1, vfloat16& c2, vfloat16& c3)
{
  // First stage: interleave rows 0/2 and 1/3.
  vfloat16 a0a2_b0b2 = unpacklo(r0, r2);
  vfloat16 c0c2_d0d2 = unpackhi(r0, r2);
  vfloat16 a1a3_b1b3 = unpacklo(r1, r3);
  vfloat16 c1c3_d1d3 = unpackhi(r1, r3);
  // Second stage: interleave the intermediates to form the columns.
  c0 = unpacklo(a0a2_b0b2, a1a3_b1b3);
  c1 = unpackhi(a0a2_b0b2, a1a3_b1b3);
  c2 = unpacklo(c0c2_d0d2, c1c3_d1d3);
  c3 = unpackhi(c0c2_d0d2, c1c3_d1d3);
}
// Transposes sixteen vfloat4 rows into four vfloat16 columns by first packing
// rows 4 apart into vfloat16 registers, then reusing the 4x4 transpose above.
__forceinline void transpose(const vfloat4& r0,  const vfloat4& r1,  const vfloat4& r2,  const vfloat4& r3,
                             const vfloat4& r4,  const vfloat4& r5,  const vfloat4& r6,  const vfloat4& r7,
                             const vfloat4& r8,  const vfloat4& r9,  const vfloat4& r10, const vfloat4& r11,
                             const vfloat4& r12, const vfloat4& r13, const vfloat4& r14, const vfloat4& r15,
                             vfloat16& c0, vfloat16& c1, vfloat16& c2, vfloat16& c3)
{
  return transpose(vfloat16(r0, r4, r8, r12), vfloat16(r1, r5, r9, r13), vfloat16(r2, r6, r10, r14), vfloat16(r3, r7, r11, r15),
                   c0, c1, c2, c3);
}
// Transposes eight vfloat16 registers viewed as two 8x8 float matrices:
// two 4x4 transposes produce per-lane columns, then interleave4_even/odd
// merge the matching 128-bit lanes of the two halves into full columns.
__forceinline void transpose(const vfloat16& r0, const vfloat16& r1, const vfloat16& r2, const vfloat16& r3,
                             const vfloat16& r4, const vfloat16& r5, const vfloat16& r6, const vfloat16& r7,
                             vfloat16& c0, vfloat16& c1, vfloat16& c2, vfloat16& c3,
                             vfloat16& c4, vfloat16& c5, vfloat16& c6, vfloat16& c7)
{
  vfloat16 a0a1a2a3_e0e1e2e3, b0b1b2b3_f0f1f2f3, c0c1c2c3_g0g1g2g3, d0d1d2d3_h0h1h2h3;
  transpose(r0, r1, r2, r3, a0a1a2a3_e0e1e2e3, b0b1b2b3_f0f1f2f3, c0c1c2c3_g0g1g2g3, d0d1d2d3_h0h1h2h3);
  vfloat16 a4a5a6a7_e4e5e6e7, b4b5b6b7_f4f5f6f7, c4c5c6c7_g4g5g6g7, d4d5d6d7_h4h5h6h7;
  transpose(r4, r5, r6, r7, a4a5a6a7_e4e5e6e7, b4b5b6b7_f4f5f6f7, c4c5c6c7_g4g5g6g7, d4d5d6d7_h4h5h6h7);
  c0 = interleave4_even(a0a1a2a3_e0e1e2e3, a4a5a6a7_e4e5e6e7);
  c1 = interleave4_even(b0b1b2b3_f0f1f2f3, b4b5b6b7_f4f5f6f7);
  c2 = interleave4_even(c0c1c2c3_g0g1g2g3, c4c5c6c7_g4g5g6g7);
  c3 = interleave4_even(d0d1d2d3_h0h1h2h3, d4d5d6d7_h4h5h6h7);
  c4 = interleave4_odd (a0a1a2a3_e0e1e2e3, a4a5a6a7_e4e5e6e7);
  c5 = interleave4_odd (b0b1b2b3_f0f1f2f3, b4b5b6b7_f4f5f6f7);
  c6 = interleave4_odd (c0c1c2c3_g0g1g2g3, c4c5c6c7_g4g5g6g7);
  c7 = interleave4_odd (d0d1d2d3_h0h1h2h3, d4d5d6d7_h4h5h6h7);
}
// Transposes sixteen vfloat8 rows into eight vfloat16 columns by first packing
// rows 8 apart into vfloat16 registers, then reusing the 8-row transpose above.
__forceinline void transpose(const vfloat8& r0,  const vfloat8& r1,  const vfloat8& r2,  const vfloat8& r3,
                             const vfloat8& r4,  const vfloat8& r5,  const vfloat8& r6,  const vfloat8& r7,
                             const vfloat8& r8,  const vfloat8& r9,  const vfloat8& r10, const vfloat8& r11,
                             const vfloat8& r12, const vfloat8& r13, const vfloat8& r14, const vfloat8& r15,
                             vfloat16& c0, vfloat16& c1, vfloat16& c2, vfloat16& c3,
                             vfloat16& c4, vfloat16& c5, vfloat16& c6, vfloat16& c7)
{
  return transpose(vfloat16(r0, r8),  vfloat16(r1, r9),  vfloat16(r2, r10), vfloat16(r3, r11),
                   vfloat16(r4, r12), vfloat16(r5, r13), vfloat16(r6, r14), vfloat16(r7, r15),
                   c0, c1, c2, c3, c4, c5, c6, c7);
}
////////////////////////////////////////////////////////////////////////////////
/// Reductions
////////////////////////////////////////////////////////////////////////////////

// Tree reductions: vreduce_*N reduces over groups of N lanes, leaving the
// group result broadcast across each group; vreduce_* reduces all 16 lanes.
__forceinline vfloat16 vreduce_add2(vfloat16 x) {                      return x + shuffle<1,0,3,2>(x); }
__forceinline vfloat16 vreduce_add4(vfloat16 x) { x = vreduce_add2(x); return x + shuffle<2,3,0,1>(x); }
__forceinline vfloat16 vreduce_add8(vfloat16 x) { x = vreduce_add4(x); return x + shuffle4<1,0,3,2>(x); }
__forceinline vfloat16 vreduce_add (vfloat16 x) { x = vreduce_add8(x); return x + shuffle4<2,3,0,1>(x); }

__forceinline vfloat16 vreduce_min2(vfloat16 x) {                      return min(x, shuffle<1,0,3,2>(x)); }
__forceinline vfloat16 vreduce_min4(vfloat16 x) { x = vreduce_min2(x); return min(x, shuffle<2,3,0,1>(x)); }
__forceinline vfloat16 vreduce_min8(vfloat16 x) { x = vreduce_min4(x); return min(x, shuffle4<1,0,3,2>(x)); }
__forceinline vfloat16 vreduce_min (vfloat16 x) { x = vreduce_min8(x); return min(x, shuffle4<2,3,0,1>(x)); }

__forceinline vfloat16 vreduce_max2(vfloat16 x) {                      return max(x, shuffle<1,0,3,2>(x)); }
__forceinline vfloat16 vreduce_max4(vfloat16 x) { x = vreduce_max2(x); return max(x, shuffle<2,3,0,1>(x)); }
__forceinline vfloat16 vreduce_max8(vfloat16 x) { x = vreduce_max4(x); return max(x, shuffle4<1,0,3,2>(x)); }
__forceinline vfloat16 vreduce_max (vfloat16 x) { x = vreduce_max8(x); return max(x, shuffle4<2,3,0,1>(x)); }

// Scalar reductions: full vector reduction, then extract lane 0.
__forceinline float reduce_add(const vfloat16& v) { return toScalar(vreduce_add(v)); }
__forceinline float reduce_min(const vfloat16& v) { return toScalar(vreduce_min(v)); }
__forceinline float reduce_max(const vfloat16& v) { return toScalar(vreduce_max(v)); }
// Index of the first lane whose bit pattern equals the horizontal minimum
// (bit-exact integer compare against the broadcast reduction result).
__forceinline size_t select_min(const vfloat16& v) {
  return bsf(_mm512_kmov(_mm512_cmp_epi32_mask(_mm512_castps_si512(v),_mm512_castps_si512(vreduce_min(v)),_MM_CMPINT_EQ)));
}

// Index of the first lane whose bit pattern equals the horizontal maximum.
__forceinline size_t select_max(const vfloat16& v) {
  return bsf(_mm512_kmov(_mm512_cmp_epi32_mask(_mm512_castps_si512(v),_mm512_castps_si512(vreduce_max(v)),_MM_CMPINT_EQ)));
}

// Masked variant: invalid lanes are replaced by +inf so they cannot win.
// If no valid lane compares equal to the minimum (e.g. NaN lanes), fall back
// to the first valid lane.
__forceinline size_t select_min(const vboolf16& valid, const vfloat16& v)
{
  const vfloat16 a = select(valid,v,vfloat16(pos_inf));
  const vbool16 valid_min = valid & (a == vreduce_min(a));
  return bsf(movemask(any(valid_min) ? valid_min : valid));
}

// Masked variant: invalid lanes are replaced by -inf; same fallback as above.
__forceinline size_t select_max(const vboolf16& valid, const vfloat16& v)
{
  const vfloat16 a = select(valid,v,vfloat16(neg_inf));
  const vbool16 valid_max = valid & (a == vreduce_max(a));
  return bsf(movemask(any(valid_max) ? valid_max : valid));
}
// Inclusive prefix sum: out[i] = a[0] + ... + a[i], built in log2(16) = 4
// steps of shifted adds (each align_shift_right<16-k> shifts lanes up by k,
// filling with zeros).
__forceinline vfloat16 prefix_sum(const vfloat16& a)
{
  const vfloat16 z(zero);
  vfloat16 v = a;
  v = v + align_shift_right<16-1>(v,z);
  v = v + align_shift_right<16-2>(v,z);
  v = v + align_shift_right<16-4>(v,z);
  v = v + align_shift_right<16-8>(v,z);
  return v;
}

// Inclusive suffix sum: out[i] = a[i] + ... + a[15]; same ladder with the
// shifts going the other direction.
__forceinline vfloat16 reverse_prefix_sum(const vfloat16& a)
{
  const vfloat16 z(zero);
  vfloat16 v = a;
  v = v + align_shift_right<1>(z,v);
  v = v + align_shift_right<2>(z,v);
  v = v + align_shift_right<4>(z,v);
  v = v + align_shift_right<8>(z,v);
  return v;
}
// Inclusive prefix minimum: out[i] = min(a[0..i]); shifted-in lanes are
// filled with +inf (identity of min).
__forceinline vfloat16 prefix_min(const vfloat16& a)
{
  const vfloat16 z(pos_inf);
  vfloat16 v = a;
  v = min(v,align_shift_right<16-1>(v,z));
  v = min(v,align_shift_right<16-2>(v,z));
  v = min(v,align_shift_right<16-4>(v,z));
  v = min(v,align_shift_right<16-8>(v,z));
  return v;
}

// Inclusive prefix maximum: out[i] = max(a[0..i]); fill value is -inf.
__forceinline vfloat16 prefix_max(const vfloat16& a)
{
  const vfloat16 z(neg_inf);
  vfloat16 v = a;
  v = max(v,align_shift_right<16-1>(v,z));
  v = max(v,align_shift_right<16-2>(v,z));
  v = max(v,align_shift_right<16-4>(v,z));
  v = max(v,align_shift_right<16-8>(v,z));
  return v;
}

// Inclusive suffix minimum: out[i] = min(a[i..15]).
__forceinline vfloat16 reverse_prefix_min(const vfloat16& a)
{
  const vfloat16 z(pos_inf);
  vfloat16 v = a;
  v = min(v,align_shift_right<1>(z,v));
  v = min(v,align_shift_right<2>(z,v));
  v = min(v,align_shift_right<4>(z,v));
  v = min(v,align_shift_right<8>(z,v));
  return v;
}

// Inclusive suffix maximum: out[i] = max(a[i..15]).
__forceinline vfloat16 reverse_prefix_max(const vfloat16& a)
{
  const vfloat16 z(neg_inf);
  vfloat16 v = a;
  v = max(v,align_shift_right<1>(z,v));
  v = max(v,align_shift_right<2>(z,v));
  v = max(v,align_shift_right<4>(z,v));
  v = max(v,align_shift_right<8>(z,v));
  return v;
}
// Reciprocal that avoids dividing by zero: zero lanes are substituted with
// min_rcp_input (a tiny positive value) before taking rcp, yielding a huge
// finite result instead of inf/NaN.
__forceinline vfloat16 rcp_safe(const vfloat16& a) {
  return rcp(select(a != vfloat16(zero), a, vfloat16(min_rcp_input)));
}
  477. ////////////////////////////////////////////////////////////////////////////////
  478. /// Output Operators
  479. ////////////////////////////////////////////////////////////////////////////////
  480. __forceinline embree_ostream operator <<(embree_ostream cout, const vfloat16& v)
  481. {
  482. cout << "<" << v[0];
  483. for (int i=1; i<16; i++) cout << ", " << v[i];
  484. cout << ">";
  485. return cout;
  486. }
  487. }
  488. #undef vboolf
  489. #undef vboold
  490. #undef vint
  491. #undef vuint
  492. #undef vllong
  493. #undef vfloat
  494. #undef vdouble