// vdouble8_avx512.h
  1. // Copyright 2009-2021 Intel Corporation
  2. // SPDX-License-Identifier: Apache-2.0
  3. #pragma once
  4. #define vboolf vboolf_impl
  5. #define vboold vboold_impl
  6. #define vint vint_impl
  7. #define vuint vuint_impl
  8. #define vllong vllong_impl
  9. #define vfloat vfloat_impl
  10. #define vdouble vdouble_impl
  11. namespace embree
  12. {
/* 8-wide AVX-512 64-bit double type */
template<>
struct vdouble<8>
{
  ALIGNED_STRUCT_(64); // 64-byte alignment so aligned __m512d loads/stores are valid
  typedef vboold8 Bool; // per-lane mask type used by comparisons and select
  enum { size = 8 }; // number of SIMD elements
  union { // data: the same 512 bits viewable as one vector register or eight scalar lanes
    __m512d v;
    double i[8]; // NOTE(review): lane array is named 'i' (not 'f') — presumably for symmetry with the integer vector types
  };

  ////////////////////////////////////////////////////////////////////////////////
  /// Constructors, Assignment & Cast Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vdouble() {} // intentionally leaves v uninitialized
  __forceinline vdouble(const vdouble8& t) { v = t.v; }
  __forceinline vdouble8& operator =(const vdouble8& f) { v = f.v; return *this; }

  __forceinline vdouble(const __m512d& t) { v = t; }
  __forceinline operator __m512d() const { return v; }
  __forceinline operator __m256d() const { return _mm512_castpd512_pd256(v); } // lower four lanes only

  /* broadcast a single scalar to all 8 lanes */
  __forceinline vdouble(double i) {
    v = _mm512_set1_pd(i);
  }

  /* repeat the 4-element pattern (a,b,c,d) twice; _mm512_set4_pd takes arguments high-to-low */
  __forceinline vdouble(double a, double b, double c, double d) {
    v = _mm512_set4_pd(d,c,b,a);
  }

  /* per-element init; _mm512_set_pd takes arguments high-to-low, hence the reversed order */
  __forceinline vdouble(double a0, double a1, double a2, double a3,
                        double a4, double a5, double a6, double a7)
  {
    v = _mm512_set_pd(a7,a6,a5,a4,a3,a2,a1,a0);
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Constants
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vdouble(ZeroTy) : v(_mm512_setzero_pd()) {}
  __forceinline vdouble(OneTy) : v(_mm512_set1_pd(1)) {}
  __forceinline vdouble(StepTy) : v(_mm512_set_pd(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)) {} // lanes 0..7 = 0..7 (set_pd is high-to-low)
  __forceinline vdouble(ReverseStepTy) : v(_mm512_setr_pd(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)) {} // lanes 0..7 = 7..0 (setr_pd is low-to-high)

  ////////////////////////////////////////////////////////////////////////////////
  /// Loads and Stores
  ////////////////////////////////////////////////////////////////////////////////

  /* non-temporal (streaming) store; ptr must be 64-byte aligned */
  static __forceinline void store_nt(void *__restrict__ ptr, const vdouble8& a) {
    _mm512_stream_pd((double*)ptr, a);
  }

  /* unaligned load */
  static __forceinline vdouble8 loadu(const void* addr) {
    return _mm512_loadu_pd((double*)addr);
  }

  /* aligned loads (addr must be 64-byte aligned) */
  static __forceinline vdouble8 load(const vdouble8* addr) {
    return _mm512_load_pd((double*)addr);
  }
  static __forceinline vdouble8 load(const double* addr) {
    return _mm512_load_pd(addr);
  }

  /* aligned store */
  static __forceinline void store(void* ptr, const vdouble8& v) {
    _mm512_store_pd(ptr, v);
  }
  /* unaligned store */
  static __forceinline void storeu(void* ptr, const vdouble8& v) {
    _mm512_storeu_pd(ptr, v);
  }

  /* masked stores: only lanes whose mask bit is set are written to memory */
  static __forceinline void storeu(const vboold8& mask, double* ptr, const vdouble8& f) {
    _mm512_mask_storeu_pd(ptr, mask, f);
  }
  static __forceinline void store(const vboold8& mask, void* addr, const vdouble8& v2) {
    _mm512_mask_store_pd(addr, mask, v2);
  }

  /* compress the lanes selected by mask to the front of the result; trailing lanes keep v's values */
  static __forceinline vdouble8 compact(const vboold8& mask, vdouble8& v) {
    return _mm512_mask_compress_pd(v, mask, v);
  }
  /* compress b's selected lanes to the front; remaining lanes come from a */
  static __forceinline vdouble8 compact(const vboold8& mask, const vdouble8& a, vdouble8& b) {
    return _mm512_mask_compress_pd(a, mask, b);
  }

  /* broadcast the double stored at address a to all lanes */
  static __forceinline vdouble8 broadcast(const void* a) { return _mm512_set1_pd(*(double*)a); }

  ////////////////////////////////////////////////////////////////////////////////
  /// Array Access
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline double& operator [](size_t index) { assert(index < 8); return i[index]; }
  __forceinline const double& operator [](size_t index) const { assert(index < 8); return i[index]; }
};
////////////////////////////////////////////////////////////////////////////////
/// Unary Operators
////////////////////////////////////////////////////////////////////////////////

/* bitwise reinterpretation between 8-wide long long and 8-wide double (no value conversion) */
__forceinline vdouble8 asDouble(const vllong8& a) { return _mm512_castsi512_pd(a); }
__forceinline vllong8 asLLong (const vdouble8& a) { return _mm512_castpd_si512(a); }

__forceinline vdouble8 operator +(const vdouble8& a) { return a; } // unary plus: identity
/* negation computed as 0 - a (note: this maps -(+0.0) to +0.0, unlike a sign-bit flip) */
__forceinline vdouble8 operator -(const vdouble8& a) { return _mm512_sub_pd(_mm512_setzero_pd(), a); }
////////////////////////////////////////////////////////////////////////////////
/// Binary Operators
////////////////////////////////////////////////////////////////////////////////

/* per-lane arithmetic; the scalar overloads broadcast the scalar to all lanes */
__forceinline vdouble8 operator +(const vdouble8& a, const vdouble8& b) { return _mm512_add_pd(a, b); }
__forceinline vdouble8 operator +(const vdouble8& a, double b) { return a + vdouble8(b); }
__forceinline vdouble8 operator +(double a, const vdouble8& b) { return vdouble8(a) + b; }

__forceinline vdouble8 operator -(const vdouble8& a, const vdouble8& b) { return _mm512_sub_pd(a, b); }
__forceinline vdouble8 operator -(const vdouble8& a, double b) { return a - vdouble8(b); }
__forceinline vdouble8 operator -(double a, const vdouble8& b) { return vdouble8(a) - b; }

__forceinline vdouble8 operator *(const vdouble8& a, const vdouble8& b) { return _mm512_mul_pd(a, b); }
__forceinline vdouble8 operator *(const vdouble8& a, double b) { return a * vdouble8(b); }
__forceinline vdouble8 operator *(double a, const vdouble8& b) { return vdouble8(a) * b; }

/* bitwise operators act on the raw 64-bit patterns of the double lanes */
__forceinline vdouble8 operator &(const vdouble8& a, const vdouble8& b) { return _mm512_and_pd(a, b); }
__forceinline vdouble8 operator &(const vdouble8& a, double b) { return a & vdouble8(b); }
__forceinline vdouble8 operator &(double a, const vdouble8& b) { return vdouble8(a) & b; }

__forceinline vdouble8 operator |(const vdouble8& a, const vdouble8& b) { return _mm512_or_pd(a, b); }
__forceinline vdouble8 operator |(const vdouble8& a, double b) { return a | vdouble8(b); }
__forceinline vdouble8 operator |(double a, const vdouble8& b) { return vdouble8(a) | b; }

__forceinline vdouble8 operator ^(const vdouble8& a, const vdouble8& b) { return _mm512_xor_pd(a, b); }
__forceinline vdouble8 operator ^(const vdouble8& a, double b) { return a ^ vdouble8(b); }
__forceinline vdouble8 operator ^(double a, const vdouble8& b) { return vdouble8(a) ^ b; }
  119. __forceinline vdouble8 operator <<(const vdouble8& a, const unsigned int n) { return _mm512_castsi512_pd(_mm512_slli_epi64(_mm512_castpd_si512(a), n)); }
  120. __forceinline vdouble8 operator >>(const vdouble8& a, const unsigned int n) { return _mm512_castsi512_pd(_mm512_srai_epi64(_mm512_castpd_si512(a), n)); }
  121. __forceinline vdouble8 operator <<(const vdouble8& a, const vllong8& n) { return _mm512_castsi512_pd(_mm512_sllv_epi64(_mm512_castpd_si512(a), n)); }
  122. __forceinline vdouble8 operator >>(const vdouble8& a, const vllong8& n) { return _mm512_castsi512_pd(_mm512_srav_epi64(_mm512_castpd_si512(a), n)); }
  123. __forceinline vdouble8 sll (const vdouble8& a, const unsigned int b) { return _mm512_castsi512_pd(_mm512_slli_epi64(_mm512_castpd_si512(a), b)); }
  124. __forceinline vdouble8 sra (const vdouble8& a, const unsigned int b) { return _mm512_castsi512_pd(_mm512_srai_epi64(_mm512_castpd_si512(a), b)); }
  125. __forceinline vdouble8 srl (const vdouble8& a, const unsigned int b) { return _mm512_castsi512_pd(_mm512_srli_epi64(_mm512_castpd_si512(a), b)); }
/* per-lane minimum/maximum (scalar overloads broadcast the scalar) */
__forceinline vdouble8 min(const vdouble8& a, const vdouble8& b) { return _mm512_min_pd(a, b); }
__forceinline vdouble8 min(const vdouble8& a, double b) { return min(a,vdouble8(b)); }
__forceinline vdouble8 min(double a, const vdouble8& b) { return min(vdouble8(a),b); }

__forceinline vdouble8 max(const vdouble8& a, const vdouble8& b) { return _mm512_max_pd(a, b); }
__forceinline vdouble8 max(const vdouble8& a, double b) { return max(a,vdouble8(b)); }
__forceinline vdouble8 max(double a, const vdouble8& b) { return max(vdouble8(a),b); }

/* masked operations: lanes whose mask bit is set receive a OP b, other lanes keep c's value */
__forceinline vdouble8 mask_add(const vboold8& mask, vdouble8& c, const vdouble8& a, const vdouble8& b) { return _mm512_mask_add_pd(c,mask,a,b); }
__forceinline vdouble8 mask_sub(const vboold8& mask, vdouble8& c, const vdouble8& a, const vdouble8& b) { return _mm512_mask_sub_pd(c,mask,a,b); }
__forceinline vdouble8 mask_and(const vboold8& m,vdouble8& c, const vdouble8& a, const vdouble8& b) { return _mm512_mask_and_pd(c,m,a,b); }
__forceinline vdouble8 mask_or (const vboold8& m,vdouble8& c, const vdouble8& a, const vdouble8& b) { return _mm512_mask_or_pd(c,m,a,b); }
////////////////////////////////////////////////////////////////////////////////
/// Ternary Operators
////////////////////////////////////////////////////////////////////////////////

/* fused multiply-add variants (single rounding):
 * madd = a*b+c, msub = a*b-c, nmadd = -(a*b)+c, nmsub = -(a*b)-c */
__forceinline vdouble8 madd (const vdouble8& a, const vdouble8& b, const vdouble8& c) { return _mm512_fmadd_pd(a,b,c); }
__forceinline vdouble8 msub (const vdouble8& a, const vdouble8& b, const vdouble8& c) { return _mm512_fmsub_pd(a,b,c); }
__forceinline vdouble8 nmadd(const vdouble8& a, const vdouble8& b, const vdouble8& c) { return _mm512_fnmadd_pd(a,b,c); }
__forceinline vdouble8 nmsub(const vdouble8& a, const vdouble8& b, const vdouble8& c) { return _mm512_fnmsub_pd(a,b,c); }
////////////////////////////////////////////////////////////////////////////////
/// Assignment Operators
////////////////////////////////////////////////////////////////////////////////

/* compound assignment, each defined in terms of the corresponding binary operator */
__forceinline vdouble8& operator +=(vdouble8& a, const vdouble8& b) { return a = a + b; }
__forceinline vdouble8& operator +=(vdouble8& a, double b) { return a = a + b; }

__forceinline vdouble8& operator -=(vdouble8& a, const vdouble8& b) { return a = a - b; }
__forceinline vdouble8& operator -=(vdouble8& a, double b) { return a = a - b; }

__forceinline vdouble8& operator *=(vdouble8& a, const vdouble8& b) { return a = a * b; }
__forceinline vdouble8& operator *=(vdouble8& a, double b) { return a = a * b; }

__forceinline vdouble8& operator &=(vdouble8& a, const vdouble8& b) { return a = a & b; }
__forceinline vdouble8& operator &=(vdouble8& a, double b) { return a = a & b; }

__forceinline vdouble8& operator |=(vdouble8& a, const vdouble8& b) { return a = a | b; }
__forceinline vdouble8& operator |=(vdouble8& a, double b) { return a = a | b; }

// NOTE(review): the shift count is declared as double and converts implicitly to
// the unsigned int parameter of operator<< / operator>> — confirm this is intended.
__forceinline vdouble8& operator <<=(vdouble8& a, const double b) { return a = a << b; }
__forceinline vdouble8& operator >>=(vdouble8& a, const double b) { return a = a >> b; }
////////////////////////////////////////////////////////////////////////////////
/// Comparison Operators + Select
////////////////////////////////////////////////////////////////////////////////

/* per-lane comparisons producing an 8-bit vboold8 mask; scalar overloads broadcast */
__forceinline vboold8 operator ==(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_EQ); }
__forceinline vboold8 operator ==(const vdouble8& a, double b) { return a == vdouble8(b); }
__forceinline vboold8 operator ==(double a, const vdouble8& b) { return vdouble8(a) == b; }

__forceinline vboold8 operator !=(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_NE); }
__forceinline vboold8 operator !=(const vdouble8& a, double b) { return a != vdouble8(b); }
__forceinline vboold8 operator !=(double a, const vdouble8& b) { return vdouble8(a) != b; }

__forceinline vboold8 operator < (const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_LT); }
__forceinline vboold8 operator < (const vdouble8& a, double b) { return a < vdouble8(b); }
__forceinline vboold8 operator < (double a, const vdouble8& b) { return vdouble8(a) < b; }

__forceinline vboold8 operator >=(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_GE); }
__forceinline vboold8 operator >=(const vdouble8& a, double b) { return a >= vdouble8(b); }
__forceinline vboold8 operator >=(double a, const vdouble8& b) { return vdouble8(a) >= b; }

__forceinline vboold8 operator > (const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_GT); }
__forceinline vboold8 operator > (const vdouble8& a, double b) { return a > vdouble8(b); }
__forceinline vboold8 operator > (double a, const vdouble8& b) { return vdouble8(a) > b; }

__forceinline vboold8 operator <=(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_LE); }
__forceinline vboold8 operator <=(const vdouble8& a, double b) { return a <= vdouble8(b); }
__forceinline vboold8 operator <=(double a, const vdouble8& b) { return vdouble8(a) <= b; }

/* named comparison helpers, equivalent to the operators above */
__forceinline vboold8 eq(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_EQ); }
__forceinline vboold8 ne(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_NE); }
__forceinline vboold8 lt(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_LT); }
__forceinline vboold8 ge(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_GE); }
__forceinline vboold8 gt(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_GT); }
__forceinline vboold8 le(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_LE); }

/* masked comparisons: a result bit can only be set where 'mask' is already set */
__forceinline vboold8 eq(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_EQ); }
__forceinline vboold8 ne(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_NE); }
__forceinline vboold8 lt(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_LT); }
__forceinline vboold8 ge(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_GE); }
__forceinline vboold8 gt(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_GT); }
__forceinline vboold8 le(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_LE); }
  191. __forceinline vdouble8 select(const vboold8& m, const vdouble8& t, const vdouble8& f) {
  192. return _mm512_mask_or_pd(f,m,t,t);
  193. }
////////////////////////////////////////////////////////////////////////////////
// Movement/Shifting/Shuffling Functions
////////////////////////////////////////////////////////////////////////////////

/* swizzle within each 128-bit pair: even lanes get element i0 of their pair,
 * odd lanes get element i1 (_mm512_permute_pd reads one selector bit per lane,
 * hence the (i1,i0) bit pattern repeated four times) */
template<int i0, int i1>
__forceinline vdouble8 shuffle(const vdouble8& v) {
  return _mm512_permute_pd(v, (i1 << 7) | (i0 << 6) | (i1 << 5) | (i0 << 4) | (i1 << 3) | (i0 << 2) | (i1 << 1) | i0);
}

/* broadcast element i of each 128-bit pair to both lanes of the pair */
template<int i>
__forceinline vdouble8 shuffle(const vdouble8& v) {
  return shuffle<i, i>(v);
}

/* permute the four 64-bit elements within each 256-bit half independently */
template<int i0, int i1, int i2, int i3>
__forceinline vdouble8 shuffle(const vdouble8& v) {
  return _mm512_permutex_pd(v, _MM_SHUFFLE(i3, i2, i1, i0));
}

/* select 256-bit (4-double) blocks: lower half of the result gets block i0,
 * upper half gets block i1 (each block is addressed via its two 128-bit chunks) */
template<int i0, int i1>
__forceinline vdouble8 shuffle4(const vdouble8& v) {
  return _mm512_shuffle_f64x2(v, v, _MM_SHUFFLE(i1*2+1, i1*2, i0*2+1, i0*2));
}

/* broadcast 256-bit block i to both halves */
template<int i>
__forceinline vdouble8 shuffle4(const vdouble8& v) {
  return shuffle4<i, i>(v);
}

/* concatenate a:b (a in the high half) and shift right by i 64-bit elements (valignq) */
template<int i>
__forceinline vdouble8 align_shift_right(const vdouble8& a, const vdouble8& b) {
  return _mm512_castsi512_pd(_mm512_alignr_epi64(_mm512_castpd_si512(a), _mm512_castpd_si512(b), i));
}
/* extract lane 0 as a scalar double */
__forceinline double toScalar(const vdouble8& v) {
  return _mm_cvtsd_f64(_mm512_castpd512_pd128(v));
}
////////////////////////////////////////////////////////////////////////////////
/// Reductions
////////////////////////////////////////////////////////////////////////////////

/* tree reductions: the *2 step combines adjacent lane pairs, *4 combines within
 * each 4-lane block, and the unsuffixed step combines the two 4-lane halves;
 * the reduced value ends up replicated across all lanes of the result */
__forceinline vdouble8 vreduce_add2(vdouble8 x) { return x + shuffle<1,0,3,2>(x); }
__forceinline vdouble8 vreduce_add4(vdouble8 x) { x = vreduce_add2(x); return x + shuffle<2,3,0,1>(x); }
__forceinline vdouble8 vreduce_add (vdouble8 x) { x = vreduce_add4(x); return x + shuffle4<1,0>(x); }

__forceinline vdouble8 vreduce_min2(vdouble8 x) { return min(x, shuffle<1,0,3,2>(x)); }
__forceinline vdouble8 vreduce_min4(vdouble8 x) { x = vreduce_min2(x); return min(x, shuffle<2,3,0,1>(x)); }
__forceinline vdouble8 vreduce_min (vdouble8 x) { x = vreduce_min4(x); return min(x, shuffle4<1,0>(x)); }

__forceinline vdouble8 vreduce_max2(vdouble8 x) { return max(x, shuffle<1,0,3,2>(x)); }
__forceinline vdouble8 vreduce_max4(vdouble8 x) { x = vreduce_max2(x); return max(x, shuffle<2,3,0,1>(x)); }
__forceinline vdouble8 vreduce_max (vdouble8 x) { x = vreduce_max4(x); return max(x, shuffle4<1,0>(x)); }

/* scalar reductions: reduce across all 8 lanes, then extract lane 0 */
__forceinline double reduce_add(const vdouble8& v) { return toScalar(vreduce_add(v)); }
__forceinline double reduce_min(const vdouble8& v) { return toScalar(vreduce_min(v)); }
__forceinline double reduce_max(const vdouble8& v) { return toScalar(vreduce_max(v)); }
////////////////////////////////////////////////////////////////////////////////
/// Permutation Operations
////////////////////////////////////////////////////////////////////////////////

/* arbitrary lane permutation: result lane k = v[index[k]] */
__forceinline vdouble8 permute(const vdouble8& v, const vllong8& index) {
  return _mm512_permutexvar_pd(index, v);
}

/* reverse the lane order (result lane k = a[7-k]) */
__forceinline vdouble8 reverse(const vdouble8& a) {
  return permute(a, vllong8(reverse_step));
}
  248. ////////////////////////////////////////////////////////////////////////////////
  249. /// Output Operators
  250. ////////////////////////////////////////////////////////////////////////////////
  251. __forceinline embree_ostream operator <<(embree_ostream cout, const vdouble8& v)
  252. {
  253. cout << "<" << v[0];
  254. for (size_t i=1; i<8; i++) cout << ", " << v[i];
  255. cout << ">";
  256. return cout;
  257. }
  258. }
  259. #undef vboolf
  260. #undef vboold
  261. #undef vint
  262. #undef vuint
  263. #undef vllong
  264. #undef vfloat
  265. #undef vdouble