vdouble8_avx512.h

// ======================================================================== //
// Copyright 2009-2017 Intel Corporation                                    //
//                                                                          //
// Licensed under the Apache License, Version 2.0 (the "License");          //
// you may not use this file except in compliance with the License.         //
// You may obtain a copy of the License at                                  //
//                                                                          //
//     http://www.apache.org/licenses/LICENSE-2.0                           //
//                                                                          //
// Unless required by applicable law or agreed to in writing, software      //
// distributed under the License is distributed on an "AS IS" BASIS,        //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and      //
// limitations under the License.                                           //
// ======================================================================== //
#pragma once

namespace embree
{
  /* 8-wide AVX-512 64-bit double type */
  template<>
  struct vdouble<8>
  {
    typedef vboold8 Bool;

    enum { size = 8 }; // number of SIMD elements
    union {            // data
      __m512d v;
      double i[8];
    };

    ////////////////////////////////////////////////////////////////////////////////
    /// Constructors, Assignment & Cast Operators
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vdouble() {}
    __forceinline vdouble(const vdouble8& t) { v = t.v; }
    __forceinline vdouble8& operator =(const vdouble8& f) { v = f.v; return *this; }

    __forceinline vdouble(const __m512d& t) { v = t; }
    __forceinline operator __m512d() const { return v; }
    __forceinline operator __m256d() const { return _mm512_castpd512_pd256(v); }

    __forceinline vdouble(const double i) {
      v = _mm512_set1_pd(i);
    }

    __forceinline vdouble(const double a, const double b, const double c, const double d) {
      v = _mm512_set4_pd(d,c,b,a);
    }

    __forceinline vdouble(const double a0, const double a1, const double a2, const double a3,
                          const double a4, const double a5, const double a6, const double a7)
    {
      v = _mm512_set_pd(a7,a6,a5,a4,a3,a2,a1,a0);
    }

    ////////////////////////////////////////////////////////////////////////////////
    /// Constants
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vdouble(ZeroTy)        : v(_mm512_setzero_pd()) {}
    __forceinline vdouble(OneTy)         : v(_mm512_set1_pd(1)) {}
    __forceinline vdouble(StepTy)        : v(_mm512_set_pd(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)) {}
    __forceinline vdouble(ReverseStepTy) : v(_mm512_setr_pd(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)) {}

    __forceinline static vdouble8 zero   () { return _mm512_setzero_pd(); }
    __forceinline static vdouble8 one    () { return _mm512_set1_pd(1); }
    __forceinline static vdouble8 neg_one() { return _mm512_set1_pd(-1); }

    ////////////////////////////////////////////////////////////////////////////////
    /// Loads and Stores
    ////////////////////////////////////////////////////////////////////////////////

    static __forceinline void store_nt(void* __restrict__ ptr, const vdouble8& a) {
      _mm512_stream_pd((double*)ptr,a);
    }

    static __forceinline vdouble8 loadu(const void* addr) {
      return _mm512_loadu_pd((double*)addr);
    }

    static __forceinline vdouble8 load(const vdouble8* addr) {
      return _mm512_load_pd((double*)addr);
    }

    static __forceinline vdouble8 load(const double* addr) {
      return _mm512_load_pd(addr);
    }

    static __forceinline void store(void* ptr, const vdouble8& v) {
      _mm512_store_pd((double*)ptr,v);
    }

    static __forceinline void storeu(void* ptr, const vdouble8& v) {
      _mm512_storeu_pd((double*)ptr,v);
    }

    static __forceinline void storeu(const vboold8& mask, double* ptr, const vdouble8& f) {
      _mm512_mask_storeu_pd(ptr,mask,f);
    }

    static __forceinline void store(const vboold8& mask, void* addr, const vdouble8& v2) {
      _mm512_mask_store_pd((double*)addr,mask,v2);
    }

    /* pass mask by value to avoid the compiler generating inefficient code */
    static __forceinline void storeu_compact(const vboold8 mask, void* addr, const vdouble8& reg) {
      _mm512_mask_compressstoreu_pd(addr,mask,reg);
    }

    static __forceinline vdouble8 compact64bit(const vboold8& mask, vdouble8& v) {
      return _mm512_mask_compress_pd(v,mask,v);
    }

    static __forceinline vdouble8 compact(const vboold8& mask, vdouble8& v) {
      return _mm512_mask_compress_pd(v,mask,v);
    }

    static __forceinline vdouble8 compact(const vboold8& mask, const vdouble8& a, vdouble8& b) {
      return _mm512_mask_compress_pd(a,mask,b);
    }
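
    // Illustrative sketch (editorial; the mask value is a made-up example):
    // compression packs the active lanes contiguously into the low lanes, and
    // with _mm512_mask_compress_pd the inactive upper lanes keep the source
    // values:
    //   const vboold8 m = 0xAA;                        // lanes 1,3,5,7 active
    //   vdouble8 v(0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0);
    //   compact(m,v);                                  // -> <1,3,5,7,4,5,6,7>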

    ////////////////////////////////////////////////////////////////////////////////
    /// Array Access
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline       double& operator [](const size_t index)       { assert(index < 8); return i[index]; }
    __forceinline const double& operator [](const size_t index) const { assert(index < 8); return i[index]; }
  };
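
  // Usage sketch (editorial): load/store assume 64-byte alignment, the *u
  // variants do not:
  //   double buf[8];
  //   vdouble8::storeu(buf, vdouble8(one)); // unaligned store is always safe
  //   const vdouble8 x = vdouble8::loadu(buf);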

  ////////////////////////////////////////////////////////////////////////////////
  /// Unary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline const vdouble8 asDouble(const __m512& a) { return _mm512_castps_pd(a); }

  __forceinline const vdouble8 operator +(const vdouble8& a) { return a; }
  __forceinline const vdouble8 operator -(const vdouble8& a) { return _mm512_sub_pd(_mm512_setzero_pd(), a); }

  ////////////////////////////////////////////////////////////////////////////////
  /// Binary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline const vdouble8 operator +(const vdouble8& a, const vdouble8& b) { return _mm512_add_pd(a, b); }
  __forceinline const vdouble8 operator +(const vdouble8& a, const double    b) { return a + vdouble8(b); }
  __forceinline const vdouble8 operator +(const double    a, const vdouble8& b) { return vdouble8(a) + b; }

  __forceinline const vdouble8 operator -(const vdouble8& a, const vdouble8& b) { return _mm512_sub_pd(a, b); }
  __forceinline const vdouble8 operator -(const vdouble8& a, const double    b) { return a - vdouble8(b); }
  __forceinline const vdouble8 operator -(const double    a, const vdouble8& b) { return vdouble8(a) - b; }

  __forceinline const vdouble8 operator *(const vdouble8& a, const vdouble8& b) { return _mm512_mul_pd(a, b); }
  __forceinline const vdouble8 operator *(const vdouble8& a, const double    b) { return a * vdouble8(b); }
  __forceinline const vdouble8 operator *(const double    a, const vdouble8& b) { return vdouble8(a) * b; }

  __forceinline const vdouble8 operator &(const vdouble8& a, const vdouble8& b) { return _mm512_and_pd(a, b); }
  __forceinline const vdouble8 operator &(const vdouble8& a, const double    b) { return a & vdouble8(b); }
  __forceinline const vdouble8 operator &(const double    a, const vdouble8& b) { return vdouble8(a) & b; }

  __forceinline const vdouble8 operator |(const vdouble8& a, const vdouble8& b) { return _mm512_or_pd(a, b); }
  __forceinline const vdouble8 operator |(const vdouble8& a, const double    b) { return a | vdouble8(b); }
  __forceinline const vdouble8 operator |(const double    a, const vdouble8& b) { return vdouble8(a) | b; }

  __forceinline const vdouble8 operator ^(const vdouble8& a, const vdouble8& b) { return _mm512_xor_pd(a, b); }
  __forceinline const vdouble8 operator ^(const vdouble8& a, const double    b) { return a ^ vdouble8(b); }
  __forceinline const vdouble8 operator ^(const double    a, const vdouble8& b) { return vdouble8(a) ^ b; }

  __forceinline const vdouble8 operator <<(const vdouble8& a, const unsigned int n) { return _mm512_castsi512_pd(_mm512_slli_epi64(_mm512_castpd_si512(a), n)); }
  __forceinline const vdouble8 operator >>(const vdouble8& a, const unsigned int n) { return _mm512_castsi512_pd(_mm512_srai_epi64(_mm512_castpd_si512(a), n)); }

  __forceinline const vdouble8 operator <<(const vdouble8& a, const vllong8& n) { return _mm512_castsi512_pd(_mm512_sllv_epi64(_mm512_castpd_si512(a), n)); }
  __forceinline const vdouble8 operator >>(const vdouble8& a, const vllong8& n) { return _mm512_castsi512_pd(_mm512_srav_epi64(_mm512_castpd_si512(a), n)); }

  __forceinline const vdouble8 sll(const vdouble8& a, const unsigned int b) { return _mm512_castsi512_pd(_mm512_slli_epi64(_mm512_castpd_si512(a), b)); }
  __forceinline const vdouble8 sra(const vdouble8& a, const unsigned int b) { return _mm512_castsi512_pd(_mm512_srai_epi64(_mm512_castpd_si512(a), b)); }
  __forceinline const vdouble8 srl(const vdouble8& a, const unsigned int b) { return _mm512_castsi512_pd(_mm512_srli_epi64(_mm512_castpd_si512(a), b)); }
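
  // Editorial note: the shift operators above act on the raw IEEE-754 bit
  // pattern, not on the numeric value. A hedged example that clears the sign
  // bit by shifting it out and back in:
  //   const vdouble8 absX = srl(sll(x,1),1); // |x| via bit manipulation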

  __forceinline const vdouble8 min(const vdouble8& a, const vdouble8& b) { return _mm512_min_pd(a, b); }
  __forceinline const vdouble8 min(const vdouble8& a, const double    b) { return min(a,vdouble8(b)); }
  __forceinline const vdouble8 min(const double    a, const vdouble8& b) { return min(vdouble8(a),b); }

  __forceinline const vdouble8 max(const vdouble8& a, const vdouble8& b) { return _mm512_max_pd(a, b); }
  __forceinline const vdouble8 max(const vdouble8& a, const double    b) { return max(a,vdouble8(b)); }
  __forceinline const vdouble8 max(const double    a, const vdouble8& b) { return max(vdouble8(a),b); }

  __forceinline const vdouble8 mask_add(const vboold8& mask, vdouble8& c, const vdouble8& a, const vdouble8& b) { return _mm512_mask_add_pd(c,mask,a,b); }
  __forceinline const vdouble8 mask_sub(const vboold8& mask, vdouble8& c, const vdouble8& a, const vdouble8& b) { return _mm512_mask_sub_pd(c,mask,a,b); }

  __forceinline const vdouble8 mask_and(const vboold8& m, vdouble8& c, const vdouble8& a, const vdouble8& b) { return _mm512_mask_and_pd(c,m,a,b); }
  __forceinline const vdouble8 mask_or (const vboold8& m, vdouble8& c, const vdouble8& a, const vdouble8& b) { return _mm512_mask_or_pd(c,m,a,b); }
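
  // Illustrative semantics (editorial): lanes where the mask is set receive
  // the operation result; all other lanes pass c through unchanged:
  //   r = mask_add(m,c,a,b); // r[i] = m[i] ? a[i]+b[i] : c[i]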

  ////////////////////////////////////////////////////////////////////////////////
  /// Assignment Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vdouble8& operator +=(vdouble8& a, const vdouble8& b) { return a = a + b; }
  __forceinline vdouble8& operator +=(vdouble8& a, const double    b) { return a = a + b; }

  __forceinline vdouble8& operator -=(vdouble8& a, const vdouble8& b) { return a = a - b; }
  __forceinline vdouble8& operator -=(vdouble8& a, const double    b) { return a = a - b; }

  __forceinline vdouble8& operator *=(vdouble8& a, const vdouble8& b) { return a = a * b; }
  __forceinline vdouble8& operator *=(vdouble8& a, const double    b) { return a = a * b; }

  __forceinline vdouble8& operator &=(vdouble8& a, const vdouble8& b) { return a = a & b; }
  __forceinline vdouble8& operator &=(vdouble8& a, const double    b) { return a = a & b; }

  __forceinline vdouble8& operator |=(vdouble8& a, const vdouble8& b) { return a = a | b; }
  __forceinline vdouble8& operator |=(vdouble8& a, const double    b) { return a = a | b; }

  __forceinline vdouble8& operator <<=(vdouble8& a, const unsigned int b) { return a = a << b; }
  __forceinline vdouble8& operator >>=(vdouble8& a, const unsigned int b) { return a = a >> b; }

  ////////////////////////////////////////////////////////////////////////////////
  /// Comparison Operators + Select
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline const vboold8 operator ==(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_EQ); }
  __forceinline const vboold8 operator ==(const vdouble8& a, const double    b) { return a == vdouble8(b); }
  __forceinline const vboold8 operator ==(const double    a, const vdouble8& b) { return vdouble8(a) == b; }

  __forceinline const vboold8 operator !=(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_NE); }
  __forceinline const vboold8 operator !=(const vdouble8& a, const double    b) { return a != vdouble8(b); }
  __forceinline const vboold8 operator !=(const double    a, const vdouble8& b) { return vdouble8(a) != b; }

  __forceinline const vboold8 operator < (const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_LT); }
  __forceinline const vboold8 operator < (const vdouble8& a, const double    b) { return a < vdouble8(b); }
  __forceinline const vboold8 operator < (const double    a, const vdouble8& b) { return vdouble8(a) < b; }

  __forceinline const vboold8 operator >=(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_GE); }
  __forceinline const vboold8 operator >=(const vdouble8& a, const double    b) { return a >= vdouble8(b); }
  __forceinline const vboold8 operator >=(const double    a, const vdouble8& b) { return vdouble8(a) >= b; }

  __forceinline const vboold8 operator > (const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_GT); }
  __forceinline const vboold8 operator > (const vdouble8& a, const double    b) { return a > vdouble8(b); }
  __forceinline const vboold8 operator > (const double    a, const vdouble8& b) { return vdouble8(a) > b; }

  __forceinline const vboold8 operator <=(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_LE); }
  __forceinline const vboold8 operator <=(const vdouble8& a, const double    b) { return a <= vdouble8(b); }
  __forceinline const vboold8 operator <=(const double    a, const vdouble8& b) { return vdouble8(a) <= b; }

  __forceinline vboold8 eq(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_EQ); }
  __forceinline vboold8 ne(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_NE); }
  __forceinline vboold8 lt(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_LT); }
  __forceinline vboold8 ge(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_GE); }
  __forceinline vboold8 gt(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_GT); }
  __forceinline vboold8 le(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_LE); }

  __forceinline vboold8 eq(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_EQ); }
  __forceinline vboold8 ne(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_NE); }
  __forceinline vboold8 lt(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_LT); }
  __forceinline vboold8 ge(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_GE); }
  __forceinline vboold8 gt(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_GT); }
  __forceinline vboold8 le(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_LE); }

  __forceinline const vdouble8 select(const vboold8& m, const vdouble8& t, const vdouble8& f) {
    return _mm512_mask_or_pd(f,m,t,t);
  }

  __forceinline void xchg(const vboold8& m, vdouble8& a, vdouble8& b) {
    const vdouble8 c = a; a = select(m,b,a); b = select(m,c,b);
  }
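
  // Illustrative semantics (editorial): select() picks t in lanes where the
  // mask is set and f elsewhere (the mask_or trick works because t|t == t);
  // xchg() swaps a and b only in the active lanes:
  //   r = select(m,t,f); // r[i] = m[i] ? t[i] : f[i]
  //   xchg(m,a,b);       // swap a[i] <-> b[i] where m[i] is set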

  __forceinline vboold8 test(const vboold8& m, const vdouble8& a, const vdouble8& b) {
    return _mm512_mask_test_epi64_mask(m,_mm512_castpd_si512(a),_mm512_castpd_si512(b));
  }

  __forceinline vboold8 test(const vdouble8& a, const vdouble8& b) {
    return _mm512_test_epi64_mask(_mm512_castpd_si512(a),_mm512_castpd_si512(b));
  }

  ////////////////////////////////////////////////////////////////////////////////
  // Movement/Shifting/Shuffling Functions
  ////////////////////////////////////////////////////////////////////////////////

  template<size_t i>
  __forceinline const vdouble8 shuffle(const vdouble8& a) {
    return _mm512_permute_pd(a, _MM_SHUFFLE(i, i, i, i));
  }

  template<int A, int B, int C, int D>
  __forceinline vdouble8 shuffle(const vdouble8& v) {
    return _mm512_permute_pd(v,_MM_SHUFFLE(D,C,B,A));
  }

  template<int i>
  __forceinline vdouble8 shuffle4(const vdouble8& x) {
    return _mm512_shuffle_f64x2(x,x,_MM_SHUFFLE(i,i,i,i));
  }

  template<int A, int B>
  __forceinline vdouble8 shuffle4(const vdouble8& x) {
    return _mm512_shuffle_f64x2(x,x,_MM_SHUFFLE(0,0,B,A));
  }

  template<int i>
  __forceinline vdouble8 align_shift_right(const vdouble8& a, const vdouble8& b)
  {
    return _mm512_castsi512_pd(_mm512_alignr_epi64(_mm512_castpd_si512(a),_mm512_castpd_si512(b),i));
  }
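
  // Illustrative semantics (editorial): shuffle() permutes within 128-bit
  // lanes, shuffle4<i>() broadcasts the i-th 128-bit pair, and
  // align_shift_right<i>() concatenates a:b and shifts right by i lanes:
  //   align_shift_right<1>(a,b); // -> <b1,b2,b3,b4,b5,b6,b7,a0>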

  __forceinline double toScalar(const vdouble8& a) {
    return _mm256_cvtsd_f64(_mm512_castpd512_pd256(a));
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Reductions
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline double reduce_add(const vdouble8& a) { return _mm512_reduce_add_pd(a); }
  __forceinline double reduce_min(const vdouble8& a) { return _mm512_reduce_min_pd(a); }
  __forceinline double reduce_max(const vdouble8& a) { return _mm512_reduce_max_pd(a); }

  __forceinline vdouble8 vreduce_add2(vdouble8 x) {                      return x + shuffle<1,0,3,2>(x); }
  __forceinline vdouble8 vreduce_add4(vdouble8 x) { x = vreduce_add2(x); return x + shuffle<2,3,0,1>(x); }
  __forceinline vdouble8 vreduce_add (vdouble8 x) { x = vreduce_add4(x); return x + shuffle4<1,0>(x); }

  __forceinline vdouble8 vreduce_min2(vdouble8 x) {                      return min(x,shuffle<1,0,3,2>(x)); }
  __forceinline vdouble8 vreduce_min4(vdouble8 x) { x = vreduce_min2(x); return min(x,shuffle<2,3,0,1>(x)); }
  __forceinline vdouble8 vreduce_min (vdouble8 x) { x = vreduce_min4(x); return min(x,shuffle4<1,0>(x)); }

  __forceinline vdouble8 vreduce_max2(vdouble8 x) {                      return max(x,shuffle<1,0,3,2>(x)); }
  __forceinline vdouble8 vreduce_max4(vdouble8 x) { x = vreduce_max2(x); return max(x,shuffle<2,3,0,1>(x)); }
  __forceinline vdouble8 vreduce_max (vdouble8 x) { x = vreduce_max4(x); return max(x,shuffle4<1,0>(x)); }
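
  // Illustrative semantics (editorial): the scalar reductions fold all eight
  // lanes into one value; the vreduce* variants perform the same folds while
  // staying in vector registers:
  //   reduce_add(vdouble8(1.0)); // == 8.0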

  ////////////////////////////////////////////////////////////////////////////////
  /// Permute / Reverse
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vdouble8 permute(const vdouble8& v, const vllong8& index) {
    return _mm512_permutexvar_pd(index,v);
  }

  __forceinline vdouble8 reverse(const vdouble8& a) {
    return permute(a,vllong8(reverse_step));
  }
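
  // Illustrative semantics (editorial): permute() selects v[index[i]] per
  // lane, so with reverse_step = <7,6,5,4,3,2,1,0> the lanes come out
  // reversed:
  //   reverse(vdouble8(step)); // <0,...,7> -> <7,...,0>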

  ////////////////////////////////////////////////////////////////////////////////
  /// Output Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline std::ostream& operator <<(std::ostream& cout, const vdouble8& v)
  {
    cout << "<" << v[0];
    for (size_t i=1; i<8; i++) cout << ", " << v[i];
    cout << ">";
    return cout;
  }
}