// vllong4_avx2.h
  1. // ======================================================================== //
  2. // Copyright 2009-2017 Intel Corporation //
  3. // //
  4. // Licensed under the Apache License, Version 2.0 (the "License"); //
  5. // you may not use this file except in compliance with the License. //
  6. // You may obtain a copy of the License at //
  7. // //
  8. // http://www.apache.org/licenses/LICENSE-2.0 //
  9. // //
  10. // Unless required by applicable law or agreed to in writing, software //
  11. // distributed under the License is distributed on an "AS IS" BASIS, //
  12. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
  13. // See the License for the specific language governing permissions and //
  14. // limitations under the License. //
  15. // ======================================================================== //
  16. #pragma once
  17. namespace embree
  18. {
#ifndef _MM_SHUF_PERM2
/* Builds the 4-bit immediate for _mm256_permute_pd: one select bit per
   64-bit element, lowest element first (each of e0..e3 must be 0 or 1). */
#define _MM_SHUF_PERM2(e3, e2, e1, e0) \
((int)(((e3)<<3) | ((e2)<<2) | ((e1)<<1) | (e0)))
#endif
#ifndef _MM_SHUF_PERM3
/* Builds the immediate for _mm256_permute2f128_pd: e0 selects the source
   128-bit lane for the low half of the result, e1 for the high half. */
#define _MM_SHUF_PERM3(e1, e0) \
((int)(((e1)<<4) | (e0)))
#endif
  27. /* 4-wide AVX2 64bit long long type */
  28. template<>
  29. struct vllong<4>
  30. {
  31. typedef vboold4 Bool;
  32. enum { size = 4 }; // number of SIMD elements
  33. union { // data
  34. __m256i v;
  35. long long i[4];
  36. };
  37. ////////////////////////////////////////////////////////////////////////////////
  38. /// Constructors, Assignment & Cast Operators
  39. ////////////////////////////////////////////////////////////////////////////////
  40. __forceinline vllong() {}
  41. __forceinline vllong(const vllong4& t) { v = t.v; }
  42. __forceinline vllong4& operator=(const vllong4& f) { v = f.v; return *this; }
  43. __forceinline vllong(const __m256i& t) { v = t; }
  44. __forceinline operator __m256i () const { return v; }
  45. __forceinline operator __m256d () const { return _mm256_castsi256_pd(v); }
  46. __forceinline vllong(const long long i) {
  47. v = _mm256_set1_epi64x(i);
  48. }
  49. __forceinline vllong(const long long a, const long long b, const long long c, const long long d) {
  50. v = _mm256_set_epi64x(d,c,b,a);
  51. }
  52. ////////////////////////////////////////////////////////////////////////////////
  53. /// Constants
  54. ////////////////////////////////////////////////////////////////////////////////
  55. __forceinline vllong( ZeroTy ) : v(_mm256_setzero_si256()) {}
  56. __forceinline vllong( OneTy ) : v(_mm256_set1_epi64x(1)) {}
  57. __forceinline vllong( StepTy ) : v(_mm256_set_epi64x(3,2,1,0)) {}
  58. __forceinline vllong( ReverseStepTy ) : v(_mm256_set_epi64x(0,1,2,3)) {}
  59. __forceinline static vllong4 zero() { return _mm256_setzero_si256(); }
  60. __forceinline static vllong4 one () { return _mm256_set1_epi64x(1); }
  61. __forceinline static vllong4 neg_one () { return _mm256_set1_epi64x(-1); }
  62. ////////////////////////////////////////////////////////////////////////////////
  63. /// Loads and Stores
  64. ////////////////////////////////////////////////////////////////////////////////
  65. static __forceinline void store_nt(void *__restrict__ ptr, const vllong4& a) {
  66. _mm256_stream_ps((float*)ptr,_mm256_castsi256_ps(a));
  67. }
  68. static __forceinline vllong4 loadu(const void* addr)
  69. {
  70. return _mm256_loadu_si256((__m256i*)addr);
  71. }
  72. static __forceinline vllong4 load(const vllong4* addr) {
  73. return _mm256_load_si256((__m256i*)addr);
  74. }
  75. static __forceinline vllong4 load(const long long* addr) {
  76. return _mm256_load_si256((__m256i*)addr);
  77. }
  78. static __forceinline void store(void* ptr, const vllong4& v) {
  79. _mm256_store_si256((__m256i*)ptr,v);
  80. }
  81. static __forceinline void storeu(void* ptr, const vllong4& v) {
  82. _mm256_storeu_si256((__m256i*)ptr,v);
  83. }
  84. static __forceinline void storeu(const vboold4& mask, long long* ptr, const vllong4& f) {
  85. #if defined(__AVX512VL__)
  86. _mm256_mask_storeu_epi64(ptr,mask,f);
  87. #else
  88. _mm256_maskstore_pd((double*)ptr,mask,_mm256_castsi256_pd(f));
  89. #endif
  90. }
  91. static __forceinline void store(const vboold4& mask, void* ptr, const vllong4& f) {
  92. #if defined(__AVX512VL__)
  93. _mm256_mask_store_epi64(ptr,mask,f);
  94. #else
  95. _mm256_maskstore_pd((double*)ptr,mask,_mm256_castsi256_pd(f));
  96. #endif
  97. }
  98. static __forceinline vllong4 broadcast64bit(size_t v) {
  99. return _mm256_set1_epi64x(v);
  100. }
  101. static __forceinline size_t extract64bit(const vllong4& v)
  102. {
  103. return _mm_cvtsi128_si64(_mm256_castsi256_si128(v));
  104. }
  105. ////////////////////////////////////////////////////////////////////////////////
  106. /// Array Access
  107. ////////////////////////////////////////////////////////////////////////////////
  108. __forceinline long long& operator[](const size_t index) { assert(index < 4); return i[index]; }
  109. __forceinline const long long& operator[](const size_t index) const { assert(index < 4); return i[index]; }
  110. };
  111. ////////////////////////////////////////////////////////////////////////////////
  112. /// Select
  113. ////////////////////////////////////////////////////////////////////////////////
  114. __forceinline const vllong4 select( const vboold4& m, const vllong4& t, const vllong4& f ) {
  115. #if defined(__AVX512VL__)
  116. return _mm256_mask_blend_epi64(m, f, t);
  117. #else
  118. return _mm256_castpd_si256(_mm256_blendv_pd(_mm256_castsi256_pd(f), _mm256_castsi256_pd(t), m));
  119. #endif
  120. }
////////////////////////////////////////////////////////////////////////////////
/// Unary Operators
////////////////////////////////////////////////////////////////////////////////

/* bitwise reinterpretation of a float vector as 64-bit integers (no conversion) */
__forceinline const vllong4 asLong ( const __m256& a ) { return _mm256_castps_si256(a); }

__forceinline const vllong4 operator +( const vllong4& a ) { return a; }
/* negation computed as 0 - a (AVX2 has no dedicated 64-bit integer negate) */
__forceinline const vllong4 operator -( const vllong4& a ) { return _mm256_sub_epi64(_mm256_setzero_si256(), a); }
////////////////////////////////////////////////////////////////////////////////
/// Binary Operators
////////////////////////////////////////////////////////////////////////////////

__forceinline const vllong4 operator +( const vllong4& a, const vllong4& b ) { return _mm256_add_epi64(a, b); }
__forceinline const vllong4 operator +( const vllong4& a, const long long b ) { return a + vllong4(b); }
__forceinline const vllong4 operator +( const long long a, const vllong4& b ) { return vllong4(a) + b; }

__forceinline const vllong4 operator -( const vllong4& a, const vllong4& b ) { return _mm256_sub_epi64(a, b); }
__forceinline const vllong4 operator -( const vllong4& a, const long long b ) { return a - vllong4(b); }
__forceinline const vllong4 operator -( const long long a, const vllong4& b ) { return vllong4(a) - b; }

/* only low 32bit part: _mm256_mul_epi32 sign-extends and multiplies the low
   32 bits of each 64-bit lane; results are wrong if operands exceed 32 bits */
__forceinline const vllong4 operator *( const vllong4& a, const vllong4& b ) { return _mm256_mul_epi32(a, b); }
__forceinline const vllong4 operator *( const vllong4& a, const long long b ) { return a * vllong4(b); }
__forceinline const vllong4 operator *( const long long a, const vllong4& b ) { return vllong4(a) * b; }

__forceinline const vllong4 operator &( const vllong4& a, const vllong4& b ) { return _mm256_and_si256(a, b); }
__forceinline const vllong4 operator &( const vllong4& a, const long long b ) { return a & vllong4(b); }
__forceinline const vllong4 operator &( const long long a, const vllong4& b ) { return vllong4(a) & b; }

__forceinline const vllong4 operator |( const vllong4& a, const vllong4& b ) { return _mm256_or_si256(a, b); }
__forceinline const vllong4 operator |( const vllong4& a, const long long b ) { return a | vllong4(b); }
__forceinline const vllong4 operator |( const long long a, const vllong4& b ) { return vllong4(a) | b; }

__forceinline const vllong4 operator ^( const vllong4& a, const vllong4& b ) { return _mm256_xor_si256(a, b); }
__forceinline const vllong4 operator ^( const vllong4& a, const long long b ) { return a ^ vllong4(b); }
__forceinline const vllong4 operator ^( const long long a, const vllong4& b ) { return vllong4(a) ^ b; }

/* logical shifts; shift counts must be in [0,63] */
__forceinline const vllong4 operator <<( const vllong4& a, const long long n ) { return _mm256_slli_epi64(a, n); }
//__forceinline const vllong4 operator >>( const vllong4& a, const long long n ) { return _mm256_srai_epi64(a, n); }

__forceinline const vllong4 operator <<( const vllong4& a, const vllong4& n ) { return _mm256_sllv_epi64(a, n); }
//__forceinline const vllong4 operator >>( const vllong4& a, const vllong4& n ) { return _mm256_srav_epi64(a, n); }

/* the operations below that are commented out use intrinsics
   (_mm256_srai_epi64, _mm256_min/max_epi64) not available in plain AVX2 */
//__forceinline const vllong4 sra ( const vllong4& a, const long long b ) { return _mm256_srai_epi64(a, b); }
__forceinline const vllong4 srl ( const vllong4& a, const long long b ) { return _mm256_srli_epi64(a, b); }

//__forceinline const vllong4 min( const vllong4& a, const vllong4& b ) { return _mm256_min_epi64(a, b); }
//__forceinline const vllong4 min( const vllong4& a, const long long b ) { return min(a,vllong4(b)); }
//__forceinline const vllong4 min( const long long a, const vllong4& b ) { return min(vllong4(a),b); }

//__forceinline const vllong4 max( const vllong4& a, const vllong4& b ) { return _mm256_max_epi64(a, b); }
//__forceinline const vllong4 max( const vllong4& a, const long long b ) { return max(a,vllong4(b)); }
//__forceinline const vllong4 max( const long long a, const vllong4& b ) { return max(vllong4(a),b); }

/* masked bit-ops: lanes with a clear mask bit keep the value of 'c' */
#if defined(__AVX512VL__)
__forceinline const vllong4 mask_and(const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return _mm256_mask_and_epi64(c,m,a,b); }
__forceinline const vllong4 mask_or (const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return _mm256_mask_or_epi64(c,m,a,b); }
#else
__forceinline const vllong4 mask_and(const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return select(m, a & b, c); }
__forceinline const vllong4 mask_or (const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return select(m, a | b, c); }
#endif
  168. ////////////////////////////////////////////////////////////////////////////////
  169. /// Assignment Operators
  170. ////////////////////////////////////////////////////////////////////////////////
  171. __forceinline vllong4& operator +=( vllong4& a, const vllong4& b ) { return a = a + b; }
  172. __forceinline vllong4& operator +=( vllong4& a, const long long b ) { return a = a + b; }
  173. __forceinline vllong4& operator -=( vllong4& a, const vllong4& b ) { return a = a - b; }
  174. __forceinline vllong4& operator -=( vllong4& a, const long long b ) { return a = a - b; }
  175. __forceinline vllong4& operator *=( vllong4& a, const vllong4& b ) { return a = a * b; }
  176. __forceinline vllong4& operator *=( vllong4& a, const long long b ) { return a = a * b; }
  177. __forceinline vllong4& operator &=( vllong4& a, const vllong4& b ) { return a = a & b; }
  178. __forceinline vllong4& operator &=( vllong4& a, const long long b ) { return a = a & b; }
  179. __forceinline vllong4& operator |=( vllong4& a, const vllong4& b ) { return a = a | b; }
  180. __forceinline vllong4& operator |=( vllong4& a, const long long b ) { return a = a | b; }
  181. __forceinline vllong4& operator <<=( vllong4& a, const long long b ) { return a = a << b; }
  182. //__forceinline vllong4& operator >>=( vllong4& a, const long long b ) { return a = a >> b; }
////////////////////////////////////////////////////////////////////////////////
/// Comparison Operators
////////////////////////////////////////////////////////////////////////////////

#if defined(__AVX512VL__)
/* AVX-512VL path: comparisons produce a compact bit mask */
__forceinline const vboold4 operator ==( const vllong4& a, const vllong4& b ) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_EQ); }
__forceinline const vboold4 operator !=( const vllong4& a, const vllong4& b ) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_NE); }
__forceinline const vboold4 operator < ( const vllong4& a, const vllong4& b ) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_LT); }
__forceinline const vboold4 operator >=( const vllong4& a, const vllong4& b ) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_GE); }
__forceinline const vboold4 operator > ( const vllong4& a, const vllong4& b ) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_GT); }
__forceinline const vboold4 operator <=( const vllong4& a, const vllong4& b ) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_LE); }
#else
/* AVX2 path: only EQ and signed GT exist natively; !=, <, >= and <= are
   derived (relies on the vboold4 operator! defined with the bool type) */
__forceinline const vboold4 operator ==( const vllong4& a, const vllong4& b ) { return _mm256_cmpeq_epi64(a,b); }
__forceinline const vboold4 operator !=( const vllong4& a, const vllong4& b ) { return !(a == b); }
__forceinline const vboold4 operator > ( const vllong4& a, const vllong4& b ) { return _mm256_cmpgt_epi64(a,b); }
__forceinline const vboold4 operator < ( const vllong4& a, const vllong4& b ) { return _mm256_cmpgt_epi64(b,a); }
__forceinline const vboold4 operator >=( const vllong4& a, const vllong4& b ) { return !(a < b); }
__forceinline const vboold4 operator <=( const vllong4& a, const vllong4& b ) { return !(a > b); }
#endif

/* scalar/vector comparison overloads broadcast the scalar operand first */
__forceinline const vboold4 operator ==( const vllong4& a, const long long b ) { return a == vllong4(b); }
__forceinline const vboold4 operator ==( const long long a, const vllong4& b ) { return vllong4(a) == b; }

__forceinline const vboold4 operator !=( const vllong4& a, const long long b ) { return a != vllong4(b); }
__forceinline const vboold4 operator !=( const long long a, const vllong4& b ) { return vllong4(a) != b; }

__forceinline const vboold4 operator > ( const vllong4& a, const long long b ) { return a > vllong4(b); }
__forceinline const vboold4 operator > ( const long long a, const vllong4& b ) { return vllong4(a) > b; }

__forceinline const vboold4 operator < ( const vllong4& a, const long long b ) { return a < vllong4(b); }
__forceinline const vboold4 operator < ( const long long a, const vllong4& b ) { return vllong4(a) < b; }

__forceinline const vboold4 operator >=( const vllong4& a, const long long b ) { return a >= vllong4(b); }
__forceinline const vboold4 operator >=( const long long a, const vllong4& b ) { return vllong4(a) >= b; }

__forceinline const vboold4 operator <=( const vllong4& a, const long long b ) { return a <= vllong4(b); }
__forceinline const vboold4 operator <=( const long long a, const vllong4& b ) { return vllong4(a) <= b; }

/* named aliases for the comparison operators */
__forceinline vboold4 eq(const vllong4& a, const vllong4& b) { return a == b; }
__forceinline vboold4 ne(const vllong4& a, const vllong4& b) { return a != b; }
__forceinline vboold4 lt(const vllong4& a, const vllong4& b) { return a <  b; }
__forceinline vboold4 ge(const vllong4& a, const vllong4& b) { return a >= b; }
__forceinline vboold4 gt(const vllong4& a, const vllong4& b) { return a >  b; }
__forceinline vboold4 le(const vllong4& a, const vllong4& b) { return a <= b; }

/* masked comparisons: lanes where 'mask' is clear yield false */
#if defined(__AVX512VL__)
__forceinline vboold4 eq(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_EQ); }
__forceinline vboold4 ne(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_NE); }
__forceinline vboold4 lt(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_LT); }
__forceinline vboold4 ge(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_GE); }
__forceinline vboold4 gt(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_GT); }
__forceinline vboold4 le(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_LE); }
#else
__forceinline vboold4 eq(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a == b); }
__forceinline vboold4 ne(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a != b); }
__forceinline vboold4 lt(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a <  b); }
__forceinline vboold4 ge(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a >= b); }
__forceinline vboold4 gt(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a >  b); }
__forceinline vboold4 le(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a <= b); }
#endif
/* conditionally swaps the lanes of a and b where mask m is set */
__forceinline void xchg(const vboold4& m, vllong4& a, vllong4& b) {
  const vllong4 c = a; a = select(m,b,a); b = select(m,c,b);
}

/* NOTE(review): the two branches do not compute the same thing. The
   AVX-512VL path yields a per-lane mask of (a & b) != 0, while the AVX2
   path returns the scalar result of _mm256_testz_si256 (1 iff a & b is
   zero across ALL lanes, 0 otherwise) implicitly converted to vboold4.
   Confirm which semantics callers rely on. */
__forceinline vboold4 test(const vllong4& a, const vllong4& b) {
#if defined(__AVX512VL__)
  return _mm256_test_epi64_mask(a,b);
#else
  return _mm256_testz_si256(a,b);
#endif
}
////////////////////////////////////////////////////////////////////////////////
// Movement/Shifting/Shuffling Functions
////////////////////////////////////////////////////////////////////////////////

/* permutes the two 64-bit elements within each 128-bit lane: A selects the
   element placed low, B the element placed high (A,B in {0,1}) */
template<int B, int A> __forceinline vllong4 shuffle (const vllong4& v) { return _mm256_castpd_si256(_mm256_permute_pd(_mm256_castsi256_pd(v),(int)_MM_SHUF_PERM2(B,A,B,A))); }

/* broadcasts element A within each 128-bit lane */
template<int A> __forceinline vllong4 shuffle (const vllong4& x) { return shuffle<A,A>(x); }

/* permutes whole 128-bit lanes: A selects the source lane for the low half
   of the result, B for the high half */
template<int B, int A> __forceinline vllong4 shuffle2 (const vllong4& v) { return _mm256_castpd_si256(_mm256_permute2f128_pd(_mm256_castsi256_pd(v),v,(int)_MM_SHUF_PERM3(B,A))); }
  250. __forceinline long long toScalar(const vllong4& a)
  251. {
  252. return _mm_cvtsi128_si64(_mm256_castsi256_si128(a));
  253. }
  254. ////////////////////////////////////////////////////////////////////////////////
  255. /// Reductions
  256. ////////////////////////////////////////////////////////////////////////////////
  257. __forceinline vllong4 vreduce_and2(const vllong4& x) { return x & shuffle<0,1>(x); }
  258. __forceinline vllong4 vreduce_and (const vllong4& y) { const vllong4 x = vreduce_and2(y); return x & shuffle2<0,1>(x); }
  259. __forceinline vllong4 vreduce_or2(const vllong4& x) { return x | shuffle<0,1>(x); }
  260. __forceinline vllong4 vreduce_or (const vllong4& y) { const vllong4 x = vreduce_or2(y); return x | shuffle2<0,1>(x); }
  261. __forceinline vllong4 vreduce_add2(const vllong4& x) { return x + shuffle<0,1>(x); }
  262. __forceinline vllong4 vreduce_add (const vllong4& y) { const vllong4 x = vreduce_add2(y); return x + shuffle2<0,1>(x); }
  263. __forceinline long long reduce_add(const vllong4& a) { return toScalar(vreduce_add(a)); }
  264. __forceinline long long reduce_or (const vllong4& a) { return toScalar(vreduce_or(a)); }
  265. __forceinline long long reduce_and(const vllong4& a) { return toScalar(vreduce_and(a)); }
  266. ////////////////////////////////////////////////////////////////////////////////
  267. /// Output Operators
  268. ////////////////////////////////////////////////////////////////////////////////
  269. __forceinline std::ostream& operator<<(std::ostream& cout, const vllong4& v)
  270. {
  271. cout << "<" << v[0];
  272. for (size_t i=1; i<4; i++) cout << ", " << v[i];
  273. cout << ">";
  274. return cout;
  275. }
  276. }