vint16_avx512.h

// ======================================================================== //
// Copyright 2009-2017 Intel Corporation                                    //
//                                                                          //
// Licensed under the Apache License, Version 2.0 (the "License");          //
// you may not use this file except in compliance with the License.         //
// You may obtain a copy of the License at                                  //
//                                                                          //
//     http://www.apache.org/licenses/LICENSE-2.0                           //
//                                                                          //
// Unless required by applicable law or agreed to in writing, software      //
// distributed under the License is distributed on an "AS IS" BASIS,        //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and      //
// limitations under the License.                                           //
// ======================================================================== //

#pragma once

namespace embree
{
  /* 16-wide AVX-512 integer type */
  template<>
  struct vint<16>
  {
    typedef vboolf16 Bool;
    typedef vint16   Int;
    typedef vfloat16 Float;

    enum  { size = 16 }; // number of SIMD elements
    union {              // data
      __m512i v;
      int i[16];
    };

    ////////////////////////////////////////////////////////////////////////////////
    /// Constructors, Assignment & Cast Operators
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vint() {}
    __forceinline vint(const vint16& t) { v = t.v; }
    __forceinline vint16& operator=(const vint16& f) { v = f.v; return *this; }

    __forceinline vint(const __m512i& t) { v = t; }
    __forceinline operator __m512i () const { return v; }
    __forceinline operator __m256i () const { return _mm512_castsi512_si256(v); }

    __forceinline vint(const int i) {
      v = _mm512_set1_epi32(i);
    }

    __forceinline vint(const int a, const int b, const int c, const int d) {
      v = _mm512_set4_epi32(d,c,b,a);
    }

    __forceinline vint(const int a0 , const int a1 , const int a2 , const int a3,
                       const int a4 , const int a5 , const int a6 , const int a7,
                       const int a8 , const int a9 , const int a10, const int a11,
                       const int a12, const int a13, const int a14, const int a15)
    {
      v = _mm512_set_epi32(a15,a14,a13,a12,a11,a10,a9,a8,a7,a6,a5,a4,a3,a2,a1,a0);
    }

    __forceinline vint(const vint4& i) {
      v = _mm512_broadcast_i32x4(i);
    }

    __forceinline vint(const vint8& i) {
      v = _mm512_castps_si512(_mm512_castpd_ps(_mm512_broadcast_f64x4(_mm256_castsi256_pd(i))));
    }

    __forceinline explicit vint(const __m512& f) {
      v = _mm512_cvtps_epi32(f);
    }

    ////////////////////////////////////////////////////////////////////////////////
    /// Constants
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vint( ZeroTy )        : v(_mm512_setzero_epi32()) {}
    __forceinline vint( OneTy )         : v(_mm512_set1_epi32(1)) {}
    __forceinline vint( PosInfTy )      : v(_mm512_set1_epi32(pos_inf)) {}
    __forceinline vint( NegInfTy )      : v(_mm512_set1_epi32(neg_inf)) {}
    __forceinline vint( StepTy )        : v(_mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0)) {}
    __forceinline vint( ReverseStepTy ) : v(_mm512_setr_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0)) {}

    __forceinline static vint16 zero()    { return _mm512_setzero_epi32(); }
    __forceinline static vint16 one ()    { return _mm512_set1_epi32(1); }
    __forceinline static vint16 neg_one() { return _mm512_set1_epi32(-1); }

    ////////////////////////////////////////////////////////////////////////////////
    /// Loads and Stores
    ////////////////////////////////////////////////////////////////////////////////

    static __forceinline vint16 load (const void* addr) { return _mm512_load_si512((int*)addr); }
    static __forceinline const vint16 load( const unsigned char* const ptr ) { return _mm512_cvtepu8_epi32(_mm_load_si128((__m128i*)ptr)); }
    static __forceinline vint16 loadu(const void* addr) { return _mm512_loadu_si512(addr); }

    static __forceinline vint16 load (const vboolf16& mask, const void* addr) { return _mm512_mask_load_epi32 (_mm512_setzero_epi32(),mask,addr); }
    static __forceinline vint16 loadu(const vboolf16& mask, const void* addr) { return _mm512_mask_loadu_epi32(_mm512_setzero_epi32(),mask,addr); }

    static __forceinline void store (void* ptr, const vint16& v) { _mm512_store_si512 (ptr,v); }
    static __forceinline void storeu(void* ptr, const vint16& v) { _mm512_storeu_si512(ptr,v); }

    static __forceinline void store (const vboolf16& mask, void* addr, const vint16& v2) { _mm512_mask_store_epi32(addr,mask,v2); }
    static __forceinline void storeu(const vboolf16& mask, void* ptr, const vint16& f )  { _mm512_mask_storeu_epi32((int*)ptr,mask,f); }

    static __forceinline void store_nt(void *__restrict__ ptr, const vint16& a) { _mm512_stream_si512((__m512i*)ptr,a); }

    /* pass by value to avoid compiler generating inefficient code */
    static __forceinline void storeu_compact(const vboolf16 mask, void* addr, const vint16 reg) {
      _mm512_mask_compressstoreu_epi32(addr,mask,reg);
    }
    static __forceinline void storeu_compact_single(const vboolf16 mask, void* addr, const vint16 reg) {
      //_mm512_mask_compressstoreu_epi32(addr,mask,reg);
      *(float*)addr = _mm_cvtss_f32(_mm512_castps512_ps128(_mm512_mask_compress_ps(_mm512_castsi512_ps(reg),mask,_mm512_castsi512_ps(reg))));
    }
    static __forceinline vint16 compact64bit(const vboolf16& mask, vint16& v) {
      return _mm512_mask_compress_epi64(v,mask,v);
    }

    static __forceinline vint16 compact(const vboolf16& mask, vint16& v) {
      return _mm512_mask_compress_epi32(v,mask,v);
    }

    static __forceinline vint16 compact(const vboolf16& mask, const vint16& a, vint16& b) {
      return _mm512_mask_compress_epi32(a,mask,b);
    }

    static __forceinline vint16 broadcast64bit(size_t v) {
      return _mm512_set1_epi64(v);
    }

    ////////////////////////////////////////////////////////////////////////////////
    /// Array Access
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline       int& operator[](const size_t index)       { assert(index < 16); return i[index]; }
    __forceinline const int& operator[](const size_t index) const { assert(index < 16); return i[index]; }

    __forceinline unsigned int uint    (const size_t index) const { assert(index < 16); return ((unsigned int*)i)[index]; }
    __forceinline size_t&      uint64_t(const size_t index) const { assert(index < 8);  return ((size_t*)i)[index]; }
  };
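
  /* Illustrative usage sketch (editor addition, not part of the original Embree
   * header): shows how a vint16 is typically constructed, loaded, and read back
   * element-wise. The function name and parameter are hypothetical. */
  __forceinline int vint16_usage_sketch(const int* ptr)
  {
    const vint16 a(1);                   // broadcast constructor: all 16 lanes = 1
    const vint16 b = vint16::loadu(ptr); // unaligned load of 16 consecutive ints
    return a[0] + b[15];                 // scalar element access via operator[]
  }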
  ////////////////////////////////////////////////////////////////////////////////
  /// Unary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline const vint16 asInt     ( const __m512& a ) { return _mm512_castps_si512(a); }
  __forceinline const vint16 operator +( const vint16& a ) { return a; }
  __forceinline const vint16 operator -( const vint16& a ) { return _mm512_sub_epi32(_mm512_setzero_epi32(), a); }

  ////////////////////////////////////////////////////////////////////////////////
  /// Binary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline const vint16 operator +( const vint16& a, const vint16& b ) { return _mm512_add_epi32(a, b); }
  __forceinline const vint16 operator +( const vint16& a, const int     b ) { return a + vint16(b); }
  __forceinline const vint16 operator +( const int     a, const vint16& b ) { return vint16(a) + b; }

  __forceinline const vint16 operator -( const vint16& a, const vint16& b ) { return _mm512_sub_epi32(a, b); }
  __forceinline const vint16 operator -( const vint16& a, const int     b ) { return a - vint16(b); }
  __forceinline const vint16 operator -( const int     a, const vint16& b ) { return vint16(a) - b; }

  __forceinline const vint16 operator *( const vint16& a, const vint16& b ) { return _mm512_mullo_epi32(a, b); }
  __forceinline const vint16 operator *( const vint16& a, const int     b ) { return a * vint16(b); }
  __forceinline const vint16 operator *( const int     a, const vint16& b ) { return vint16(a) * b; }

  __forceinline const vint16 operator &( const vint16& a, const vint16& b ) { return _mm512_and_epi32(a, b); }
  __forceinline const vint16 operator &( const vint16& a, const int     b ) { return a & vint16(b); }
  __forceinline const vint16 operator &( const int     a, const vint16& b ) { return vint16(a) & b; }

  __forceinline const vint16 operator |( const vint16& a, const vint16& b ) { return _mm512_or_epi32(a, b); }
  __forceinline const vint16 operator |( const vint16& a, const int     b ) { return a | vint16(b); }
  __forceinline const vint16 operator |( const int     a, const vint16& b ) { return vint16(a) | b; }

  __forceinline const vint16 operator ^( const vint16& a, const vint16& b ) { return _mm512_xor_epi32(a, b); }
  __forceinline const vint16 operator ^( const vint16& a, const int     b ) { return a ^ vint16(b); }
  __forceinline const vint16 operator ^( const int     a, const vint16& b ) { return vint16(a) ^ b; }

  __forceinline const vint16 operator <<( const vint16& a, const int     n ) { return _mm512_slli_epi32(a, n); }
  __forceinline const vint16 operator >>( const vint16& a, const int     n ) { return _mm512_srai_epi32(a, n); }
  __forceinline const vint16 operator <<( const vint16& a, const vint16& n ) { return _mm512_sllv_epi32(a, n); }
  __forceinline const vint16 operator >>( const vint16& a, const vint16& n ) { return _mm512_srav_epi32(a, n); }

  __forceinline const vint16 sll ( const vint16& a, const int b ) { return _mm512_slli_epi32(a, b); }
  __forceinline const vint16 sra ( const vint16& a, const int b ) { return _mm512_srai_epi32(a, b); }
  __forceinline const vint16 srl ( const vint16& a, const int b ) { return _mm512_srli_epi32(a, b); }

  __forceinline const vint16 min( const vint16& a, const vint16& b ) { return _mm512_min_epi32(a, b); }
  __forceinline const vint16 min( const vint16& a, const int     b ) { return min(a,vint16(b)); }
  __forceinline const vint16 min( const int     a, const vint16& b ) { return min(vint16(a),b); }

  __forceinline const vint16 max( const vint16& a, const vint16& b ) { return _mm512_max_epi32(a, b); }
  __forceinline const vint16 max( const vint16& a, const int     b ) { return max(a,vint16(b)); }
  __forceinline const vint16 max( const int     a, const vint16& b ) { return max(vint16(a),b); }

  __forceinline const vint16 umin( const vint16& a, const vint16& b ) { return _mm512_min_epu32(a.v, b.v); }
  __forceinline const vint16 umax( const vint16& a, const vint16& b ) { return _mm512_max_epu32(a.v, b.v); }

  __forceinline const vint16 mask_add(const vboolf16& mask, vint16& c, const vint16& a, const vint16& b) { return _mm512_mask_add_epi32(c,mask,a,b); }
  __forceinline const vint16 mask_sub(const vboolf16& mask, vint16& c, const vint16& a, const vint16& b) { return _mm512_mask_sub_epi32(c,mask,a,b); }

  __forceinline const vint16 mask_and(const vboolf16& m, vint16& c, const vint16& a, const vint16& b) { return _mm512_mask_and_epi32(c,m,a,b); }
  __forceinline const vint16 mask_or (const vboolf16& m, vint16& c, const vint16& a, const vint16& b) { return _mm512_mask_or_epi32(c,m,a,b); }
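
  /* Illustrative sketch (editor addition, not part of the original header):
   * per-lane clamp composed from the min/max wrappers above; the function name
   * is hypothetical. Equivalent to min(max(a,lo),hi) applied independently in
   * each of the 16 lanes. */
  __forceinline const vint16 clamp16i_sketch( const vint16& a, const vint16& lo, const vint16& hi ) {
    return min(max(a, lo), hi);
  }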
  ////////////////////////////////////////////////////////////////////////////////
  /// Assignment Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vint16& operator +=( vint16& a, const vint16& b ) { return a = a + b; }
  __forceinline vint16& operator +=( vint16& a, const int     b ) { return a = a + b; }

  __forceinline vint16& operator -=( vint16& a, const vint16& b ) { return a = a - b; }
  __forceinline vint16& operator -=( vint16& a, const int     b ) { return a = a - b; }

  __forceinline vint16& operator *=( vint16& a, const vint16& b ) { return a = a * b; }
  __forceinline vint16& operator *=( vint16& a, const int     b ) { return a = a * b; }

  __forceinline vint16& operator &=( vint16& a, const vint16& b ) { return a = a & b; }
  __forceinline vint16& operator &=( vint16& a, const int     b ) { return a = a & b; }

  __forceinline vint16& operator |=( vint16& a, const vint16& b ) { return a = a | b; }
  __forceinline vint16& operator |=( vint16& a, const int     b ) { return a = a | b; }

  __forceinline vint16& operator <<=( vint16& a, const int b ) { return a = a << b; }
  __forceinline vint16& operator >>=( vint16& a, const int b ) { return a = a >> b; }

  ////////////////////////////////////////////////////////////////////////////////
  /// Comparison Operators + Select
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline const vboolf16 operator ==( const vint16& a, const vint16& b ) { return _mm512_cmp_epi32_mask(a,b,_MM_CMPINT_EQ); }
  __forceinline const vboolf16 operator ==( const vint16& a, const int     b ) { return a == vint16(b); }
  __forceinline const vboolf16 operator ==( const int     a, const vint16& b ) { return vint16(a) == b; }

  __forceinline const vboolf16 operator !=( const vint16& a, const vint16& b ) { return _mm512_cmp_epi32_mask(a,b,_MM_CMPINT_NE); }
  __forceinline const vboolf16 operator !=( const vint16& a, const int     b ) { return a != vint16(b); }
  __forceinline const vboolf16 operator !=( const int     a, const vint16& b ) { return vint16(a) != b; }

  __forceinline const vboolf16 operator < ( const vint16& a, const vint16& b ) { return _mm512_cmp_epi32_mask(a,b,_MM_CMPINT_LT); }
  __forceinline const vboolf16 operator < ( const vint16& a, const int     b ) { return a <  vint16(b); }
  __forceinline const vboolf16 operator < ( const int     a, const vint16& b ) { return vint16(a) <  b; }

  __forceinline const vboolf16 operator >=( const vint16& a, const vint16& b ) { return _mm512_cmp_epi32_mask(a,b,_MM_CMPINT_GE); }
  __forceinline const vboolf16 operator >=( const vint16& a, const int     b ) { return a >= vint16(b); }
  __forceinline const vboolf16 operator >=( const int     a, const vint16& b ) { return vint16(a) >= b; }

  __forceinline const vboolf16 operator > ( const vint16& a, const vint16& b ) { return _mm512_cmp_epi32_mask(a,b,_MM_CMPINT_GT); }
  __forceinline const vboolf16 operator > ( const vint16& a, const int     b ) { return a >  vint16(b); }
  __forceinline const vboolf16 operator > ( const int     a, const vint16& b ) { return vint16(a) >  b; }

  __forceinline const vboolf16 operator <=( const vint16& a, const vint16& b ) { return _mm512_cmp_epi32_mask(a,b,_MM_CMPINT_LE); }
  __forceinline const vboolf16 operator <=( const vint16& a, const int     b ) { return a <= vint16(b); }
  __forceinline const vboolf16 operator <=( const int     a, const vint16& b ) { return vint16(a) <= b; }

  __forceinline vboolf16 eq(const vint16& a, const vint16& b) { return _mm512_cmp_epi32_mask(a,b,_MM_CMPINT_EQ); }
  __forceinline vboolf16 ne(const vint16& a, const vint16& b) { return _mm512_cmp_epi32_mask(a,b,_MM_CMPINT_NE); }
  __forceinline vboolf16 lt(const vint16& a, const vint16& b) { return _mm512_cmp_epi32_mask(a,b,_MM_CMPINT_LT); }
  __forceinline vboolf16 ge(const vint16& a, const vint16& b) { return _mm512_cmp_epi32_mask(a,b,_MM_CMPINT_GE); }
  __forceinline vboolf16 gt(const vint16& a, const vint16& b) { return _mm512_cmp_epi32_mask(a,b,_MM_CMPINT_GT); }
  __forceinline vboolf16 le(const vint16& a, const vint16& b) { return _mm512_cmp_epi32_mask(a,b,_MM_CMPINT_LE); }

  __forceinline vboolf16 uint_le(const vint16& a, const vint16& b) { return _mm512_cmp_epu32_mask(a,b,_MM_CMPINT_LE); }
  __forceinline vboolf16 uint_gt(const vint16& a, const vint16& b) { return _mm512_cmp_epu32_mask(a,b,_MM_CMPINT_GT); }

  __forceinline vboolf16 eq(const vboolf16 mask, const vint16& a, const vint16& b) { return _mm512_mask_cmp_epi32_mask(mask,a,b,_MM_CMPINT_EQ); }
  __forceinline vboolf16 ne(const vboolf16 mask, const vint16& a, const vint16& b) { return _mm512_mask_cmp_epi32_mask(mask,a,b,_MM_CMPINT_NE); }
  __forceinline vboolf16 lt(const vboolf16 mask, const vint16& a, const vint16& b) { return _mm512_mask_cmp_epi32_mask(mask,a,b,_MM_CMPINT_LT); }
  __forceinline vboolf16 ge(const vboolf16 mask, const vint16& a, const vint16& b) { return _mm512_mask_cmp_epi32_mask(mask,a,b,_MM_CMPINT_GE); }
  __forceinline vboolf16 gt(const vboolf16 mask, const vint16& a, const vint16& b) { return _mm512_mask_cmp_epi32_mask(mask,a,b,_MM_CMPINT_GT); }
  __forceinline vboolf16 le(const vboolf16 mask, const vint16& a, const vint16& b) { return _mm512_mask_cmp_epi32_mask(mask,a,b,_MM_CMPINT_LE); }

  __forceinline vboolf16 uint_le(const vboolf16 mask, const vint16& a, const vint16& b) { return _mm512_mask_cmp_epu32_mask(mask,a,b,_MM_CMPINT_LE); }
  __forceinline vboolf16 uint_gt(const vboolf16 mask, const vint16& a, const vint16& b) { return _mm512_mask_cmp_epu32_mask(mask,a,b,_MM_CMPINT_GT); }

  __forceinline const vint16 select( const vboolf16& m, const vint16& t, const vint16& f ) {
    return _mm512_mask_or_epi32(f,m,t,t);
  }

  __forceinline void xchg(const vboolf16& m, vint16& a, vint16& b) {
    const vint16 c = a; a = select(m,b,a); b = select(m,c,b);
  }

  __forceinline vboolf16 test(const vboolf16& m, const vint16& a, const vint16& b) {
    return _mm512_mask_test_epi32_mask(m,a,b);
  }

  __forceinline vboolf16 test(const vint16& a, const vint16& b) {
    return _mm512_test_epi32_mask(a,b);
  }
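
  /* Illustrative sketch (editor addition, not part of the original header):
   * per-lane absolute value composed from the comparison, unary minus, and
   * select helpers above; the function name is hypothetical. Lanes where a < 0
   * take the negated value, all other lanes pass through unchanged. */
  __forceinline const vint16 abs16i_sketch( const vint16& a ) {
    return select(a < 0, -a, a);
  }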
  ////////////////////////////////////////////////////////////////////////////////
  // Movement/Shifting/Shuffling Functions
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vint16 unpacklo( const vint16& a, const vint16& b ) { return _mm512_unpacklo_epi32(a.v, b.v); }
  __forceinline vint16 unpackhi( const vint16& a, const vint16& b ) { return _mm512_unpackhi_epi32(a.v, b.v); }

  template<size_t i>
  __forceinline const vint16 shuffle( const vint16& a ) {
    return _mm512_castps_si512(_mm512_permute_ps(_mm512_castsi512_ps(a), _MM_SHUFFLE(i, i, i, i)));
  }

  template<int A, int B, int C, int D>
  __forceinline vint16 shuffle(const vint16& v) {
    return _mm512_castps_si512(_mm512_permute_ps(_mm512_castsi512_ps(v),_MM_SHUFFLE(D,C,B,A)));
  }

  template<int i>
  __forceinline vint16 shuffle4(const vint16& x) {
    return _mm512_castps_si512(_mm512_shuffle_f32x4(_mm512_castsi512_ps(x),_mm512_castsi512_ps(x),_MM_SHUFFLE(i,i,i,i)));
  }

  template<int A, int B, int C, int D>
  __forceinline vint16 shuffle4(const vint16& x) {
    return _mm512_castps_si512(_mm512_shuffle_f32x4(_mm512_castsi512_ps(x),_mm512_castsi512_ps(x),_MM_SHUFFLE(D,C,B,A)));
  }

  template<int i>
  __forceinline vint16 align_shift_right(const vint16& a, const vint16& b) {
    return _mm512_alignr_epi32(a,b,i);
  }

  __forceinline int toScalar(const vint16& a) {
    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
  }

  template<int i> __forceinline const vint16 insert4(const vint16& a, const vint4& b) { return _mm512_inserti32x4(a, b, i); }

  __forceinline size_t extract64bit(const vint16& v) {
    return _mm_cvtsi128_si64(_mm512_castsi512_si128(v));
  }

  template<int N, int i>
  vint<N> extractN(const vint16& v);

  template<> __forceinline vint4 extractN<4,0>(const vint16& v) { return _mm512_castsi512_si128(v); }
  template<> __forceinline vint4 extractN<4,1>(const vint16& v) { return _mm512_extracti32x4_epi32(v, 1); }
  template<> __forceinline vint4 extractN<4,2>(const vint16& v) { return _mm512_extracti32x4_epi32(v, 2); }
  template<> __forceinline vint4 extractN<4,3>(const vint16& v) { return _mm512_extracti32x4_epi32(v, 3); }

  template<> __forceinline vint8 extractN<8,0>(const vint16& v) { return _mm512_castsi512_si256(v); }
  template<> __forceinline vint8 extractN<8,1>(const vint16& v) { return _mm512_extracti32x8_epi32(v, 1); }

  template<int i> __forceinline vint4 extract4   (const vint16& v) { return _mm512_extracti32x4_epi32(v, i); }
  template<>      __forceinline vint4 extract4<0>(const vint16& v) { return _mm512_castsi512_si128(v); }

  template<int i> __forceinline vint8 extract8   (const vint16& v) { return _mm512_extracti32x8_epi32(v, i); }
  template<>      __forceinline vint8 extract8<0>(const vint16& v) { return _mm512_castsi512_si256(v); }
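
  /* Illustrative sketch (editor addition, not part of the original header):
   * broadcast lane 0 of 'v' to all 16 lanes by combining the within-128-bit
   * shuffle with the across-128-bit shuffle4 defined above; the function name
   * is hypothetical. */
  __forceinline vint16 broadcast_lane0_sketch(const vint16& v) {
    return shuffle4<0>(shuffle<0>(v));
  }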
  ////////////////////////////////////////////////////////////////////////////////
  /// Reductions
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline int reduce_add(vint16 a) { return _mm512_reduce_add_epi32(a); }
  __forceinline int reduce_mul(vint16 a) { return _mm512_reduce_mul_epi32(a); }
  __forceinline int reduce_min(vint16 a) { return _mm512_reduce_min_epi32(a); }
  __forceinline int reduce_max(vint16 a) { return _mm512_reduce_max_epi32(a); }
  __forceinline int reduce_and(vint16 a) { return _mm512_reduce_and_epi32(a); }

  __forceinline vint16 vreduce_min2(vint16 x) {                      return min(x,shuffle<1,0,3,2>(x)); }
  __forceinline vint16 vreduce_min4(vint16 x) { x = vreduce_min2(x); return min(x,shuffle<2,3,0,1>(x)); }
  __forceinline vint16 vreduce_min8(vint16 x) { x = vreduce_min4(x); return min(x,shuffle4<1,0,3,2>(x)); }
  __forceinline vint16 vreduce_min (vint16 x) { x = vreduce_min8(x); return min(x,shuffle4<2,3,0,1>(x)); }

  __forceinline vint16 vreduce_max2(vint16 x) {                      return max(x,shuffle<1,0,3,2>(x)); }
  __forceinline vint16 vreduce_max4(vint16 x) { x = vreduce_max2(x); return max(x,shuffle<2,3,0,1>(x)); }
  __forceinline vint16 vreduce_max8(vint16 x) { x = vreduce_max4(x); return max(x,shuffle4<1,0,3,2>(x)); }
  __forceinline vint16 vreduce_max (vint16 x) { x = vreduce_max8(x); return max(x,shuffle4<2,3,0,1>(x)); }

  __forceinline vint16 vreduce_and2(vint16 x) {                      return x & shuffle<1,0,3,2>(x); }
  __forceinline vint16 vreduce_and4(vint16 x) { x = vreduce_and2(x); return x & shuffle<2,3,0,1>(x); }
  __forceinline vint16 vreduce_and8(vint16 x) { x = vreduce_and4(x); return x & shuffle4<1,0,3,2>(x); }
  __forceinline vint16 vreduce_and (vint16 x) { x = vreduce_and8(x); return x & shuffle4<2,3,0,1>(x); }

  __forceinline vint16 vreduce_or2(vint16 x) {                     return x | shuffle<1,0,3,2>(x); }
  __forceinline vint16 vreduce_or4(vint16 x) { x = vreduce_or2(x); return x | shuffle<2,3,0,1>(x); }
  __forceinline vint16 vreduce_or8(vint16 x) { x = vreduce_or4(x); return x | shuffle4<1,0,3,2>(x); }
  __forceinline vint16 vreduce_or (vint16 x) { x = vreduce_or8(x); return x | shuffle4<2,3,0,1>(x); }

  __forceinline vint16 vreduce_add2(vint16 x) {                      return x + shuffle<1,0,3,2>(x); }
  __forceinline vint16 vreduce_add4(vint16 x) { x = vreduce_add2(x); return x + shuffle<2,3,0,1>(x); }
  __forceinline vint16 vreduce_add8(vint16 x) { x = vreduce_add4(x); return x + shuffle4<1,0,3,2>(x); }
  __forceinline vint16 vreduce_add (vint16 x) { x = vreduce_add8(x); return x + shuffle4<2,3,0,1>(x); }
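
  /* Illustrative sketch (editor addition, not part of the original header):
   * the vreduce_* helpers above reduce in log2(16) = 4 swap/combine steps and
   * leave the result in every lane, so extracting lane 0 with toScalar gives
   * the same value as the _mm512_reduce_* based reduce_min above. The function
   * name is hypothetical. */
  __forceinline int reduce_min_via_vreduce_sketch(const vint16& x) {
    return toScalar(vreduce_min(x));
  }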
  ////////////////////////////////////////////////////////////////////////////////
  /// Memory load and store operations
  ////////////////////////////////////////////////////////////////////////////////

  template<int scale = 4>
  __forceinline vint16 gather16i(const vboolf16& mask, const int* const ptr, const vint16& index) {
    return _mm512_mask_i32gather_epi32(_mm512_undefined_epi32(),mask,index,ptr,scale);
  }

  template<int scale = 4>
  __forceinline vint16 gather16i(const vboolf16& mask, vint16& dest, const int* const ptr, const vint16& index) {
    return _mm512_mask_i32gather_epi32(dest,mask,index,ptr,scale);
  }

  template<int scale = 4>
  __forceinline void scatter16i(const vboolf16& mask, int* const ptr, const vint16& index, const vint16& v) {
    _mm512_mask_i32scatter_epi32((int*)ptr,mask,index,v,scale);
  }
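
  /* Illustrative sketch (editor addition, not part of the original header):
   * scatter only the lanes holding a non-negative index, deriving the write
   * mask from the comparison operators above; the function name is hypothetical. */
  __forceinline void scatter_nonnegative_sketch(int* const ptr, const vint16& index, const vint16& v) {
    scatter16i<4>(index >= 0, ptr, index, v);
  }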
  __forceinline vint16 conflict16i(const vint16& index) {
    return _mm512_conflict_epi32(index);
  }

  __forceinline vint16 conflict16i(const vboolf16& mask, vint16& dest, const vint16& index) {
    return _mm512_mask_conflict_epi32(dest,mask,index);
  }

  __forceinline void compactustore16i_low(const vboolf16 mask, void* addr, const vint16& reg) {
    _mm512_mask_compressstoreu_epi32(addr,mask,reg);
  }

  __forceinline vint16 convert_uint32_t(const __m512& f) {
    return _mm512_cvtps_epu32(f);
  }

  __forceinline vint16 permute(vint16 v, vint16 index) {
    return _mm512_permutexvar_epi32(index,v);
  }

  __forceinline vint16 reverse(const vint16& a) {
    return permute(a,vint16(reverse_step));
  }

  __forceinline vint16 prefix_sum(const vint16& a)
  {
    const vint16 z(zero);
    vint16 v = a;
    v = v + align_shift_right<16-1>(v,z);
    v = v + align_shift_right<16-2>(v,z);
    v = v + align_shift_right<16-4>(v,z);
    v = v + align_shift_right<16-8>(v,z);
    return v;
  }

  __forceinline vint16 reverse_prefix_sum(const vint16& a)
  {
    const vint16 z(zero);
    vint16 v = a;
    v = v + align_shift_right<1>(z,v);
    v = v + align_shift_right<2>(z,v);
    v = v + align_shift_right<4>(z,v);
    v = v + align_shift_right<8>(z,v);
    return v;
  }
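
  /* Illustrative sketch (editor addition, not part of the original header):
   * prefix_sum above is an inclusive scan, e.g. prefix_sum(vint16(step)) yields
   * <0,1,3,6,10,...>. Shifting its result up by one lane with align_shift_right
   * (which inserts a zero into lane 0) turns it into an exclusive scan; the
   * function name is hypothetical. */
  __forceinline vint16 exclusive_prefix_sum_sketch(const vint16& a) {
    return align_shift_right<15>(prefix_sum(a),vint16(zero));
  }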
  /* this should use a vbool8 and a vint8_64... */
  template<int scale = 1, int hint = _MM_HINT_T0>
  __forceinline void gather_prefetch64(void const* base_addr, const vbool16& mask, const vint16& offset)
  {
#if defined(__AVX512PF__)
    _mm512_mask_prefetch_i64gather_pd(offset,mask,base_addr,scale,hint);
#endif
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Output Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline std::ostream& operator<<(std::ostream& cout, const vint16& v)
  {
    cout << "<" << v[0];
    for (int i=1; i<16; i++) cout << ", " << v[i];
    cout << ">";
    return cout;
  }
}