/*===---- avxintrin.h - AVX intrinsics -------------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __IMMINTRIN_H
#error "Never use <avxintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __AVXINTRIN_H
#define __AVXINTRIN_H

typedef double __v4df __attribute__ ((__vector_size__ (32)));
typedef float __v8sf __attribute__ ((__vector_size__ (32)));
typedef long long __v4di __attribute__ ((__vector_size__ (32)));
typedef int __v8si __attribute__ ((__vector_size__ (32)));
typedef short __v16hi __attribute__ ((__vector_size__ (32)));
typedef char __v32qi __attribute__ ((__vector_size__ (32)));

typedef float __m256 __attribute__ ((__vector_size__ (32)));
typedef double __m256d __attribute__((__vector_size__(32)));
typedef long long __m256i __attribute__((__vector_size__(32)));

/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
/* Arithmetic */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_add_pd(__m256d __a, __m256d __b)
{
  return __a + __b;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_add_ps(__m256 __a, __m256 __b)
{
  return __a + __b;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_sub_pd(__m256d __a, __m256d __b)
{
  return __a - __b;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_sub_ps(__m256 __a, __m256 __b)
{
  return __a - __b;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_addsub_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_addsub_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_div_pd(__m256d __a, __m256d __b)
{
  return __a / __b;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_div_ps(__m256 __a, __m256 __b)
{
  return __a / __b;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_max_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_max_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_min_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_min_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_mul_pd(__m256d __a, __m256d __b)
{
  return __a * __b;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_mul_ps(__m256 __a, __m256 __b)
{
  return __a * __b;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_sqrt_pd(__m256d __a)
{
  return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_sqrt_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_rsqrt_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_rcp_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);
}

#define _mm256_round_pd(V, M) __extension__ ({ \
    __m256d __V = (V); \
    (__m256d)__builtin_ia32_roundpd256((__v4df)__V, (M)); })

#define _mm256_round_ps(V, M) __extension__ ({ \
    __m256 __V = (V); \
    (__m256)__builtin_ia32_roundps256((__v8sf)__V, (M)); })

#define _mm256_ceil_pd(V)  _mm256_round_pd((V), _MM_FROUND_CEIL)
#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
#define _mm256_ceil_ps(V)  _mm256_round_ps((V), _MM_FROUND_CEIL)
#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)
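
/* Usage sketch (illustrative; __x is a hypothetical vector). The
 * _MM_FROUND_* rounding-control constants come from <smmintrin.h>:
 *   __m256d __r = _mm256_floor_pd(__x);  // round each lane toward -infinity
 *   __m256d __n = _mm256_round_pd(__x, _MM_FROUND_TO_NEAREST_INT |
 *                                      _MM_FROUND_NO_EXC);
 */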
/* Logical */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_and_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a & (__v4di)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_and_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a & (__v8si)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_andnot_pd(__m256d __a, __m256d __b)
{
  return (__m256d)(~(__v4di)__a & (__v4di)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_andnot_ps(__m256 __a, __m256 __b)
{
  return (__m256)(~(__v8si)__a & (__v8si)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_or_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a | (__v4di)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_or_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a | (__v8si)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_xor_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a ^ (__v4di)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_xor_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a ^ (__v8si)__b);
}
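
/* Note that, as in the SSE forms, andnot complements its first operand, not
 * its second. Illustrative sketch (__v is hypothetical): clearing the sign
 * bits computes a lanewise absolute value:
 *   const __m256 __signmask = _mm256_set1_ps(-0.0f);
 *   __m256 __absv = _mm256_andnot_ps(__signmask, __v);
 */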
/* Horizontal arithmetic */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_hadd_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_hadd_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_hsub_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_hsub_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b);
}
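
/* Note: the horizontal ops pair elements within each 128-bit half rather
 * than across the full vector; e.g. _mm256_hadd_pd(__a, __b) yields
 * { __a[0]+__a[1], __b[0]+__b[1], __a[2]+__a[3], __b[2]+__b[3] }. */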
/* Vector permutations */
static __inline __m128d __DEFAULT_FN_ATTRS
_mm_permutevar_pd(__m128d __a, __m128i __c)
{
  return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_permutevar_pd(__m256d __a, __m256i __c)
{
  return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c);
}

static __inline __m128 __DEFAULT_FN_ATTRS
_mm_permutevar_ps(__m128 __a, __m128i __c)
{
  return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_permutevar_ps(__m256 __a, __m256i __c)
{
  return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);
}

#define _mm_permute_pd(A, C) __extension__ ({ \
    __m128d __A = (A); \
    (__m128d)__builtin_shufflevector((__v2df)__A, (__v2df) _mm_setzero_pd(), \
                                     (C) & 0x1, ((C) & 0x2) >> 1); })

#define _mm256_permute_pd(A, C) __extension__ ({ \
    __m256d __A = (A); \
    (__m256d)__builtin_shufflevector((__v4df)__A, (__v4df) _mm256_setzero_pd(), \
                                     (C) & 0x1, ((C) & 0x2) >> 1, \
                                     2 + (((C) & 0x4) >> 2), \
                                     2 + (((C) & 0x8) >> 3)); })

#define _mm_permute_ps(A, C) __extension__ ({ \
    __m128 __A = (A); \
    (__m128)__builtin_shufflevector((__v4sf)__A, (__v4sf) _mm_setzero_ps(), \
                                    (C) & 0x3, ((C) & 0xc) >> 2, \
                                    ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6); })

#define _mm256_permute_ps(A, C) __extension__ ({ \
    __m256 __A = (A); \
    (__m256)__builtin_shufflevector((__v8sf)__A, (__v8sf) _mm256_setzero_ps(), \
                                    (C) & 0x3, ((C) & 0xc) >> 2, \
                                    ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6, \
                                    4 + (((C) & 0x03) >> 0), \
                                    4 + (((C) & 0x0c) >> 2), \
                                    4 + (((C) & 0x30) >> 4), \
                                    4 + (((C) & 0xc0) >> 6)); })

#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
    __m256d __V1 = (V1); \
    __m256d __V2 = (V2); \
    (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)__V1, (__v4df)__V2, (M)); })

#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
    __m256 __V1 = (V1); \
    __m256 __V2 = (V2); \
    (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)__V1, (__v8sf)__V2, (M)); })

#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
    __m256i __V1 = (V1); \
    __m256i __V2 = (V2); \
    (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)__V1, (__v8si)__V2, (M)); })
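
/* For the permute2f128 forms, bits [1:0] of M select the low 128-bit half of
 * the result (0/1 = low/high half of V1, 2/3 = low/high half of V2), bits
 * [5:4] select the high half the same way, and bits 3 and 7 zero the
 * corresponding half. Illustrative (__a and __b are hypothetical):
 *   __m256d __r = _mm256_permute2f128_pd(__a, __b, 0x31);
 *   // __r = { high half of __a, high half of __b }
 */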
/* Vector Blend */
#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
    __m256d __V1 = (V1); \
    __m256d __V2 = (V2); \
    (__m256d)__builtin_shufflevector((__v4df)__V1, (__v4df)__V2, \
                                     (((M) & 0x01) ? 4 : 0), \
                                     (((M) & 0x02) ? 5 : 1), \
                                     (((M) & 0x04) ? 6 : 2), \
                                     (((M) & 0x08) ? 7 : 3)); })

#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
    __m256 __V1 = (V1); \
    __m256 __V2 = (V2); \
    (__m256)__builtin_shufflevector((__v8sf)__V1, (__v8sf)__V2, \
                                    (((M) & 0x01) ?  8 : 0), \
                                    (((M) & 0x02) ?  9 : 1), \
                                    (((M) & 0x04) ? 10 : 2), \
                                    (((M) & 0x08) ? 11 : 3), \
                                    (((M) & 0x10) ? 12 : 4), \
                                    (((M) & 0x20) ? 13 : 5), \
                                    (((M) & 0x40) ? 14 : 6), \
                                    (((M) & 0x80) ? 15 : 7)); })
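
/* Bit i of the immediate selects element i of the second operand.
 * Illustrative: _mm256_blend_pd(__a, __b, 0x5) yields
 * { __b[0], __a[1], __b[2], __a[3] }. */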
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
{
  return (__m256d)__builtin_ia32_blendvpd256(
    (__v4df)__a, (__v4df)__b, (__v4df)__c);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
{
  return (__m256)__builtin_ia32_blendvps256(
    (__v8sf)__a, (__v8sf)__b, (__v8sf)__c);
}
/* Vector Dot Product */
#define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
    __m256 __V1 = (V1); \
    __m256 __V2 = (V2); \
    (__m256)__builtin_ia32_dpps256((__v8sf)__V1, (__v8sf)__V2, (M)); })
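
/* The immediate operates per 128-bit half: bits [7:4] choose which element
 * products are summed and bits [3:0] choose which result lanes receive the
 * sum (the rest are zeroed). E.g. _mm256_dp_ps(__a, __b, 0xFF) computes a
 * full 4-element dot product independently in each half and broadcasts it
 * across that half. */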
/* Vector shuffle */
#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
    __m256 __a = (a); \
    __m256 __b = (b); \
    (__m256)__builtin_shufflevector((__v8sf)__a, (__v8sf)__b, \
                                    (mask) & 0x3, ((mask) & 0xc) >> 2, \
                                    (((mask) & 0x30) >> 4) + 8, \
                                    (((mask) & 0xc0) >> 6) + 8, \
                                    ((mask) & 0x3) + 4, \
                                    (((mask) & 0xc) >> 2) + 4, \
                                    (((mask) & 0x30) >> 4) + 12, \
                                    (((mask) & 0xc0) >> 6) + 12); })

#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
    __m256d __a = (a); \
    __m256d __b = (b); \
    (__m256d)__builtin_shufflevector((__v4df)__a, (__v4df)__b, \
                                     (mask) & 0x1, \
                                     (((mask) & 0x2) >> 1) + 4, \
                                     (((mask) & 0x4) >> 2) + 2, \
                                     (((mask) & 0x8) >> 3) + 6); })
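
/* Note: the 256-bit shuffles apply the same 4-element selector within each
 * 128-bit half. Illustrative, using _MM_SHUFFLE from <xmmintrin.h>:
 *   __m256 __r = _mm256_shuffle_ps(__a, __b, _MM_SHUFFLE(3, 2, 1, 0));
 *   // per half: low two elements from __a, high two from __b
 */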
/* Compare */
#define _CMP_EQ_OQ    0x00 /* Equal (ordered, non-signaling) */
#define _CMP_LT_OS    0x01 /* Less-than (ordered, signaling) */
#define _CMP_LE_OS    0x02 /* Less-than-or-equal (ordered, signaling) */
#define _CMP_UNORD_Q  0x03 /* Unordered (non-signaling) */
#define _CMP_NEQ_UQ   0x04 /* Not-equal (unordered, non-signaling) */
#define _CMP_NLT_US   0x05 /* Not-less-than (unordered, signaling) */
#define _CMP_NLE_US   0x06 /* Not-less-than-or-equal (unordered, signaling) */
#define _CMP_ORD_Q    0x07 /* Ordered (non-signaling) */
#define _CMP_EQ_UQ    0x08 /* Equal (unordered, non-signaling) */
#define _CMP_NGE_US   0x09 /* Not-greater-than-or-equal (unordered, signaling) */
#define _CMP_NGT_US   0x0a /* Not-greater-than (unordered, signaling) */
#define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling) */
#define _CMP_NEQ_OQ   0x0c /* Not-equal (ordered, non-signaling) */
#define _CMP_GE_OS    0x0d /* Greater-than-or-equal (ordered, signaling) */
#define _CMP_GT_OS    0x0e /* Greater-than (ordered, signaling) */
#define _CMP_TRUE_UQ  0x0f /* True (unordered, non-signaling) */
#define _CMP_EQ_OS    0x10 /* Equal (ordered, signaling) */
#define _CMP_LT_OQ    0x11 /* Less-than (ordered, non-signaling) */
#define _CMP_LE_OQ    0x12 /* Less-than-or-equal (ordered, non-signaling) */
#define _CMP_UNORD_S  0x13 /* Unordered (signaling) */
#define _CMP_NEQ_US   0x14 /* Not-equal (unordered, signaling) */
#define _CMP_NLT_UQ   0x15 /* Not-less-than (unordered, non-signaling) */
#define _CMP_NLE_UQ   0x16 /* Not-less-than-or-equal (unordered, non-signaling) */
#define _CMP_ORD_S    0x17 /* Ordered (signaling) */
#define _CMP_EQ_US    0x18 /* Equal (unordered, signaling) */
#define _CMP_NGE_UQ   0x19 /* Not-greater-than-or-equal (unordered, non-signaling) */
#define _CMP_NGT_UQ   0x1a /* Not-greater-than (unordered, non-signaling) */
#define _CMP_FALSE_OS 0x1b /* False (ordered, signaling) */
#define _CMP_NEQ_OS   0x1c /* Not-equal (ordered, signaling) */
#define _CMP_GE_OQ    0x1d /* Greater-than-or-equal (ordered, non-signaling) */
#define _CMP_GT_OQ    0x1e /* Greater-than (ordered, non-signaling) */
#define _CMP_TRUE_US  0x1f /* True (unordered, signaling) */
#define _mm_cmp_pd(a, b, c) __extension__ ({ \
    __m128d __a = (a); \
    __m128d __b = (b); \
    (__m128d)__builtin_ia32_cmppd((__v2df)__a, (__v2df)__b, (c)); })

#define _mm_cmp_ps(a, b, c) __extension__ ({ \
    __m128 __a = (a); \
    __m128 __b = (b); \
    (__m128)__builtin_ia32_cmpps((__v4sf)__a, (__v4sf)__b, (c)); })

#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
    __m256d __a = (a); \
    __m256d __b = (b); \
    (__m256d)__builtin_ia32_cmppd256((__v4df)__a, (__v4df)__b, (c)); })

#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
    __m256 __a = (a); \
    __m256 __b = (b); \
    (__m256)__builtin_ia32_cmpps256((__v8sf)__a, (__v8sf)__b, (c)); })

#define _mm_cmp_sd(a, b, c) __extension__ ({ \
    __m128d __a = (a); \
    __m128d __b = (b); \
    (__m128d)__builtin_ia32_cmpsd((__v2df)__a, (__v2df)__b, (c)); })

#define _mm_cmp_ss(a, b, c) __extension__ ({ \
    __m128 __a = (a); \
    __m128 __b = (b); \
    (__m128)__builtin_ia32_cmpss((__v4sf)__a, (__v4sf)__b, (c)); })
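
/* Usage sketch (illustrative; __a and __b are hypothetical): a predicate
 * compare combined with the movemask ops defined later in this file yields
 * one bit per lane:
 *   __m256d __m = _mm256_cmp_pd(__a, __b, _CMP_LT_OS); // lanewise __a < __b
 *   int __bits = _mm256_movemask_pd(__m);              // 4-bit lane mask
 */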
static __inline int __DEFAULT_FN_ATTRS
_mm256_extract_epi32(__m256i __a, const int __imm)
{
  __v8si __b = (__v8si)__a;
  return __b[__imm & 7];
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_extract_epi16(__m256i __a, const int __imm)
{
  __v16hi __b = (__v16hi)__a;
  return __b[__imm & 15];
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_extract_epi8(__m256i __a, const int __imm)
{
  __v32qi __b = (__v32qi)__a;
  return __b[__imm & 31];
}

#ifdef __x86_64__
static __inline long long __DEFAULT_FN_ATTRS
_mm256_extract_epi64(__m256i __a, const int __imm)
{
  __v4di __b = (__v4di)__a;
  return __b[__imm & 3];
}
#endif

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi32(__m256i __a, int __b, int const __imm)
{
  __v8si __c = (__v8si)__a;
  __c[__imm & 7] = __b;
  return (__m256i)__c;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi16(__m256i __a, int __b, int const __imm)
{
  __v16hi __c = (__v16hi)__a;
  __c[__imm & 15] = __b;
  return (__m256i)__c;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi8(__m256i __a, int __b, int const __imm)
{
  __v32qi __c = (__v32qi)__a;
  __c[__imm & 31] = __b;
  return (__m256i)__c;
}

#ifdef __x86_64__
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi64(__m256i __a, long long __b, int const __imm)
{
  __v4di __c = (__v4di)__a;
  __c[__imm & 3] = __b;
  return (__m256i)__c;
}
#endif
/* Conversion */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_cvtepi32_pd(__m128i __a)
{
  return (__m256d)__builtin_ia32_cvtdq2pd256((__v4si) __a);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_cvtepi32_ps(__m256i __a)
{
  return (__m256)__builtin_ia32_cvtdq2ps256((__v8si) __a);
}

static __inline __m128 __DEFAULT_FN_ATTRS
_mm256_cvtpd_ps(__m256d __a)
{
  return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_cvtps_epi32(__m256 __a)
{
  return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_cvtps_pd(__m128 __a)
{
  return (__m256d)__builtin_ia32_cvtps2pd256((__v4sf) __a);
}

static __inline __m128i __DEFAULT_FN_ATTRS
_mm256_cvttpd_epi32(__m256d __a)
{
  return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
}

static __inline __m128i __DEFAULT_FN_ATTRS
_mm256_cvtpd_epi32(__m256d __a)
{
  return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_cvttps_epi32(__m256 __a)
{
  return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a);
}
/* Vector replicate */
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_movehdup_ps(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 1, 1, 3, 3, 5, 5, 7, 7);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_moveldup_ps(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 0, 2, 2, 4, 4, 6, 6);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_movedup_pd(__m256d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 0, 2, 2);
}
/* Unpack and Interleave */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_unpackhi_pd(__m256d __a, __m256d __b)
{
  return __builtin_shufflevector(__a, __b, 1, 5, 1+2, 5+2);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_unpacklo_pd(__m256d __a, __m256d __b)
{
  return __builtin_shufflevector(__a, __b, 0, 4, 0+2, 4+2);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_unpackhi_ps(__m256 __a, __m256 __b)
{
  return __builtin_shufflevector(__a, __b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_unpacklo_ps(__m256 __a, __m256 __b)
{
  return __builtin_shufflevector(__a, __b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
}
/* Bit Test */
static __inline int __DEFAULT_FN_ATTRS
_mm_testz_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm_testc_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm_testnzc_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm_testz_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm_testc_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm_testnzc_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testz_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testc_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testnzc_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testz_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testc_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testnzc_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testz_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testc_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_testnzc_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b);
}
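
/* Semantics sketch: testz(a, b) returns 1 when a & b is all zero, testc(a, b)
 * returns 1 when ~a & b is all zero, and testnzc(a, b) returns 1 when neither
 * holds. The ps/pd variants examine only the sign bit of each element. */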
/* Vector extract sign mask */
static __inline int __DEFAULT_FN_ATTRS
_mm256_movemask_pd(__m256d __a)
{
  return __builtin_ia32_movmskpd256((__v4df)__a);
}

static __inline int __DEFAULT_FN_ATTRS
_mm256_movemask_ps(__m256 __a)
{
  return __builtin_ia32_movmskps256((__v8sf)__a);
}
/* Vector zero */
static __inline void __DEFAULT_FN_ATTRS
_mm256_zeroall(void)
{
  __builtin_ia32_vzeroall();
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_zeroupper(void)
{
  __builtin_ia32_vzeroupper();
}
/* Vector load with broadcast */
static __inline __m128 __DEFAULT_FN_ATTRS
_mm_broadcast_ss(float const *__a)
{
  float __f = *__a;
  return (__m128)(__v4sf){ __f, __f, __f, __f };
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_broadcast_sd(double const *__a)
{
  double __d = *__a;
  return (__m256d)(__v4df){ __d, __d, __d, __d };
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_broadcast_ss(float const *__a)
{
  float __f = *__a;
  return (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_broadcast_pd(__m128d const *__a)
{
  return (__m256d)__builtin_ia32_vbroadcastf128_pd256(__a);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_broadcast_ps(__m128 const *__a)
{
  return (__m256)__builtin_ia32_vbroadcastf128_ps256(__a);
}
/* SIMD load ops */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_load_pd(double const *__p)
{
  return *(__m256d *)__p;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_load_ps(float const *__p)
{
  return *(__m256 *)__p;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_loadu_pd(double const *__p)
{
  struct __loadu_pd {
    __m256d __v;
  } __attribute__((__packed__, __may_alias__));
  return ((struct __loadu_pd*)__p)->__v;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_loadu_ps(float const *__p)
{
  struct __loadu_ps {
    __m256 __v;
  } __attribute__((__packed__, __may_alias__));
  return ((struct __loadu_ps*)__p)->__v;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_load_si256(__m256i const *__p)
{
  return *__p;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_loadu_si256(__m256i const *__p)
{
  struct __loadu_si256 {
    __m256i __v;
  } __attribute__((__packed__, __may_alias__));
  return ((struct __loadu_si256*)__p)->__v;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_lddqu_si256(__m256i const *__p)
{
  return (__m256i)__builtin_ia32_lddqu256((char const *)__p);
}
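
/* Note: the loadu forms express an unaligned load through a __packed__,
 * __may_alias__ wrapper struct; the plain load/store forms require 32-byte
 * alignment. Illustrative (__p is a hypothetical pointer):
 *   __m256 __v = _mm256_loadu_ps(__p);  // __p need not be 32-byte aligned
 */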
/* SIMD store ops */
static __inline void __DEFAULT_FN_ATTRS
_mm256_store_pd(double *__p, __m256d __a)
{
  *(__m256d *)__p = __a;
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_store_ps(float *__p, __m256 __a)
{
  *(__m256 *)__p = __a;
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_pd(double *__p, __m256d __a)
{
  __builtin_ia32_storeupd256(__p, (__v4df)__a);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_ps(float *__p, __m256 __a)
{
  __builtin_ia32_storeups256(__p, (__v8sf)__a);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_store_si256(__m256i *__p, __m256i __a)
{
  *__p = __a;
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_si256(__m256i *__p, __m256i __a)
{
  __builtin_ia32_storedqu256((char *)__p, (__v32qi)__a);
}
/* Conditional load ops */
static __inline __m128d __DEFAULT_FN_ATTRS
_mm_maskload_pd(double const *__p, __m128d __m)
{
  return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2df)__m);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_maskload_pd(double const *__p, __m256d __m)
{
  return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p,
                                               (__v4df)__m);
}

static __inline __m128 __DEFAULT_FN_ATTRS
_mm_maskload_ps(float const *__p, __m128 __m)
{
  return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4sf)__m);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_maskload_ps(float const *__p, __m256 __m)
{
  return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8sf)__m);
}

/* Conditional store ops */
static __inline void __DEFAULT_FN_ATTRS
_mm256_maskstore_ps(float *__p, __m256 __m, __m256 __a)
{
  __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8sf)__m, (__v8sf)__a);
}

static __inline void __DEFAULT_FN_ATTRS
_mm_maskstore_pd(double *__p, __m128d __m, __m128d __a)
{
  __builtin_ia32_maskstorepd((__v2df *)__p, (__v2df)__m, (__v2df)__a);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_maskstore_pd(double *__p, __m256d __m, __m256d __a)
{
  __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4df)__m, (__v4df)__a);
}

static __inline void __DEFAULT_FN_ATTRS
_mm_maskstore_ps(float *__p, __m128 __m, __m128 __a)
{
  __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4sf)__m, (__v4sf)__a);
}
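
/* The sign bit of each mask element selects whether that lane is loaded or
 * stored; memory for deselected lanes is untouched. Illustrative sketch
 * (__p and __v are hypothetical):
 *   __m256d __m = _mm256_castsi256_pd(_mm256_set_epi64x(0, -1, 0, -1));
 *   _mm256_maskstore_pd(__p, __m, __v);  // writes lanes 0 and 2 only
 */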
/* Cacheability support ops */
static __inline void __DEFAULT_FN_ATTRS
_mm256_stream_si256(__m256i *__a, __m256i __b)
{
  __builtin_ia32_movntdq256((__v4di *)__a, (__v4di)__b);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_stream_pd(double *__a, __m256d __b)
{
  __builtin_ia32_movntpd256(__a, (__v4df)__b);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_stream_ps(float *__p, __m256 __a)
{
  __builtin_ia32_movntps256(__p, (__v8sf)__a);
}
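
/* Note: these non-temporal stores write around the cache hierarchy and, like
 * the aligned stores above, require 32-byte-aligned destinations. */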
/* Create vectors */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_set_pd(double __a, double __b, double __c, double __d)
{
  return (__m256d){ __d, __c, __b, __a };
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_set_ps(float __a, float __b, float __c, float __d,
              float __e, float __f, float __g, float __h)
{
  return (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,
                 int __i4, int __i5, int __i6, int __i7)
{
  return (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi16(short __w15, short __w14, short __w13, short __w12,
                 short __w11, short __w10, short __w09, short __w08,
                 short __w07, short __w06, short __w05, short __w04,
                 short __w03, short __w02, short __w01, short __w00)
{
  return (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,
                             __w07, __w08, __w09, __w10, __w11, __w12, __w13,
                             __w14, __w15 };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
                char __b27, char __b26, char __b25, char __b24,
                char __b23, char __b22, char __b21, char __b20,
                char __b19, char __b18, char __b17, char __b16,
                char __b15, char __b14, char __b13, char __b12,
                char __b11, char __b10, char __b09, char __b08,
                char __b07, char __b06, char __b05, char __b04,
                char __b03, char __b02, char __b01, char __b00)
{
  return (__m256i)(__v32qi){
    __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
    __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
    __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
    __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31
  };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
{
  return (__m256i)(__v4di){ __d, __c, __b, __a };
}
/* Create vectors with elements in reverse order */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_setr_pd(double __a, double __b, double __c, double __d)
{
  return (__m256d){ __a, __b, __c, __d };
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_setr_ps(float __a, float __b, float __c, float __d,
               float __e, float __f, float __g, float __h)
{
  return (__m256){ __a, __b, __c, __d, __e, __f, __g, __h };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,
                  int __i4, int __i5, int __i6, int __i7)
{
  return (__m256i)(__v8si){ __i0, __i1, __i2, __i3, __i4, __i5, __i6, __i7 };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,
                  short __w11, short __w10, short __w09, short __w08,
                  short __w07, short __w06, short __w05, short __w04,
                  short __w03, short __w02, short __w01, short __w00)
{
  return (__m256i)(__v16hi){ __w15, __w14, __w13, __w12, __w11, __w10, __w09,
                             __w08, __w07, __w06, __w05, __w04, __w03, __w02,
                             __w01, __w00 };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
                 char __b27, char __b26, char __b25, char __b24,
                 char __b23, char __b22, char __b21, char __b20,
                 char __b19, char __b18, char __b17, char __b16,
                 char __b15, char __b14, char __b13, char __b12,
                 char __b11, char __b10, char __b09, char __b08,
                 char __b07, char __b06, char __b05, char __b04,
                 char __b03, char __b02, char __b01, char __b00)
{
  return (__m256i)(__v32qi){
    __b31, __b30, __b29, __b28, __b27, __b26, __b25, __b24,
    __b23, __b22, __b21, __b20, __b19, __b18, __b17, __b16,
    __b15, __b14, __b13, __b12, __b11, __b10, __b09, __b08,
    __b07, __b06, __b05, __b04, __b03, __b02, __b01, __b00 };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
{
  return (__m256i)(__v4di){ __a, __b, __c, __d };
}
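
/* The set forms take arguments from the most-significant element down; the
 * setr forms take them in memory order, so, illustratively:
 *   _mm256_setr_pd(0.0, 1.0, 2.0, 3.0) == _mm256_set_pd(3.0, 2.0, 1.0, 0.0)
 */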
/* Create vectors with repeated elements */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_set1_pd(double __w)
{
  return (__m256d){ __w, __w, __w, __w };
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_set1_ps(float __w)
{
  return (__m256){ __w, __w, __w, __w, __w, __w, __w, __w };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi32(int __i)
{
  return (__m256i)(__v8si){ __i, __i, __i, __i, __i, __i, __i, __i };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi16(short __w)
{
  return (__m256i)(__v16hi){ __w, __w, __w, __w, __w, __w, __w, __w,
                             __w, __w, __w, __w, __w, __w, __w, __w };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi8(char __b)
{
  return (__m256i)(__v32qi){ __b, __b, __b, __b, __b, __b, __b, __b,
                             __b, __b, __b, __b, __b, __b, __b, __b,
                             __b, __b, __b, __b, __b, __b, __b, __b,
                             __b, __b, __b, __b, __b, __b, __b, __b };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi64x(long long __q)
{
  return (__m256i)(__v4di){ __q, __q, __q, __q };
}
/* Create zeroed vectors */
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_setzero_pd(void)
{
  return (__m256d){ 0, 0, 0, 0 };
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_setzero_ps(void)
{
  return (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setzero_si256(void)
{
  return (__m256i){ 0LL, 0LL, 0LL, 0LL };
}
/* Cast between vector types */
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castpd_ps(__m256d __a)
{
  return (__m256)__a;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castpd_si256(__m256d __a)
{
  return (__m256i)__a;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castps_pd(__m256 __a)
{
  return (__m256d)__a;
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castps_si256(__m256 __a)
{
  return (__m256i)__a;
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castsi256_ps(__m256i __a)
{
  return (__m256)__a;
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castsi256_pd(__m256i __a)
{
  return (__m256d)__a;
}

static __inline __m128d __DEFAULT_FN_ATTRS
_mm256_castpd256_pd128(__m256d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1);
}

static __inline __m128 __DEFAULT_FN_ATTRS
_mm256_castps256_ps128(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
}

static __inline __m128i __DEFAULT_FN_ATTRS
_mm256_castsi256_si128(__m256i __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castpd128_pd256(__m128d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castps128_ps256(__m128 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castsi128_si256(__m128i __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
}
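
/* Note: in the 128-to-256-bit casts the -1 shuffle indices leave the upper
 * 128 bits undefined; use the insertf128 or set_m128 forms below when the
 * upper half must hold defined values. */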
/*
   Vector insert.
   We use macros rather than inlines because we only want to accept
   invocations where the immediate M is a constant expression.
*/
#define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \
  (__m256)__builtin_shufflevector( \
    (__v8sf)(V1), \
    (__v8sf)_mm256_castps128_ps256((__m128)(V2)), \
    (((M) & 1) ? 0 : 8), \
    (((M) & 1) ? 1 : 9), \
    (((M) & 1) ? 2 : 10), \
    (((M) & 1) ? 3 : 11), \
    (((M) & 1) ? 8 : 4), \
    (((M) & 1) ? 9 : 5), \
    (((M) & 1) ? 10 : 6), \
    (((M) & 1) ? 11 : 7) );})

#define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \
  (__m256d)__builtin_shufflevector( \
    (__v4df)(V1), \
    (__v4df)_mm256_castpd128_pd256((__m128d)(V2)), \
    (((M) & 1) ? 0 : 4), \
    (((M) & 1) ? 1 : 5), \
    (((M) & 1) ? 4 : 2), \
    (((M) & 1) ? 5 : 3) );})

#define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \
  (__m256i)__builtin_shufflevector( \
    (__v4di)(V1), \
    (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
    (((M) & 1) ? 0 : 4), \
    (((M) & 1) ? 1 : 5), \
    (((M) & 1) ? 4 : 2), \
    (((M) & 1) ? 5 : 3) );})
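
/* Usage sketch (illustrative; __lo and __hi are hypothetical __m128 values):
 * build a 256-bit vector from two 128-bit halves:
 *   __m256 __v = _mm256_insertf128_ps(_mm256_castps128_ps256(__lo), __hi, 1);
 */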
/*
   Vector extract.
   We use macros rather than inlines because we only want to accept
   invocations where the immediate M is a constant expression.
*/
#define _mm256_extractf128_ps(V, M) __extension__ ({ \
  (__m128)__builtin_shufflevector( \
    (__v8sf)(V), \
    (__v8sf)(_mm256_setzero_ps()), \
    (((M) & 1) ? 4 : 0), \
    (((M) & 1) ? 5 : 1), \
    (((M) & 1) ? 6 : 2), \
    (((M) & 1) ? 7 : 3) );})

#define _mm256_extractf128_pd(V, M) __extension__ ({ \
  (__m128d)__builtin_shufflevector( \
    (__v4df)(V), \
    (__v4df)(_mm256_setzero_pd()), \
    (((M) & 1) ? 2 : 0), \
    (((M) & 1) ? 3 : 1) );})

#define _mm256_extractf128_si256(V, M) __extension__ ({ \
  (__m128i)__builtin_shufflevector( \
    (__v4di)(V), \
    (__v4di)(_mm256_setzero_si256()), \
    (((M) & 1) ? 2 : 0), \
    (((M) & 1) ? 3 : 1) );})
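
/* Illustrative: _mm256_extractf128_pd(__v, 1) returns the upper two doubles
 * of __v, while M == 0 selects the lower half. */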
/* SIMD load ops (unaligned) */
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
{
  struct __loadu_ps {
    __m128 __v;
  } __attribute__((__packed__, __may_alias__));
  __m256 __v256 = _mm256_castps128_ps256(((struct __loadu_ps*)__addr_lo)->__v);
  return _mm256_insertf128_ps(__v256, ((struct __loadu_ps*)__addr_hi)->__v, 1);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
{
  struct __loadu_pd {
    __m128d __v;
  } __attribute__((__packed__, __may_alias__));
  __m256d __v256 = _mm256_castpd128_pd256(((struct __loadu_pd*)__addr_lo)->__v);
  return _mm256_insertf128_pd(__v256, ((struct __loadu_pd*)__addr_hi)->__v, 1);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo)
{
  struct __loadu_si128 {
    __m128i __v;
  } __attribute__((__packed__, __may_alias__));
  __m256i __v256 = _mm256_castsi128_si256(
      ((struct __loadu_si128*)__addr_lo)->__v);
  return _mm256_insertf128_si256(__v256,
                                 ((struct __loadu_si128*)__addr_hi)->__v, 1);
}
/* SIMD store ops (unaligned) */
static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
{
  __m128 __v128;

  __v128 = _mm256_castps256_ps128(__a);
  __builtin_ia32_storeups(__addr_lo, __v128);
  __v128 = _mm256_extractf128_ps(__a, 1);
  __builtin_ia32_storeups(__addr_hi, __v128);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
{
  __m128d __v128;

  __v128 = _mm256_castpd256_pd128(__a);
  __builtin_ia32_storeupd(__addr_lo, __v128);
  __v128 = _mm256_extractf128_pd(__a, 1);
  __builtin_ia32_storeupd(__addr_hi, __v128);
}

static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a)
{
  __m128i __v128;

  __v128 = _mm256_castsi256_si128(__a);
  __builtin_ia32_storedqu((char *)__addr_lo, (__v16qi)__v128);
  __v128 = _mm256_extractf128_si256(__a, 1);
  __builtin_ia32_storedqu((char *)__addr_hi, (__v16qi)__v128);
}
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_set_m128 (__m128 __hi, __m128 __lo) {
  return (__m256) __builtin_shufflevector(__lo, __hi, 0, 1, 2, 3, 4, 5, 6, 7);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_set_m128d (__m128d __hi, __m128d __lo) {
  return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_m128i (__m128i __hi, __m128i __lo) {
  return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}

static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_setr_m128 (__m128 __lo, __m128 __hi) {
  return _mm256_set_m128(__hi, __lo);
}

static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_setr_m128d (__m128d __lo, __m128d __hi) {
  return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}

static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_m128i (__m128i __lo, __m128i __hi) {
  return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}
#undef __DEFAULT_FN_ATTRS

#endif /* __AVXINTRIN_H */