// rotate_gcc.cc
/*
 *  Copyright 2015 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/rotate_row.h"
#include "libyuv/row.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// This module is for GCC x86 and x64.
#if !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__))

// Transpose 8x8. 32 or 64 bit, but not NaCL for 64 bit.
#if defined(HAS_TRANSPOSEWX8_SSSE3)
void TransposeWx8_SSSE3(const uint8_t* src,
                        int src_stride,
                        uint8_t* dst,
                        int dst_stride,
                        int width) {
  asm volatile(
      // Read in the data from the source pointer.
      // First round of bit swap.
      LABELALIGN
      "1: \n"
      "movq (%0),%%xmm0 \n"
      "movq (%0,%3),%%xmm1 \n"
      "lea (%0,%3,2),%0 \n"
      "punpcklbw %%xmm1,%%xmm0 \n"
      "movq (%0),%%xmm2 \n"
      "movdqa %%xmm0,%%xmm1 \n"
      "palignr $0x8,%%xmm1,%%xmm1 \n"
      "movq (%0,%3),%%xmm3 \n"
      "lea (%0,%3,2),%0 \n"
      "punpcklbw %%xmm3,%%xmm2 \n"
      "movdqa %%xmm2,%%xmm3 \n"
      "movq (%0),%%xmm4 \n"
      "palignr $0x8,%%xmm3,%%xmm3 \n"
      "movq (%0,%3),%%xmm5 \n"
      "lea (%0,%3,2),%0 \n"
      "punpcklbw %%xmm5,%%xmm4 \n"
      "movdqa %%xmm4,%%xmm5 \n"
      "movq (%0),%%xmm6 \n"
      "palignr $0x8,%%xmm5,%%xmm5 \n"
      "movq (%0,%3),%%xmm7 \n"
      "lea (%0,%3,2),%0 \n"
      "punpcklbw %%xmm7,%%xmm6 \n"
      "neg %3 \n"
      "movdqa %%xmm6,%%xmm7 \n"
      "lea 0x8(%0,%3,8),%0 \n"
      "palignr $0x8,%%xmm7,%%xmm7 \n"
      "neg %3 \n"
      // Second round of bit swap.
      "punpcklwd %%xmm2,%%xmm0 \n"
      "punpcklwd %%xmm3,%%xmm1 \n"
      "movdqa %%xmm0,%%xmm2 \n"
      "movdqa %%xmm1,%%xmm3 \n"
      "palignr $0x8,%%xmm2,%%xmm2 \n"
      "palignr $0x8,%%xmm3,%%xmm3 \n"
      "punpcklwd %%xmm6,%%xmm4 \n"
      "punpcklwd %%xmm7,%%xmm5 \n"
      "movdqa %%xmm4,%%xmm6 \n"
      "movdqa %%xmm5,%%xmm7 \n"
      "palignr $0x8,%%xmm6,%%xmm6 \n"
      "palignr $0x8,%%xmm7,%%xmm7 \n"
      // Third round of bit swap.
      // Write to the destination pointer.
      "punpckldq %%xmm4,%%xmm0 \n"
      "movq %%xmm0,(%1) \n"
      "movdqa %%xmm0,%%xmm4 \n"
      "palignr $0x8,%%xmm4,%%xmm4 \n"
      "movq %%xmm4,(%1,%4) \n"
      "lea (%1,%4,2),%1 \n"
      "punpckldq %%xmm6,%%xmm2 \n"
      "movdqa %%xmm2,%%xmm6 \n"
      "movq %%xmm2,(%1) \n"
      "palignr $0x8,%%xmm6,%%xmm6 \n"
      "punpckldq %%xmm5,%%xmm1 \n"
      "movq %%xmm6,(%1,%4) \n"
      "lea (%1,%4,2),%1 \n"
      "movdqa %%xmm1,%%xmm5 \n"
      "movq %%xmm1,(%1) \n"
      "palignr $0x8,%%xmm5,%%xmm5 \n"
      "movq %%xmm5,(%1,%4) \n"
      "lea (%1,%4,2),%1 \n"
      "punpckldq %%xmm7,%%xmm3 \n"
      "movq %%xmm3,(%1) \n"
      "movdqa %%xmm3,%%xmm7 \n"
      "palignr $0x8,%%xmm7,%%xmm7 \n"
      "sub $0x8,%2 \n"
      "movq %%xmm7,(%1,%4) \n"
      "lea (%1,%4,2),%1 \n"
      "jg 1b \n"
      : "+r"(src),                    // %0
        "+r"(dst),                    // %1
        "+r"(width)                   // %2
      : "r"((intptr_t)(src_stride)),  // %3
        "r"((intptr_t)(dst_stride))   // %4
      : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
        "xmm7");
}
#endif  // defined(HAS_TRANSPOSEWX8_SSSE3)
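
// For reference: a plain-C sketch of the transpose computed by
// TransposeWx8_SSSE3 (and by the wider _Fast_ variant below), modeled on
// libyuv's generic TransposeWx8_C fallback. Column i of an 8-row source
// block becomes row i of the destination. Illustrative only; kept out of
// the build with #if 0.
#if 0
static void TransposeWx8_Sketch(const uint8_t* src,
                                int src_stride,
                                uint8_t* dst,
                                int dst_stride,
                                int width) {
  for (int i = 0; i < width; ++i) {
    for (int j = 0; j < 8; ++j) {
      // dst row i, byte j <- src row j, byte i.
      dst[i * dst_stride + j] = src[j * src_stride + i];
    }
  }
}
#endif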

// Transpose 16x8. 64 bit.
#if defined(HAS_TRANSPOSEWX8_FAST_SSSE3)
void TransposeWx8_Fast_SSSE3(const uint8_t* src,
                             int src_stride,
                             uint8_t* dst,
                             int dst_stride,
                             int width) {
  asm volatile(
      // Read in the data from the source pointer.
      // First round of bit swap.
      LABELALIGN
      "1: \n"
      "movdqu (%0),%%xmm0 \n"
      "movdqu (%0,%3),%%xmm1 \n"
      "lea (%0,%3,2),%0 \n"
      "movdqa %%xmm0,%%xmm8 \n"
      "punpcklbw %%xmm1,%%xmm0 \n"
      "punpckhbw %%xmm1,%%xmm8 \n"
      "movdqu (%0),%%xmm2 \n"
      "movdqa %%xmm0,%%xmm1 \n"
      "movdqa %%xmm8,%%xmm9 \n"
      "palignr $0x8,%%xmm1,%%xmm1 \n"
      "palignr $0x8,%%xmm9,%%xmm9 \n"
      "movdqu (%0,%3),%%xmm3 \n"
      "lea (%0,%3,2),%0 \n"
      "movdqa %%xmm2,%%xmm10 \n"
      "punpcklbw %%xmm3,%%xmm2 \n"
      "punpckhbw %%xmm3,%%xmm10 \n"
      "movdqa %%xmm2,%%xmm3 \n"
      "movdqa %%xmm10,%%xmm11 \n"
      "movdqu (%0),%%xmm4 \n"
      "palignr $0x8,%%xmm3,%%xmm3 \n"
      "palignr $0x8,%%xmm11,%%xmm11 \n"
      "movdqu (%0,%3),%%xmm5 \n"
      "lea (%0,%3,2),%0 \n"
      "movdqa %%xmm4,%%xmm12 \n"
      "punpcklbw %%xmm5,%%xmm4 \n"
      "punpckhbw %%xmm5,%%xmm12 \n"
      "movdqa %%xmm4,%%xmm5 \n"
      "movdqa %%xmm12,%%xmm13 \n"
      "movdqu (%0),%%xmm6 \n"
      "palignr $0x8,%%xmm5,%%xmm5 \n"
      "palignr $0x8,%%xmm13,%%xmm13 \n"
      "movdqu (%0,%3),%%xmm7 \n"
      "lea (%0,%3,2),%0 \n"
      "movdqa %%xmm6,%%xmm14 \n"
      "punpcklbw %%xmm7,%%xmm6 \n"
      "punpckhbw %%xmm7,%%xmm14 \n"
      "neg %3 \n"
      "movdqa %%xmm6,%%xmm7 \n"
      "movdqa %%xmm14,%%xmm15 \n"
      "lea 0x10(%0,%3,8),%0 \n"
      "palignr $0x8,%%xmm7,%%xmm7 \n"
      "palignr $0x8,%%xmm15,%%xmm15 \n"
      "neg %3 \n"
      // Second round of bit swap.
      "punpcklwd %%xmm2,%%xmm0 \n"
      "punpcklwd %%xmm3,%%xmm1 \n"
      "movdqa %%xmm0,%%xmm2 \n"
      "movdqa %%xmm1,%%xmm3 \n"
      "palignr $0x8,%%xmm2,%%xmm2 \n"
      "palignr $0x8,%%xmm3,%%xmm3 \n"
      "punpcklwd %%xmm6,%%xmm4 \n"
      "punpcklwd %%xmm7,%%xmm5 \n"
      "movdqa %%xmm4,%%xmm6 \n"
      "movdqa %%xmm5,%%xmm7 \n"
      "palignr $0x8,%%xmm6,%%xmm6 \n"
      "palignr $0x8,%%xmm7,%%xmm7 \n"
      "punpcklwd %%xmm10,%%xmm8 \n"
      "punpcklwd %%xmm11,%%xmm9 \n"
      "movdqa %%xmm8,%%xmm10 \n"
      "movdqa %%xmm9,%%xmm11 \n"
      "palignr $0x8,%%xmm10,%%xmm10 \n"
      "palignr $0x8,%%xmm11,%%xmm11 \n"
      "punpcklwd %%xmm14,%%xmm12 \n"
      "punpcklwd %%xmm15,%%xmm13 \n"
      "movdqa %%xmm12,%%xmm14 \n"
      "movdqa %%xmm13,%%xmm15 \n"
      "palignr $0x8,%%xmm14,%%xmm14 \n"
      "palignr $0x8,%%xmm15,%%xmm15 \n"
      // Third round of bit swap.
      // Write to the destination pointer.
      "punpckldq %%xmm4,%%xmm0 \n"
      "movq %%xmm0,(%1) \n"
      "movdqa %%xmm0,%%xmm4 \n"
      "palignr $0x8,%%xmm4,%%xmm4 \n"
      "movq %%xmm4,(%1,%4) \n"
      "lea (%1,%4,2),%1 \n"
      "punpckldq %%xmm6,%%xmm2 \n"
      "movdqa %%xmm2,%%xmm6 \n"
      "movq %%xmm2,(%1) \n"
      "palignr $0x8,%%xmm6,%%xmm6 \n"
      "punpckldq %%xmm5,%%xmm1 \n"
      "movq %%xmm6,(%1,%4) \n"
      "lea (%1,%4,2),%1 \n"
      "movdqa %%xmm1,%%xmm5 \n"
      "movq %%xmm1,(%1) \n"
      "palignr $0x8,%%xmm5,%%xmm5 \n"
      "movq %%xmm5,(%1,%4) \n"
      "lea (%1,%4,2),%1 \n"
      "punpckldq %%xmm7,%%xmm3 \n"
      "movq %%xmm3,(%1) \n"
      "movdqa %%xmm3,%%xmm7 \n"
      "palignr $0x8,%%xmm7,%%xmm7 \n"
      "movq %%xmm7,(%1,%4) \n"
      "lea (%1,%4,2),%1 \n"
      "punpckldq %%xmm12,%%xmm8 \n"
      "movq %%xmm8,(%1) \n"
      "movdqa %%xmm8,%%xmm12 \n"
      "palignr $0x8,%%xmm12,%%xmm12 \n"
      "movq %%xmm12,(%1,%4) \n"
      "lea (%1,%4,2),%1 \n"
      "punpckldq %%xmm14,%%xmm10 \n"
      "movdqa %%xmm10,%%xmm14 \n"
      "movq %%xmm10,(%1) \n"
      "palignr $0x8,%%xmm14,%%xmm14 \n"
      "punpckldq %%xmm13,%%xmm9 \n"
      "movq %%xmm14,(%1,%4) \n"
      "lea (%1,%4,2),%1 \n"
      "movdqa %%xmm9,%%xmm13 \n"
      "movq %%xmm9,(%1) \n"
      "palignr $0x8,%%xmm13,%%xmm13 \n"
      "movq %%xmm13,(%1,%4) \n"
      "lea (%1,%4,2),%1 \n"
      "punpckldq %%xmm15,%%xmm11 \n"
      "movq %%xmm11,(%1) \n"
      "movdqa %%xmm11,%%xmm15 \n"
      "palignr $0x8,%%xmm15,%%xmm15 \n"
      "sub $0x10,%2 \n"
      "movq %%xmm15,(%1,%4) \n"
      "lea (%1,%4,2),%1 \n"
      "jg 1b \n"
      : "+r"(src),                    // %0
        "+r"(dst),                    // %1
        "+r"(width)                   // %2
      : "r"((intptr_t)(src_stride)),  // %3
        "r"((intptr_t)(dst_stride))   // %4
      : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
        "xmm7", "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14",
        "xmm15");
}
#endif  // defined(HAS_TRANSPOSEWX8_FAST_SSSE3)

// Transpose UV 8x8. 64 bit.
#if defined(HAS_TRANSPOSEUVWX8_SSE2)
void TransposeUVWx8_SSE2(const uint8_t* src,
                         int src_stride,
                         uint8_t* dst_a,
                         int dst_stride_a,
                         uint8_t* dst_b,
                         int dst_stride_b,
                         int width) {
  asm volatile(
      // Read in the data from the source pointer.
      // First round of bit swap.
      LABELALIGN
      "1: \n"
      "movdqu (%0),%%xmm0 \n"
      "movdqu (%0,%4),%%xmm1 \n"
      "lea (%0,%4,2),%0 \n"
      "movdqa %%xmm0,%%xmm8 \n"
      "punpcklbw %%xmm1,%%xmm0 \n"
      "punpckhbw %%xmm1,%%xmm8 \n"
      "movdqa %%xmm8,%%xmm1 \n"
      "movdqu (%0),%%xmm2 \n"
      "movdqu (%0,%4),%%xmm3 \n"
      "lea (%0,%4,2),%0 \n"
      "movdqa %%xmm2,%%xmm8 \n"
      "punpcklbw %%xmm3,%%xmm2 \n"
      "punpckhbw %%xmm3,%%xmm8 \n"
      "movdqa %%xmm8,%%xmm3 \n"
      "movdqu (%0),%%xmm4 \n"
      "movdqu (%0,%4),%%xmm5 \n"
      "lea (%0,%4,2),%0 \n"
      "movdqa %%xmm4,%%xmm8 \n"
      "punpcklbw %%xmm5,%%xmm4 \n"
      "punpckhbw %%xmm5,%%xmm8 \n"
      "movdqa %%xmm8,%%xmm5 \n"
      "movdqu (%0),%%xmm6 \n"
      "movdqu (%0,%4),%%xmm7 \n"
      "lea (%0,%4,2),%0 \n"
      "movdqa %%xmm6,%%xmm8 \n"
      "punpcklbw %%xmm7,%%xmm6 \n"
      "neg %4 \n"
      "lea 0x10(%0,%4,8),%0 \n"
      "punpckhbw %%xmm7,%%xmm8 \n"
      "movdqa %%xmm8,%%xmm7 \n"
      "neg %4 \n"
      // Second round of bit swap.
      "movdqa %%xmm0,%%xmm8 \n"
      "movdqa %%xmm1,%%xmm9 \n"
      "punpckhwd %%xmm2,%%xmm8 \n"
      "punpckhwd %%xmm3,%%xmm9 \n"
      "punpcklwd %%xmm2,%%xmm0 \n"
      "punpcklwd %%xmm3,%%xmm1 \n"
      "movdqa %%xmm8,%%xmm2 \n"
      "movdqa %%xmm9,%%xmm3 \n"
      "movdqa %%xmm4,%%xmm8 \n"
      "movdqa %%xmm5,%%xmm9 \n"
      "punpckhwd %%xmm6,%%xmm8 \n"
      "punpckhwd %%xmm7,%%xmm9 \n"
      "punpcklwd %%xmm6,%%xmm4 \n"
      "punpcklwd %%xmm7,%%xmm5 \n"
      "movdqa %%xmm8,%%xmm6 \n"
      "movdqa %%xmm9,%%xmm7 \n"
      // Third round of bit swap.
      // Write to the destination pointer.
      "movdqa %%xmm0,%%xmm8 \n"
      "punpckldq %%xmm4,%%xmm0 \n"
      "movlpd %%xmm0,(%1) \n"  // Write back U channel
      "movhpd %%xmm0,(%2) \n"  // Write back V channel
      "punpckhdq %%xmm4,%%xmm8 \n"
      "movlpd %%xmm8,(%1,%5) \n"
      "lea (%1,%5,2),%1 \n"
      "movhpd %%xmm8,(%2,%6) \n"
      "lea (%2,%6,2),%2 \n"
      "movdqa %%xmm2,%%xmm8 \n"
      "punpckldq %%xmm6,%%xmm2 \n"
      "movlpd %%xmm2,(%1) \n"
      "movhpd %%xmm2,(%2) \n"
      "punpckhdq %%xmm6,%%xmm8 \n"
      "movlpd %%xmm8,(%1,%5) \n"
      "lea (%1,%5,2),%1 \n"
      "movhpd %%xmm8,(%2,%6) \n"
      "lea (%2,%6,2),%2 \n"
      "movdqa %%xmm1,%%xmm8 \n"
      "punpckldq %%xmm5,%%xmm1 \n"
      "movlpd %%xmm1,(%1) \n"
      "movhpd %%xmm1,(%2) \n"
      "punpckhdq %%xmm5,%%xmm8 \n"
      "movlpd %%xmm8,(%1,%5) \n"
      "lea (%1,%5,2),%1 \n"
      "movhpd %%xmm8,(%2,%6) \n"
      "lea (%2,%6,2),%2 \n"
      "movdqa %%xmm3,%%xmm8 \n"
      "punpckldq %%xmm7,%%xmm3 \n"
      "movlpd %%xmm3,(%1) \n"
      "movhpd %%xmm3,(%2) \n"
      "punpckhdq %%xmm7,%%xmm8 \n"
      "sub $0x8,%3 \n"
      "movlpd %%xmm8,(%1,%5) \n"
      "lea (%1,%5,2),%1 \n"
      "movhpd %%xmm8,(%2,%6) \n"
      "lea (%2,%6,2),%2 \n"
      "jg 1b \n"
      : "+r"(src),                      // %0
        "+r"(dst_a),                    // %1
        "+r"(dst_b),                    // %2
        "+r"(width)                     // %3
      : "r"((intptr_t)(src_stride)),    // %4
        "r"((intptr_t)(dst_stride_a)),  // %5
        "r"((intptr_t)(dst_stride_b))   // %6
      : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
        "xmm7", "xmm8", "xmm9");
}
#endif  // defined(HAS_TRANSPOSEUVWX8_SSE2)
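
// For reference: a plain-C sketch of the interleaved-UV transpose above,
// modeled on libyuv's generic TransposeUVWx8_C fallback. Each source row
// holds width U/V byte pairs; column i is split into one row of U bytes
// (dst_a) and one row of V bytes (dst_b). Illustrative only; kept out of
// the build with #if 0.
#if 0
static void TransposeUVWx8_Sketch(const uint8_t* src,
                                  int src_stride,
                                  uint8_t* dst_a,
                                  int dst_stride_a,
                                  uint8_t* dst_b,
                                  int dst_stride_b,
                                  int width) {
  for (int i = 0; i < width; ++i) {
    for (int j = 0; j < 8; ++j) {
      dst_a[i * dst_stride_a + j] = src[j * src_stride + i * 2 + 0];  // U
      dst_b[i * dst_stride_b + j] = src[j * src_stride + i * 2 + 1];  // V
    }
  }
}
#endif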

#if defined(HAS_TRANSPOSE4X4_32_SSE2)
// 4 values, little endian view
// a b c d
// e f g h
// i j k l
// m n o p
// transpose 2x2
// a e b f   from row 0, 1
// i m j n   from row 2, 3
// c g d h   from row 0, 1
// k o l p   from row 2, 3
// transpose 4x4
// a e i m   from row 0, 1
// b f j n   from row 0, 1
// c g k o   from row 2, 3
// d h l p   from row 2, 3
// Transpose 32 bit values (ARGB)
void Transpose4x4_32_SSE2(const uint8_t* src,
                          int src_stride,
                          uint8_t* dst,
                          int dst_stride,
                          int width) {
  asm volatile(
      // Main loop transpose 4x4. Read a column, write a row.
      "1: \n"
      "movdqu (%0),%%xmm0 \n"     // a b c d
      "movdqu (%0,%3),%%xmm1 \n"  // e f g h
      "lea (%0,%3,2),%0 \n"       // src += stride * 2
      "movdqu (%0),%%xmm2 \n"     // i j k l
      "movdqu (%0,%3),%%xmm3 \n"  // m n o p
      "lea (%0,%3,2),%0 \n"       // src += stride * 2
      // Transpose 2x2
      "movdqa %%xmm0,%%xmm4 \n"
      "movdqa %%xmm2,%%xmm5 \n"
      "movdqa %%xmm0,%%xmm6 \n"
      "movdqa %%xmm2,%%xmm7 \n"
      "punpckldq %%xmm1,%%xmm4 \n"  // a e b f   from row 0, 1
      "punpckldq %%xmm3,%%xmm5 \n"  // i m j n   from row 2, 3
      "punpckhdq %%xmm1,%%xmm6 \n"  // c g d h   from row 0, 1
      "punpckhdq %%xmm3,%%xmm7 \n"  // k o l p   from row 2, 3
      // Transpose 4x4
      "movdqa %%xmm4,%%xmm0 \n"
      "movdqa %%xmm4,%%xmm1 \n"
      "movdqa %%xmm6,%%xmm2 \n"
      "movdqa %%xmm6,%%xmm3 \n"
      "punpcklqdq %%xmm5,%%xmm0 \n"  // a e i m   from row 0, 1
      "punpckhqdq %%xmm5,%%xmm1 \n"  // b f j n   from row 0, 1
      "punpcklqdq %%xmm7,%%xmm2 \n"  // c g k o   from row 2, 3
      "punpckhqdq %%xmm7,%%xmm3 \n"  // d h l p   from row 2, 3
      "movdqu %%xmm0,(%1) \n"
      "lea 16(%1,%4),%1 \n"  // dst += stride + 16
      "movdqu %%xmm1,-16(%1) \n"
      "movdqu %%xmm2,-16(%1,%4) \n"
      "movdqu %%xmm3,-16(%1,%4,2) \n"
      "sub %4,%1 \n"
      "sub $0x4,%2 \n"
      "jg 1b \n"
      : "+r"(src),                     // %0
        "+r"(dst),                     // %1
        "+rm"(width)                   // %2
      : "r"((ptrdiff_t)(src_stride)),  // %3
        "r"((ptrdiff_t)(dst_stride))   // %4
      : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
        "xmm7");
}
#endif  // defined(HAS_TRANSPOSE4X4_32_SSE2)
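
// For reference: a plain-C sketch of the 32-bit (e.g. ARGB pixel) transpose
// performed above: width rows of four 32-bit values become four rows of
// width values, copied byte by byte to avoid alignment assumptions.
// Illustrative only; kept out of the build with #if 0.
#if 0
static void Transpose4x4_32_Sketch(const uint8_t* src,
                                   int src_stride,
                                   uint8_t* dst,
                                   int dst_stride,
                                   int width) {
  for (int i = 0; i < width; ++i) {
    for (int j = 0; j < 4; ++j) {
      // dst 32-bit element (row j, column i) <- src element (row i, column j).
      for (int b = 0; b < 4; ++b) {
        dst[j * dst_stride + i * 4 + b] = src[i * src_stride + j * 4 + b];
      }
    }
  }
}
#endif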

#if defined(HAS_TRANSPOSE4X4_32_AVX2)
// Transpose 32 bit values (ARGB)
void Transpose4x4_32_AVX2(const uint8_t* src,
                          int src_stride,
                          uint8_t* dst,
                          int dst_stride,
                          int width) {
  asm volatile(
      // Main loop transpose 2 blocks of 4x4. Read a column, write a row.
      "1: \n"
      "vmovdqu (%0),%%xmm0 \n"     // a b c d
      "vmovdqu (%0,%3),%%xmm1 \n"  // e f g h
      "lea (%0,%3,2),%0 \n"        // src += stride * 2
      "vmovdqu (%0),%%xmm2 \n"     // i j k l
      "vmovdqu (%0,%3),%%xmm3 \n"  // m n o p
      "lea (%0,%3,2),%0 \n"        // src += stride * 2
      "vinserti128 $1,(%0),%%ymm0,%%ymm0 \n"     // a b c d
      "vinserti128 $1,(%0,%3),%%ymm1,%%ymm1 \n"  // e f g h
      "lea (%0,%3,2),%0 \n"                      // src += stride * 2
      "vinserti128 $1,(%0),%%ymm2,%%ymm2 \n"     // i j k l
      "vinserti128 $1,(%0,%3),%%ymm3,%%ymm3 \n"  // m n o p
      "lea (%0,%3,2),%0 \n"                      // src += stride * 2
      // Transpose 2x2
      "vpunpckldq %%ymm1,%%ymm0,%%ymm4 \n"  // a e b f   from row 0, 1
      "vpunpckldq %%ymm3,%%ymm2,%%ymm5 \n"  // i m j n   from row 2, 3
      "vpunpckhdq %%ymm1,%%ymm0,%%ymm6 \n"  // c g d h   from row 0, 1
      "vpunpckhdq %%ymm3,%%ymm2,%%ymm7 \n"  // k o l p   from row 2, 3
      // Transpose 4x4
      "vpunpcklqdq %%ymm5,%%ymm4,%%ymm0 \n"  // a e i m   from row 0, 1
      "vpunpckhqdq %%ymm5,%%ymm4,%%ymm1 \n"  // b f j n   from row 0, 1
      "vpunpcklqdq %%ymm7,%%ymm6,%%ymm2 \n"  // c g k o   from row 2, 3
      "vpunpckhqdq %%ymm7,%%ymm6,%%ymm3 \n"  // d h l p   from row 2, 3
      "vmovdqu %%ymm0,(%1) \n"
      "lea 32(%1,%4),%1 \n"  // dst += stride + 32
      "vmovdqu %%ymm1,-32(%1) \n"
      "vmovdqu %%ymm2,-32(%1,%4) \n"
      "vmovdqu %%ymm3,-32(%1,%4,2) \n"
      "sub %4,%1 \n"
      "sub $0x8,%2 \n"
      "jg 1b \n"
      "vzeroupper \n"
      : "+r"(src),                     // %0
        "+r"(dst),                     // %1
        "+rm"(width)                   // %2
      : "r"((ptrdiff_t)(src_stride)),  // %3
        "r"((ptrdiff_t)(dst_stride))   // %4
      : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
        "xmm7");
}
#endif  // defined(HAS_TRANSPOSE4X4_32_AVX2)
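
// For context: higher-level rotate code (TransposePlane in rotate.cc) selects
// one of these kernels at runtime and applies it to successive 8-row strips,
// moving 8 columns across the destination per strip. Below is a simplified
// sketch of that pattern, assuming libyuv's TestCpuFlag()/kCpuHasSSSE3 from
// cpu_id.h; the real function has more kernel choices and also handles the
// leftover (height % 8) rows. Illustrative only; kept out of the build with
// #if 0.
#if 0
static void TransposePlane_Sketch(const uint8_t* src,
                                  int src_stride,
                                  uint8_t* dst,
                                  int dst_stride,
                                  int width,
                                  int height) {
  void (*TransposeWx8)(const uint8_t* src, int src_stride, uint8_t* dst,
                       int dst_stride, int width) = TransposeWx8_C;
#if defined(HAS_TRANSPOSEWX8_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    TransposeWx8 = TransposeWx8_SSSE3;
  }
#endif
  int i = height;
  while (i >= 8) {
    TransposeWx8(src, src_stride, dst, dst_stride, width);
    src += 8 * src_stride;  // Advance 8 source rows down.
    dst += 8;               // Advance 8 destination columns right.
    i -= 8;
  }
}
#endif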
#endif  // defined(__x86_64__) || defined(__i386__)

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif