/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "vpx_dsp/mips/inv_txfm_msa.h"
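
/* Row-pass helper: load an 8x32 block of coefficients and transpose it as
 * four 8x8 tiles into tmp_buf (stride of 8 int16_t elements), so the 1-D
 * transform below can operate on the transposed data. */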
static void idct32x8_row_transpose_store(const int16_t *input,
                                         int16_t *tmp_buf) {
  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

  /* 1st & 2nd 8x8 */
  LD_SH8(input, 32, m0, n0, m1, n1, m2, n2, m3, n3);
  LD_SH8((input + 8), 32, m4, n4, m5, n5, m6, n6, m7, n7);
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2,
                     m3, n3);
  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6,
                     m7, n7);
  ST_SH8(m0, n0, m1, n1, m2, n2, m3, n3, (tmp_buf), 8);
  ST_SH4(m4, n4, m5, n5, (tmp_buf + 8 * 8), 8);
  ST_SH4(m6, n6, m7, n7, (tmp_buf + 12 * 8), 8);

  /* 3rd & 4th 8x8 */
  LD_SH8((input + 16), 32, m0, n0, m1, n1, m2, n2, m3, n3);
  LD_SH8((input + 24), 32, m4, n4, m5, n5, m6, n6, m7, n7);
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2,
                     m3, n3);
  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6,
                     m7, n7);
  ST_SH4(m0, n0, m1, n1, (tmp_buf + 16 * 8), 8);
  ST_SH4(m2, n2, m3, n3, (tmp_buf + 20 * 8), 8);
  ST_SH4(m4, n4, m5, n5, (tmp_buf + 24 * 8), 8);
  ST_SH4(m6, n6, m7, n7, (tmp_buf + 28 * 8), 8);
}
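
/* Even half of the 32-point 1-D transform for the row pass: processes the
 * 16 even-indexed columns of the transposed block and stores the 16
 * intermediate results to tmp_eve_buf. */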
static void idct32x8_row_even_process_store(int16_t *tmp_buf,
                                            int16_t *tmp_eve_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
  v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;

  /* Even stage 1 */
  LD_SH8(tmp_buf, 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);

  DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
  DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
  BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
  DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);

  loc1 = vec3;
  loc0 = vec1;

  DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
  DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
  BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
  BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
  BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);

  /* Even stage 2 */
  LD_SH8((tmp_buf + 16), 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
  DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
  DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);

  vec0 = reg0 + reg4;
  reg0 = reg0 - reg4;
  reg4 = reg6 + reg2;
  reg6 = reg6 - reg2;
  reg2 = reg1 + reg5;
  reg1 = reg1 - reg5;
  reg5 = reg7 + reg3;
  reg7 = reg7 - reg3;
  reg3 = vec0;

  vec1 = reg2;
  reg2 = reg3 + reg4;
  reg3 = reg3 - reg4;
  reg4 = reg5 - vec1;
  reg5 = reg5 + vec1;

  DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
  DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);

  vec0 = reg0 - reg6;
  reg0 = reg0 + reg6;
  vec1 = reg7 - reg1;
  reg7 = reg7 + reg1;

  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
  DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);

  /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
  BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 15 * 8));
  ST_SH(loc1, (tmp_eve_buf));
  ST_SH(loc2, (tmp_eve_buf + 14 * 8));
  ST_SH(loc3, (tmp_eve_buf + 8));

  BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 13 * 8));
  ST_SH(loc1, (tmp_eve_buf + 2 * 8));
  ST_SH(loc2, (tmp_eve_buf + 12 * 8));
  ST_SH(loc3, (tmp_eve_buf + 3 * 8));

  /* Store 8 */
  BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 11 * 8));
  ST_SH(loc1, (tmp_eve_buf + 4 * 8));
  ST_SH(loc2, (tmp_eve_buf + 10 * 8));
  ST_SH(loc3, (tmp_eve_buf + 5 * 8));

  BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 9 * 8));
  ST_SH(loc1, (tmp_eve_buf + 6 * 8));
  ST_SH(loc2, (tmp_eve_buf + 8 * 8));
  ST_SH(loc3, (tmp_eve_buf + 7 * 8));
}
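
/* Odd half of the 32-point 1-D transform for the row pass: processes the
 * 16 odd-indexed columns of the transposed block and stores the 16
 * intermediate results to tmp_odd_buf. */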
static void idct32x8_row_odd_process_store(int16_t *tmp_buf,
                                           int16_t *tmp_odd_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;

  /* Odd stage 1 */
  reg0 = LD_SH(tmp_buf + 8);
  reg1 = LD_SH(tmp_buf + 7 * 8);
  reg2 = LD_SH(tmp_buf + 9 * 8);
  reg3 = LD_SH(tmp_buf + 15 * 8);
  reg4 = LD_SH(tmp_buf + 17 * 8);
  reg5 = LD_SH(tmp_buf + 23 * 8);
  reg6 = LD_SH(tmp_buf + 25 * 8);
  reg7 = LD_SH(tmp_buf + 31 * 8);

  DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
  DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);

  vec0 = reg0 + reg3;
  reg0 = reg0 - reg3;
  reg3 = reg7 + reg4;
  reg7 = reg7 - reg4;
  reg4 = reg1 + reg2;
  reg1 = reg1 - reg2;
  reg2 = reg6 + reg5;
  reg6 = reg6 - reg5;
  reg5 = vec0;

  /* 4 Stores */
  ADD2(reg5, reg4, reg3, reg2, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8);

  SUB2(reg5, reg4, reg3, reg2, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf), 8);

  /* 4 Stores */
  DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
  DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
  BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8);

  DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
  ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8);

  /* Odd stage 2 */
  /* 8 loads */
  reg0 = LD_SH(tmp_buf + 3 * 8);
  reg1 = LD_SH(tmp_buf + 5 * 8);
  reg2 = LD_SH(tmp_buf + 11 * 8);
  reg3 = LD_SH(tmp_buf + 13 * 8);
  reg4 = LD_SH(tmp_buf + 19 * 8);
  reg5 = LD_SH(tmp_buf + 21 * 8);
  reg6 = LD_SH(tmp_buf + 27 * 8);
  reg7 = LD_SH(tmp_buf + 29 * 8);

  DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
  DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
  DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
  DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);

  /* 4 Stores */
  SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec0, vec1, vec2, vec3);
  DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
  DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);

  BUTTERFLY_4(loc3, loc2, loc0, loc1, vec1, vec0, vec2, vec3);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8);

  DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);

  /* 4 Stores */
  ADD4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec1, vec2, vec0, vec3);
  BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
  ST_SH(reg0, (tmp_odd_buf + 13 * 8));
  ST_SH(reg1, (tmp_odd_buf + 14 * 8));

  DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
  ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8);

  /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */
  /* Load 8 & Store 8 */
  LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
  LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);

  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);

  SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

  SUB2(reg2, reg6, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8);

  /* Load 8 & Store 8 */
  LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
  LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);

  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);

  SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

  SUB2(reg1, reg5, reg2, reg6, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8);
}
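
/* Final butterfly of the row pass: combines the even and odd halves,
 * transposes the resulting 8x32 block back to row order and stores it to
 * dst (stride 32); tmp_buf is reused as scratch for half of the butterfly
 * outputs. */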
static void idct_butterfly_transpose_store(int16_t *tmp_buf,
                                           int16_t *tmp_eve_buf,
                                           int16_t *tmp_odd_buf,
                                           int16_t *dst) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

  /* FINAL BUTTERFLY : Dependency on Even & Odd */
  vec0 = LD_SH(tmp_odd_buf);
  vec1 = LD_SH(tmp_odd_buf + 9 * 8);
  vec2 = LD_SH(tmp_odd_buf + 14 * 8);
  vec3 = LD_SH(tmp_odd_buf + 6 * 8);
  loc0 = LD_SH(tmp_eve_buf);
  loc1 = LD_SH(tmp_eve_buf + 8 * 8);
  loc2 = LD_SH(tmp_eve_buf + 4 * 8);
  loc3 = LD_SH(tmp_eve_buf + 12 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
  ST_SH((loc0 - vec3), (tmp_buf + 31 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 23 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 27 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 19 * 8));

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 4 * 8);
  vec1 = LD_SH(tmp_odd_buf + 13 * 8);
  vec2 = LD_SH(tmp_odd_buf + 10 * 8);
  vec3 = LD_SH(tmp_odd_buf + 3 * 8);
  loc0 = LD_SH(tmp_eve_buf + 2 * 8);
  loc1 = LD_SH(tmp_eve_buf + 10 * 8);
  loc2 = LD_SH(tmp_eve_buf + 6 * 8);
  loc3 = LD_SH(tmp_eve_buf + 14 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
  ST_SH((loc0 - vec3), (tmp_buf + 29 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 21 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 25 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 17 * 8));

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 2 * 8);
  vec1 = LD_SH(tmp_odd_buf + 11 * 8);
  vec2 = LD_SH(tmp_odd_buf + 12 * 8);
  vec3 = LD_SH(tmp_odd_buf + 7 * 8);
  loc0 = LD_SH(tmp_eve_buf + 1 * 8);
  loc1 = LD_SH(tmp_eve_buf + 9 * 8);
  loc2 = LD_SH(tmp_eve_buf + 5 * 8);
  loc3 = LD_SH(tmp_eve_buf + 13 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
  ST_SH((loc0 - vec3), (tmp_buf + 30 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 22 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 26 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 18 * 8));

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 5 * 8);
  vec1 = LD_SH(tmp_odd_buf + 15 * 8);
  vec2 = LD_SH(tmp_odd_buf + 8 * 8);
  vec3 = LD_SH(tmp_odd_buf + 1 * 8);
  loc0 = LD_SH(tmp_eve_buf + 3 * 8);
  loc1 = LD_SH(tmp_eve_buf + 11 * 8);
  loc2 = LD_SH(tmp_eve_buf + 7 * 8);
  loc3 = LD_SH(tmp_eve_buf + 15 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
  ST_SH((loc0 - vec3), (tmp_buf + 28 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 20 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 24 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 16 * 8));

  /* Transpose : 16 vectors */
  /* 1st & 2nd 8x8 */
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2,
                     m3, n3);
  ST_SH4(m0, n0, m1, n1, (dst + 0), 32);
  ST_SH4(m2, n2, m3, n3, (dst + 4 * 32), 32);

  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6,
                     m7, n7);
  ST_SH4(m4, n4, m5, n5, (dst + 8), 32);
  ST_SH4(m6, n6, m7, n7, (dst + 8 + 4 * 32), 32);

  /* 3rd & 4th 8x8 */
  LD_SH8((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3);
  LD_SH8((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7);
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2,
                     m3, n3);
  ST_SH4(m0, n0, m1, n1, (dst + 16), 32);
  ST_SH4(m2, n2, m3, n3, (dst + 16 + 4 * 32), 32);

  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6,
                     m7, n7);
  ST_SH4(m4, n4, m5, n5, (dst + 24), 32);
  ST_SH4(m6, n6, m7, n7, (dst + 24 + 4 * 32), 32);
}
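
/* 1-D 32-point row IDCT on an 8-row slice of the input block. */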
static void idct32x8_1d_rows_msa(const int16_t *input, int16_t *output) {
  DECLARE_ALIGNED(32, int16_t, tmp_buf[8 * 32]);
  DECLARE_ALIGNED(32, int16_t, tmp_odd_buf[16 * 8]);
  DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]);

  idct32x8_row_transpose_store(input, &tmp_buf[0]);
  idct32x8_row_even_process_store(&tmp_buf[0], &tmp_eve_buf[0]);
  idct32x8_row_odd_process_store(&tmp_buf[0], &tmp_odd_buf[0]);
  idct_butterfly_transpose_store(&tmp_buf[0], &tmp_eve_buf[0], &tmp_odd_buf[0],
                                 output);
}
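
/* Even half of the 32-point 1-D transform for the column pass: reads the
 * even rows of one 8-column slice of the row-pass output (stride 32) and
 * stores the 16 intermediate results to tmp_eve_buf. */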
static void idct8x32_column_even_process_store(int16_t *tmp_buf,
                                               int16_t *tmp_eve_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
  v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;

  /* Even stage 1 */
  LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
  tmp_buf += (2 * 32);

  DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
  DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
  BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
  DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);

  loc1 = vec3;
  loc0 = vec1;

  DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
  DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
  BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
  BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
  BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);

  /* Even stage 2 */
  /* Load 8 */
  LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);

  DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
  DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);

  vec0 = reg0 + reg4;
  reg0 = reg0 - reg4;
  reg4 = reg6 + reg2;
  reg6 = reg6 - reg2;
  reg2 = reg1 + reg5;
  reg1 = reg1 - reg5;
  reg5 = reg7 + reg3;
  reg7 = reg7 - reg3;
  reg3 = vec0;

  vec1 = reg2;
  reg2 = reg3 + reg4;
  reg3 = reg3 - reg4;
  reg4 = reg5 - vec1;
  reg5 = reg5 + vec1;

  DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
  DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);

  vec0 = reg0 - reg6;
  reg0 = reg0 + reg6;
  vec1 = reg7 - reg1;
  reg7 = reg7 + reg1;

  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
  DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);

  /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
  /* Store 8 */
  BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, tmp_eve_buf, 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 14 * 8), 8);

  BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, (tmp_eve_buf + 2 * 8), 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 12 * 8), 8);

  /* Store 8 */
  BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, (tmp_eve_buf + 4 * 8), 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 10 * 8), 8);

  BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, (tmp_eve_buf + 6 * 8), 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 8 * 8), 8);
}
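
/* Odd half of the 32-point 1-D transform for the column pass: reads the
 * odd rows of the 8-column slice and stores the 16 intermediate results to
 * tmp_odd_buf. */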
static void idct8x32_column_odd_process_store(int16_t *tmp_buf,
                                              int16_t *tmp_odd_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;

  /* Odd stage 1 */
  reg0 = LD_SH(tmp_buf + 32);
  reg1 = LD_SH(tmp_buf + 7 * 32);
  reg2 = LD_SH(tmp_buf + 9 * 32);
  reg3 = LD_SH(tmp_buf + 15 * 32);
  reg4 = LD_SH(tmp_buf + 17 * 32);
  reg5 = LD_SH(tmp_buf + 23 * 32);
  reg6 = LD_SH(tmp_buf + 25 * 32);
  reg7 = LD_SH(tmp_buf + 31 * 32);

  DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
  DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);

  vec0 = reg0 + reg3;
  reg0 = reg0 - reg3;
  reg3 = reg7 + reg4;
  reg7 = reg7 - reg4;
  reg4 = reg1 + reg2;
  reg1 = reg1 - reg2;
  reg2 = reg6 + reg5;
  reg6 = reg6 - reg5;
  reg5 = vec0;

  /* 4 Stores */
  ADD2(reg5, reg4, reg3, reg2, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8);

  SUB2(reg5, reg4, reg3, reg2, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
  ST_SH2(vec0, vec1, tmp_odd_buf, 8);

  /* 4 Stores */
  DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
  DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
  BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8);

  DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
  ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8);

  /* Odd stage 2 */
  /* 8 loads */
  reg0 = LD_SH(tmp_buf + 3 * 32);
  reg1 = LD_SH(tmp_buf + 5 * 32);
  reg2 = LD_SH(tmp_buf + 11 * 32);
  reg3 = LD_SH(tmp_buf + 13 * 32);
  reg4 = LD_SH(tmp_buf + 19 * 32);
  reg5 = LD_SH(tmp_buf + 21 * 32);
  reg6 = LD_SH(tmp_buf + 27 * 32);
  reg7 = LD_SH(tmp_buf + 29 * 32);

  DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
  DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
  DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
  DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);

  /* 4 Stores */
  SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec0, vec1, vec2, vec3);
  DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
  DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);

  BUTTERFLY_4(loc2, loc3, loc1, loc0, vec0, vec1, vec3, vec2);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8);

  DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);

  /* 4 Stores */
  ADD4(reg0, reg3, reg1, reg2, reg5, reg6, reg4, reg7, vec0, vec1, vec2, vec3);
  BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
  ST_SH2(reg0, reg1, (tmp_odd_buf + 13 * 8), 8);

  DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
  ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8);

  /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */
  /* Load 8 & Store 8 */
  LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
  LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);

  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);

  SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

  SUB2(reg2, reg6, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8);

  /* Load 8 & Store 8 */
  LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
  LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);

  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);

  SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

  SUB2(reg1, reg5, reg2, reg6, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8);
}
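
/* Final butterfly of the column pass: combines the even and odd halves,
 * rounds by 6 bits and adds the reconstructed residual to the destination
 * pixels, 8x4 pixels at a time. */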
static void idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf,
                                             int16_t *tmp_odd_buf, uint8_t *dst,
                                             int32_t dst_stride) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

  /* FINAL BUTTERFLY : Dependency on Even & Odd */
  vec0 = LD_SH(tmp_odd_buf);
  vec1 = LD_SH(tmp_odd_buf + 9 * 8);
  vec2 = LD_SH(tmp_odd_buf + 14 * 8);
  vec3 = LD_SH(tmp_odd_buf + 6 * 8);
  loc0 = LD_SH(tmp_eve_buf);
  loc1 = LD_SH(tmp_eve_buf + 8 * 8);
  loc2 = LD_SH(tmp_eve_buf + 4 * 8);
  loc3 = LD_SH(tmp_eve_buf + 12 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
  SRARI_H4_SH(m0, m2, m4, m6, 6);
  VP9_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);

  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m6, m2, m4, m0);
  SRARI_H4_SH(m0, m2, m4, m6, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride), m0, m2, m4,
                      m6);

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 4 * 8);
  vec1 = LD_SH(tmp_odd_buf + 13 * 8);
  vec2 = LD_SH(tmp_odd_buf + 10 * 8);
  vec3 = LD_SH(tmp_odd_buf + 3 * 8);
  loc0 = LD_SH(tmp_eve_buf + 2 * 8);
  loc1 = LD_SH(tmp_eve_buf + 10 * 8);
  loc2 = LD_SH(tmp_eve_buf + 6 * 8);
  loc3 = LD_SH(tmp_eve_buf + 14 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
  SRARI_H4_SH(m1, m3, m5, m7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride), m1, m3, m5, m7);

  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m7, m3, m5, m1);
  SRARI_H4_SH(m1, m3, m5, m7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride), m1, m3, m5,
                      m7);

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 2 * 8);
  vec1 = LD_SH(tmp_odd_buf + 11 * 8);
  vec2 = LD_SH(tmp_odd_buf + 12 * 8);
  vec3 = LD_SH(tmp_odd_buf + 7 * 8);
  loc0 = LD_SH(tmp_eve_buf + 1 * 8);
  loc1 = LD_SH(tmp_eve_buf + 9 * 8);
  loc2 = LD_SH(tmp_eve_buf + 5 * 8);
  loc3 = LD_SH(tmp_eve_buf + 13 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
  SRARI_H4_SH(n0, n2, n4, n6, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride), n0, n2, n4, n6);

  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n6, n2, n4, n0);
  SRARI_H4_SH(n0, n2, n4, n6, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride), n0, n2, n4,
                      n6);

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 5 * 8);
  vec1 = LD_SH(tmp_odd_buf + 15 * 8);
  vec2 = LD_SH(tmp_odd_buf + 8 * 8);
  vec3 = LD_SH(tmp_odd_buf + 1 * 8);
  loc0 = LD_SH(tmp_eve_buf + 3 * 8);
  loc1 = LD_SH(tmp_eve_buf + 11 * 8);
  loc2 = LD_SH(tmp_eve_buf + 7 * 8);
  loc3 = LD_SH(tmp_eve_buf + 15 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
  SRARI_H4_SH(n1, n3, n5, n7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride), n1, n3, n5, n7);

  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n7, n3, n5, n1);
  SRARI_H4_SH(n1, n3, n5, n7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride), n1, n3, n5,
                      n7);
}
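
/* 1-D 32-point column IDCT on one 8-column slice, with add to dst. */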
static void idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                           int32_t dst_stride) {
  DECLARE_ALIGNED(32, int16_t, tmp_odd_buf[16 * 8]);
  DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]);

  idct8x32_column_even_process_store(input, &tmp_eve_buf[0]);
  idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]);
  idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0], dst,
                                   dst_stride);
}
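
/* Full 32x32 inverse transform: four 32x8 row passes into an intermediate
 * buffer, then four 8x32 column passes that add the result to dst. */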
void vpx_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dst,
                                int32_t dst_stride) {
  int32_t i;
  DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
  int16_t *out_ptr = out_arr;

  /* transform rows */
  for (i = 0; i < 4; ++i) {
    /* process 32 * 8 block */
    idct32x8_1d_rows_msa((input + (i << 8)), (out_ptr + (i << 8)));
  }

  /* transform columns */
  for (i = 0; i < 4; ++i) {
    /* process 8 * 32 block */
    idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
                                   dst_stride);
  }
}
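
/* Sparse path (eob <= 34): all non-zero coefficients lie in the top-left
 * 8x8, so the intermediate buffer is zeroed, a single row pass handles the
 * first 8 rows, and the column passes run as in the full version. */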
void vpx_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst,
                              int32_t dst_stride) {
  int32_t i;
  DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
  int16_t *out_ptr = out_arr;

  for (i = 32; i--;) {
    __asm__ __volatile__(
        "sw     $zero,       0(%[out_ptr])     \n\t"
        "sw     $zero,       4(%[out_ptr])     \n\t"
        "sw     $zero,       8(%[out_ptr])     \n\t"
        "sw     $zero,      12(%[out_ptr])     \n\t"
        "sw     $zero,      16(%[out_ptr])     \n\t"
        "sw     $zero,      20(%[out_ptr])     \n\t"
        "sw     $zero,      24(%[out_ptr])     \n\t"
        "sw     $zero,      28(%[out_ptr])     \n\t"
        "sw     $zero,      32(%[out_ptr])     \n\t"
        "sw     $zero,      36(%[out_ptr])     \n\t"
        "sw     $zero,      40(%[out_ptr])     \n\t"
        "sw     $zero,      44(%[out_ptr])     \n\t"
        "sw     $zero,      48(%[out_ptr])     \n\t"
        "sw     $zero,      52(%[out_ptr])     \n\t"
        "sw     $zero,      56(%[out_ptr])     \n\t"
        "sw     $zero,      60(%[out_ptr])     \n\t"

        :
        : [out_ptr] "r"(out_ptr));

    out_ptr += 32;
  }

  out_ptr = out_arr;

  /* rows: only upper-left 8x8 has non-zero coeff */
  idct32x8_1d_rows_msa(input, out_ptr);

  /* transform columns */
  for (i = 0; i < 4; ++i) {
    /* process 8 * 32 block */
    idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
                                   dst_stride);
  }
}
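
/* DC-only path: the DC coefficient is scaled through both 1-D passes and
 * rounded, and the resulting constant is added to every destination pixel
 * with clipping. */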
void vpx_idct32x32_1_add_msa(const int16_t *input, uint8_t *dst,
                             int32_t dst_stride) {
  int32_t i;
  int16_t out;
  v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
  v8i16 res0, res1, res2, res3, res4, res5, res6, res7, vec;

  out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);
  out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);
  out = ROUND_POWER_OF_TWO(out, 6);

  vec = __msa_fill_h(out);

  for (i = 16; i--;) {
    LD_UB2(dst, 16, dst0, dst1);
    LD_UB2(dst + dst_stride, 16, dst2, dst3);

    UNPCK_UB_SH(dst0, res0, res4);
    UNPCK_UB_SH(dst1, res1, res5);
    UNPCK_UB_SH(dst2, res2, res6);
    UNPCK_UB_SH(dst3, res3, res7);
    ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
    ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7);
    CLIP_SH4_0_255(res0, res1, res2, res3);
    CLIP_SH4_0_255(res4, res5, res6, res7);
    PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3, tmp0, tmp1,
                tmp2, tmp3);

    ST_UB2(tmp0, tmp1, dst, 16);
    dst += dst_stride;
    ST_UB2(tmp2, tmp3, dst, 16);
    dst += dst_stride;
  }
}