
/*
 *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/convolve_common_dspr2.h"
#include "vpx_dsp/vpx_convolve.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_ports/mem.h"

#if HAVE_DSPR2
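/* Vertical 8-tap filter plus destination averaging for block widths that are
 * multiples of 4 (4, 8, 16 and 32). Each of the four DSP accumulators collects
 * the 8-tap dot product for one of four output pixels per inner iteration; the
 * rounded result is clipped through vpx_ff_cropTbl (lbux) and averaged with
 * the byte already in dst using addqh_r.w (add, halve, round). */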
static void convolve_avg_vert_4_dspr2(const uint8_t *src, int32_t src_stride,
                                      uint8_t *dst, int32_t dst_stride,
                                      const int16_t *filter_y, int32_t w,
                                      int32_t h) {
  int32_t x, y;
  const uint8_t *src_ptr;
  uint8_t *dst_ptr;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector4a = 64;
  uint32_t load1, load2, load3, load4;
  uint32_t p1, p2;
  uint32_t n1, n2;
  uint32_t scratch1, scratch2;
  uint32_t store1, store2;
  int32_t vector1b, vector2b, vector3b, vector4b;
  int32_t Temp1, Temp2;

  vector1b = ((const int32_t *)filter_y)[0];
  vector2b = ((const int32_t *)filter_y)[1];
  vector3b = ((const int32_t *)filter_y)[2];
  vector4b = ((const int32_t *)filter_y)[3];

  src -= 3 * src_stride;

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_store(dst + dst_stride);

    for (x = 0; x < w; x += 4) {
      src_ptr = src + x;
      dst_ptr = dst + x;

      __asm__ __volatile__(
          "ulw %[load1], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load2], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load3], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load4], 0(%[src_ptr]) \n\t"

          "mtlo %[vector4a], $ac0 \n\t"
          "mtlo %[vector4a], $ac1 \n\t"
          "mtlo %[vector4a], $ac2 \n\t"
          "mtlo %[vector4a], $ac3 \n\t"
          "mthi $zero, $ac0 \n\t"
          "mthi $zero, $ac1 \n\t"
          "mthi $zero, $ac2 \n\t"
          "mthi $zero, $ac3 \n\t"

          "preceu.ph.qbr %[scratch1], %[load1] \n\t"
          "preceu.ph.qbr %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "preceu.ph.qbr %[scratch2], %[load3] \n\t"
          "preceu.ph.qbr %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */

          "dpa.w.ph $ac0, %[p1], %[vector1b] \n\t"
          "dpa.w.ph $ac0, %[p2], %[vector2b] \n\t"
          "dpa.w.ph $ac1, %[n1], %[vector1b] \n\t"
          "dpa.w.ph $ac1, %[n2], %[vector2b] \n\t"

          "preceu.ph.qbl %[scratch1], %[load1] \n\t"
          "preceu.ph.qbl %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "preceu.ph.qbl %[scratch2], %[load3] \n\t"
          "preceu.ph.qbl %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */

          "dpa.w.ph $ac2, %[p1], %[vector1b] \n\t"
          "dpa.w.ph $ac2, %[p2], %[vector2b] \n\t"
          "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t"
          "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t"

          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load1], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load2], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load3], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load4], 0(%[src_ptr]) \n\t"

          "preceu.ph.qbr %[scratch1], %[load1] \n\t"
          "preceu.ph.qbr %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "preceu.ph.qbr %[scratch2], %[load3] \n\t"
          "preceu.ph.qbr %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */

          "dpa.w.ph $ac0, %[p1], %[vector3b] \n\t"
          "dpa.w.ph $ac0, %[p2], %[vector4b] \n\t"
          "extp %[Temp1], $ac0, 31 \n\t"
          "dpa.w.ph $ac1, %[n1], %[vector3b] \n\t"
          "dpa.w.ph $ac1, %[n2], %[vector4b] \n\t"
          "extp %[Temp2], $ac1, 31 \n\t"

          "preceu.ph.qbl %[scratch1], %[load1] \n\t"
          "preceu.ph.qbl %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "lbu %[scratch1], 0(%[dst_ptr]) \n\t"
          "preceu.ph.qbl %[scratch2], %[load3] \n\t"
          "preceu.ph.qbl %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */
          "lbu %[scratch2], 1(%[dst_ptr]) \n\t"

          "lbux %[store1], %[Temp1](%[cm]) \n\t"
          "dpa.w.ph $ac2, %[p1], %[vector3b] \n\t"
          "dpa.w.ph $ac2, %[p2], %[vector4b] \n\t"
          "addqh_r.w %[store1], %[store1], %[scratch1] \n\t" /* pixel 1 */
          "extp %[Temp1], $ac2, 31 \n\t"

          "lbux %[store2], %[Temp2](%[cm]) \n\t"
          "dpa.w.ph $ac3, %[n1], %[vector3b] \n\t"
          "dpa.w.ph $ac3, %[n2], %[vector4b] \n\t"
          "addqh_r.w %[store2], %[store2], %[scratch2] \n\t" /* pixel 2 */
          "extp %[Temp2], $ac3, 31 \n\t"
          "lbu %[scratch1], 2(%[dst_ptr]) \n\t"

          "sb %[store1], 0(%[dst_ptr]) \n\t"
          "sb %[store2], 1(%[dst_ptr]) \n\t"
          "lbu %[scratch2], 3(%[dst_ptr]) \n\t"

          "lbux %[store1], %[Temp1](%[cm]) \n\t"
          "lbux %[store2], %[Temp2](%[cm]) \n\t"
          "addqh_r.w %[store1], %[store1], %[scratch1] \n\t" /* pixel 3 */
          "addqh_r.w %[store2], %[store2], %[scratch2] \n\t" /* pixel 4 */

          "sb %[store1], 2(%[dst_ptr]) \n\t"
          "sb %[store2], 3(%[dst_ptr]) \n\t"

          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
    }

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}

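/* Same vertical filter-and-average kernel as above, specialized for 64-wide
 * blocks: the column loop bound is fixed at 64 and a second prefetch_store
 * covers the upper half of the destination row. */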
static void convolve_avg_vert_64_dspr2(const uint8_t *src, int32_t src_stride,
                                       uint8_t *dst, int32_t dst_stride,
                                       const int16_t *filter_y, int32_t h) {
  int32_t x, y;
  const uint8_t *src_ptr;
  uint8_t *dst_ptr;
  uint8_t *cm = vpx_ff_cropTbl;
  uint32_t vector4a = 64;
  uint32_t load1, load2, load3, load4;
  uint32_t p1, p2;
  uint32_t n1, n2;
  uint32_t scratch1, scratch2;
  uint32_t store1, store2;
  int32_t vector1b, vector2b, vector3b, vector4b;
  int32_t Temp1, Temp2;

  vector1b = ((const int32_t *)filter_y)[0];
  vector2b = ((const int32_t *)filter_y)[1];
  vector3b = ((const int32_t *)filter_y)[2];
  vector4b = ((const int32_t *)filter_y)[3];

  src -= 3 * src_stride;

  for (y = h; y--;) {
    /* prefetch data to cache memory */
    prefetch_store(dst + dst_stride);
    prefetch_store(dst + dst_stride + 32);

    for (x = 0; x < 64; x += 4) {
      src_ptr = src + x;
      dst_ptr = dst + x;

      __asm__ __volatile__(
          "ulw %[load1], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load2], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load3], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load4], 0(%[src_ptr]) \n\t"

          "mtlo %[vector4a], $ac0 \n\t"
          "mtlo %[vector4a], $ac1 \n\t"
          "mtlo %[vector4a], $ac2 \n\t"
          "mtlo %[vector4a], $ac3 \n\t"
          "mthi $zero, $ac0 \n\t"
          "mthi $zero, $ac1 \n\t"
          "mthi $zero, $ac2 \n\t"
          "mthi $zero, $ac3 \n\t"

          "preceu.ph.qbr %[scratch1], %[load1] \n\t"
          "preceu.ph.qbr %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "preceu.ph.qbr %[scratch2], %[load3] \n\t"
          "preceu.ph.qbr %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */

          "dpa.w.ph $ac0, %[p1], %[vector1b] \n\t"
          "dpa.w.ph $ac0, %[p2], %[vector2b] \n\t"
          "dpa.w.ph $ac1, %[n1], %[vector1b] \n\t"
          "dpa.w.ph $ac1, %[n2], %[vector2b] \n\t"

          "preceu.ph.qbl %[scratch1], %[load1] \n\t"
          "preceu.ph.qbl %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "preceu.ph.qbl %[scratch2], %[load3] \n\t"
          "preceu.ph.qbl %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */

          "dpa.w.ph $ac2, %[p1], %[vector1b] \n\t"
          "dpa.w.ph $ac2, %[p2], %[vector2b] \n\t"
          "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t"
          "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t"

          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load1], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load2], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load3], 0(%[src_ptr]) \n\t"
          "add %[src_ptr], %[src_ptr], %[src_stride] \n\t"
          "ulw %[load4], 0(%[src_ptr]) \n\t"

          "preceu.ph.qbr %[scratch1], %[load1] \n\t"
          "preceu.ph.qbr %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "preceu.ph.qbr %[scratch2], %[load3] \n\t"
          "preceu.ph.qbr %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */

          "dpa.w.ph $ac0, %[p1], %[vector3b] \n\t"
          "dpa.w.ph $ac0, %[p2], %[vector4b] \n\t"
          "extp %[Temp1], $ac0, 31 \n\t"
          "dpa.w.ph $ac1, %[n1], %[vector3b] \n\t"
          "dpa.w.ph $ac1, %[n2], %[vector4b] \n\t"
          "extp %[Temp2], $ac1, 31 \n\t"

          "preceu.ph.qbl %[scratch1], %[load1] \n\t"
          "preceu.ph.qbl %[p1], %[load2] \n\t"
          "precrq.ph.w %[n1], %[p1], %[scratch1] \n\t" /* pixel 2 */
          "append %[p1], %[scratch1], 16 \n\t"         /* pixel 1 */
          "lbu %[scratch1], 0(%[dst_ptr]) \n\t"
          "preceu.ph.qbl %[scratch2], %[load3] \n\t"
          "preceu.ph.qbl %[p2], %[load4] \n\t"
          "precrq.ph.w %[n2], %[p2], %[scratch2] \n\t" /* pixel 2 */
          "append %[p2], %[scratch2], 16 \n\t"         /* pixel 1 */
          "lbu %[scratch2], 1(%[dst_ptr]) \n\t"

          "lbux %[store1], %[Temp1](%[cm]) \n\t"
          "dpa.w.ph $ac2, %[p1], %[vector3b] \n\t"
          "dpa.w.ph $ac2, %[p2], %[vector4b] \n\t"
          "addqh_r.w %[store1], %[store1], %[scratch1] \n\t" /* pixel 1 */
          "extp %[Temp1], $ac2, 31 \n\t"

          "lbux %[store2], %[Temp2](%[cm]) \n\t"
          "dpa.w.ph $ac3, %[n1], %[vector3b] \n\t"
          "dpa.w.ph $ac3, %[n2], %[vector4b] \n\t"
          "addqh_r.w %[store2], %[store2], %[scratch2] \n\t" /* pixel 2 */
          "extp %[Temp2], $ac3, 31 \n\t"
          "lbu %[scratch1], 2(%[dst_ptr]) \n\t"

          "sb %[store1], 0(%[dst_ptr]) \n\t"
          "sb %[store2], 1(%[dst_ptr]) \n\t"
          "lbu %[scratch2], 3(%[dst_ptr]) \n\t"

          "lbux %[store1], %[Temp1](%[cm]) \n\t"
          "lbux %[store2], %[Temp2](%[cm]) \n\t"
          "addqh_r.w %[store1], %[store1], %[scratch1] \n\t" /* pixel 3 */
          "addqh_r.w %[store2], %[store2], %[scratch2] \n\t" /* pixel 4 */

          "sb %[store1], 2(%[dst_ptr]) \n\t"
          "sb %[store2], 3(%[dst_ptr]) \n\t"

          : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),
            [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),
            [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1),
            [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1),
            [Temp2] "=&r"(Temp2), [store1] "=&r"(store1),
            [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr)
          : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b),
            [vector3b] "r"(vector3b), [vector4b] "r"(vector4b),
            [vector4a] "r"(vector4a), [src_stride] "r"(src_stride),
            [cm] "r"(cm), [dst_ptr] "r"(dst_ptr));
    }

    /* Next row... */
    src += src_stride;
    dst += dst_stride;
  }
}

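/* Dispatcher for the vertical-only averaging convolution. wrdsp programs the
 * DSPControl extract position used by the extp instructions in the kernels
 * above. When the first two filter taps (the first packed word of filter_y)
 * are zero, the 2-tap vpx_convolve2_avg_vert_dspr2 path is taken; widths
 * other than 4/8/16/32/64 fall back to the C implementation. */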
void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                                  uint8_t *dst, ptrdiff_t dst_stride,
                                  const InterpKernel *filter, int x0_q4,
                                  int32_t x_step_q4, int y0_q4, int y_step_q4,
                                  int w, int h) {
  const int16_t *const filter_y = filter[y0_q4];
  assert(y_step_q4 == 16);
  assert(((const int32_t *)filter_y)[1] != 0x800000);

  if (((const int32_t *)filter_y)[0] == 0) {
    vpx_convolve2_avg_vert_dspr2(src, src_stride, dst, dst_stride, filter,
                                 x0_q4, x_step_q4, y0_q4, y_step_q4, w, h);
  } else {
    uint32_t pos = 38;

    /* bit position for extract from acc */
    __asm__ __volatile__("wrdsp %[pos], 1 \n\t"
                         :
                         : [pos] "r"(pos));

    prefetch_store(dst);

    switch (w) {
      case 4:
      case 8:
      case 16:
      case 32:
        convolve_avg_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y,
                                  w, h);
        break;
      case 64:
        prefetch_store(dst + 32);
        convolve_avg_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y,
                                   h);
        break;
      default:
        vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter,
                                 x0_q4, x_step_q4, y0_q4, y_step_q4, w, h);
        break;
    }
  }
}

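/* Full 8-tap 2-D averaging convolution: a horizontal pass filters into a
 * 64x135 intermediate buffer (large enough for a 64x64 block plus the extra
 * rows the 8-tap vertical filter needs), then the vertical pass filters that
 * buffer and averages the result into dst. */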
void vpx_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const InterpKernel *filter, int x0_q4,
                             int32_t x_step_q4, int y0_q4, int y_step_q4, int w,
                             int h) {
  /* Fixed size intermediate buffer places limits on parameters. */
  DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]);
  int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7;

  assert(w <= 64);
  assert(h <= 64);
  assert(x_step_q4 == 16);
  assert(y_step_q4 == 16);

  if (intermediate_height < h) intermediate_height = h;

  vpx_convolve8_horiz(src - (src_stride * 3), src_stride, temp, 64, filter,
                      x0_q4, x_step_q4, y0_q4, y_step_q4, w,
                      intermediate_height);

  vpx_convolve8_avg_vert(temp + 64 * 3, 64, dst, dst_stride, filter, x0_q4,
                         x_step_q4, y0_q4, y_step_q4, w, h);
}

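/* Pure destination averaging (no filtering): every output byte becomes
 * (dst + src + 1) >> 1. The DSPr2 paths process four bytes per adduh_r.qb
 * (per-byte add, halve, round); widths without a dedicated case use the
 * scalar loop in the default branch. */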
void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
                            uint8_t *dst, ptrdiff_t dst_stride,
                            const InterpKernel *filter, int x0_q4,
                            int32_t x_step_q4, int y0_q4, int y_step_q4, int w,
                            int h) {
  int x, y;
  uint32_t tp1, tp2, tn1, tp3, tp4, tn2;
  (void)filter;
  (void)x0_q4;
  (void)x_step_q4;
  (void)y0_q4;
  (void)y_step_q4;

  /* prefetch data to cache memory */
  prefetch_load(src);
  prefetch_load(src + 32);
  prefetch_store(dst);

  switch (w) {
    case 4:
      /* 1 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw %[tp1], 0(%[src]) \n\t"
            "ulw %[tp2], 0(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "sw %[tn1], 0(%[dst]) \n\t"              /* store */
            : [tn1] "=&r"(tn1), [tp1] "=&r"(tp1), [tp2] "=&r"(tp2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    case 8:
      /* 2 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw %[tp1], 0(%[src]) \n\t"
            "ulw %[tp2], 0(%[dst]) \n\t"
            "ulw %[tp3], 4(%[src]) \n\t"
            "ulw %[tp4], 4(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "sw %[tn1], 0(%[dst]) \n\t"              /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 4(%[dst]) \n\t"              /* store */
            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    case 16:
      /* 4 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw %[tp1], 0(%[src]) \n\t"
            "ulw %[tp2], 0(%[dst]) \n\t"
            "ulw %[tp3], 4(%[src]) \n\t"
            "ulw %[tp4], 4(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 8(%[src]) \n\t"
            "ulw %[tp2], 8(%[dst]) \n\t"
            "sw %[tn1], 0(%[dst]) \n\t"              /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 4(%[dst]) \n\t"              /* store */
            "ulw %[tp3], 12(%[src]) \n\t"
            "ulw %[tp4], 12(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "sw %[tn1], 8(%[dst]) \n\t"              /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 12(%[dst]) \n\t"             /* store */
            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    case 32:
      /* 8 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_store(dst + dst_stride);

        __asm__ __volatile__(
            "ulw %[tp1], 0(%[src]) \n\t"
            "ulw %[tp2], 0(%[dst]) \n\t"
            "ulw %[tp3], 4(%[src]) \n\t"
            "ulw %[tp4], 4(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 8(%[src]) \n\t"
            "ulw %[tp2], 8(%[dst]) \n\t"
            "sw %[tn1], 0(%[dst]) \n\t"              /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 4(%[dst]) \n\t"              /* store */
            "ulw %[tp3], 12(%[src]) \n\t"
            "ulw %[tp4], 12(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 16(%[src]) \n\t"
            "ulw %[tp2], 16(%[dst]) \n\t"
            "sw %[tn1], 8(%[dst]) \n\t"              /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 12(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 20(%[src]) \n\t"
            "ulw %[tp4], 20(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 24(%[src]) \n\t"
            "ulw %[tp2], 24(%[dst]) \n\t"
            "sw %[tn1], 16(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 20(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 28(%[src]) \n\t"
            "ulw %[tp4], 28(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "sw %[tn1], 24(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 28(%[dst]) \n\t"             /* store */
            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    case 64:
      prefetch_load(src + 64);
      prefetch_store(dst + 32);

      /* 16 word storage */
      for (y = h; y--;) {
        prefetch_load(src + src_stride);
        prefetch_load(src + src_stride + 32);
        prefetch_load(src + src_stride + 64);
        prefetch_store(dst + dst_stride);
        prefetch_store(dst + dst_stride + 32);

        __asm__ __volatile__(
            "ulw %[tp1], 0(%[src]) \n\t"
            "ulw %[tp2], 0(%[dst]) \n\t"
            "ulw %[tp3], 4(%[src]) \n\t"
            "ulw %[tp4], 4(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 8(%[src]) \n\t"
            "ulw %[tp2], 8(%[dst]) \n\t"
            "sw %[tn1], 0(%[dst]) \n\t"              /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 4(%[dst]) \n\t"              /* store */
            "ulw %[tp3], 12(%[src]) \n\t"
            "ulw %[tp4], 12(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 16(%[src]) \n\t"
            "ulw %[tp2], 16(%[dst]) \n\t"
            "sw %[tn1], 8(%[dst]) \n\t"              /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 12(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 20(%[src]) \n\t"
            "ulw %[tp4], 20(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 24(%[src]) \n\t"
            "ulw %[tp2], 24(%[dst]) \n\t"
            "sw %[tn1], 16(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 20(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 28(%[src]) \n\t"
            "ulw %[tp4], 28(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 32(%[src]) \n\t"
            "ulw %[tp2], 32(%[dst]) \n\t"
            "sw %[tn1], 24(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 28(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 36(%[src]) \n\t"
            "ulw %[tp4], 36(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 40(%[src]) \n\t"
            "ulw %[tp2], 40(%[dst]) \n\t"
            "sw %[tn1], 32(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 36(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 44(%[src]) \n\t"
            "ulw %[tp4], 44(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 48(%[src]) \n\t"
            "ulw %[tp2], 48(%[dst]) \n\t"
            "sw %[tn1], 40(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 44(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 52(%[src]) \n\t"
            "ulw %[tp4], 52(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "ulw %[tp1], 56(%[src]) \n\t"
            "ulw %[tp2], 56(%[dst]) \n\t"
            "sw %[tn1], 48(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 52(%[dst]) \n\t"             /* store */
            "ulw %[tp3], 60(%[src]) \n\t"
            "ulw %[tp4], 60(%[dst]) \n\t"
            "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */
            "sw %[tn1], 56(%[dst]) \n\t"             /* store */
            "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */
            "sw %[tn2], 60(%[dst]) \n\t"             /* store */
            : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3),
              [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2)
            : [src] "r"(src), [dst] "r"(dst));

        src += src_stride;
        dst += dst_stride;
      }
      break;
    default:
      for (y = h; y > 0; --y) {
        for (x = 0; x < w; ++x) {
          dst[x] = (dst[x] + src[x] + 1) >> 1;
        }

        src += src_stride;
        dst += dst_stride;
      }
      break;
  }
}

#endif