@********************************************************************
@* *
@* THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE. *
@* USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
@* GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
@* IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
@* *
@* THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2010 *
@* by the Xiph.Org Foundation and contributors http://www.xiph.org/ *
@* *
@********************************************************************
@ Original implementation:
@ Copyright (C) 2009 Robin Watts for Pinknoise Productions Ltd
@ last mod: $Id: armloop.s 17430 2010-09-22 21:54:09Z tterribe $
@********************************************************************
        .text
        .include "armopts-gnu.S"
@ .set OC_ARM_ASM_EDSP, 0
@ .set OC_ARM_ASM_MEDIA, 0
@ .set OC_ARM_ASM_NEON, 0
@ .set OC_ARM_CAN_UNALIGN, 0
@ .set OC_ARM_CAN_UNALIGN_LDRD, 0
        .global oc_loop_filter_frag_rows_arm
@ Which bit this is depends on the order of packing within a bitfield.
@ Hopefully that doesn't change among any of the relevant compilers.
        .set OC_FRAG_CODED_FLAG, 1
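@ That is, we assume the coded flag is the first bitfield in the fragment
@ struct, e.g. (an illustrative sketch, not the actual declaration):
@   struct oc_fragment{unsigned coded:1; /*...*/};
@ so TSTing _frags[fragi] against #OC_FRAG_CODED_FLAG tests that bit.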
@ Vanilla ARM v4 version
loop_filter_h_arm:
@ r0 = unsigned char *_pix
@ r1 = int _ystride
@ r2 = int *_bv
@ preserves r0-r3
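@ Each loop iteration below filters one row of the edge. Writing p0..p3 for
@ the four pixels the comments call _pix[0].._pix[3] (i.e. _pix[-2..+1]
@ relative to r0), it is roughly (a hedged C sketch; the clamp macro name is
@ illustrative, and _bv has been pre-biased by +127 by the caller, so the
@ signed lookup applies lflim(f,L)):
@   f=(p0-p3+3*(p2-p1)+4)>>3;
@   f=_bv[f];
@   p1=OC_CLAMP255(p1+f);
@   p2=OC_CLAMP255(p2-f);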
        STMFD r13!,{r3-r6,r14}
        MOV r14,#8
        MOV r6, #255
lfh_arm_lp:
        LDRB r3, [r0, #-2] @ r3 = _pix[0]
        LDRB r12,[r0, #1] @ r12= _pix[3]
        LDRB r4, [r0, #-1] @ r4 = _pix[1]
        LDRB r5, [r0] @ r5 = _pix[2]
        SUB r3, r3, r12 @ r3 = _pix[0]-_pix[3]+4
        ADD r3, r3, #4
        SUB r12,r5, r4 @ r12= _pix[2]-_pix[1]
        ADD r12,r12,r12,LSL #1 @ r12= 3*(_pix[2]-_pix[1])
        ADD r12,r12,r3 @ r12= _pix[0]-_pix[3]+3*(_pix[2]-_pix[1])+4
        MOV r12,r12,ASR #3
        LDRSB r12,[r2, r12]
@ Stall (2 on Xscale)
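@ Branchless clamp of _pix[1]+f (r4) and _pix[2]-f (r5) to [0,255]: a
@ negative ADDS/SUBS result makes LT true directly (the CMPGT is skipped)
@ and rX ASR #32 is all ones, so the EOR leaves 0 in the low byte; a result
@ above 255 makes the CMPGT set LT with rX ASR #32 equal to 0, so the EOR
@ writes 255.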
        ADDS r4, r4, r12
        CMPGT r6, r4
        EORLT r4, r6, r4, ASR #32
        SUBS r5, r5, r12
        CMPGT r6, r5
        EORLT r5, r6, r5, ASR #32
        STRB r4, [r0, #-1]
        STRB r5, [r0], r1
        SUBS r14,r14,#1
        BGT lfh_arm_lp
        SUB r0, r0, r1, LSL #3
        LDMFD r13!,{r3-r6,PC}
loop_filter_v_arm:
@ r0 = unsigned char *_pix
@ r1 = int _ystride
@ r2 = int *_bv
@ preserves r0-r3
        STMFD r13!,{r3-r6,r14}
        MOV r14,#8
        MOV r6, #255
lfv_arm_lp:
        LDRB r3, [r0, -r1, LSL #1] @ r3 = _pix[0]
        LDRB r12,[r0, r1] @ r12= _pix[3]
        LDRB r4, [r0, -r1] @ r4 = _pix[1]
        LDRB r5, [r0] @ r5 = _pix[2]
        SUB r3, r3, r12 @ r3 = _pix[0]-_pix[3]+4
        ADD r3, r3, #4
        SUB r12,r5, r4 @ r12= _pix[2]-_pix[1]
        ADD r12,r12,r12,LSL #1 @ r12= 3*(_pix[2]-_pix[1])
        ADD r12,r12,r3 @ r12= _pix[0]-_pix[3]+3*(_pix[2]-_pix[1])+4
        MOV r12,r12,ASR #3
        LDRSB r12,[r2, r12]
@ Stall (2 on Xscale)
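@ Same branchless [0,255] clamp as in loop_filter_h_arm above.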
        ADDS r4, r4, r12
        CMPGT r6, r4
        EORLT r4, r6, r4, ASR #32
        SUBS r5, r5, r12
        CMPGT r6, r5
        EORLT r5, r6, r5, ASR #32
        STRB r4, [r0, -r1]
        STRB r5, [r0], #1
        SUBS r14,r14,#1
        BGT lfv_arm_lp
        SUB r0, r0, #8
        LDMFD r13!,{r3-r6,PC}
oc_loop_filter_frag_rows_arm:
@ r0 = _ref_frame_data
@ r1 = _ystride
@ r2 = _bv
@ r3 = _frags
@ r4 = _fragi0
@ r5 = _fragi0_end
@ r6 = _fragi_top
@ r7 = _fragi_bot
@ r8 = _frag_buf_offs
@ r9 = _nhfrags
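@ For each coded fragment, the loop below makes these edge decisions (a
@ hedged C sketch reconstructed from the annotations on the code; filter_h
@ and filter_v stand for the two filters above, and
@ fragi_end=_fragi0+_nhfrags):
@   ref=_ref_frame_data+_frag_buf_offs[fragi];
@   if(fragi>_fragi0)filter_h(ref);                           /* left edge */
@   if(_fragi0>_fragi_top)filter_v(ref);                      /* top edge */
@   if(fragi+1<fragi_end&&!(_frags[fragi+1]&OC_FRAG_CODED_FLAG))
@     filter_h(ref+8);                                        /* right edge */
@   if(fragi+_nhfrags<_fragi_bot&&!(_frags[fragi+_nhfrags]&OC_FRAG_CODED_FLAG))
@     filter_v(ref+(_ystride<<3));                            /* bottom edge */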
        MOV r12,r13
        STMFD r13!,{r0,r4-r11,r14}
        LDMFD r12,{r4-r9}
        ADD r2, r2, #127 @ _bv += 127
        CMP r4, r5 @ if(_fragi0>=_fragi0_end)
        BGE oslffri_arm_end @ bail
        SUBS r9, r9, #1 @ r9 = _nhfrags-1; if (r9<=0)
        BLE oslffri_arm_end @ bail
        ADD r3, r3, r4, LSL #2 @ r3 = &_frags[fragi]
        ADD r8, r8, r4, LSL #2 @ r8 = &_frag_buf_offs[fragi]
        SUB r7, r7, r9 @ _fragi_bot -= _nhfrags;
oslffri_arm_lp1:
        MOV r10,r4 @ r10= fragi = _fragi0
        ADD r11,r4, r9 @ r11= fragi_end-1=fragi+_nhfrags-1
oslffri_arm_lp2:
        LDR r14,[r3], #4 @ r14= _frags[fragi]; _frags++
        LDR r0, [r13] @ r0 = _ref_frame_data
        LDR r12,[r8], #4 @ r12= _frag_buf_offs[fragi]; _frag_buf_offs++
        TST r14,#OC_FRAG_CODED_FLAG
        BEQ oslffri_arm_uncoded
        CMP r10,r4 @ if (fragi>_fragi0)
        ADD r0, r0, r12 @ r0 = _ref_frame_data + _frag_buf_offs[fragi]
        BLGT loop_filter_h_arm
        CMP r4, r6 @ if (_fragi0>_fragi_top)
        BLGT loop_filter_v_arm
        CMP r10,r11 @ if(fragi+1<fragi_end)===(fragi<fragi_end-1)
        LDRLT r12,[r3] @ r12 = _frags[fragi+1]
        ADD r0, r0, #8
        ADD r10,r10,#1 @ r10 = fragi+1;
        ANDLT r12,r12,#OC_FRAG_CODED_FLAG
        CMPLT r12,#OC_FRAG_CODED_FLAG @ && _frags[fragi+1].coded==0
        BLLT loop_filter_h_arm
        CMP r10,r7 @ if (fragi<_fragi_bot)
        LDRLT r12,[r3, r9, LSL #2] @ r12 = _frags[fragi+1+_nhfrags-1]
        SUB r0, r0, #8
        ADD r0, r0, r1, LSL #3
        ANDLT r12,r12,#OC_FRAG_CODED_FLAG
        CMPLT r12,#OC_FRAG_CODED_FLAG
        BLLT loop_filter_v_arm
        CMP r10,r11 @ while(fragi<=fragi_end-1)
        BLE oslffri_arm_lp2
        MOV r4, r10 @ r4 = fragi0 += _nhfrags
        CMP r4, r5
        BLT oslffri_arm_lp1
oslffri_arm_end:
        LDMFD r13!,{r0,r4-r11,PC}
oslffri_arm_uncoded:
        ADD r10,r10,#1
        CMP r10,r11
        BLE oslffri_arm_lp2
        MOV r4, r10 @ r4 = _fragi0 += _nhfrags
        CMP r4, r5
        BLT oslffri_arm_lp1
        LDMFD r13!,{r0,r4-r11,PC}
        .if OC_ARM_ASM_MEDIA
        .global oc_loop_filter_init_v6
        .global oc_loop_filter_frag_rows_v6
oc_loop_filter_init_v6:
@ r0 = _bv
@ r1 = _flimit (=L from the spec)
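@ Roughly equivalent C (a hedged sketch): replicate ll=255-2*L into all four
@ bytes of the word at _bv:
@   ll=~(_flimit<<1)&0xFF;
@   *(int *)_bv=ll*0x01010101;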
        MVN r1, r1, LSL #1 @ r1 = <0xFFFFFF|255-2*L>
        AND r1, r1, #255 @ r1 = ll=r1&0xFF
        ORR r1, r1, r1, LSL #8 @ r1 = <ll|ll>
        PKHBT r1, r1, r1, LSL #16 @ r1 = <ll|ll|ll|ll>
        STR r1, [r0]
        MOV PC,r14
@ We could use the same strategy as the v filter below, but that would require
@ 40 instructions to load the data and transpose it into columns and another
@ 32 to write out the results at the end, plus the 52 instructions to do the
@ filtering itself.
@ This is slightly less, and less code, even assuming we could have shared the
@ 52 instructions in the middle with the other function.
@ It executes slightly fewer instructions than the ARMv6 approach David Conrad
@ proposed for FFmpeg, but not by much:
@ http://lists.mplayerhq.hu/pipermail/ffmpeg-devel/2010-February/083141.html
@ His is a lot less code, though, because it only does two rows at once instead
@ of four.
loop_filter_h_v6:
@ r0 = unsigned char *_pix
@ r1 = int _ystride
@ r2 = int _ll
@ preserves r0-r3
        STMFD r13!,{r4-r11,r14}
        LDR r12,=0x10003
        BL loop_filter_h_core_v6
        ADD r0, r0, r1, LSL #2
        BL loop_filter_h_core_v6
        SUB r0, r0, r1, LSL #2
        LDMFD r13!,{r4-r11,PC}
loop_filter_h_core_v6:
@ r0 = unsigned char *_pix
@ r1 = int _ystride
@ r2 = int _ll
@ r12= 0x10003
@ Preserves r0-r3, r12; Clobbers r4-r11.
        LDR r4,[r0, #-2]! @ r4 = <p3|p2|p1|p0>
@ Single issue
        LDR r5,[r0, r1]! @ r5 = <q3|q2|q1|q0>
        UXTB16 r6, r4, ROR #16 @ r6 = <p0|p2>
        UXTB16 r4, r4, ROR #8 @ r4 = <p3|p1>
        UXTB16 r7, r5, ROR #16 @ r7 = <q0|q2>
        UXTB16 r5, r5, ROR #8 @ r5 = <q3|q1>
        PKHBT r8, r4, r5, LSL #16 @ r8 = <__|q1|__|p1>
        PKHBT r9, r6, r7, LSL #16 @ r9 = <__|q2|__|p2>
        SSUB16 r6, r4, r6 @ r6 = <p3-p0|p1-p2>
        SMLAD r6, r6, r12,r12 @ r6 = <????|(p3-p0)+3*(p1-p2)+3>
        SSUB16 r7, r5, r7 @ r7 = <q3-q0|q1-q2>
        SMLAD r7, r7, r12,r12 @ r7 = <????|(q3-q0)+3*(q1-q2)+3>
        LDR r4,[r0, r1]! @ r4 = <r3|r2|r1|r0>
        MOV r6, r6, ASR #3 @ r6 = <??????|(p3-p0)+3*(p1-p2)+3>>3>
        LDR r5,[r0, r1]! @ r5 = <s3|s2|s1|s0>
        PKHBT r11,r6, r7, LSL #13 @ r11= <??|-R_q|??|-R_p>
        UXTB16 r6, r4, ROR #16 @ r6 = <r0|r2>
        UXTB16 r11,r11 @ r11= <__|-R_q|__|-R_p>
        UXTB16 r4, r4, ROR #8 @ r4 = <r3|r1>
        UXTB16 r7, r5, ROR #16 @ r7 = <s0|s2>
        PKHBT r10,r6, r7, LSL #16 @ r10= <__|s2|__|r2>
        SSUB16 r6, r4, r6 @ r6 = <r3-r0|r1-r2>
        UXTB16 r5, r5, ROR #8 @ r5 = <s3|s1>
        SMLAD r6, r6, r12,r12 @ r6 = <????|(r3-r0)+3*(r1-r2)+3>
        SSUB16 r7, r5, r7 @ r7 = <s3-s0|s1-s2>
        SMLAD r7, r7, r12,r12 @ r7 = <????|(s3-s0)+3*(s1-s2)+3>
        ORR r9, r9, r10, LSL #8 @ r9 = <s2|q2|r2|p2>
        MOV r6, r6, ASR #3 @ r6 = <??????|(r3-r0)+3*(r1-r2)+3>>3>
        PKHBT r10,r4, r5, LSL #16 @ r10= <__|s1|__|r1>
        PKHBT r6, r6, r7, LSL #13 @ r6 = <??|-R_s|??|-R_r>
        ORR r8, r8, r10, LSL #8 @ r8 = <s1|q1|r1|p1>
        UXTB16 r6, r6 @ r6 = <__|-R_s|__|-R_r>
        MOV r10,#0
        ORR r6, r11,r6, LSL #8 @ r6 = <-R_s|-R_q|-R_r|-R_p>
@ Single issue
@ There's no min, max or abs instruction.
@ SSUB8 and SEL will work for abs, and we can do all the rest with
@ unsigned saturated adds, which means the GE flags are still all
@ set when we're done computing lflim(abs(R_i),L).
@ This allows us to both add and subtract, and split the results by
@ the original sign of R_i.
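@ Concretely, with _ll=255-2*L replicated per byte (see
@ oc_loop_filter_init_v6), the saturating ops below compute, per byte,
@   UQADD8(|R_i|,_ll) = 255-max(2*L-|R_i|,0)
@ and then UQADD8 followed by UQSUB8 of that value yields
@   min(|R_i|,max(2*L-|R_i|,0)) = lflim(|R_i|,L).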
        SSUB8 r7, r10,r6
@ Single issue
        SEL r7, r7, r6 @ r7 = abs(R_i)
@ Single issue
        UQADD8 r4, r7, r2 @ r4 = 255-max(2*L-abs(R_i),0)
@ Single issue
        UQADD8 r7, r7, r4
@ Single issue
        UQSUB8 r7, r7, r4 @ r7 = min(abs(R_i),max(2*L-abs(R_i),0))
@ Single issue
        UQSUB8 r4, r8, r7
        UQADD8 r5, r9, r7
        UQADD8 r8, r8, r7
        UQSUB8 r9, r9, r7
        SEL r8, r8, r4 @ r8 = p1+lflim(R_i,L)
        SEL r9, r9, r5 @ r9 = p2-lflim(R_i,L)
        MOV r5, r9, LSR #24 @ r5 = s2
        STRB r5, [r0,#2]!
        MOV r4, r8, LSR #24 @ r4 = s1
        STRB r4, [r0,#-1]
        MOV r5, r9, LSR #8 @ r5 = r2
        STRB r5, [r0,-r1]!
        MOV r4, r8, LSR #8 @ r4 = r1
        STRB r4, [r0,#-1]
        MOV r5, r9, LSR #16 @ r5 = q2
        STRB r5, [r0,-r1]!
        MOV r4, r8, LSR #16 @ r4 = q1
        STRB r4, [r0,#-1]
@ Single issue
        STRB r9, [r0,-r1]!
@ Single issue
        STRB r8, [r0,#-1]
        MOV PC,r14
@ This uses the same strategy as the MMXEXT version for x86, except that UHADD8
@ computes (a+b>>1) instead of (a+b+1>>1) like PAVGB.
@ This works just as well, with the following procedure for computing the
@ filter value, f:
@ u = ~UHADD8(p1,~p2);
@ v = UHADD8(~p1,p2);
@ m = v-u;
@ a = m^UHADD8(m^p0,m^~p3);
@ f = UHADD8(UHADD8(a,u),v);
@ where f = 127+R, with R in [-127,128] defined as in the spec.
@ This is exactly the same amount of arithmetic as the version that uses PAVGB
@ as the basic operator.
@ It executes about 2/3 the number of instructions of David Conrad's approach,
@ but requires more code, because it does all eight columns at once, instead
@ of four at a time.
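@ Once f=127+R is available, the sign split further below uses r12={127}x4:
@   UQSUB8(f,127) = R>0 ?  R : 0
@   UQSUB8(127,f) = R<0 ? -R : 0
@ and each half is then limited with the same UQADD8/UQSUB8 pattern as in the
@ h filter above.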
loop_filter_v_v6:
@ r0 = unsigned char *_pix
@ r1 = int _ystride
@ r2 = int _ll
@ preserves r0-r11
        STMFD r13!,{r4-r11,r14}
        LDRD r6, [r0, -r1]! @ r7, r6 = <p5|p1>
        LDRD r4, [r0, -r1] @ r5, r4 = <p4|p0>
        LDRD r8, [r0, r1]! @ r9, r8 = <p6|p2>
        MVN r14,r6 @ r14= ~p1
        LDRD r10,[r0, r1] @ r11,r10= <p7|p3>
@ Filter the first four columns.
        MVN r12,r8 @ r12= ~p2
        UHADD8 r14,r14,r8 @ r14= v1=~p1+p2>>1
        UHADD8 r12,r12,r6 @ r12= p1+~p2>>1
        MVN r10, r10 @ r10=~p3
        MVN r12,r12 @ r12= u1=~p1+p2+1>>1
        SSUB8 r14,r14,r12 @ r14= m1=v1-u1
@ Single issue
        EOR r4, r4, r14 @ r4 = m1^p0
        EOR r10,r10,r14 @ r10= m1^~p3
        UHADD8 r4, r4, r10 @ r4 = (m1^p0)+(m1^~p3)>>1
@ Single issue
        EOR r4, r4, r14 @ r4 = a1=m1^((m1^p0)+(m1^~p3)>>1)
        SADD8 r14,r14,r12 @ r14= v1=m1+u1
        UHADD8 r4, r4, r12 @ r4 = a1+u1>>1
        MVN r12,r9 @ r12= ~p6
        UHADD8 r4, r4, r14 @ r4 = f1=(a1+u1>>1)+v1>>1
@ Filter the second four columns.
        MVN r14,r7 @ r14= ~p5
        UHADD8 r12,r12,r7 @ r12= p5+~p6>>1
        UHADD8 r14,r14,r9 @ r14= v2=~p5+p6>>1
        MVN r12,r12 @ r12= u2=~p5+p6+1>>1
        MVN r11,r11 @ r11=~p7
        SSUB8 r10,r14,r12 @ r10= m2=v2-u2
@ Single issue
        EOR r5, r5, r10 @ r5 = m2^p4
        EOR r11,r11,r10 @ r11= m2^~p7
        UHADD8 r5, r5, r11 @ r5 = (m2^p4)+(m2^~p7)>>1
@ Single issue
        EOR r5, r5, r10 @ r5 = a2=m2^((m2^p4)+(m2^~p7)>>1)
@ Single issue
        UHADD8 r5, r5, r12 @ r5 = a2+u2>>1
        LDR r12,=0x7F7F7F7F @ r12 = {127}x4
        UHADD8 r5, r5, r14 @ r5 = f2=(a2+u2>>1)+v2>>1
@ Now split f[i] by sign.
@ There's no min or max instruction.
@ We could use SSUB8 and SEL, but this is just as many instructions and
@ dual issues more (for v7 without NEON).
        UQSUB8 r10,r4, r12 @ r10= R_i>0?R_i:0
        UQSUB8 r4, r12,r4 @ r4 = R_i<0?-R_i:0
        UQADD8 r11,r10,r2 @ r11= 255-max(2*L-abs(R_i>0),0)
        UQADD8 r14,r4, r2 @ r14= 255-max(2*L-abs(R_i<0),0)
        UQADD8 r10,r10,r11
        UQADD8 r4, r4, r14
        UQSUB8 r10,r10,r11 @ r10= min(abs(R_i>0),max(2*L-abs(R_i>0),0))
        UQSUB8 r4, r4, r14 @ r4 = min(abs(R_i<0),max(2*L-abs(R_i<0),0))
        UQSUB8 r11,r5, r12 @ r11= R_i>0?R_i:0
        UQADD8 r6, r6, r10
        UQSUB8 r8, r8, r10
        UQSUB8 r5, r12,r5 @ r5 = R_i<0?-R_i:0
        UQSUB8 r6, r6, r4 @ r6 = p1+lflim(R_i,L)
        UQADD8 r8, r8, r4 @ r8 = p2-lflim(R_i,L)
        UQADD8 r10,r11,r2 @ r10= 255-max(2*L-abs(R_i>0),0)
        UQADD8 r14,r5, r2 @ r14= 255-max(2*L-abs(R_i<0),0)
        UQADD8 r11,r11,r10
        UQADD8 r5, r5, r14
        UQSUB8 r11,r11,r10 @ r11= min(abs(R_i>0),max(2*L-abs(R_i>0),0))
        UQSUB8 r5, r5, r14 @ r5 = min(abs(R_i<0),max(2*L-abs(R_i<0),0))
        UQADD8 r7, r7, r11
        UQSUB8 r9, r9, r11
        UQSUB8 r7, r7, r5 @ r7 = p5+lflim(R_i,L)
        STRD r6, [r0, -r1] @ [p5:p1] = [r7: r6]
        UQADD8 r9, r9, r5 @ r9 = p6-lflim(R_i,L)
        STRD r8, [r0] @ [p6:p2] = [r9: r8]
        LDMFD r13!,{r4-r11,PC}
oc_loop_filter_frag_rows_v6:
@ r0 = _ref_frame_data
@ r1 = _ystride
@ r2 = _bv
@ r3 = _frags
@ r4 = _fragi0
@ r5 = _fragi0_end
@ r6 = _fragi_top
@ r7 = _fragi_bot
@ r8 = _frag_buf_offs
@ r9 = _nhfrags
        MOV r12,r13
        STMFD r13!,{r0,r4-r11,r14}
        LDMFD r12,{r4-r9}
        LDR r2, [r2] @ ll = *(int *)_bv
        CMP r4, r5 @ if(_fragi0>=_fragi0_end)
        BGE oslffri_v6_end @ bail
        SUBS r9, r9, #1 @ r9 = _nhfrags-1; if (r9<=0)
        BLE oslffri_v6_end @ bail
        ADD r3, r3, r4, LSL #2 @ r3 = &_frags[fragi]
        ADD r8, r8, r4, LSL #2 @ r8 = &_frag_buf_offs[fragi]
        SUB r7, r7, r9 @ _fragi_bot -= _nhfrags;
oslffri_v6_lp1:
        MOV r10,r4 @ r10= fragi = _fragi0
        ADD r11,r4, r9 @ r11= fragi_end-1=fragi+_nhfrags-1
oslffri_v6_lp2:
        LDR r14,[r3], #4 @ r14= _frags[fragi]; _frags++
        LDR r0, [r13] @ r0 = _ref_frame_data
        LDR r12,[r8], #4 @ r12= _frag_buf_offs[fragi]; _frag_buf_offs++
        TST r14,#OC_FRAG_CODED_FLAG
        BEQ oslffri_v6_uncoded
        CMP r10,r4 @ if (fragi>_fragi0)
        ADD r0, r0, r12 @ r0 = _ref_frame_data + _frag_buf_offs[fragi]
        BLGT loop_filter_h_v6
        CMP r4, r6 @ if (fragi0>_fragi_top)
        BLGT loop_filter_v_v6
        CMP r10,r11 @ if(fragi+1<fragi_end)===(fragi<fragi_end-1)
        LDRLT r12,[r3] @ r12 = _frags[fragi+1]
        ADD r0, r0, #8
        ADD r10,r10,#1 @ r10 = fragi+1;
        ANDLT r12,r12,#OC_FRAG_CODED_FLAG
        CMPLT r12,#OC_FRAG_CODED_FLAG @ && _frags[fragi+1].coded==0
        BLLT loop_filter_h_v6
        CMP r10,r7 @ if (fragi<_fragi_bot)
        LDRLT r12,[r3, r9, LSL #2] @ r12 = _frags[fragi+1+_nhfrags-1]
        SUB r0, r0, #8
        ADD r0, r0, r1, LSL #3
        ANDLT r12,r12,#OC_FRAG_CODED_FLAG
        CMPLT r12,#OC_FRAG_CODED_FLAG
        BLLT loop_filter_v_v6
        CMP r10,r11 @ while(fragi<=fragi_end-1)
        BLE oslffri_v6_lp2
        MOV r4, r10 @ r4 = fragi0 += nhfrags
        CMP r4, r5
        BLT oslffri_v6_lp1
oslffri_v6_end:
        LDMFD r13!,{r0,r4-r11,PC}
oslffri_v6_uncoded:
        ADD r10,r10,#1
        CMP r10,r11
        BLE oslffri_v6_lp2
        MOV r4, r10 @ r4 = fragi0 += nhfrags
        CMP r4, r5
        BLT oslffri_v6_lp1
        LDMFD r13!,{r0,r4-r11,PC}
        .endif
        .if OC_ARM_ASM_NEON
        .global oc_loop_filter_init_neon
        .global oc_loop_filter_frag_rows_neon
oc_loop_filter_init_neon:
@ r0 = _bv
@ r1 = _flimit (=L from the spec)
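@ Equivalent C (a hedged sketch): store 2*L in each of the eight 16-bit lanes
@ at _bv, which must be 16-byte aligned for the :128 store below:
@   for(i=0;i<8;i++)((unsigned short *)_bv)[i]=_flimit<<1;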
  438. MOV r1, r1, LSL #1 @ r1 = 2*L
  439. VDUP.S16 Q15, r1 @ Q15= 2L in U16s
  440. VST1.64 {D30,D31}, [r0,:128]
  441. MOV PC,r14
  442. loop_filter_h_neon:
  443. @ r0 = unsigned char *_pix
  444. @ r1 = int _ystride
  445. @ r2 = int *_bv
  446. @ preserves r0-r3
  447. @ We assume Q15= 2*L in U16s
  448. @ My best guesses at cycle counts (and latency)--vvv
  449. SUB r12,r0, #2
  450. @ Doing a 2-element structure load saves doing two VTRN's below, at the
  451. @ cost of using two more slower single-lane loads vs. the faster
  452. @ all-lane loads.
  453. @ It's less code this way, though, and benches a hair faster, but it
  454. @ leaves D2 and D4 swapped.
  455. VLD2.16 {D0[],D2[]}, [r12], r1 @ D0 = ____________1100 2,1
  456. @ D2 = ____________3322
  457. VLD2.16 {D4[],D6[]}, [r12], r1 @ D4 = ____________5544 2,1
  458. @ D6 = ____________7766
  459. VLD2.16 {D0[1],D2[1]},[r12], r1 @ D0 = ________99881100 3,1
  460. @ D2 = ________BBAA3322
  461. VLD2.16 {D4[1],D6[1]},[r12], r1 @ D4 = ________DDCC5544 3,1
  462. @ D6 = ________FFEE7766
        VLD2.16 {D0[2],D2[2]},[r12], r1 @ D0 = ____HHGG99881100 3,1
@ D2 = ____JJIIBBAA3322
        VLD2.16 {D4[2],D6[2]},[r12], r1 @ D4 = ____LLKKDDCC5544 3,1
@ D6 = ____NNMMFFEE7766
        VLD2.16 {D0[3],D2[3]},[r12], r1 @ D0 = PPOOHHGG99881100 3,1
@ D2 = RRQQJJIIBBAA3322
        VLD2.16 {D4[3],D6[3]},[r12], r1 @ D4 = TTSSLLKKDDCC5544 3,1
@ D6 = VVUUNNMMFFEE7766
        VTRN.8 D0, D4 @ D0 = SSOOKKGGCC884400 D4 = TTPPLLHHDD995511 1,1
        VTRN.8 D2, D6 @ D2 = UUQQMMIIEEAA6622 D6 = VVRRNNJJFFBB7733 1,1
        VSUBL.U8 Q0, D0, D6 @ Q0 = 00 - 33 in S16s 1,3
        VSUBL.U8 Q8, D2, D4 @ Q8 = 22 - 11 in S16s 1,3
        ADD r12,r0, #8
        VADD.S16 Q0, Q0, Q8 @ 1,3
        PLD [r12]
        VADD.S16 Q0, Q0, Q8 @ 1,3
        PLD [r12,r1]
        VADD.S16 Q0, Q0, Q8 @ Q0 = [0-3]+3*[2-1] 1,3
        PLD [r12,r1, LSL #1]
        VRSHR.S16 Q0, Q0, #3 @ Q0 = f = ([0-3]+3*[2-1]+4)>>3 1,4
        ADD r12,r12,r1, LSL #2
@ We want to do
@ f = CLAMP(MIN(-2L-f,0), f, MAX(2L-f,0))
@ = ((f >= 0) ? MIN( f ,MAX(2L- f ,0)) : MAX( f , MIN(-2L- f ,0)))
@ = ((f >= 0) ? MIN(|f|,MAX(2L-|f|,0)) : MAX(-|f|, MIN(-2L+|f|,0)))
@ = ((f >= 0) ? MIN(|f|,MAX(2L-|f|,0)) :-MIN( |f|,-MIN(-2L+|f|,0)))
@ = ((f >= 0) ? MIN(|f|,MAX(2L-|f|,0)) :-MIN( |f|, MAX( 2L-|f|,0)))
@ So we've reduced the left and right hand terms to be the same, except
@ for a negation.
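@ Below, Q9 becomes lflim(|f|,L)=MIN(|f|,MAX(2L-|f|,0)) via VQSUB.U16/VMIN.U16,
@ and the sign of f is then restored from Q0=f>>15 (0 or -1 per lane) as
@ (Q9+Q0)^Q0, i.e. +Q9 or -Q9.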
@ Stall x3
        VABS.S16 Q9, Q0 @ Q9 = |f| in U16s 1,4
        PLD [r12,-r1]
        VSHR.S16 Q0, Q0, #15 @ Q0 = -1 or 0 according to sign 1,3
        PLD [r12]
        VQSUB.U16 Q10,Q15,Q9 @ Q10= MAX(2L-|f|,0) in U16s 1,4
        PLD [r12,r1]
        VMOVL.U8 Q1, D2 @ Q1 = __UU__QQ__MM__II__EE__AA__66__22 2,3
        PLD [r12,r1,LSL #1]
        VMIN.U16 Q9, Q10,Q9 @ Q9 = MIN(|f|,MAX(2L-|f|)) 1,4
        ADD r12,r12,r1, LSL #2
@ Now we need to correct for the sign of f.
@ For negative elements of Q0, we want to subtract the appropriate
@ element of Q9. For positive elements we want to add them. No NEON
@ instruction exists to do this, so we need to negate the negative
@ elements, and we can then just add them. a-b = a-(1+!b) = a-1+!b
        VADD.S16 Q9, Q9, Q0 @ 1,3
        PLD [r12,-r1]
        VEOR.S16 Q9, Q9, Q0 @ Q9 = real value of f 1,3
@ Bah. No VRSBW.U8
@ Stall (just 1 as Q9 not needed to second pipeline stage. I think.)
        VADDW.U8 Q2, Q9, D4 @ Q2 = xxTTxxPPxxLLxxHHxxDDxx99xx55xx11 1,3
        VSUB.S16 Q1, Q1, Q9 @ Q1 = xxUUxxQQxxMMxxIIxxEExxAAxx66xx22 1,3
        VQMOVUN.S16 D4, Q2 @ D4 = TTPPLLHHDD995511 1,1
        VQMOVUN.S16 D2, Q1 @ D2 = UUQQMMIIEEAA6622 1,1
        SUB r12,r0, #1
        VTRN.8 D4, D2 @ D4 = QQPPIIHHAA992211 D2 = UUTTMMLLEEDD6655 1,1
        VST1.16 {D4[0]}, [r12], r1
        VST1.16 {D2[0]}, [r12], r1
        VST1.16 {D4[1]}, [r12], r1
        VST1.16 {D2[1]}, [r12], r1
        VST1.16 {D4[2]}, [r12], r1
        VST1.16 {D2[2]}, [r12], r1
        VST1.16 {D4[3]}, [r12], r1
        VST1.16 {D2[3]}, [r12], r1
        MOV PC,r14
loop_filter_v_neon:
@ r0 = unsigned char *_pix
@ r1 = int _ystride
@ r2 = int *_bv
@ preserves r0-r3
@ We assume Q15= 2*L in U16s
@ My best guesses at cycle counts (and latency)--vvv
        SUB r12,r0, r1, LSL #1
        VLD1.64 {D0}, [r12,:64], r1 @ D0 = SSOOKKGGCC884400 2,1
        VLD1.64 {D2}, [r12,:64], r1 @ D2 = TTPPLLHHDD995511 2,1
        VLD1.64 {D4}, [r12,:64], r1 @ D4 = UUQQMMIIEEAA6622 2,1
        VLD1.64 {D6}, [r12,:64] @ D6 = VVRRNNJJFFBB7733 2,1
        VSUBL.U8 Q8, D4, D2 @ Q8 = 22 - 11 in S16s 1,3
        VSUBL.U8 Q0, D0, D6 @ Q0 = 00 - 33 in S16s 1,3
        ADD r12, #8
        VADD.S16 Q0, Q0, Q8 @ 1,3
        PLD [r12]
        VADD.S16 Q0, Q0, Q8 @ 1,3
        PLD [r12,r1]
        VADD.S16 Q0, Q0, Q8 @ Q0 = [0-3]+3*[2-1] 1,3
        SUB r12, r0, r1
        VRSHR.S16 Q0, Q0, #3 @ Q0 = f = ([0-3]+3*[2-1]+4)>>3 1,4
@ We want to do
@ f = CLAMP(MIN(-2L-f,0), f, MAX(2L-f,0))
@ = ((f >= 0) ? MIN( f ,MAX(2L- f ,0)) : MAX( f , MIN(-2L- f ,0)))
@ = ((f >= 0) ? MIN(|f|,MAX(2L-|f|,0)) : MAX(-|f|, MIN(-2L+|f|,0)))
@ = ((f >= 0) ? MIN(|f|,MAX(2L-|f|,0)) :-MIN( |f|,-MIN(-2L+|f|,0)))
@ = ((f >= 0) ? MIN(|f|,MAX(2L-|f|,0)) :-MIN( |f|, MAX( 2L-|f|,0)))
@ So we've reduced the left and right hand terms to be the same, except
@ for a negation.
@ Stall x3
        VABS.S16 Q9, Q0 @ Q9 = |f| in U16s 1,4
        VSHR.S16 Q0, Q0, #15 @ Q0 = -1 or 0 according to sign 1,3
@ Stall x2
        VQSUB.U16 Q10,Q15,Q9 @ Q10= MAX(2L-|f|,0) in U16s 1,4
        VMOVL.U8 Q2, D4 @ Q2 = __UU__QQ__MM__II__EE__AA__66__22 2,3
@ Stall x2
        VMIN.U16 Q9, Q10,Q9 @ Q9 = MIN(|f|,MAX(2L-|f|)) 1,4
@ Now we need to correct for the sign of f.
@ For negative elements of Q0, we want to subtract the appropriate
@ element of Q9. For positive elements we want to add them. No NEON
@ instruction exists to do this, so we need to negate the negative
@ elements, and we can then just add them. a-b = a-(1+!b) = a-1+!b
@ Stall x3
        VADD.S16 Q9, Q9, Q0 @ 1,3
@ Stall x2
        VEOR.S16 Q9, Q9, Q0 @ Q9 = real value of f 1,3
@ Bah. No VRSBW.U8
@ Stall (just 1 as Q9 not needed to second pipeline stage. I think.)
        VADDW.U8 Q1, Q9, D2 @ Q1 = xxTTxxPPxxLLxxHHxxDDxx99xx55xx11 1,3
        VSUB.S16 Q2, Q2, Q9 @ Q2 = xxUUxxQQxxMMxxIIxxEExxAAxx66xx22 1,3
        VQMOVUN.S16 D2, Q1 @ D2 = TTPPLLHHDD995511 1,1
        VQMOVUN.S16 D4, Q2 @ D4 = UUQQMMIIEEAA6622 1,1
        VST1.64 {D2}, [r12,:64], r1
        VST1.64 {D4}, [r12,:64], r1
        MOV PC,r14
oc_loop_filter_frag_rows_neon:
@ r0 = _ref_frame_data
@ r1 = _ystride
@ r2 = _bv
@ r3 = _frags
@ r4 = _fragi0
@ r5 = _fragi0_end
@ r6 = _fragi_top
@ r7 = _fragi_bot
@ r8 = _frag_buf_offs
@ r9 = _nhfrags
        MOV r12,r13
        STMFD r13!,{r0,r4-r11,r14}
        LDMFD r12,{r4-r9}
        CMP r4, r5 @ if(_fragi0>=_fragi0_end)
        BGE oslffri_neon_end @ bail
        SUBS r9, r9, #1 @ r9 = _nhfrags-1; if (r9<=0)
        BLE oslffri_neon_end @ bail
        VLD1.64 {D30,D31}, [r2,:128] @ Q15= 2L in U16s
        ADD r3, r3, r4, LSL #2 @ r3 = &_frags[fragi]
        ADD r8, r8, r4, LSL #2 @ r8 = &_frag_buf_offs[fragi]
        SUB r7, r7, r9 @ _fragi_bot -= _nhfrags;
oslffri_neon_lp1:
        MOV r10,r4 @ r10= fragi = _fragi0
        ADD r11,r4, r9 @ r11= fragi_end-1=fragi+_nhfrags-1
oslffri_neon_lp2:
        LDR r14,[r3], #4 @ r14= _frags[fragi]; _frags++
        LDR r0, [r13] @ r0 = _ref_frame_data
        LDR r12,[r8], #4 @ r12= _frag_buf_offs[fragi]; _frag_buf_offs++
        TST r14,#OC_FRAG_CODED_FLAG
        BEQ oslffri_neon_uncoded
        CMP r10,r4 @ if (fragi>_fragi0)
        ADD r0, r0, r12 @ r0 = _ref_frame_data + _frag_buf_offs[fragi]
        BLGT loop_filter_h_neon
        CMP r4, r6 @ if (_fragi0>_fragi_top)
        BLGT loop_filter_v_neon
        CMP r10,r11 @ if(fragi+1<fragi_end)===(fragi<fragi_end-1)
        LDRLT r12,[r3] @ r12 = _frags[fragi+1]
        ADD r0, r0, #8
        ADD r10,r10,#1 @ r10 = fragi+1;
        ANDLT r12,r12,#OC_FRAG_CODED_FLAG
        CMPLT r12,#OC_FRAG_CODED_FLAG @ && _frags[fragi+1].coded==0
        BLLT loop_filter_h_neon
        CMP r10,r7 @ if (fragi<_fragi_bot)
        LDRLT r12,[r3, r9, LSL #2] @ r12 = _frags[fragi+1+_nhfrags-1]
        SUB r0, r0, #8
        ADD r0, r0, r1, LSL #3
        ANDLT r12,r12,#OC_FRAG_CODED_FLAG
        CMPLT r12,#OC_FRAG_CODED_FLAG
        BLLT loop_filter_v_neon
        CMP r10,r11 @ while(fragi<=fragi_end-1)
        BLE oslffri_neon_lp2
        MOV r4, r10 @ r4 = _fragi0 += _nhfrags
        CMP r4, r5
        BLT oslffri_neon_lp1
oslffri_neon_end:
        LDMFD r13!,{r0,r4-r11,PC}
oslffri_neon_uncoded:
        ADD r10,r10,#1
        CMP r10,r11
        BLE oslffri_neon_lp2
        MOV r4, r10 @ r4 = _fragi0 += _nhfrags
        CMP r4, r5
        BLT oslffri_neon_lp1
        LDMFD r13!,{r0,r4-r11,PC}
        .endif
@ END