// ghashv8-armx-linux64.S

// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#if !defined(__has_feature)
#define __has_feature(x) 0
#endif
#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM)
#define OPENSSL_NO_ASM
#endif
#if !defined(OPENSSL_NO_ASM)
#if defined(__aarch64__)
#include <GFp/arm_arch.h>
.text
.arch armv8-a+crypto
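// This module computes GHASH, the GF(2^128) universal hash used by
// AES-GCM, with the ARMv8 Crypto Extensions' carry-less multiplier
// (pmull/pmull2). A hedged sketch of how the callers presumably see
// the three entry points; the exact types are assumptions and live in
// the callers, not in this file:
//
//   void GFp_gcm_init_clmul(u128 Htable[], const uint64_t H[2]);
//   void GFp_gcm_gmult_clmul(uint64_t Xi[2], const u128 Htable[]);
//   void GFp_gcm_ghash_clmul(uint64_t Xi[2], const u128 Htable[],
//                            const uint8_t *inp, size_t len);
//
// Below, registers x0..x3 carry those arguments in order.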
.globl GFp_gcm_init_clmul
.hidden GFp_gcm_init_clmul
.type GFp_gcm_init_clmul,%function
.align 4
GFp_gcm_init_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x1] //load input H
movi v19.16b,#0xe1
shl v19.2d,v19.2d,#57 //0xc2.0
ext v3.16b,v17.16b,v17.16b,#8
ushr v18.2d,v19.2d,#63
dup v17.4s,v17.s[1]
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
ushr v18.2d,v3.2d,#63
sshr v17.4s,v17.4s,#31 //broadcast carry bit
and v18.16b,v18.16b,v16.16b
shl v3.2d,v3.2d,#1
ext v18.16b,v18.16b,v18.16b,#8
and v16.16b,v16.16b,v17.16b
orr v3.16b,v3.16b,v18.16b //H<<<=1
eor v20.16b,v3.16b,v16.16b //twisted H
st1 {v20.2d},[x0],#16 //store Htable[0]
//calculate H^2
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
pmull v0.1q,v20.1d,v20.1d
eor v16.16b,v16.16b,v20.16b
pmull2 v2.1q,v20.2d,v20.2d
pmull v1.1q,v16.1d,v16.1d
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v22.16b,v0.16b,v18.16b
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d,v22.2d},[x0] //store Htable[1..2]
ret
.size GFp_gcm_init_clmul,.-GFp_gcm_init_clmul
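// What the init routine above computes, as a rough C sketch (the
// function name and the hi/lo uint64_t layout are illustrative
// assumptions, not this file's ABI). H is stored "twisted", i.e.
// multiplied by x modulo the GHASH polynomial, so that later
// reductions can use the 0xc2...01 constant directly:
//
//   static const uint64_t POLY_HI = 0xc200000000000000ULL;
//   // Ht = (H << 1) ^ (msb(H) ? 0xc2...01 : 0)
//   void twist_sketch(uint64_t Ht[2], const uint64_t H[2]) {
//     uint64_t hi = H[0], lo = H[1];   // assuming hi:lo lane order
//     uint64_t carry = hi >> 63;       // bit shifted out on the left
//     Ht[0] = (hi << 1) | (lo >> 63);
//     Ht[1] = lo << 1;
//     if (carry) { Ht[0] ^= POLY_HI; Ht[1] ^= 1; }
//   }
//
// Per the stores above, Htable[0] holds twisted H, Htable[1] the
// packed Karatsuba halves (H.lo^H.hi alongside H^2.lo^H^2.hi), and
// Htable[2] twisted H^2, so the bulk loop can use H and H^2 without
// recomputing the middle-term operands.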
.globl GFp_gcm_gmult_clmul
.hidden GFp_gcm_gmult_clmul
.type GFp_gcm_gmult_clmul,%function
.align 4
GFp_gcm_gmult_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x0] //load Xi
movi v19.16b,#0xe1
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
shl v19.2d,v19.2d,#57
#ifndef __ARMEB__
rev64 v17.16b,v17.16b
#endif
ext v3.16b,v17.16b,v17.16b,#8
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
#ifndef __ARMEB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.size GFp_gcm_gmult_clmul,.-GFp_gcm_gmult_clmul
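// The routine above is a single GF(2^128) multiply-and-reduce:
// Xi = Xi · H. The 256-bit product comes from three 64x64 carry-less
// multiplies arranged Karatsuba-style (lo·lo, hi·hi, and a middle term
// with the outer products XORed back in), and is then folded twice
// against the 0xc2... << 57 constant, the bit-reflected image of the
// GHASH polynomial x^128 + x^7 + x^2 + x + 1. Roughly, each phase of
// the reduction does (sketch only; clmul64 and swap64 are hypothetical
// helpers for pmull and the ins/ins shuffle):
//
//   t   = clmul64(acc_lo64, 0xc200000000000000ULL); // pmull vX,v0,v19
//   acc = swap64(acc) ^ t;                          // fold back in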
.globl GFp_gcm_ghash_clmul
.hidden GFp_gcm_ghash_clmul
.type GFp_gcm_ghash_clmul,%function
.align 4
GFp_gcm_ghash_clmul:
AARCH64_VALID_CALL_TARGET
ld1 {v0.2d},[x0] //load [rotated] Xi
//"[rotated]" means that
//loaded value would have
//to be rotated in order to
//make it appear as in
//algorithm specification
subs x3,x3,#32 //see if x3 is 32 or larger
mov x12,#16 //x12 is used as post-
//increment for input pointer;
//as loop is modulo-scheduled
//x12 is zeroed just in time
//to preclude overstepping
//inp[len], which means that
//last block[s] are actually
//loaded twice, but last
//copy is not processed
ld1 {v20.2d,v21.2d},[x1],#32 //load twisted H, ..., H^2
movi v19.16b,#0xe1
ld1 {v22.2d},[x1]
csel x12,xzr,x12,eq //is it time to zero x12?
ext v0.16b,v0.16b,v0.16b,#8 //rotate Xi
ld1 {v16.2d},[x2],#16 //load [rotated] I[0]
shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant
#ifndef __ARMEB__
rev64 v16.16b,v16.16b
rev64 v0.16b,v0.16b
#endif
ext v3.16b,v16.16b,v16.16b,#8 //rotate I[0]
b.lo .Lodd_tail_v8 //x3 was less than 32
ld1 {v17.2d},[x2],x12 //load [rotated] I[1]
#ifndef __ARMEB__
rev64 v17.16b,v17.16b
#endif
ext v7.16b,v17.16b,v17.16b,#8
eor v3.16b,v3.16b,v0.16b //I[i]^=Xi
pmull v4.1q,v20.1d,v7.1d //H·Ii+1
eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing
pmull2 v6.1q,v20.2d,v7.2d
b .Loop_mod2x_v8
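// Each pass through .Loop_mod2x_v8 consumes two 16-byte blocks.
// Conceptually it computes Xi = ((Xi ^ I[i]) · H^2) ^ (I[i+1] · H),
// which equals the two sequential updates Xi = (Xi ^ I) · H while
// paying for only one reduction. The I[i+1]·H partial products
// (v4, v5, v6) are computed a half-iteration ahead, which is why the
// loop body both finishes the current pair and starts the next.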
.align 4
.Loop_mod2x_v8:
ext v18.16b,v3.16b,v3.16b,#8
subs x3,x3,#32 //is there more data?
pmull v0.1q,v22.1d,v3.1d //H^2.lo·Xi.lo
csel x12,xzr,x12,lo //is it time to zero x12?
pmull v5.1q,v21.1d,v17.1d
eor v18.16b,v18.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v22.2d,v3.2d //H^2.hi·Xi.hi
eor v0.16b,v0.16b,v4.16b //accumulate
pmull2 v1.1q,v21.2d,v18.2d //(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
ld1 {v16.2d},[x2],x12 //load [rotated] I[i+2]
eor v2.16b,v2.16b,v6.16b
csel x12,xzr,x12,eq //is it time to zero x12?
eor v1.16b,v1.16b,v5.16b
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v17.2d},[x2],x12 //load [rotated] I[i+3]
#ifndef __ARMEB__
rev64 v16.16b,v16.16b
#endif
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
#ifndef __ARMEB__
rev64 v17.16b,v17.16b
#endif
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v7.16b,v17.16b,v17.16b,#8
ext v3.16b,v16.16b,v16.16b,#8
eor v0.16b,v1.16b,v18.16b
pmull v4.1q,v20.1d,v7.1d //H·Ii+1
eor v3.16b,v3.16b,v2.16b //accumulate v3.16b early
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v3.16b,v3.16b,v18.16b
eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing
eor v3.16b,v3.16b,v0.16b
pmull2 v6.1q,v20.2d,v7.2d
b.hs .Loop_mod2x_v8 //there were at least 32 more bytes
eor v2.16b,v2.16b,v18.16b
ext v3.16b,v16.16b,v16.16b,#8 //re-construct v3.16b
adds x3,x3,#32 //re-construct x3
eor v0.16b,v0.16b,v2.16b //re-construct v0.16b
b.eq .Ldone_v8 //is x3 zero?
.Lodd_tail_v8:
ext v18.16b,v0.16b,v0.16b,#8
eor v3.16b,v3.16b,v0.16b //inp^=Xi
eor v17.16b,v16.16b,v18.16b //v17.16b is rotated inp^Xi
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
.Ldone_v8:
#ifndef __ARMEB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.size GFp_gcm_ghash_clmul,.-GFp_gcm_ghash_clmul
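// Putting the routines together, a hedged C-level model of the bulk
// hash (function and helper names here are hypothetical, written only
// to show the data flow; len is assumed a multiple of 16 bytes):
//
//   void ghash_model(uint64_t Xi[2], const u128 Htable[3],
//                    const uint8_t *inp, size_t len) {
//     while (len >= 16) {
//       xor_block(Xi, inp);         // Xi ^= I[i]
//       gf128_mul(Xi, Htable[0]);   // Xi = Xi · H in the twisted domain
//       inp += 16;
//       len -= 16;
//     }
//   }
//
// The assembly reaches the same result two blocks per iteration using
// H^2 (see .Loop_mod2x_v8), with .Lodd_tail_v8 handling a final odd
// block.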
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 //"GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM
.section .note.GNU-stack,"",%progbits