#include "../bn_lcl.h"
#if !(defined(__GNUC__) && __GNUC__>=2)
# include "../bn_asm.c"         /* kind of dirty hack for Sun Studio */
#else
/*-
 * x86_64 BIGNUM accelerator version 0.1, December 2002.
 *
 * Implemented by Andy Polyakov <[email protected]> for the OpenSSL
 * project.
 *
 * Rights for redistribution and usage in source and binary forms are
 * granted according to the OpenSSL license. Warranty of any kind is
 * disclaimed.
 *
 * Q. Version 0.1? It doesn't sound like Andy, he used to assign real
 *    versions, like 1.0...
 * A. Well, that's because this code is basically a quick-n-dirty
 *    proof-of-concept hack. As you can see, it's implemented with
 *    inline assembler, which means that you're bound to GCC and that
 *    there might be enough room for further improvement.
 *
 * Q. Why inline assembler?
 * A. x86_64 features its own ABI, which I'm not familiar with. This
 *    is why I decided to let the compiler take care of subroutine
 *    prologue/epilogue as well as register allocation. For reference,
 *    Win64 implements a different ABI for AMD64 than Linux does.
 *
 * Q. How much faster does it get?
 * A. 'apps/openssl speed rsa dsa' output with no-asm:
 *
 *                        sign     verify    sign/s  verify/s
 *      rsa  512 bits   0.0006s   0.0001s    1683.8   18456.2
 *      rsa 1024 bits   0.0028s   0.0002s     356.0    6407.0
 *      rsa 2048 bits   0.0172s   0.0005s      58.0    1957.8
 *      rsa 4096 bits   0.1155s   0.0018s       8.7     555.6
 *                        sign     verify    sign/s  verify/s
 *      dsa  512 bits   0.0005s   0.0006s    2100.8    1768.3
 *      dsa 1024 bits   0.0014s   0.0018s     692.3     559.2
 *      dsa 2048 bits   0.0049s   0.0061s     204.7     165.0
 *
 *    'apps/openssl speed rsa dsa' output with this module:
 *
 *                        sign     verify    sign/s  verify/s
 *      rsa  512 bits   0.0004s   0.0000s    2767.1   33297.9
 *      rsa 1024 bits   0.0012s   0.0001s     867.4   14674.7
 *      rsa 2048 bits   0.0061s   0.0002s     164.0    5270.0
 *      rsa 4096 bits   0.0384s   0.0006s      26.1    1650.8
 *                        sign     verify    sign/s  verify/s
 *      dsa  512 bits   0.0002s   0.0003s    4442.2    3786.3
 *      dsa 1024 bits   0.0005s   0.0007s    1835.1    1497.4
 *      dsa 2048 bits   0.0016s   0.0020s     620.4     504.6
 *
 *    For reference, the IA-32 assembler implementation performs
 *    very much like 64-bit code compiled with no-asm on the same
 *    machine.
 */
# ifdef _WIN64
#  define BN_ULONG unsigned long long
# else
#  define BN_ULONG unsigned long
# endif

# undef mul
# undef mul_add
# undef sqr
/*-
 * "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
 * "g"(0) lets the compiler decide where it wants to keep
 * the value of zero;
 */
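/*
 * mul_add(r,a,word,carry) computes (carry,r) = a*word + r + carry:
 * the low half of the product plus the old r and the incoming carry
 * is stored back into r, and the high half becomes the new carry.
 */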
# define mul_add(r,a,word,carry) do {   \
        register BN_ULONG high,low;     \
        asm ("mulq %3"                  \
                : "=a"(low),"=d"(high)  \
                : "a"(word),"m"(a)      \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+r"(carry),"+d"(high)\
                : "a"(low),"g"(0)       \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+m"(r),"+d"(high)    \
                : "r"(carry),"g"(0)     \
                : "cc");                \
        carry=high;                     \
        } while (0)
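/* mul(r,a,word,carry) computes (carry,r) = a*word + carry */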
# define mul(r,a,word,carry) do {       \
        register BN_ULONG high,low;     \
        asm ("mulq %3"                  \
                : "=a"(low),"=d"(high)  \
                : "a"(word),"g"(a)      \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+r"(carry),"+d"(high)\
                : "a"(low),"g"(0)       \
                : "cc");                \
        (r)=carry, carry=high;          \
        } while (0)
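/* sqr(r0,r1,a) computes the double-word square (r1,r0) = a*a */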
# define sqr(r0,r1,a)                   \
        asm ("mulq %2"                  \
                : "=a"(r0),"=d"(r1)     \
                : "a"(a)                \
                : "cc");
BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
                          BN_ULONG w)
{
    BN_ULONG c1 = 0;

    if (num <= 0)
        return (c1);

    while (num & ~3) {
        mul_add(rp[0], ap[0], w, c1);
        mul_add(rp[1], ap[1], w, c1);
        mul_add(rp[2], ap[2], w, c1);
        mul_add(rp[3], ap[3], w, c1);
        ap += 4;
        rp += 4;
        num -= 4;
    }
    if (num) {
        mul_add(rp[0], ap[0], w, c1);
        if (--num == 0)
            return c1;
        mul_add(rp[1], ap[1], w, c1);
        if (--num == 0)
            return c1;
        mul_add(rp[2], ap[2], w, c1);
        return c1;
    }

    return (c1);
}
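/* rp[i] = ap[i]*w for i in [0,num), returning the final carry word */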
BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
{
    BN_ULONG c1 = 0;

    if (num <= 0)
        return (c1);

    while (num & ~3) {
        mul(rp[0], ap[0], w, c1);
        mul(rp[1], ap[1], w, c1);
        mul(rp[2], ap[2], w, c1);
        mul(rp[3], ap[3], w, c1);
        ap += 4;
        rp += 4;
        num -= 4;
    }
    if (num) {
        mul(rp[0], ap[0], w, c1);
        if (--num == 0)
            return c1;
        mul(rp[1], ap[1], w, c1);
        if (--num == 0)
            return c1;
        mul(rp[2], ap[2], w, c1);
    }
    return (c1);
}
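/* (r[2*i+1],r[2*i]) = a[i]^2 for i in [0,n); each square fills two words */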
void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
{
    if (n <= 0)
        return;

    while (n & ~3) {
        sqr(r[0], r[1], a[0]);
        sqr(r[2], r[3], a[1]);
        sqr(r[4], r[5], a[2]);
        sqr(r[6], r[7], a[3]);
        a += 4;
        r += 8;
        n -= 4;
    }
    if (n) {
        sqr(r[0], r[1], a[0]);
        if (--n == 0)
            return;
        sqr(r[2], r[3], a[1]);
        if (--n == 0)
            return;
        sqr(r[4], r[5], a[2]);
    }
}
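/*
 * Divide the double word (h:l) by d and return the 64-bit quotient.
 * Note that divq faults if the quotient does not fit into 64 bits,
 * so callers must guarantee h < d.
 */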
BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
{
    BN_ULONG ret, waste;

    asm ("divq %4"
         : "=a"(ret), "=d"(waste)
         : "a"(l), "d"(h), "g"(d)
         : "cc");

    return ret;
}
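/*
 * rp[] = ap[] + bp[] over n words, returning the final carry bit.
 * In the inline assembly %2 counts words upwards (subq %2,%2 both
 * zeroes it and clears CF), leaq and loop advance the counters
 * without disturbing the carry chain, and the closing sbbq turns CF
 * into an all-ones/all-zeroes word that is masked to one bit.
 */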
BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                      int n)
{
    BN_ULONG ret = 0, i = 0;

    if (n <= 0)
        return 0;

    asm volatile ("       subq    %2,%2           \n"
                  ".p2align 4                     \n"
                  "1:     movq    (%4,%2,8),%0    \n"
                  "       adcq    (%5,%2,8),%0    \n"
                  "       movq    %0,(%3,%2,8)    \n"
                  "       leaq    1(%2),%2        \n"
                  "       loop    1b              \n"
                  "       sbbq    %0,%0           \n"
                  : "=&a" (ret), "+c"(n), "=&r"(i)
                  : "r"(rp), "r"(ap), "r"(bp)
                  : "cc", "memory");

    return ret & 1;
}
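/* rp[] = ap[] - bp[] over n words, returning the final borrow bit */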
# ifndef SIMICS
BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                      int n)
{
    BN_ULONG ret = 0, i = 0;

    if (n <= 0)
        return 0;

    asm volatile ("       subq    %2,%2           \n"
                  ".p2align 4                     \n"
                  "1:     movq    (%4,%2,8),%0    \n"
                  "       sbbq    (%5,%2,8),%0    \n"
                  "       movq    %0,(%3,%2,8)    \n"
                  "       leaq    1(%2),%2        \n"
                  "       loop    1b              \n"
                  "       sbbq    %0,%0           \n"
                  : "=&a" (ret), "+c"(n), "=&r"(i)
                  : "r"(rp), "r"(ap), "r"(bp)
                  : "cc", "memory");

    return ret & 1;
}
# else
/* Simics 1.4<7 has buggy sbbq:-( */
#  define BN_MASK2 0xffffffffffffffffL
BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
{
    BN_ULONG t1, t2;
    int c = 0;

    if (n <= 0)
        return ((BN_ULONG)0);

    for (;;) {
        t1 = a[0];
        t2 = b[0];
        r[0] = (t1 - t2 - c) & BN_MASK2;
        if (t1 != t2)
            c = (t1 < t2);
        if (--n <= 0)
            break;

        t1 = a[1];
        t2 = b[1];
        r[1] = (t1 - t2 - c) & BN_MASK2;
        if (t1 != t2)
            c = (t1 < t2);
        if (--n <= 0)
            break;

        t1 = a[2];
        t2 = b[2];
        r[2] = (t1 - t2 - c) & BN_MASK2;
        if (t1 != t2)
            c = (t1 < t2);
        if (--n <= 0)
            break;

        t1 = a[3];
        t2 = b[3];
        r[3] = (t1 - t2 - c) & BN_MASK2;
        if (t1 != t2)
            c = (t1 < t2);
        if (--n <= 0)
            break;

        a += 4;
        b += 4;
        r += 4;
    }
    return (c);
}
# endif
/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
/*
 * sqr_add_c2(a,i,j,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number
 * c=(c2,c1,c0)
 */
/*
 * Keep in mind that carrying into the high part of a multiplication
 * result cannot overflow, because the high part can never be all-ones:
 * the largest 64x64-bit product is (2^64-1)^2 = 2^128 - 2^65 + 1, whose
 * high word is 2^64-2, so adding one carry bit cannot wrap it around.
 */
# if 0
/* original macros are kept for reference purposes */
#  define mul_add_c(a,b,c0,c1,c2)       { \
        BN_ULONG ta=(a),tb=(b);         \
        t1 = ta * tb;                   \
        t2 = BN_UMULT_HIGH(ta,tb);      \
        c0 += t1; t2 += (c0<t1)?1:0;    \
        c1 += t2; c2 += (c1<t2)?1:0;    \
        }
#  define mul_add_c2(a,b,c0,c1,c2)      { \
        BN_ULONG ta=(a),tb=(b),t0;      \
        t1 = BN_UMULT_HIGH(ta,tb);      \
        t0 = ta * tb;                   \
        c0 += t0; t2 = t1+((c0<t0)?1:0);\
        c1 += t2; c2 += (c1<t2)?1:0;    \
        c0 += t0; t1 += (c0<t0)?1:0;    \
        c1 += t1; c2 += (c1<t1)?1:0;    \
        }
# else
#  define mul_add_c(a,b,c0,c1,c2) do {  \
        asm ("mulq %3"                  \
                : "=a"(t1),"=d"(t2)     \
                : "a"(a),"m"(b)         \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+r"(c0),"+d"(t2)     \
                : "a"(t1),"g"(0)        \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+r"(c1),"+r"(c2)     \
                : "d"(t2),"g"(0)        \
                : "cc");                \
        } while (0)

#  define sqr_add_c(a,i,c0,c1,c2) do {  \
        asm ("mulq %2"                  \
                : "=a"(t1),"=d"(t2)     \
                : "a"(a[i])             \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+r"(c0),"+d"(t2)     \
                : "a"(t1),"g"(0)        \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+r"(c1),"+r"(c2)     \
                : "d"(t2),"g"(0)        \
                : "cc");                \
        } while (0)

#  define mul_add_c2(a,b,c0,c1,c2) do { \
        asm ("mulq %3"                  \
                : "=a"(t1),"=d"(t2)     \
                : "a"(a),"m"(b)         \
                : "cc");                \
        asm ("addq %3,%0; adcq %4,%1; adcq %5,%2"       \
                : "+r"(c0),"+r"(c1),"+r"(c2)            \
                : "r"(t1),"r"(t2),"g"(0)                \
                : "cc");                                \
        asm ("addq %3,%0; adcq %4,%1; adcq %5,%2"       \
                : "+r"(c0),"+r"(c1),"+r"(c2)            \
                : "r"(t1),"r"(t2),"g"(0)                \
                : "cc");                                \
        } while (0)
# endif
# define sqr_add_c2(a,i,j,c0,c1,c2)     \
        mul_add_c2((a)[i],(a)[j],c0,c1,c2)
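/*
 * Comba 8x8 multiplication: r[0..15] = a[0..7]*b[0..7].  Each result
 * word is one column of partial products, accumulated in the rotating
 * three-word counter (c1,c2,c3).
 */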
void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
{
    BN_ULONG t1, t2;
    BN_ULONG c1, c2, c3;

    c1 = 0;
    c2 = 0;
    c3 = 0;
    mul_add_c(a[0], b[0], c1, c2, c3);
    r[0] = c1;
    c1 = 0;
    mul_add_c(a[0], b[1], c2, c3, c1);
    mul_add_c(a[1], b[0], c2, c3, c1);
    r[1] = c2;
    c2 = 0;
    mul_add_c(a[2], b[0], c3, c1, c2);
    mul_add_c(a[1], b[1], c3, c1, c2);
    mul_add_c(a[0], b[2], c3, c1, c2);
    r[2] = c3;
    c3 = 0;
    mul_add_c(a[0], b[3], c1, c2, c3);
    mul_add_c(a[1], b[2], c1, c2, c3);
    mul_add_c(a[2], b[1], c1, c2, c3);
    mul_add_c(a[3], b[0], c1, c2, c3);
    r[3] = c1;
    c1 = 0;
    mul_add_c(a[4], b[0], c2, c3, c1);
    mul_add_c(a[3], b[1], c2, c3, c1);
    mul_add_c(a[2], b[2], c2, c3, c1);
    mul_add_c(a[1], b[3], c2, c3, c1);
    mul_add_c(a[0], b[4], c2, c3, c1);
    r[4] = c2;
    c2 = 0;
    mul_add_c(a[0], b[5], c3, c1, c2);
    mul_add_c(a[1], b[4], c3, c1, c2);
    mul_add_c(a[2], b[3], c3, c1, c2);
    mul_add_c(a[3], b[2], c3, c1, c2);
    mul_add_c(a[4], b[1], c3, c1, c2);
    mul_add_c(a[5], b[0], c3, c1, c2);
    r[5] = c3;
    c3 = 0;
    mul_add_c(a[6], b[0], c1, c2, c3);
    mul_add_c(a[5], b[1], c1, c2, c3);
    mul_add_c(a[4], b[2], c1, c2, c3);
    mul_add_c(a[3], b[3], c1, c2, c3);
    mul_add_c(a[2], b[4], c1, c2, c3);
    mul_add_c(a[1], b[5], c1, c2, c3);
    mul_add_c(a[0], b[6], c1, c2, c3);
    r[6] = c1;
    c1 = 0;
    mul_add_c(a[0], b[7], c2, c3, c1);
    mul_add_c(a[1], b[6], c2, c3, c1);
    mul_add_c(a[2], b[5], c2, c3, c1);
    mul_add_c(a[3], b[4], c2, c3, c1);
    mul_add_c(a[4], b[3], c2, c3, c1);
    mul_add_c(a[5], b[2], c2, c3, c1);
    mul_add_c(a[6], b[1], c2, c3, c1);
    mul_add_c(a[7], b[0], c2, c3, c1);
    r[7] = c2;
    c2 = 0;
    mul_add_c(a[7], b[1], c3, c1, c2);
    mul_add_c(a[6], b[2], c3, c1, c2);
    mul_add_c(a[5], b[3], c3, c1, c2);
    mul_add_c(a[4], b[4], c3, c1, c2);
    mul_add_c(a[3], b[5], c3, c1, c2);
    mul_add_c(a[2], b[6], c3, c1, c2);
    mul_add_c(a[1], b[7], c3, c1, c2);
    r[8] = c3;
    c3 = 0;
    mul_add_c(a[2], b[7], c1, c2, c3);
    mul_add_c(a[3], b[6], c1, c2, c3);
    mul_add_c(a[4], b[5], c1, c2, c3);
    mul_add_c(a[5], b[4], c1, c2, c3);
    mul_add_c(a[6], b[3], c1, c2, c3);
    mul_add_c(a[7], b[2], c1, c2, c3);
    r[9] = c1;
    c1 = 0;
    mul_add_c(a[7], b[3], c2, c3, c1);
    mul_add_c(a[6], b[4], c2, c3, c1);
    mul_add_c(a[5], b[5], c2, c3, c1);
    mul_add_c(a[4], b[6], c2, c3, c1);
    mul_add_c(a[3], b[7], c2, c3, c1);
    r[10] = c2;
    c2 = 0;
    mul_add_c(a[4], b[7], c3, c1, c2);
    mul_add_c(a[5], b[6], c3, c1, c2);
    mul_add_c(a[6], b[5], c3, c1, c2);
    mul_add_c(a[7], b[4], c3, c1, c2);
    r[11] = c3;
    c3 = 0;
    mul_add_c(a[7], b[5], c1, c2, c3);
    mul_add_c(a[6], b[6], c1, c2, c3);
    mul_add_c(a[5], b[7], c1, c2, c3);
    r[12] = c1;
    c1 = 0;
    mul_add_c(a[6], b[7], c2, c3, c1);
    mul_add_c(a[7], b[6], c2, c3, c1);
    r[13] = c2;
    c2 = 0;
    mul_add_c(a[7], b[7], c3, c1, c2);
    r[14] = c3;
    r[15] = c1;
}
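/* Comba 4x4 multiplication: r[0..7] = a[0..3]*b[0..3] */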
void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
{
    BN_ULONG t1, t2;
    BN_ULONG c1, c2, c3;

    c1 = 0;
    c2 = 0;
    c3 = 0;
    mul_add_c(a[0], b[0], c1, c2, c3);
    r[0] = c1;
    c1 = 0;
    mul_add_c(a[0], b[1], c2, c3, c1);
    mul_add_c(a[1], b[0], c2, c3, c1);
    r[1] = c2;
    c2 = 0;
    mul_add_c(a[2], b[0], c3, c1, c2);
    mul_add_c(a[1], b[1], c3, c1, c2);
    mul_add_c(a[0], b[2], c3, c1, c2);
    r[2] = c3;
    c3 = 0;
    mul_add_c(a[0], b[3], c1, c2, c3);
    mul_add_c(a[1], b[2], c1, c2, c3);
    mul_add_c(a[2], b[1], c1, c2, c3);
    mul_add_c(a[3], b[0], c1, c2, c3);
    r[3] = c1;
    c1 = 0;
    mul_add_c(a[3], b[1], c2, c3, c1);
    mul_add_c(a[2], b[2], c2, c3, c1);
    mul_add_c(a[1], b[3], c2, c3, c1);
    r[4] = c2;
    c2 = 0;
    mul_add_c(a[2], b[3], c3, c1, c2);
    mul_add_c(a[3], b[2], c3, c1, c2);
    r[5] = c3;
    c3 = 0;
    mul_add_c(a[3], b[3], c1, c2, c3);
    r[6] = c1;
    r[7] = c2;
}
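/*
 * Comba 8-word squaring: r[0..15] = a[0..7]^2.  Off-diagonal products
 * a[i]*a[j] with i != j occur twice in the square, which is what
 * sqr_add_c2 accounts for; the diagonal terms use sqr_add_c.
 */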
void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
{
    BN_ULONG t1, t2;
    BN_ULONG c1, c2, c3;

    c1 = 0;
    c2 = 0;
    c3 = 0;
    sqr_add_c(a, 0, c1, c2, c3);
    r[0] = c1;
    c1 = 0;
    sqr_add_c2(a, 1, 0, c2, c3, c1);
    r[1] = c2;
    c2 = 0;
    sqr_add_c(a, 1, c3, c1, c2);
    sqr_add_c2(a, 2, 0, c3, c1, c2);
    r[2] = c3;
    c3 = 0;
    sqr_add_c2(a, 3, 0, c1, c2, c3);
    sqr_add_c2(a, 2, 1, c1, c2, c3);
    r[3] = c1;
    c1 = 0;
    sqr_add_c(a, 2, c2, c3, c1);
    sqr_add_c2(a, 3, 1, c2, c3, c1);
    sqr_add_c2(a, 4, 0, c2, c3, c1);
    r[4] = c2;
    c2 = 0;
    sqr_add_c2(a, 5, 0, c3, c1, c2);
    sqr_add_c2(a, 4, 1, c3, c1, c2);
    sqr_add_c2(a, 3, 2, c3, c1, c2);
    r[5] = c3;
    c3 = 0;
    sqr_add_c(a, 3, c1, c2, c3);
    sqr_add_c2(a, 4, 2, c1, c2, c3);
    sqr_add_c2(a, 5, 1, c1, c2, c3);
    sqr_add_c2(a, 6, 0, c1, c2, c3);
    r[6] = c1;
    c1 = 0;
    sqr_add_c2(a, 7, 0, c2, c3, c1);
    sqr_add_c2(a, 6, 1, c2, c3, c1);
    sqr_add_c2(a, 5, 2, c2, c3, c1);
    sqr_add_c2(a, 4, 3, c2, c3, c1);
    r[7] = c2;
    c2 = 0;
    sqr_add_c(a, 4, c3, c1, c2);
    sqr_add_c2(a, 5, 3, c3, c1, c2);
    sqr_add_c2(a, 6, 2, c3, c1, c2);
    sqr_add_c2(a, 7, 1, c3, c1, c2);
    r[8] = c3;
    c3 = 0;
    sqr_add_c2(a, 7, 2, c1, c2, c3);
    sqr_add_c2(a, 6, 3, c1, c2, c3);
    sqr_add_c2(a, 5, 4, c1, c2, c3);
    r[9] = c1;
    c1 = 0;
    sqr_add_c(a, 5, c2, c3, c1);
    sqr_add_c2(a, 6, 4, c2, c3, c1);
    sqr_add_c2(a, 7, 3, c2, c3, c1);
    r[10] = c2;
    c2 = 0;
    sqr_add_c2(a, 7, 4, c3, c1, c2);
    sqr_add_c2(a, 6, 5, c3, c1, c2);
    r[11] = c3;
    c3 = 0;
    sqr_add_c(a, 6, c1, c2, c3);
    sqr_add_c2(a, 7, 5, c1, c2, c3);
    r[12] = c1;
    c1 = 0;
    sqr_add_c2(a, 7, 6, c2, c3, c1);
    r[13] = c2;
    c2 = 0;
    sqr_add_c(a, 7, c3, c1, c2);
    r[14] = c3;
    r[15] = c1;
}
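/* Comba 4-word squaring: r[0..7] = a[0..3]^2 */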
void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
{
    BN_ULONG t1, t2;
    BN_ULONG c1, c2, c3;

    c1 = 0;
    c2 = 0;
    c3 = 0;
    sqr_add_c(a, 0, c1, c2, c3);
    r[0] = c1;
    c1 = 0;
    sqr_add_c2(a, 1, 0, c2, c3, c1);
    r[1] = c2;
    c2 = 0;
    sqr_add_c(a, 1, c3, c1, c2);
    sqr_add_c2(a, 2, 0, c3, c1, c2);
    r[2] = c3;
    c3 = 0;
    sqr_add_c2(a, 3, 0, c1, c2, c3);
    sqr_add_c2(a, 2, 1, c1, c2, c3);
    r[3] = c1;
    c1 = 0;
    sqr_add_c(a, 2, c2, c3, c1);
    sqr_add_c2(a, 3, 1, c2, c3, c1);
    r[4] = c2;
    c2 = 0;
    sqr_add_c2(a, 3, 2, c3, c1, c2);
    r[5] = c3;
    c3 = 0;
    sqr_add_c(a, 3, c1, c2, c3);
    r[6] = c1;
    r[7] = c2;
}
#endif