ECC384.cpp

//////////////////////////////////////////////////////////////////////////////
// This is EASY-ECC by Kenneth MacKay
// https://github.com/esxgx/easy-ecc
// This code is under the BSD 2-clause license, not ZeroTier's license
//////////////////////////////////////////////////////////////////////////////

#include <cstdio>
#include <cstdlib>
#include <cstdint>

#include "Constants.hpp"
#include "ECC384.hpp"
#include "Utils.hpp"

namespace ZeroTier {
namespace {

#define secp384r1 48
#define ECC_CURVE secp384r1
#define ECC_BYTES ECC_CURVE
#define NUM_ECC_DIGITS (ECC_BYTES/8)
#define MAX_TRIES 1024

typedef unsigned int uint;

#if defined(__SIZEOF_INT128__) || ((__clang_major__ * 100 + __clang_minor__) >= 302)
#define SUPPORTS_INT128 1
#else
#define SUPPORTS_INT128 0
#endif

#if SUPPORTS_INT128
typedef unsigned __int128 uint128_t;
#else
typedef struct
{
    uint64_t m_low;
    uint64_t m_high;
} uint128_t;
#endif

typedef struct EccPoint
{
    uint64_t x[NUM_ECC_DIGITS];
    uint64_t y[NUM_ECC_DIGITS];
} EccPoint;

#define CONCAT1(a, b) a##b
#define CONCAT(a, b) CONCAT1(a, b)

#define Curve_P_48 {0x00000000FFFFFFFF, 0xFFFFFFFF00000000, 0xFFFFFFFFFFFFFFFE, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF}
#define Curve_B_48 {0x2A85C8EDD3EC2AEF, 0xC656398D8A2ED19D, 0x0314088F5013875A, 0x181D9C6EFE814112, 0x988E056BE3F82D19, 0xB3312FA7E23EE7E4}
#define Curve_G_48 {{0x3A545E3872760AB7, 0x5502F25DBF55296C, 0x59F741E082542A38, 0x6E1D3B628BA79B98, 0x8EB1C71EF320AD74, 0xAA87CA22BE8B0537}, {0x7A431D7C90EA0E5F, 0x0A60B1CE1D7E819D, 0xE9DA3113B5F0B8C0, 0xF8F41DBD289A147C, 0x5D9E98BF9292DC29, 0x3617DE4A96262C6F}}
#define Curve_N_48 {0xECEC196ACCC52973, 0x581A0DB248B0A77A, 0xC7634D81F4372DDF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF}

static uint64_t curve_p[NUM_ECC_DIGITS] = CONCAT(Curve_P_, ECC_CURVE);
static uint64_t curve_b[NUM_ECC_DIGITS] = CONCAT(Curve_B_, ECC_CURVE);
static EccPoint curve_G = CONCAT(Curve_G_, ECC_CURVE);
static uint64_t curve_n[NUM_ECC_DIGITS] = CONCAT(Curve_N_, ECC_CURVE);
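
/* The four constants above are the NIST P-384 (secp384r1) domain parameters:
 * the field prime p, the curve coefficient b (the curve is y^2 = x^3 - 3x + b),
 * the base point G, and the base point order n. Each value is stored as six
 * 64-bit limbs in little-endian limb order (least significant limb first);
 * e.g. the lowest limb of p = 2^384 - 2^128 - 2^96 + 2^32 - 1 is
 * 2^32 - 1 = 0x00000000FFFFFFFF. */
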
// Use ZeroTier's secure PRNG
static inline int getRandomNumber(uint64_t *p_vli)
{
    Utils::getSecureRandom(p_vli,ECC_BYTES);
    return 1;
}

static inline void vli_clear(uint64_t *p_vli)
{
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        p_vli[i] = 0;
    }
}

/* Returns 1 if p_vli == 0, 0 otherwise. */
static inline int vli_isZero(uint64_t *p_vli)
{
    uint i;
    for(i = 0; i < NUM_ECC_DIGITS; ++i)
    {
        if(p_vli[i])
        {
            return 0;
        }
    }
    return 1;
}

/* Returns nonzero if bit p_bit of p_vli is set. */
static inline uint64_t vli_testBit(uint64_t *p_vli, uint p_bit)
{
    return (p_vli[p_bit/64] & ((uint64_t)1 << (p_bit % 64)));
}

/* Counts the number of 64-bit "digits" in p_vli. */
static inline uint vli_numDigits(uint64_t *p_vli)
{
    int i;
    /* Search from the end until we find a non-zero digit.
       We do it in reverse because we expect that most digits will be nonzero. */
    for(i = NUM_ECC_DIGITS - 1; i >= 0 && p_vli[i] == 0; --i)
    {
    }
    return (i + 1);
}

/* Counts the number of bits required for p_vli. */
static inline uint vli_numBits(uint64_t *p_vli)
{
    uint i;
    uint64_t l_digit;
    uint l_numDigits = vli_numDigits(p_vli);
    if(l_numDigits == 0)
    {
        return 0;
    }
    l_digit = p_vli[l_numDigits - 1];
    for(i=0; l_digit; ++i)
    {
        l_digit >>= 1;
    }
    return ((l_numDigits - 1) * 64 + i);
}

/* Sets p_dest = p_src. */
static inline void vli_set(uint64_t *p_dest, uint64_t *p_src)
{
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        p_dest[i] = p_src[i];
    }
}

/* Returns sign of p_left - p_right. */
static inline int vli_cmp(uint64_t *p_left, uint64_t *p_right)
{
    int i;
    for(i = NUM_ECC_DIGITS-1; i >= 0; --i)
    {
        if(p_left[i] > p_right[i])
        {
            return 1;
        }
        else if(p_left[i] < p_right[i])
        {
            return -1;
        }
    }
    return 0;
}

/* Computes p_result = p_in << p_shift, returning carry. Can modify in place (if p_result == p_in). 0 < p_shift < 64. */
static inline uint64_t vli_lshift(uint64_t *p_result, uint64_t *p_in, uint p_shift)
{
    uint64_t l_carry = 0;
    uint i;
    for(i = 0; i < NUM_ECC_DIGITS; ++i)
    {
        uint64_t l_temp = p_in[i];
        p_result[i] = (l_temp << p_shift) | l_carry;
        l_carry = l_temp >> (64 - p_shift);
    }
    return l_carry;
}

/* Computes p_vli = p_vli >> 1. */
static inline void vli_rshift1(uint64_t *p_vli)
{
    uint64_t *l_end = p_vli;
    uint64_t l_carry = 0;
    p_vli += NUM_ECC_DIGITS;
    while(p_vli-- > l_end)
    {
        uint64_t l_temp = *p_vli;
        *p_vli = (l_temp >> 1) | l_carry;
        l_carry = l_temp << 63;
    }
}

/* Computes p_result = p_left + p_right, returning carry. Can modify in place. */
static inline uint64_t vli_add(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right)
{
    uint64_t l_carry = 0;
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        uint64_t l_sum = p_left[i] + p_right[i] + l_carry;
        if(l_sum != p_left[i])
        { /* If l_sum == p_left[i] then p_right[i] + l_carry was either 0 (carry stays 0)
             or 2^64 (carry stays 1), so the existing carry is already correct. */
            l_carry = (l_sum < p_left[i]);
        }
        p_result[i] = l_sum;
    }
    return l_carry;
}

/* Computes p_result = p_left - p_right, returning borrow. Can modify in place. */
static inline uint64_t vli_sub(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right)
{
    uint64_t l_borrow = 0;
    uint i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        uint64_t l_diff = p_left[i] - p_right[i] - l_borrow;
        if(l_diff != p_left[i])
        { /* As in vli_add: if l_diff == p_left[i] the existing borrow is already correct. */
            l_borrow = (l_diff > p_left[i]);
        }
        p_result[i] = l_diff;
    }
    return l_borrow;
}
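
/* The multiply routines below use product scanning ("Comba") multiplication:
 * result digits are produced one at a time, accumulating all partial products
 * p_left[i] * p_right[k-i] for digit k in a 192-bit accumulator (r2:r01) so
 * carries never have to be propagated across the whole result. The squaring
 * variants roughly halve the work by computing each cross product
 * p_left[i] * p_left[k-i] (i != k-i) once and doubling it. */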
#if SUPPORTS_INT128

/* Computes p_result = p_left * p_right. */
static inline void vli_mult(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right)
{
    uint128_t r01 = 0;
    uint64_t r2 = 0;
    uint i, k;
    /* Compute each digit of p_result in sequence, maintaining the carries. */
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<NUM_ECC_DIGITS; ++i)
        {
            uint128_t l_product = (uint128_t)p_left[i] * p_right[k-i];
            r01 += l_product;
            r2 += (r01 < l_product);
        }
        p_result[k] = (uint64_t)r01;
        r01 = (r01 >> 64) | (((uint128_t)r2) << 64);
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = (uint64_t)r01;
}
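
/* In vli_square below, l_product can be as large as (2^64 - 1)^2, so doubling
 * a cross product can overflow 128 bits; the bit that would be shifted out is
 * captured into r2 first (r2 += l_product >> 127) before the doubling. */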
/* Computes p_result = p_left^2. */
static inline void vli_square(uint64_t *p_result, uint64_t *p_left)
{
    uint128_t r01 = 0;
    uint64_t r2 = 0;
    uint i, k;
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<=k-i; ++i)
        {
            uint128_t l_product = (uint128_t)p_left[i] * p_left[k-i];
            if(i < k-i)
            {
                r2 += l_product >> 127;
                l_product *= 2;
            }
            r01 += l_product;
            r2 += (r01 < l_product);
        }
        p_result[k] = (uint64_t)r01;
        r01 = (r01 >> 64) | (((uint128_t)r2) << 64);
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = (uint64_t)r01;
}

#else /* #if SUPPORTS_INT128 */
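
/* Without a native 128-bit type, emulate a 64x64 -> 128 bit multiply with four
 * 32x32 -> 64 bit partial products (schoolbook multiplication on 32-bit halves):
 * a*b = a1*b1*2^64 + (a0*b1 + a1*b0)*2^32 + a0*b0. */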
static inline uint128_t mul_64_64(uint64_t p_left, uint64_t p_right)
{
    uint128_t l_result;
    uint64_t a0 = p_left & 0xffffffffull;
    uint64_t a1 = p_left >> 32;
    uint64_t b0 = p_right & 0xffffffffull;
    uint64_t b1 = p_right >> 32;
    uint64_t m0 = a0 * b0;
    uint64_t m1 = a0 * b1;
    uint64_t m2 = a1 * b0;
    uint64_t m3 = a1 * b1;
    m2 += (m0 >> 32);
    m2 += m1;
    if(m2 < m1)
    { // overflow
        m3 += 0x100000000ull;
    }
    l_result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
    l_result.m_high = m3 + (m2 >> 32);
    return l_result;
}

static inline uint128_t add_128_128(uint128_t a, uint128_t b)
{
    uint128_t l_result;
    l_result.m_low = a.m_low + b.m_low;
    l_result.m_high = a.m_high + b.m_high + (l_result.m_low < a.m_low);
    return l_result;
}

static inline void vli_mult(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right)
{
    uint128_t r01 = {0, 0};
    uint64_t r2 = 0;
    uint i, k;
    /* Compute each digit of p_result in sequence, maintaining the carries. */
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<NUM_ECC_DIGITS; ++i)
        {
            uint128_t l_product = mul_64_64(p_left[i], p_right[k-i]);
            r01 = add_128_128(r01, l_product);
            r2 += (r01.m_high < l_product.m_high);
        }
        p_result[k] = r01.m_low;
        r01.m_low = r01.m_high;
        r01.m_high = r2;
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = r01.m_low;
}

static inline void vli_square(uint64_t *p_result, uint64_t *p_left)
{
    uint128_t r01 = {0, 0};
    uint64_t r2 = 0;
    uint i, k;
    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
    {
        uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
        for(i=l_min; i<=k && i<=k-i; ++i)
        {
            uint128_t l_product = mul_64_64(p_left[i], p_left[k-i]);
            if(i < k-i)
            {
                r2 += l_product.m_high >> 63;
                l_product.m_high = (l_product.m_high << 1) | (l_product.m_low >> 63);
                l_product.m_low <<= 1;
            }
            r01 = add_128_128(r01, l_product);
            r2 += (r01.m_high < l_product.m_high);
        }
        p_result[k] = r01.m_low;
        r01.m_low = r01.m_high;
        r01.m_high = r2;
        r2 = 0;
    }
    p_result[NUM_ECC_DIGITS*2 - 1] = r01.m_low;
}

#endif /* SUPPORTS_INT128 */

/* Computes p_result = (p_left + p_right) % p_mod.
   Assumes that p_left < p_mod and p_right < p_mod, p_result != p_mod. */
static inline void vli_modAdd(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right, uint64_t *p_mod)
{
    uint64_t l_carry = vli_add(p_result, p_left, p_right);
    if(l_carry || vli_cmp(p_result, p_mod) >= 0)
    { /* p_result > p_mod (p_result = p_mod + remainder), so subtract p_mod to get remainder. */
        vli_sub(p_result, p_result, p_mod);
    }
}

/* Computes p_result = (p_left - p_right) % p_mod.
   Assumes that p_left < p_mod and p_right < p_mod, p_result != p_mod. */
static inline void vli_modSub(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right, uint64_t *p_mod)
{
    uint64_t l_borrow = vli_sub(p_result, p_left, p_right);
    if(l_borrow)
    { /* In this case, p_result == -diff == (max int) - diff.
         Since -x % d == d - x, we can get the correct result from p_result + p_mod (with overflow). */
        vli_add(p_result, p_result, p_mod);
    }
}

//#elif ECC_CURVE == secp384r1
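
/* The reduction below is specific to secp384r1; the commented-out "#elif"
 * above and "#endif" below are remnants of the multi-curve easy-ecc original.
 * Since p = 2^384 - (2^128 + 2^96 - 2^32 + 1), we have
 * 2^384 = omega (mod p) with omega = 2^128 + 2^96 - 2^32 + 1, so a 768-bit
 * product c1*2^384 + c0 can be folded into c0 + omega*c1 without changing its
 * value mod p. omega_mult computes omega * p_right using only shifts, adds,
 * and one subtract; the result spans at most NUM_ECC_DIGITS + 3 digits. */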
static inline void omega_mult(uint64_t *p_result, uint64_t *p_right)
{
    uint64_t l_tmp[NUM_ECC_DIGITS];
    uint64_t l_carry, l_diff;
    /* Multiply by (2^128 + 2^96 - 2^32 + 1). */
    vli_set(p_result, p_right); /* 1 */
    l_carry = vli_lshift(l_tmp, p_right, 32);
    p_result[1 + NUM_ECC_DIGITS] = l_carry + vli_add(p_result + 1, p_result + 1, l_tmp); /* 2^96 + 1 */
    p_result[2 + NUM_ECC_DIGITS] = vli_add(p_result + 2, p_result + 2, p_right); /* 2^128 + 2^96 + 1 */
    l_carry += vli_sub(p_result, p_result, l_tmp); /* 2^128 + 2^96 - 2^32 + 1 */
    l_diff = p_result[NUM_ECC_DIGITS] - l_carry;
    if(l_diff > p_result[NUM_ECC_DIGITS])
    { /* Propagate borrow if necessary. */
        uint i;
        for(i = 1 + NUM_ECC_DIGITS; ; ++i)
        {
            --p_result[i];
            if(p_result[i] != (uint64_t)-1)
            {
                break;
            }
        }
    }
    p_result[NUM_ECC_DIGITS] = l_diff;
}

/* Computes p_result = p_product % curve_p
   see PDF "Comparing Elliptic Curve Cryptography and RSA on 8-bit CPUs"
   section "Curve-Specific Optimizations" */
static inline void vli_mmod_fast(uint64_t *p_result, uint64_t *p_product)
{
    uint64_t l_tmp[2*NUM_ECC_DIGITS];
    while(!vli_isZero(p_product + NUM_ECC_DIGITS)) /* While c1 != 0 */
    {
        uint64_t l_carry = 0;
        uint i;
        vli_clear(l_tmp);
        vli_clear(l_tmp + NUM_ECC_DIGITS);
        omega_mult(l_tmp, p_product + NUM_ECC_DIGITS); /* tmp = w * c1 */
        vli_clear(p_product + NUM_ECC_DIGITS); /* p = c0 */
        /* (c1, c0) = c0 + w * c1 */
        for(i=0; i<NUM_ECC_DIGITS+3; ++i)
        {
            uint64_t l_sum = p_product[i] + l_tmp[i] + l_carry;
            if(l_sum != p_product[i])
            {
                l_carry = (l_sum < p_product[i]);
            }
            p_product[i] = l_sum;
        }
    }
    while(vli_cmp(p_product, curve_p) > 0)
    {
        vli_sub(p_product, p_product, curve_p);
    }
    vli_set(p_result, p_product);
}

//#endif

/* Computes p_result = (p_left * p_right) % curve_p. */
static inline void vli_modMult_fast(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right)
{
    uint64_t l_product[2 * NUM_ECC_DIGITS];
    vli_mult(l_product, p_left, p_right);
    vli_mmod_fast(p_result, l_product);
}

/* Computes p_result = p_left^2 % curve_p. */
static inline void vli_modSquare_fast(uint64_t *p_result, uint64_t *p_left)
{
    uint64_t l_product[2 * NUM_ECC_DIGITS];
    vli_square(l_product, p_left);
    vli_mmod_fast(p_result, l_product);
}

#define EVEN(vli) (!(vli[0] & 1))

/* Computes p_result = (1 / p_input) % p_mod. All VLIs are the same size.
   See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
   https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf */
static inline void vli_modInv(uint64_t *p_result, uint64_t *p_input, uint64_t *p_mod)
{
    uint64_t a[NUM_ECC_DIGITS], b[NUM_ECC_DIGITS], u[NUM_ECC_DIGITS], v[NUM_ECC_DIGITS];
    uint64_t l_carry;
    int l_cmpResult;
    if(vli_isZero(p_input))
    {
        vli_clear(p_result);
        return;
    }
    vli_set(a, p_input);
    vli_set(b, p_mod);
    vli_clear(u);
    u[0] = 1;
    vli_clear(v);
    while((l_cmpResult = vli_cmp(a, b)) != 0)
    {
        l_carry = 0;
        if(EVEN(a))
        {
            vli_rshift1(a);
            if(!EVEN(u))
            {
                l_carry = vli_add(u, u, p_mod);
            }
            vli_rshift1(u);
            if(l_carry)
            {
                u[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
        else if(EVEN(b))
        {
            vli_rshift1(b);
            if(!EVEN(v))
            {
                l_carry = vli_add(v, v, p_mod);
            }
            vli_rshift1(v);
            if(l_carry)
            {
                v[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
        else if(l_cmpResult > 0)
        {
            vli_sub(a, a, b);
            vli_rshift1(a);
            if(vli_cmp(u, v) < 0)
            {
                vli_add(u, u, p_mod);
            }
            vli_sub(u, u, v);
            if(!EVEN(u))
            {
                l_carry = vli_add(u, u, p_mod);
            }
            vli_rshift1(u);
            if(l_carry)
            {
                u[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
        else
        {
            vli_sub(b, b, a);
            vli_rshift1(b);
            if(vli_cmp(v, u) < 0)
            {
                vli_add(v, v, p_mod);
            }
            vli_sub(v, v, u);
            if(!EVEN(v))
            {
                l_carry = vli_add(v, v, p_mod);
            }
            vli_rshift1(v);
            if(l_carry)
            {
                v[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
            }
        }
    }
    vli_set(p_result, u);
}

/* ------ Point operations ------ */

/* Returns 1 if p_point is the point at infinity, 0 otherwise. */
static inline int EccPoint_isZero(EccPoint *p_point)
{
    return (vli_isZero(p_point->x) && vli_isZero(p_point->y));
}

/* Point multiplication algorithm using Montgomery's ladder with co-Z coordinates.
   From http://eprint.iacr.org/2011/338.pdf */

/* Double in place */
static inline void EccPoint_double_jacobian(uint64_t *X1, uint64_t *Y1, uint64_t *Z1)
{
    /* t1 = X, t2 = Y, t3 = Z */
    uint64_t t4[NUM_ECC_DIGITS];
    uint64_t t5[NUM_ECC_DIGITS];
    if(vli_isZero(Z1))
    {
        return;
    }
    vli_modSquare_fast(t4, Y1);      /* t4 = y1^2 */
    vli_modMult_fast(t5, X1, t4);    /* t5 = x1*y1^2 = A */
    vli_modSquare_fast(t4, t4);      /* t4 = y1^4 */
    vli_modMult_fast(Y1, Y1, Z1);    /* t2 = y1*z1 = z3 */
    vli_modSquare_fast(Z1, Z1);      /* t3 = z1^2 */
    vli_modAdd(X1, X1, Z1, curve_p); /* t1 = x1 + z1^2 */
    vli_modAdd(Z1, Z1, Z1, curve_p); /* t3 = 2*z1^2 */
    vli_modSub(Z1, X1, Z1, curve_p); /* t3 = x1 - z1^2 */
    vli_modMult_fast(X1, X1, Z1);    /* t1 = x1^2 - z1^4 */
    vli_modAdd(Z1, X1, X1, curve_p); /* t3 = 2*(x1^2 - z1^4) */
    vli_modAdd(X1, X1, Z1, curve_p); /* t1 = 3*(x1^2 - z1^4) */
    if(vli_testBit(X1, 0))
    {
        uint64_t l_carry = vli_add(X1, X1, curve_p);
        vli_rshift1(X1);
        X1[NUM_ECC_DIGITS-1] |= l_carry << 63;
    }
    else
    {
        vli_rshift1(X1);
    }
    /* t1 = 3/2*(x1^2 - z1^4) = B */
    vli_modSquare_fast(Z1, X1);      /* t3 = B^2 */
    vli_modSub(Z1, Z1, t5, curve_p); /* t3 = B^2 - A */
    vli_modSub(Z1, Z1, t5, curve_p); /* t3 = B^2 - 2A = x3 */
    vli_modSub(t5, t5, Z1, curve_p); /* t5 = A - x3 */
    vli_modMult_fast(X1, X1, t5);    /* t1 = B * (A - x3) */
    vli_modSub(t4, X1, t4, curve_p); /* t4 = B * (A - x3) - y1^4 = y3 */
    vli_set(X1, Z1);
    vli_set(Z1, Y1);
    vli_set(Y1, t4);
}

/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
static inline void apply_z(uint64_t *X1, uint64_t *Y1, uint64_t *Z)
{
    uint64_t t1[NUM_ECC_DIGITS];
    vli_modSquare_fast(t1, Z);    /* z^2 */
    vli_modMult_fast(X1, X1, t1); /* x1 * z^2 */
    vli_modMult_fast(t1, t1, Z);  /* z^3 */
    vli_modMult_fast(Y1, Y1, t1); /* y1 * z^3 */
}

/* P = (x1, y1) => 2P, (x2, y2) => P' */
static inline void XYcZ_initial_double(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2, uint64_t *p_initialZ)
{
    uint64_t z[NUM_ECC_DIGITS];
    vli_set(X2, X1);
    vli_set(Y2, Y1);
    vli_clear(z);
    z[0] = 1;
    if(p_initialZ)
    {
        vli_set(z, p_initialZ);
    }
    apply_z(X1, Y1, z);
    EccPoint_double_jacobian(X1, Y1, z);
    apply_z(X2, Y2, z);
}

/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
   Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
   or P => P', Q => P + Q
*/
static inline void XYcZ_add(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2)
{
    /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
    uint64_t t5[NUM_ECC_DIGITS];
    vli_modSub(t5, X2, X1, curve_p); /* t5 = x2 - x1 */
    vli_modSquare_fast(t5, t5);      /* t5 = (x2 - x1)^2 = A */
    vli_modMult_fast(X1, X1, t5);    /* t1 = x1*A = B */
    vli_modMult_fast(X2, X2, t5);    /* t3 = x2*A = C */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y2 - y1 */
    vli_modSquare_fast(t5, Y2);      /* t5 = (y2 - y1)^2 = D */
    vli_modSub(t5, t5, X1, curve_p); /* t5 = D - B */
    vli_modSub(t5, t5, X2, curve_p); /* t5 = D - B - C = x3 */
    vli_modSub(X2, X2, X1, curve_p); /* t3 = C - B */
    vli_modMult_fast(Y1, Y1, X2);    /* t2 = y1*(C - B) */
    vli_modSub(X2, X1, t5, curve_p); /* t3 = B - x3 */
    vli_modMult_fast(Y2, Y2, X2);    /* t4 = (y2 - y1)*(B - x3) */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y3 */
    vli_set(X2, t5);
}

/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
   Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
   or P => P - Q, Q => P + Q
*/
static inline void XYcZ_addC(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2)
{
    /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
    uint64_t t5[NUM_ECC_DIGITS];
    uint64_t t6[NUM_ECC_DIGITS];
    uint64_t t7[NUM_ECC_DIGITS];
    vli_modSub(t5, X2, X1, curve_p); /* t5 = x2 - x1 */
    vli_modSquare_fast(t5, t5);      /* t5 = (x2 - x1)^2 = A */
    vli_modMult_fast(X1, X1, t5);    /* t1 = x1*A = B */
    vli_modMult_fast(X2, X2, t5);    /* t3 = x2*A = C */
    vli_modAdd(t5, Y2, Y1, curve_p); /* t5 = y2 + y1 */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y2 - y1 */
    vli_modSub(t6, X2, X1, curve_p); /* t6 = C - B */
    vli_modMult_fast(Y1, Y1, t6);    /* t2 = y1 * (C - B) */
    vli_modAdd(t6, X1, X2, curve_p); /* t6 = B + C */
    vli_modSquare_fast(X2, Y2);      /* t3 = (y2 - y1)^2 */
    vli_modSub(X2, X2, t6, curve_p); /* t3 = x3 */
    vli_modSub(t7, X1, X2, curve_p); /* t7 = B - x3 */
    vli_modMult_fast(Y2, Y2, t7);    /* t4 = (y2 - y1)*(B - x3) */
    vli_modSub(Y2, Y2, Y1, curve_p); /* t4 = y3 */
    vli_modSquare_fast(t7, t5);      /* t7 = (y2 + y1)^2 = F */
    vli_modSub(t7, t7, t6, curve_p); /* t7 = x3' */
    vli_modSub(t6, t7, X1, curve_p); /* t6 = x3' - B */
    vli_modMult_fast(t6, t6, t5);    /* t6 = (y2 + y1)*(x3' - B) */
    vli_modSub(Y1, t6, Y1, curve_p); /* t2 = y3' */
    vli_set(X1, t7);
}
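
/* Scalar multiplication via the co-Z Montgomery ladder: R0 and R1 always
 * differ by the input point P, and each scalar bit performs exactly one
 * XYcZ_addC and one XYcZ_add regardless of the bit's value, giving a regular
 * operation sequence that helps resist timing side channels. The Z coordinate
 * is kept implicit and recovered only once, at the end; an optional
 * p_initialZ randomizes the projective representation. */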
static inline void EccPoint_mult(EccPoint *p_result, EccPoint *p_point, uint64_t *p_scalar, uint64_t *p_initialZ)
{
    /* R0 and R1 */
    uint64_t Rx[2][NUM_ECC_DIGITS];
    uint64_t Ry[2][NUM_ECC_DIGITS];
    uint64_t z[NUM_ECC_DIGITS];
    int i, nb;
    vli_set(Rx[1], p_point->x);
    vli_set(Ry[1], p_point->y);
    XYcZ_initial_double(Rx[1], Ry[1], Rx[0], Ry[0], p_initialZ);
    for(i = vli_numBits(p_scalar) - 2; i > 0; --i)
    {
        nb = !vli_testBit(p_scalar, i);
        XYcZ_addC(Rx[1-nb], Ry[1-nb], Rx[nb], Ry[nb]);
        XYcZ_add(Rx[nb], Ry[nb], Rx[1-nb], Ry[1-nb]);
    }
    nb = !vli_testBit(p_scalar, 0);
    XYcZ_addC(Rx[1-nb], Ry[1-nb], Rx[nb], Ry[nb]);
    /* Find final 1/Z value. */
    vli_modSub(z, Rx[1], Rx[0], curve_p); /* X1 - X0 */
    vli_modMult_fast(z, z, Ry[1-nb]);     /* Yb * (X1 - X0) */
    vli_modMult_fast(z, z, p_point->x);   /* xP * Yb * (X1 - X0) */
    vli_modInv(z, z, curve_p);            /* 1 / (xP * Yb * (X1 - X0)) */
    vli_modMult_fast(z, z, p_point->y);   /* yP / (xP * Yb * (X1 - X0)) */
    vli_modMult_fast(z, z, Rx[1-nb]);     /* Xb * yP / (xP * Yb * (X1 - X0)) */
    /* End 1/Z calculation */
    XYcZ_add(Rx[nb], Ry[nb], Rx[1-nb], Ry[1-nb]);
    apply_z(Rx[0], Ry[0], z);
    vli_set(p_result->x, Rx[0]);
    vli_set(p_result->y, Ry[0]);
}

/* Convert a big-endian byte string to little-endian 64-bit limbs. */
static inline void ecc_bytes2native(uint64_t p_native[NUM_ECC_DIGITS], const uint8_t p_bytes[ECC_BYTES])
{
    unsigned i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        const uint8_t *p_digit = p_bytes + 8 * (NUM_ECC_DIGITS - 1 - i);
        p_native[i] = ((uint64_t)p_digit[0] << 56) | ((uint64_t)p_digit[1] << 48) | ((uint64_t)p_digit[2] << 40) | ((uint64_t)p_digit[3] << 32) |
                      ((uint64_t)p_digit[4] << 24) | ((uint64_t)p_digit[5] << 16) | ((uint64_t)p_digit[6] << 8) | (uint64_t)p_digit[7];
    }
}

/* Convert little-endian 64-bit limbs back to a big-endian byte string. */
static inline void ecc_native2bytes(uint8_t p_bytes[ECC_BYTES], const uint64_t p_native[NUM_ECC_DIGITS])
{
    unsigned i;
    for(i=0; i<NUM_ECC_DIGITS; ++i)
    {
        uint8_t *p_digit = p_bytes + 8 * (NUM_ECC_DIGITS - 1 - i);
        p_digit[0] = p_native[i] >> 56;
        p_digit[1] = p_native[i] >> 48;
        p_digit[2] = p_native[i] >> 40;
        p_digit[3] = p_native[i] >> 32;
        p_digit[4] = p_native[i] >> 24;
        p_digit[5] = p_native[i] >> 16;
        p_digit[6] = p_native[i] >> 8;
        p_digit[7] = p_native[i];
    }
}
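
/* Why a^((p+1)/4) works in mod_sqrt below: for p == 3 (mod 4), if a is a
 * quadratic residue then by Euler's criterion a^((p-1)/2) == 1 (mod p), so
 * (a^((p+1)/4))^2 == a^((p+1)/2) == a * a^((p-1)/2) == a (mod p). */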
/* Compute a = sqrt(a) (mod curve_p). */
static inline void mod_sqrt(uint64_t a[NUM_ECC_DIGITS])
{
    unsigned i;
    uint64_t p1[NUM_ECC_DIGITS] = {1};
    uint64_t l_result[NUM_ECC_DIGITS] = {1};
    /* Since curve_p == 3 (mod 4) for all supported curves, we can
       compute sqrt(a) = a^((curve_p + 1) / 4) (mod curve_p). */
    vli_add(p1, curve_p, p1); /* p1 = curve_p + 1 */
    for(i = vli_numBits(p1) - 1; i > 1; --i)
    {
        vli_modSquare_fast(l_result, l_result);
        if(vli_testBit(p1, i))
        {
            vli_modMult_fast(l_result, l_result, a);
        }
    }
    vli_set(a, l_result);
}
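
/* Compressed public keys use the SEC 1 point format: a one-byte prefix
 * (0x02 if y is even, 0x03 if y is odd) followed by the big-endian X
 * coordinate. Decompression solves y^2 = x^3 - 3x + b for y via mod_sqrt,
 * then picks y or p - y so that its parity matches the prefix bit. */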
static inline void ecc_point_decompress(EccPoint *p_point, const uint8_t p_compressed[ECC_BYTES+1])
{
    uint64_t _3[NUM_ECC_DIGITS] = {3}; /* -a = 3 */
    ecc_bytes2native(p_point->x, p_compressed+1);
    vli_modSquare_fast(p_point->y, p_point->x);           /* y = x^2 */
    vli_modSub(p_point->y, p_point->y, _3, curve_p);      /* y = x^2 - 3 */
    vli_modMult_fast(p_point->y, p_point->y, p_point->x); /* y = x^3 - 3x */
    vli_modAdd(p_point->y, p_point->y, curve_b, curve_p); /* y = x^3 - 3x + b */
    mod_sqrt(p_point->y);
    if((p_point->y[0] & 0x01) != (p_compressed[0] & 0x01))
    {
        vli_sub(p_point->y, curve_p, p_point->y);
    }
}

static inline int ecc_make_key(uint8_t p_publicKey[ECC_BYTES+1], uint8_t p_privateKey[ECC_BYTES])
{
    uint64_t l_private[NUM_ECC_DIGITS];
    EccPoint l_public;
    unsigned l_tries = 0;
    do
    {
        if(!getRandomNumber(l_private) || (l_tries++ >= MAX_TRIES))
        {
            return 0;
        }
        if(vli_isZero(l_private))
        {
            continue;
        }
        /* Make sure the private key is in the range [1, n-1].
           For the supported curves, n is always large enough that we only need to subtract once at most. */
        if(vli_cmp(curve_n, l_private) != 1)
        {
            vli_sub(l_private, l_private, curve_n);
        }
        EccPoint_mult(&l_public, &curve_G, l_private, NULL);
    } while(EccPoint_isZero(&l_public));
    ecc_native2bytes(p_privateKey, l_private);
    ecc_native2bytes(p_publicKey + 1, l_public.x);
    p_publicKey[0] = 2 + (l_public.y[0] & 0x01);
    return 1;
}
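
/* ECDH: the shared secret is the X coordinate of (our private scalar) *
 * (their public point). The fresh random value passed as the initial Z
 * coordinate randomizes the projective representation of the point during
 * the ladder (coordinate blinding), a common side-channel countermeasure. */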
static inline int ecdh_shared_secret(const uint8_t p_publicKey[ECC_BYTES+1], const uint8_t p_privateKey[ECC_BYTES], uint8_t p_secret[ECC_BYTES])
{
    EccPoint l_public;
    uint64_t l_private[NUM_ECC_DIGITS];
    uint64_t l_random[NUM_ECC_DIGITS];
    if(!getRandomNumber(l_random))
    {
        return 0;
    }
    ecc_point_decompress(&l_public, p_publicKey);
    ecc_bytes2native(l_private, p_privateKey);
    EccPoint l_product;
    EccPoint_mult(&l_product, &l_public, l_private, l_random);
    ecc_native2bytes(p_secret, l_product.x);
    return !EccPoint_isZero(&l_product);
}

/* -------- ECDSA code -------- */
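
/* Unlike vli_modMult_fast, the routine below reduces modulo an arbitrary
 * modulus (it is used with the group order curve_n, where the curve-specific
 * fast reduction does not apply). It performs plain binary long division:
 * the modulus is shifted left so its top bit lines up with the product's top
 * bit, then repeatedly compared, conditionally subtracted, and shifted right
 * one bit at a time until the remainder is fully reduced. */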
/* Computes p_result = (p_left * p_right) % p_mod. */
static inline void vli_modMult(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right, uint64_t *p_mod)
{
    uint64_t l_product[2 * NUM_ECC_DIGITS];
    uint64_t l_modMultiple[2 * NUM_ECC_DIGITS];
    uint l_digitShift, l_bitShift;
    uint l_productBits;
    uint l_modBits = vli_numBits(p_mod);
    vli_mult(l_product, p_left, p_right);
    l_productBits = vli_numBits(l_product + NUM_ECC_DIGITS);
    if(l_productBits)
    {
        l_productBits += NUM_ECC_DIGITS * 64;
    }
    else
    {
        l_productBits = vli_numBits(l_product);
    }
    if(l_productBits < l_modBits)
    { /* l_product < p_mod. */
        vli_set(p_result, l_product);
        return;
    }
    /* Shift p_mod by (l_productBits - l_modBits). This multiplies p_mod by the largest
       power of two possible while keeping its bit length no greater than that of l_product. */
    vli_clear(l_modMultiple);
    vli_clear(l_modMultiple + NUM_ECC_DIGITS);
    l_digitShift = (l_productBits - l_modBits) / 64;
    l_bitShift = (l_productBits - l_modBits) % 64;
    if(l_bitShift)
    {
        l_modMultiple[l_digitShift + NUM_ECC_DIGITS] = vli_lshift(l_modMultiple + l_digitShift, p_mod, l_bitShift);
    }
    else
    {
        vli_set(l_modMultiple + l_digitShift, p_mod);
    }
    /* Subtract all multiples of p_mod to get the remainder. */
    vli_clear(p_result);
    p_result[0] = 1; /* Use p_result as a temp var to store 1 (for subtraction) */
    while(l_productBits > NUM_ECC_DIGITS * 64 || vli_cmp(l_modMultiple, p_mod) >= 0)
    {
        int l_cmp = vli_cmp(l_modMultiple + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS);
        if(l_cmp < 0 || (l_cmp == 0 && vli_cmp(l_modMultiple, l_product) <= 0))
        {
            if(vli_sub(l_product, l_product, l_modMultiple))
            { /* borrow */
                vli_sub(l_product + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS, p_result);
            }
            vli_sub(l_product + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS, l_modMultiple + NUM_ECC_DIGITS);
        }
        uint64_t l_carry = (l_modMultiple[NUM_ECC_DIGITS] & 0x01) << 63;
        vli_rshift1(l_modMultiple + NUM_ECC_DIGITS);
        vli_rshift1(l_modMultiple);
        l_modMultiple[NUM_ECC_DIGITS-1] |= l_carry;
        --l_productBits;
    }
    vli_set(p_result, l_product);
}

static inline uint umax(uint a, uint b)
{
    return (a > b ? a : b);
}
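
/* ECDSA signing, following the textbook equations: pick an ephemeral k in
 * [1, n-1], compute r = (k * G).x mod n (retrying if r == 0), then
 * s = k^-1 * (e + r * d) mod n, where e is the hash and d the private key.
 * The signature is the pair (r, s). Note that ECDSA's security depends
 * critically on k being unpredictable and never reused. */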
static inline int ecdsa_sign(const uint8_t p_privateKey[ECC_BYTES], const uint8_t p_hash[ECC_BYTES], uint8_t p_signature[ECC_BYTES*2])
{
    uint64_t k[NUM_ECC_DIGITS];
    uint64_t l_tmp[NUM_ECC_DIGITS];
    uint64_t l_s[NUM_ECC_DIGITS];
    EccPoint p;
    unsigned l_tries = 0;
    do
    {
        if(!getRandomNumber(k) || (l_tries++ >= MAX_TRIES))
        {
            return 0;
        }
        if(vli_isZero(k))
        {
            continue;
        }
        if(vli_cmp(curve_n, k) != 1)
        {
            vli_sub(k, k, curve_n);
        }
        /* tmp = k * G */
        EccPoint_mult(&p, &curve_G, k, NULL);
        /* r = x1 (mod n) */
        if(vli_cmp(curve_n, p.x) != 1)
        {
            vli_sub(p.x, p.x, curve_n);
        }
    } while(vli_isZero(p.x));
    ecc_native2bytes(p_signature, p.x);
    ecc_bytes2native(l_tmp, p_privateKey);
    vli_modMult(l_s, p.x, l_tmp, curve_n); /* s = r*d */
    ecc_bytes2native(l_tmp, p_hash);
    vli_modAdd(l_s, l_tmp, l_s, curve_n);  /* s = e + r*d */
    vli_modInv(k, k, curve_n);             /* k = 1 / k */
    vli_modMult(l_s, l_s, k, curve_n);     /* s = (e + r*d) / k */
    ecc_native2bytes(p_signature + ECC_BYTES, l_s);
    return 1;
}
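
/* ECDSA verification: with u1 = e * s^-1 mod n and u2 = r * s^-1 mod n, the
 * signature is valid iff (u1 * G + u2 * Q).x == r (mod n). The double scalar
 * multiplication uses Shamir's trick: G, Q, and G + Q are precomputed, and a
 * single ladder processes one bit of u1 and u2 per doubling, adding whichever
 * of the three points the current bit pair selects. */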
static inline int ecdsa_verify(const uint8_t p_publicKey[ECC_BYTES+1], const uint8_t p_hash[ECC_BYTES], const uint8_t p_signature[ECC_BYTES*2])
{
    uint64_t u1[NUM_ECC_DIGITS], u2[NUM_ECC_DIGITS];
    uint64_t z[NUM_ECC_DIGITS];
    EccPoint l_public, l_sum;
    uint64_t rx[NUM_ECC_DIGITS];
    uint64_t ry[NUM_ECC_DIGITS];
    uint64_t tx[NUM_ECC_DIGITS];
    uint64_t ty[NUM_ECC_DIGITS];
    uint64_t tz[NUM_ECC_DIGITS];
    uint64_t l_r[NUM_ECC_DIGITS], l_s[NUM_ECC_DIGITS];
    ecc_point_decompress(&l_public, p_publicKey);
    ecc_bytes2native(l_r, p_signature);
    ecc_bytes2native(l_s, p_signature + ECC_BYTES);
    if(vli_isZero(l_r) || vli_isZero(l_s))
    { /* r, s must not be 0. */
        return 0;
    }
    if(vli_cmp(curve_n, l_r) != 1 || vli_cmp(curve_n, l_s) != 1)
    { /* r, s must be < n. */
        return 0;
    }
    /* Calculate u1 and u2. */
    vli_modInv(z, l_s, curve_n);      /* Z = s^-1 */
    ecc_bytes2native(u1, p_hash);
    vli_modMult(u1, u1, z, curve_n);  /* u1 = e/s */
    vli_modMult(u2, l_r, z, curve_n); /* u2 = r/s */
    /* Calculate l_sum = G + Q. */
    vli_set(l_sum.x, l_public.x);
    vli_set(l_sum.y, l_public.y);
    vli_set(tx, curve_G.x);
    vli_set(ty, curve_G.y);
    vli_modSub(z, l_sum.x, tx, curve_p); /* Z = x2 - x1 */
    XYcZ_add(tx, ty, l_sum.x, l_sum.y);
    vli_modInv(z, z, curve_p); /* Z = 1/Z */
    apply_z(l_sum.x, l_sum.y, z);
    /* Use Shamir's trick to calculate u1*G + u2*Q. */
    EccPoint *l_points[4] = {NULL, &curve_G, &l_public, &l_sum};
    uint l_numBits = umax(vli_numBits(u1), vli_numBits(u2));
    EccPoint *l_point = l_points[(!!vli_testBit(u1, l_numBits-1)) | ((!!vli_testBit(u2, l_numBits-1)) << 1)];
    vli_set(rx, l_point->x);
    vli_set(ry, l_point->y);
    vli_clear(z);
    z[0] = 1;
    int i;
    for(i = l_numBits - 2; i >= 0; --i)
    {
        EccPoint_double_jacobian(rx, ry, z);
        int l_index = (!!vli_testBit(u1, i)) | ((!!vli_testBit(u2, i)) << 1);
        EccPoint *l_point = l_points[l_index];
        if(l_point)
        {
            vli_set(tx, l_point->x);
            vli_set(ty, l_point->y);
            apply_z(tx, ty, z);
            vli_modSub(tz, rx, tx, curve_p); /* Z = x2 - x1 */
            XYcZ_add(tx, ty, rx, ry);
            vli_modMult_fast(z, z, tz);
        }
    }
    vli_modInv(z, z, curve_p); /* Z = 1/Z */
    apply_z(rx, ry, z);
    /* v = x1 (mod n) */
    if(vli_cmp(curve_n, rx) != 1)
    {
        vli_sub(rx, rx, curve_n);
    }
    /* Accept only if v == r. */
    return (vli_cmp(rx, l_r) == 0);
}

//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////

} // anonymous namespace

void ECC384GenerateKey(uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE],uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE])
{
    if (!ecc_make_key(pub,priv)) {
        fprintf(stderr,"FATAL: ecc_make_key() failed!" ZT_EOL_S);
        abort();
    }
}

void ECC384ECDSASign(const uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE],const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE],uint8_t sig[ZT_ECC384_SIGNATURE_SIZE])
{
    if (!ecdsa_sign(priv,hash,sig)) {
        fprintf(stderr,"FATAL: ecdsa_sign() failed!" ZT_EOL_S);
        abort();
    }
}

bool ECC384ECDSAVerify(const uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE],const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE],const uint8_t sig[ZT_ECC384_SIGNATURE_SIZE])
{
    return (ecdsa_verify(pub,hash,sig) != 0);
}

bool ECC384ECDH(const uint8_t theirPub[ZT_ECC384_PUBLIC_KEY_SIZE],const uint8_t ourPriv[ZT_ECC384_PRIVATE_KEY_SIZE],uint8_t secret[ZT_ECC384_SHARED_SECRET_SIZE])
{
    return (ecdh_shared_secret(theirPub,ourPriv,secret) != 0);
}
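
/* Minimal usage sketch for the public API above (assumes the caller supplies
 * a 48-byte message hash, e.g. SHA-384, and the size constants from ECC384.hpp):
 *
 *   uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE], priv[ZT_ECC384_PRIVATE_KEY_SIZE];
 *   ECC384GenerateKey(pub, priv);
 *
 *   uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE]; // hash of the message
 *   uint8_t sig[ZT_ECC384_SIGNATURE_SIZE];
 *   ECC384ECDSASign(priv, hash, sig);
 *   bool ok = ECC384ECDSAVerify(pub, hash, sig);
 *
 *   uint8_t secret[ZT_ECC384_SHARED_SECRET_SIZE];
 *   bool agreed = ECC384ECDH(pub, priv, secret); // their public, our private
 */
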
} // namespace ZeroTier